/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include <math.h>
#include <sys/ioctl.h>
#include <sys/utsname.h>
#include <sys/syscall.h>
#include <sys/resource.h>
#include <sys/time.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include "standard-headers/asm-x86/kvm_para.h"
#include "hw/xen/interface/arch-x86/cpuid.h"

#include "cpu.h"
#include "host-cpu.h"
#include "vmsr_energy.h"
#include "system/system.h"
#include "system/hw_accel.h"
#include "system/kvm_int.h"
#include "system/runstate.h"
#include "kvm_i386.h"
#include "../confidential-guest.h"
#include "sev.h"
#include "tdx.h"
#include "xen-emu.h"
#include "hyperv.h"
#include "hyperv-proto.h"

#include "gdbstub/enums.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/ratelimit.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/memalign.h"
#include "hw/i386/x86.h"
#include "hw/i386/kvm/xen_evtchn.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/intel_iommu.h"
#include "hw/i386/topology.h"
#include "hw/i386/x86-iommu.h"
#include "hw/i386/e820_memory_layout.h"

#include "hw/xen/xen.h"

#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "migration/blocker.h"
#include "exec/memattrs.h"
#include "exec/target_page.h"
#include "trace.h"

#include CONFIG_DEVICES

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/*
 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
 * In order to use vm86 mode, an EPT identity map and a TSS are needed.
 * Since these must be part of guest physical memory, we need to allocate
 * them, both by setting their start addresses in the kernel and by
 * creating a corresponding e820 entry. We need 4 pages before the BIOS,
 * so this value allows up to 16M BIOSes.
 */
#define KVM_IDENTITY_BASE 0xfeffc000
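/*
 * A sketch of the arithmetic behind the comment above: the 4 pages cover
 * 0xfeffc000..0xfeffffff, so the BIOS region can start at 0xff000000 and
 * the space from there up to the 4 GiB boundary is exactly 16 MiB.
 */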

/* From arch/x86/kvm/lapic.h */
#define KVM_APIC_BUS_CYCLE_NS       1
#define KVM_APIC_BUS_FREQUENCY      (1000000000ULL / KVM_APIC_BUS_CYCLE_NS)
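/* With a 1 ns bus cycle this works out to a 1 GHz APIC bus clock. */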

/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
 * 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096
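/*
 * Sanity check of that claim, assuming the uapi struct layouts (struct
 * kvm_msrs is two __u32 fields, struct kvm_msr_entry is two __u32 plus
 * a __u64): 8 + 255 * 16 = 4088 <= 4096.
 */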

typedef bool QEMURDMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t *val);
typedef bool QEMUWRMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t val);
typedef struct {
    uint32_t msr;
    QEMURDMSRHandler *rdmsr;
    QEMUWRMSRHandler *wrmsr;
} KVMMSRHandlers;

static void kvm_init_msrs(X86CPU *cpu);
static int kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr,
                          QEMUWRMSRHandler *wrmsr);

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_INFO(SIGNAL_MSI),
    KVM_CAP_INFO(IRQ_ROUTING),
    KVM_CAP_INFO(DEBUGREGS),
    KVM_CAP_INFO(XSAVE),
    KVM_CAP_INFO(VCPU_EVENTS),
    KVM_CAP_INFO(X86_ROBUST_SINGLESTEP),
    KVM_CAP_INFO(MCE),
    KVM_CAP_INFO(ADJUST_CLOCK),
    KVM_CAP_INFO(SET_IDENTITY_MAP_ADDR),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool hv_vpindex_settable;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_hv_frequencies;
static bool has_msr_hv_reenlightenment;
static bool has_msr_hv_syndbg_options;
static bool has_msr_xss;
static bool has_msr_umwait;
static bool has_msr_spec_ctrl;
static bool has_tsc_scale_msr;
static bool has_msr_tsx_ctrl;
static bool has_msr_virt_ssbd;
static bool has_msr_smi_count;
static bool has_msr_arch_capabs;
static bool has_msr_core_capabs;
static bool has_msr_vmx_vmfunc;
static bool has_msr_ucode_rev;
static bool has_msr_vmx_procbased_ctls2;
static bool has_msr_perf_capabs;
static bool has_msr_pkrs;
static bool has_msr_hwcr;

static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
static uint32_t num_architectural_pmu_fixed_counters;

static int has_xsave2;
static int has_xcrs;
static int has_sregs2;
static int has_exception_payload;
static int has_triple_fault_event;

static bool has_msr_mcg_ext_ctl;

static struct kvm_cpuid2 *cpuid_cache;
static struct kvm_cpuid2 *hv_cpuid_cache;
static struct kvm_msr_list *kvm_feature_msrs;

static KVMMSRHandlers msr_handlers[KVM_MSR_FILTER_MAX_RANGES];

#define BUS_LOCK_SLICE_TIME 1000000000ULL /* ns */
static RateLimit bus_lock_ratelimit_ctrl;
static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value);

static const char *vm_type_name[] = {
    [KVM_X86_DEFAULT_VM] = "default",
    [KVM_X86_SEV_VM] = "SEV",
    [KVM_X86_SEV_ES_VM] = "SEV-ES",
    [KVM_X86_SNP_VM] = "SEV-SNP",
    [KVM_X86_TDX_VM] = "TDX",
};

bool kvm_is_vm_type_supported(int type)
{
    uint32_t machine_types;

    /*
     * Old KVM doesn't support KVM_CAP_VM_TYPES, but KVM_X86_DEFAULT_VM
     * is always supported.
     */
    if (type == KVM_X86_DEFAULT_VM) {
        return true;
    }

    machine_types = kvm_check_extension(KVM_STATE(current_machine->accelerator),
                                        KVM_CAP_VM_TYPES);
    return !!(machine_types & BIT(type));
}

int kvm_get_vm_type(MachineState *ms)
{
    int kvm_type = KVM_X86_DEFAULT_VM;

    if (ms->cgs) {
        if (!object_dynamic_cast(OBJECT(ms->cgs), TYPE_X86_CONFIDENTIAL_GUEST)) {
            error_report("configuration type %s not supported for x86 guests",
                         object_get_typename(OBJECT(ms->cgs)));
            exit(1);
        }
        kvm_type = x86_confidential_guest_kvm_type(
            X86_CONFIDENTIAL_GUEST(ms->cgs));
    }

    if (!kvm_is_vm_type_supported(kvm_type)) {
        error_report("vm-type %s not supported by KVM", vm_type_name[kvm_type]);
        exit(1);
    }

    return kvm_type;
}

bool kvm_enable_hypercall(uint64_t enable_mask)
{
    KVMState *s = KVM_STATE(current_accel());

    return !kvm_vm_enable_cap(s, KVM_CAP_EXIT_HYPERCALL, 0, enable_mask);
}

bool kvm_has_smm(void)
{
    return kvm_vm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

bool kvm_has_adjust_clock_stable(void)
{
    int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);

    return (ret & KVM_CLOCK_TSC_STABLE);
}

bool kvm_has_exception_payload(void)
{
    return has_exception_payload;
}

static bool kvm_x2apic_api_set_flags(uint64_t flags)
{
    KVMState *s = KVM_STATE(current_accel());

    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
}

#define MEMORIZE(fn, _result) \
    ({ \
        static bool _memorized; \
        \
        if (_memorized) { \
            return _result; \
        } \
        _memorized = true; \
        _result = fn; \
    })
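/*
 * Note the macro expands to a GCC statement expression whose embedded
 * "return" leaves the *calling* function: the first invocation runs fn
 * and caches its value in _result, later invocations return the cached
 * value without evaluating fn again (see kvm_enable_x2apic() below).
 */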

static bool has_x2apic_api;

bool kvm_has_x2apic_api(void)
{
    return has_x2apic_api;
}

bool kvm_enable_x2apic(void)
{
    return MEMORIZE(
             kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
                                      KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
             has_x2apic_api);
}

bool kvm_hv_vpindex_settable(void)
{
    return hv_vpindex_settable;
}

static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t value;
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    env->tsc_valid = !runstate_is_running();

    ret = kvm_get_one_msr(cpu, MSR_IA32_TSC, &value);
    if (ret < 0) {
        return ret;
    }

    env->tsc = value;
    return 0;
}

static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_get_tsc(cpu);
}

void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled() && !is_tdx_vm()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
        }
    }
}

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    if (cpuid_cache != NULL) {
        return cpuid_cache;
    }
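    /*
     * try_get_cpuid() returns NULL when the table was (or may have been)
     * truncated, without telling us the required size, so grow the
     * buffer geometrically until the ioctl succeeds.
     */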
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    cpuid_cache = cpuid;
    return cpuid;
}

static bool host_tsx_broken(void)
{
    int family, model, stepping;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_cpu_vendor_fms(vendor, &family, &model, &stepping);

    /* Check if we are running on a Haswell host known to have broken TSX */
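    /*
     * As an assumption from Intel's published family-6 model numbers:
     * model 60 is Haswell desktop, 69/70 are Haswell-ULT/GT3e, and
     * model 63 (Haswell-E/EP) has working TSX from stepping 4 onwards.
     */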
    return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
           (family == 6) &&
           ((model == 63 && stepping < 4) ||
            model == 60 || model == 69 || model == 70);
}

/* Returns the value for a specific register on the cpuid entry
 */
uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find matching entry for function/index on kvm_cpuid2 struct
 */
struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                          uint32_t function,
                                          uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}

uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx, unused;
    uint64_t bitmask;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
        /* KVM never reports CPUID_HT but QEMU can support it when vcpus > 1 */
        ret |= CPUID_HT;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
                kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }

        if (enable_cpu_pm) {
            int disable_exits = kvm_check_extension(s,
                                                    KVM_CAP_X86_DISABLE_EXITS);

            if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
                ret |= CPUID_EXT_MONITOR;
            }
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 7 && index == 0 && reg == R_EBX) {
        /* Not new instructions, just an optimization.  */
        uint32_t ebx;
        host_cpuid(7, 0, &unused, &ebx, &unused, &unused);
        ret |= ebx & CPUID_7_0_EBX_ERMS;

        if (host_tsx_broken()) {
            ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
        }
    } else if (function == 7 && index == 0 && reg == R_EDX) {
        /* Not new instructions, just an optimization.  */
        uint32_t edx;
        host_cpuid(7, 0, &unused, &unused, &unused, &edx);
        ret |= edx & CPUID_7_0_EDX_FSRM;

        /*
         * Linux v4.17-v4.20 incorrectly returned ARCH_CAPABILITIES on SVM
         * hosts.  We can detect the bug by checking if
         * MSR_IA32_ARCH_CAPABILITIES is returned by KVM_GET_MSR_INDEX_LIST.
         */
        if (!has_msr_arch_capabs) {
            ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
        }
    } else if (function == 7 && index == 1 && reg == R_EAX) {
        /* Not new instructions, just an optimization.  */
        uint32_t eax;
        host_cpuid(7, 1, &eax, &unused, &unused, &unused);
        ret |= eax & (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_FSRC);
    } else if (function == 7 && index == 2 && reg == R_EDX) {
        uint32_t edx;
        host_cpuid(7, 2, &unused, &unused, &unused, &edx);
        ret |= edx & CPUID_7_2_EDX_MCDT_NO;
    } else if (function == 0xd && index == 0 &&
               (reg == R_EAX || reg == R_EDX)) {
        /*
         * The value returned by KVM_GET_SUPPORTED_CPUID does not include
         * features that still have to be enabled with the arch_prctl
         * system call.  QEMU needs the full value, which is retrieved
         * with KVM_GET_DEVICE_ATTR.
         */
        struct kvm_device_attr attr = {
            .group = 0,
            .attr = KVM_X86_XCOMP_GUEST_SUPP,
            .addr = (unsigned long) &bitmask
        };

        bool sys_attr = kvm_check_extension(s, KVM_CAP_SYS_ATTRIBUTES);
        if (!sys_attr) {
            return ret;
        }

        int rc = kvm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
        if (rc < 0) {
            if (rc != -ENXIO) {
                warn_report("KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) "
                            "error: %d", rc);
            }
            return ret;
        }
        ret = (reg == R_EAX) ? bitmask : bitmask >> 32;
    } else if (function == 0x80000001 && reg == R_ECX) {
        /*
         * It's safe to enable TOPOEXT even if it's not returned by
         * GET_SUPPORTED_CPUID.  Unconditionally enabling TOPOEXT here allows
         * us to keep CPU models including TOPOEXT runnable on older kernels.
         */
        ret |= CPUID_EXT3_TOPOEXT;
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    } else if (function == 0x80000007 && reg == R_EBX) {
        ret |= CPUID_8000_0007_EBX_OVERFLOW_RECOV | CPUID_8000_0007_EBX_SUCCOR;
    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
        /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
         * be enabled without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_KVM_PV_UNHALT;
        }
        if (kvm_irqchip_is_split()) {
            ret |= CPUID_KVM_MSI_EXT_DEST_ID;
        }
    } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
        ret |= CPUID_KVM_HINTS_REALTIME;
    }

    if (current_machine->cgs) {
        ret = x86_confidential_guest_adjust_cpuid_features(
            X86_CONFIDENTIAL_GUEST(current_machine->cgs),
            function, index, reg, ret);
    }
    return ret;
}

uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};
    uint64_t value;
    uint32_t ret, can_be_one, must_be_one;

    if (kvm_feature_msrs == NULL) { /* Host doesn't support feature MSRs */
        return 0;
    }

    /* Check if the requested MSR is a supported feature MSR */
    int i;
    for (i = 0; i < kvm_feature_msrs->nmsrs; i++) {
        if (kvm_feature_msrs->indices[i] == index) {
            break;
        }
    }
    if (i == kvm_feature_msrs->nmsrs) {
        return 0; /* if the feature MSR is not supported, simply return 0 */
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = index;

    ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data);
    if (ret != 1) {
        error_report("KVM get MSR (index=0x%x) feature failed, %s",
            index, strerror(-ret));
        exit(1);
    }

    value = msr_data.entries[0].data;
    switch (index) {
    case MSR_IA32_VMX_PROCBASED_CTLS2:
        if (!has_msr_vmx_procbased_ctls2) {
            /* KVM forgot to add these bits for some time, do this ourselves. */
            if (kvm_arch_get_supported_cpuid(s, 0xD, 1, R_ECX) &
                CPUID_XSAVE_XSAVES) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_XSAVES << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) &
                CPUID_EXT_RDRAND) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDRAND_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_INVPCID) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_ENABLE_INVPCID << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_RDSEED) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDSEED_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX) &
                CPUID_EXT2_RDTSCP) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDTSCP << 32;
            }
        }
        /* fall through */
    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        /*
         * Return true for bits that can be one, but do not have to be one.
         * The SDM tells us which bits could have a "must be one" setting,
         * so we can do the opposite transformation in make_vmx_msr_value.
         */
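        /*
         * Worked example: a bit whose low-word ("must be one") flag is
         * set is also set in the high word, so can_be_one & ~must_be_one
         * keeps exactly the bits that a VMM may set to either 0 or 1.
         */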
        must_be_one = (uint32_t)value;
        can_be_one = (uint32_t)(value >> 32);
        return can_be_one & ~must_be_one;
    case MSR_IA32_ARCH_CAPABILITIES:
        /*
         * Special handling for the fb-clear bit in the ARCH_CAPABILITIES MSR.
         * KVM will only report the bit if it is enabled in the host,
         * but, for live migration capability purposes, we want to
         * expose the bit to the guest even if it is disabled in the
         * host, as long as the host itself is not vulnerable to
         * the issue that the fb-clear bit is meant to mitigate.
         */
        if ((value & MSR_ARCH_CAP_MDS_NO) &&
            (value & MSR_ARCH_CAP_TAA_NO) &&
            (value & MSR_ARCH_CAP_SBDR_SSDP_NO) &&
            (value & MSR_ARCH_CAP_FBSDP_NO) &&
            (value & MSR_ARCH_CAP_PSDP_NO)) {
            value |= MSR_ARCH_CAP_FB_CLEAR;
        }
        return value;

    default:
        return value;
    }
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    *max_banks = kvm_check_extension(s, KVM_CAP_MCE);
    return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
}

static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_MISCV |
                      MCI_STATUS_ADDRV;
    uint64_t mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
    int flags = 0;

    if (!IS_AMD_CPU(env)) {
        status |= MCI_STATUS_S | MCI_STATUS_UC;
        if (code == BUS_MCEERR_AR) {
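            /*
             * The magic numbers are MCACOD error codes; as an assumption
             * from the SDM encoding, 0x134 is the architectural "SRAR
             * data load" code and the 0xc0 below matches "SRAO memory
             * scrubbing".
             */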
            status |= MCI_STATUS_AR | 0x134;
            mcg_status |= MCG_STATUS_EIPV;
        } else {
            status |= 0xc0;
        }
    } else {
        if (code == BUS_MCEERR_AR) {
            status |= MCI_STATUS_UC | MCI_STATUS_POISON;
            mcg_status |= MCG_STATUS_EIPV;
        } else {
            /* Setting the POISON bit for deferred errors indicates to the
             * guest kernel that the address provided by the MCE is valid
             * and usable, which will ensure that the guest kernel sends
             * a SIGBUS_AO signal to the guest process. This allows for
             * more desirable behavior in the case that the guest process
             * with poisoned memory has set the MCE_KILL_EARLY prctl flag,
             * which indicates that the process would prefer to handle or
             * shut down due to the poisoned memory condition before the
             * memory has been accessed.
             *
             * While the POISON bit would not be set in a deferred error
             * sent from hardware, the bit is not meaningful for deferred
             * errors and can be reused in this scenario.
             */
            status |= MCI_STATUS_DEFERRED | MCI_STATUS_POISON;
        }
    }

    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
    /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
     * guest kernel back into env->mcg_ext_ctl.
     */
    cpu_synchronize_state(cs);
    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
        mcg_status |= MCG_STATUS_LMCE;
        flags = 0;
    }

    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
}

static void emit_hypervisor_memory_failure(MemoryFailureAction action, bool ar)
{
    MemoryFailureFlags mff = {.action_required = ar, .recursive = false};

    qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_HYPERVISOR, action,
                                   &mff);
}

static void hardware_memory_error(void *host_addr)
{
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_FATAL, true);
    error_report("QEMU got Hardware memory error at addr %p", host_addr);
    exit(1);
}

void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    /* If we get an action required MCE, it has been injected by KVM
     * while the VM was running.  An action optional MCE instead should
     * be coming from the main thread, which qemu_init_sigbus identifies
     * as the "early kill" thread.
     */
    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if ((env->mcg_cap & MCG_SER_P) && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            kvm_mce_inject(cpu, paddr, code);

            /*
             * Use different logging severity based on error type.
             * If there is additional MCE reporting on the hypervisor, QEMU VA
             * could be another source to identify the PA and MCE details.
             */
            if (code == BUS_MCEERR_AR) {
                error_report("Guest MCE Memory Error at QEMU addr %p and "
                    "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                    addr, paddr, "BUS_MCEERR_AR");
            } else {
                warn_report("Guest MCE Memory Error at QEMU addr %p and "
                    "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                    addr, paddr, "BUS_MCEERR_AO");
            }

            return;
        }

        if (code == BUS_MCEERR_AO) {
            warn_report("Hardware memory error at addr %p of type %s "
                "for memory used by QEMU itself instead of guest system!",
                 addr, "BUS_MCEERR_AO");
        }
    }

    if (code == BUS_MCEERR_AR) {
        hardware_memory_error(addr);
    }

    /* For an AO MCE we can only hope we are lucky; just emit a notification
     * event.
     */
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, false);
}

static void kvm_queue_exception(CPUX86State *env,
                                int32_t exception_nr,
                                uint8_t exception_has_payload,
                                uint64_t exception_payload)
{
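    /*
     * The payload is the value the guest expects to find in DR6 (#DB)
     * or CR2 (#PF) when the exception is delivered.  Without
     * KVM_CAP_EXCEPTION_PAYLOAD we have to fold it into the register
     * state ourselves, below.
     */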
    assert(env->exception_nr == -1);
    assert(!env->exception_pending);
    assert(!env->exception_injected);
    assert(!env->exception_has_payload);

    env->exception_nr = exception_nr;

    if (has_exception_payload) {
        env->exception_pending = 1;

        env->exception_has_payload = exception_has_payload;
        env->exception_payload = exception_payload;
    } else {
        env->exception_injected = 1;

        if (exception_nr == EXCP01_DB) {
            assert(exception_has_payload);
            env->dr[6] = exception_payload;
        } else if (exception_nr == EXCP0E_PAGE) {
            assert(exception_has_payload);
            env->cr[2] = exception_payload;
        } else {
            assert(!exception_has_payload);
        }
    }
}

static void cpu_update_state(void *opaque, bool running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT                0x40000100
#endif

static bool hyperv_enabled(X86CPU *cpu)
{
    return kvm_check_extension(kvm_state, KVM_CAP_HYPERV) > 0 &&
        ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_NOTIFY) ||
         cpu->hyperv_features || cpu->hyperv_passthrough);
}

/*
 * Check whether target_freq is within the conservative NTP-correctable
 * bounds (250 ppm) of freq.
 */
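/*
 * Worked example: for a 2 GHz host TSC (freq = 2000000 kHz) the 250 ppm
 * window is +/- 500 kHz, so target frequencies of 1999500..2000500 kHz
 * are accepted.
 */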
static inline bool freq_within_bounds(int freq, int target_freq)
{
    int max_freq = freq + (freq * 250 / 1000000);
    int min_freq = freq - (freq * 250 / 1000000);

    if (target_freq >= min_freq && target_freq <= max_freq) {
        return true;
    }

    return false;
}

static int kvm_arch_set_tsc_khz(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int r, cur_freq;
    bool set_ioctl = false;

    /*
     * The TSC of a TD vcpu is immutable: it cannot be set/changed via the
     * vcpu scope KVM_SET_TSC_KHZ ioctl, only initialized via the VM scope
     * KVM_SET_TSC_KHZ before KVM_TDX_INIT_VM in tdx_pre_create_vcpu().
     */
    if (is_tdx_vm()) {
        return 0;
    }

    if (!env->tsc_khz) {
        return 0;
    }

    cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
               kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : -ENOTSUP;

    /*
     * If TSC scaling is supported, attempt to set the TSC frequency.
     */
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL)) {
        set_ioctl = true;
    }

    /*
     * If the desired TSC frequency is within bounds of NTP correction,
     * attempt to set the TSC frequency.
     */
    if (cur_freq != -ENOTSUP && freq_within_bounds(cur_freq, env->tsc_khz)) {
        set_ioctl = true;
    }

    r = set_ioctl ?
        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
        -ENOTSUP;

    if (r < 0) {
        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
         * TSC frequency doesn't match the one we want.
         */
        cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
                   kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
                   -ENOTSUP;
        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
            warn_report("TSC frequency mismatch between "
                        "VM (%" PRId64 " kHz) and host (%d kHz), "
                        "and TSC scaling unavailable",
                        env->tsc_khz, cur_freq);
            return r;
        }
    }

    return 0;
}

static bool tsc_is_stable_and_known(CPUX86State *env)
{
    if (!env->tsc_khz) {
        return false;
    }
    return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
        || env->user_tsc_khz;
}

#define DEFAULT_EVMCS_VERSION ((1 << 8) | 1)
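/*
 * Presumably encodes the supported eVMCS version range: the low byte is
 * the minimum version (1) and the next byte the maximum (also 1), as
 * advertised via HV_CPUID_NESTED_FEATURES.EAX below.
 */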
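/*
 * Table of the Hyper-V enlightenments QEMU knows about.  Each entry
 * names up to two CPUID flags that must all be present for the feature
 * to be considered supported, an optional mask of other HYPERV_FEAT_*
 * bits it depends on, and whether hv-passthrough should skip it.
 */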
static struct {
    const char *desc;
    struct {
        uint32_t func;
        int reg;
        uint32_t bits;
    } flags[2];
    uint64_t dependencies;
    bool skip_passthrough;
} kvm_hyperv_properties[] = {
    [HYPERV_FEAT_RELAXED] = {
        .desc = "relaxed timing (hv-relaxed)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_RELAXED_TIMING_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_VAPIC] = {
        .desc = "virtual APIC (hv-vapic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_APIC_ACCESS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_TIME] = {
        .desc = "clocksources (hv-time)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_TIME_REF_COUNT_AVAILABLE | HV_REFERENCE_TSC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_CRASH] = {
        .desc = "crash MSRs (hv-crash)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RESET] = {
        .desc = "reset MSR (hv-reset)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_RESET_AVAILABLE}
        }
    },
    [HYPERV_FEAT_VPINDEX] = {
        .desc = "VP_INDEX MSR (hv-vpindex)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_INDEX_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RUNTIME] = {
        .desc = "VP_RUNTIME MSR (hv-runtime)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_RUNTIME_AVAILABLE}
        }
    },
    [HYPERV_FEAT_SYNIC] = {
        .desc = "synthetic interrupt controller (hv-synic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNIC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_STIMER] = {
        .desc = "synthetic timers (hv-stimer)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNTIMERS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
    },
    [HYPERV_FEAT_FREQUENCIES] = {
        .desc = "frequency MSRs (hv-frequencies)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_FREQUENCY_MSRS},
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_FREQUENCY_MSRS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_REENLIGHTENMENT] = {
        .desc = "reenlightenment MSRs (hv-reenlightenment)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
        }
    },
    [HYPERV_FEAT_TLBFLUSH] = {
        .desc = "paravirtualized TLB flush (hv-tlbflush)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_EVMCS] = {
        .desc = "enlightened VMCS (hv-evmcs)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
    [HYPERV_FEAT_IPI] = {
        .desc = "paravirtualized IPI (hv-ipi)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_CLUSTER_IPI_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_STIMER_DIRECT] = {
        .desc = "direct mode synthetic timers (hv-stimer-direct)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_STIMER_DIRECT_MODE_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_STIMER)
    },
    [HYPERV_FEAT_AVIC] = {
        .desc = "AVIC/APICv support (hv-avic/hv-apicv)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_DEPRECATING_AEOI_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_SYNDBG] = {
        .desc = "Enable synthetic kernel debugger channel (hv-syndbg)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_FEATURE_DEBUG_MSRS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_RELAXED),
        .skip_passthrough = true,
    },
    [HYPERV_FEAT_MSR_BITMAP] = {
        .desc = "enlightened MSR-Bitmap (hv-emsr-bitmap)",
        .flags = {
            {.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX,
             .bits = HV_NESTED_MSR_BITMAP}
        }
    },
    [HYPERV_FEAT_XMM_INPUT] = {
        .desc = "XMM fast hypercall input (hv-xmm-input)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_HYPERCALL_XMM_INPUT_AVAILABLE}
        }
    },
    [HYPERV_FEAT_TLBFLUSH_EXT] = {
        .desc = "Extended gva ranges for TLB flush hypercalls (hv-tlbflush-ext)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_EXT_GVA_RANGES_FLUSH_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_TLBFLUSH)
    },
    [HYPERV_FEAT_TLBFLUSH_DIRECT] = {
        .desc = "direct TLB flush (hv-tlbflush-direct)",
        .flags = {
            {.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX,
             .bits = HV_NESTED_DIRECT_FLUSH}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
};

static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max,
                                           bool do_sys_ioctl)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;

    if (do_sys_ioctl) {
        r = kvm_ioctl(kvm_state, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
    }
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/*
 * Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs)
{
    struct kvm_cpuid2 *cpuid;
    /* 0x40000000..0x40000005, 0x4000000A, 0x40000080..0x40000082 leaves */
    int max = 11;
    int i;
    bool do_sys_ioctl;

    do_sys_ioctl =
        kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID) > 0;

    /*
     * A non-empty KVM context is needed when KVM_CAP_SYS_HYPERV_CPUID is
     * unsupported; kvm_hyperv_expand_features() checks for that.
     */
    assert(do_sys_ioctl || cs->kvm_state);

    /*
     * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with
     * -E2BIG, however, it doesn't report back the right size. Keep increasing
     * it and re-trying until we succeed.
     */
    while ((cpuid = try_get_hv_cpuid(cs, max, do_sys_ioctl)) == NULL) {
        max++;
    }

    /*
     * KVM_GET_SUPPORTED_HV_CPUID does not set the EVMCS CPUID bit before
     * KVM_CAP_HYPERV_ENLIGHTENED_VMCS is enabled, but we want to get the
     * information early; just check for the capability and set the bit
     * manually.
     */
    if (!do_sys_ioctl && kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        for (i = 0; i < cpuid->nent; i++) {
            if (cpuid->entries[i].function == HV_CPUID_ENLIGHTMENT_INFO) {
                cpuid->entries[i].eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
            }
        }
    }

    return cpuid;
}

/*
 * When KVM_GET_SUPPORTED_HV_CPUID is not supported we fill CPUID feature
 * leaves from KVM_CAP_HYPERV* and present MSRs data.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid2 *cpuid;
    struct kvm_cpuid_entry2 *entry_feat, *entry_recomm;

    /* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */
    cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries));
    cpuid->nent = 2;

    /* HV_CPUID_FEATURES */
    entry_feat = &cpuid->entries[0];
    entry_feat->function = HV_CPUID_FEATURES;

    entry_recomm = &cpuid->entries[1];
    entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
    entry_recomm->ebx = cpu->hyperv_spinlock_attempts;

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
        entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
        entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
        entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
        entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED;
        entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
        entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
        entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
    }

    if (has_msr_hv_frequencies) {
        entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
        entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE;
    }

    if (has_msr_hv_crash) {
        entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE;
    }

    if (has_msr_hv_reenlightenment) {
        entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
    }

    if (has_msr_hv_reset) {
        entry_feat->eax |= HV_RESET_AVAILABLE;
    }

    if (has_msr_hv_vpindex) {
        entry_feat->eax |= HV_VP_INDEX_AVAILABLE;
    }

    if (has_msr_hv_runtime) {
        entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE;
    }

    if (has_msr_hv_synic) {
        unsigned int cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;

        if (kvm_check_extension(cs->kvm_state, cap) > 0) {
            entry_feat->eax |= HV_SYNIC_AVAILABLE;
        }
    }

    if (has_msr_hv_stimer) {
        entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
    }

    if (has_msr_hv_syndbg_options) {
        entry_feat->edx |= HV_GUEST_DEBUGGING_AVAILABLE;
        entry_feat->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
        entry_feat->ebx |= HV_PARTITION_DEBUGGING_ALLOWED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_TLBFLUSH) > 0) {
        entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_SEND_IPI) > 0) {
        entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    return cpuid;
}

static uint32_t hv_cpuid_get_host(CPUState *cs, uint32_t func, int reg)
{
    struct kvm_cpuid_entry2 *entry;
    struct kvm_cpuid2 *cpuid;

    if (hv_cpuid_cache) {
        cpuid = hv_cpuid_cache;
    } else {
        if (kvm_check_extension(kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
            cpuid = get_supported_hv_cpuid(cs);
        } else {
            /*
             * 'cs->kvm_state' may be NULL when Hyper-V features are expanded
             * before KVM context is created but this is only done when
             * KVM_CAP_SYS_HYPERV_CPUID is supported and it implies
             * KVM_CAP_HYPERV_CPUID.
             */
            assert(cs->kvm_state);

            cpuid = get_supported_hv_cpuid_legacy(cs);
        }
        hv_cpuid_cache = cpuid;
    }

    if (!cpuid) {
        return 0;
    }

    entry = cpuid_find_entry(cpuid, func, 0);
    if (!entry) {
        return 0;
    }

    return cpuid_entry_get_reg(entry, reg);
}

static bool hyperv_feature_supported(CPUState *cs, int feature)
{
    uint32_t func, bits;
    int i, reg;

    /*
     * kvm_hyperv_properties needs to define at least one CPUID flag which
     * must be used to detect the feature; otherwise it's hard to say
     * whether the feature is supported or not.
     */
    assert(kvm_hyperv_properties[feature].flags[0].func);

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {

        func = kvm_hyperv_properties[feature].flags[i].func;
        reg = kvm_hyperv_properties[feature].flags[i].reg;
        bits = kvm_hyperv_properties[feature].flags[i].bits;

        if (!func) {
            continue;
        }

        if ((hv_cpuid_get_host(cs, func, reg) & bits) != bits) {
            return false;
        }
    }

    return true;
}

/* Checks that all feature dependencies are enabled */
static bool hv_feature_check_deps(X86CPU *cpu, int feature, Error **errp)
{
    uint64_t deps;
    int dep_feat;

    deps = kvm_hyperv_properties[feature].dependencies;
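    /* Walk the dependency mask one set bit at a time, lowest bit first. */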
1378     while (deps) {
1379         dep_feat = ctz64(deps);
1380         if (!(hyperv_feat_enabled(cpu, dep_feat))) {
1381             error_setg(errp, "Hyper-V %s requires Hyper-V %s",
1382                        kvm_hyperv_properties[feature].desc,
1383                        kvm_hyperv_properties[dep_feat].desc);
1384             return false;
1385         }
1386         deps &= ~(1ull << dep_feat);
1387     }
1388 
1389     return true;
1390 }
1391 
hv_build_cpuid_leaf(CPUState * cs,uint32_t func,int reg)1392 static uint32_t hv_build_cpuid_leaf(CPUState *cs, uint32_t func, int reg)
1393 {
1394     X86CPU *cpu = X86_CPU(cs);
1395     uint32_t r = 0;
1396     int i, j;
1397 
1398     for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties); i++) {
1399         if (!hyperv_feat_enabled(cpu, i)) {
1400             continue;
1401         }
1402 
1403         for (j = 0; j < ARRAY_SIZE(kvm_hyperv_properties[i].flags); j++) {
1404             if (kvm_hyperv_properties[i].flags[j].func != func) {
1405                 continue;
1406             }
1407             if (kvm_hyperv_properties[i].flags[j].reg != reg) {
1408                 continue;
1409             }
1410 
1411             r |= kvm_hyperv_properties[i].flags[j].bits;
1412         }
1413     }
1414 
1415     /* HV_CPUID_NESTED_FEATURES.EAX also encodes the supported eVMCS range */
1416     if (func == HV_CPUID_NESTED_FEATURES && reg == R_EAX) {
1417         if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
1418             r |= DEFAULT_EVMCS_VERSION;
1419         }
1420     }
1421 
1422     return r;
1423 }
1424 
1425 /*
1426  * Expand Hyper-V CPU features. In partucular, check that all the requested
1427  * features are supported by the host and the sanity of the configuration
1428  * (that all the required dependencies are included). Also, this takes care
1429  * of 'hv_passthrough' mode and fills the environment with all supported
1430  * Hyper-V features.
1431  */
kvm_hyperv_expand_features(X86CPU * cpu,Error ** errp)1432 bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp)
1433 {
1434     CPUState *cs = CPU(cpu);
1435     Error *local_err = NULL;
1436     int feat;
1437 
1438     if (!hyperv_enabled(cpu))
1439         return true;
1440 
1441     /*
1442      * When kvm_hyperv_expand_features is called at CPU feature expansion
1443      * time per-CPU kvm_state is not available yet so we can only proceed
1444      * when KVM_CAP_SYS_HYPERV_CPUID is supported.
1445      */
1446     if (!cs->kvm_state &&
1447         !kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID))
1448         return true;
1449 
1450     if (cpu->hyperv_passthrough) {
1451         cpu->hyperv_vendor_id[0] =
1452             hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EBX);
1453         cpu->hyperv_vendor_id[1] =
1454             hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_ECX);
1455         cpu->hyperv_vendor_id[2] =
1456             hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EDX);
1457         cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor,
1458                                        sizeof(cpu->hyperv_vendor_id) + 1);
1459         memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id,
1460                sizeof(cpu->hyperv_vendor_id));
1461         cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0;
1462 
1463         cpu->hyperv_interface_id[0] =
1464             hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EAX);
1465         cpu->hyperv_interface_id[1] =
1466             hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EBX);
1467         cpu->hyperv_interface_id[2] =
1468             hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_ECX);
1469         cpu->hyperv_interface_id[3] =
1470             hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EDX);
1471 
1472         cpu->hyperv_ver_id_build =
1473             hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EAX);
1474         cpu->hyperv_ver_id_major =
1475             hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) >> 16;
1476         cpu->hyperv_ver_id_minor =
1477             hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) & 0xffff;
1478         cpu->hyperv_ver_id_sp =
1479             hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_ECX);
1480         cpu->hyperv_ver_id_sb =
1481             hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) >> 24;
1482         cpu->hyperv_ver_id_sn =
1483             hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) & 0xffffff;
1484 
1485         cpu->hv_max_vps = hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS,
1486                                             R_EAX);
1487         cpu->hyperv_limits[0] =
1488             hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EBX);
1489         cpu->hyperv_limits[1] =
1490             hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_ECX);
1491         cpu->hyperv_limits[2] =
1492             hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EDX);
1493 
1494         cpu->hyperv_spinlock_attempts =
1495             hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EBX);
1496 
1497         /*
1498          * Mark feature as enabled in 'cpu->hyperv_features' as
1499          * hv_build_cpuid_leaf() uses this info to build guest CPUIDs.
1500          */
1501         for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
1502             if (hyperv_feature_supported(cs, feat) &&
1503                 !kvm_hyperv_properties[feat].skip_passthrough) {
1504                 cpu->hyperv_features |= BIT(feat);
1505             }
1506         }
1507     } else {
1508         /* Check features availability and dependencies */
1509         for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
1510             /* If the feature was not requested skip it. */
1511             if (!hyperv_feat_enabled(cpu, feat)) {
1512                 continue;
1513             }
1514 
1515             /* Check if the feature is supported by KVM */
1516             if (!hyperv_feature_supported(cs, feat)) {
1517                 error_setg(errp, "Hyper-V %s is not supported by kernel",
1518                            kvm_hyperv_properties[feat].desc);
1519                 return false;
1520             }
1521 
1522             /* Check dependencies */
1523             if (!hv_feature_check_deps(cpu, feat, &local_err)) {
1524                 error_propagate(errp, local_err);
1525                 return false;
1526             }
1527         }
1528     }
1529 
1530     /* Additional dependencies not covered by kvm_hyperv_properties[] */
1531     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
1532         !cpu->hyperv_synic_kvm_only &&
1533         !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
1534         error_setg(errp, "Hyper-V %s requires Hyper-V %s",
1535                    kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
1536                    kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
1537         return false;
1538     }
1539 
1540     return true;
1541 }
1542 
1543 /*
1544  * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent.
1545  */
1546 static int hyperv_fill_cpuids(CPUState *cs,
1547                               struct kvm_cpuid_entry2 *cpuid_ent)
1548 {
1549     X86CPU *cpu = X86_CPU(cs);
1550     struct kvm_cpuid_entry2 *c;
1551     uint32_t signature[3];
1552     uint32_t cpuid_i = 0, max_cpuid_leaf = 0;
1553     uint32_t nested_eax =
1554         hv_build_cpuid_leaf(cs, HV_CPUID_NESTED_FEATURES, R_EAX);
1555 
1556     max_cpuid_leaf = nested_eax ? HV_CPUID_NESTED_FEATURES :
1557         HV_CPUID_IMPLEMENT_LIMITS;
1558 
1559     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) {
1560         max_cpuid_leaf =
1561             MAX(max_cpuid_leaf, HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
1562     }
1563 
1564     c = &cpuid_ent[cpuid_i++];
1565     c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
1566     c->eax = max_cpuid_leaf;
1567     c->ebx = cpu->hyperv_vendor_id[0];
1568     c->ecx = cpu->hyperv_vendor_id[1];
1569     c->edx = cpu->hyperv_vendor_id[2];
1570 
1571     c = &cpuid_ent[cpuid_i++];
1572     c->function = HV_CPUID_INTERFACE;
1573     c->eax = cpu->hyperv_interface_id[0];
1574     c->ebx = cpu->hyperv_interface_id[1];
1575     c->ecx = cpu->hyperv_interface_id[2];
1576     c->edx = cpu->hyperv_interface_id[3];
1577 
1578     c = &cpuid_ent[cpuid_i++];
1579     c->function = HV_CPUID_VERSION;
1580     c->eax = cpu->hyperv_ver_id_build;
1581     c->ebx = (uint32_t)cpu->hyperv_ver_id_major << 16 |
1582         cpu->hyperv_ver_id_minor;
1583     c->ecx = cpu->hyperv_ver_id_sp;
1584     c->edx = (uint32_t)cpu->hyperv_ver_id_sb << 24 |
1585         (cpu->hyperv_ver_id_sn & 0xffffff);
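    /*
     * Illustrative packing example (hypothetical values): with
     * hyperv_ver_id_major = 10, hyperv_ver_id_minor = 0, hyperv_ver_id_sb = 0
     * and hyperv_ver_id_sn = 18362, the assignments above yield
     *
     *   ebx = 10 << 16 | 0    = 0x000a0000  (major.minor)
     *   edx = 0 << 24 | 18362 = 0x000047ba  (service branch | number)
     */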
1586 
1587     c = &cpuid_ent[cpuid_i++];
1588     c->function = HV_CPUID_FEATURES;
1589     c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EAX);
1590     c->ebx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EBX);
1591     c->edx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EDX);
1592 
1593     /* Unconditionally required with any Hyper-V enlightenment */
1594     c->eax |= HV_HYPERCALL_AVAILABLE;
1595 
1596     /* SynIC and Vmbus devices require messages/signals hypercalls */
1597     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
1598         !cpu->hyperv_synic_kvm_only) {
1599         c->ebx |= HV_POST_MESSAGES | HV_SIGNAL_EVENTS;
1600     }
1601 
1602 
1603     /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
1604     c->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
1605 
1606     c = &cpuid_ent[cpuid_i++];
1607     c->function = HV_CPUID_ENLIGHTMENT_INFO;
1608     c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX);
1609     c->ebx = cpu->hyperv_spinlock_attempts;
1610 
1611     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC) &&
1612         !hyperv_feat_enabled(cpu, HYPERV_FEAT_AVIC)) {
1613         c->eax |= HV_APIC_ACCESS_RECOMMENDED;
1614     }
1615 
1616     if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
1617         c->eax |= HV_NO_NONARCH_CORESHARING;
1618     } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
1619         c->eax |= hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX) &
1620             HV_NO_NONARCH_CORESHARING;
1621     }
1622 
1623     c = &cpuid_ent[cpuid_i++];
1624     c->function = HV_CPUID_IMPLEMENT_LIMITS;
1625     c->eax = cpu->hv_max_vps;
1626     c->ebx = cpu->hyperv_limits[0];
1627     c->ecx = cpu->hyperv_limits[1];
1628     c->edx = cpu->hyperv_limits[2];
1629 
1630     if (nested_eax) {
1631         uint32_t function;
1632 
1633         /* Create zeroed 0x40000006..0x40000009 leaves */
1634         for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
1635              function < HV_CPUID_NESTED_FEATURES; function++) {
1636             c = &cpuid_ent[cpuid_i++];
1637             c->function = function;
1638         }
1639 
1640         c = &cpuid_ent[cpuid_i++];
1641         c->function = HV_CPUID_NESTED_FEATURES;
1642         c->eax = nested_eax;
1643     }
1644 
1645     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) {
1646         c = &cpuid_ent[cpuid_i++];
1647         c->function = HV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS;
1648         c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ?
1649             HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
1650         memcpy(signature, "Microsoft VS", 12);
1651         c->eax = 0;
1652         c->ebx = signature[0];
1653         c->ecx = signature[1];
1654         c->edx = signature[2];
1655 
1656         c = &cpuid_ent[cpuid_i++];
1657         c->function = HV_CPUID_SYNDBG_INTERFACE;
1658         memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
1659         c->eax = signature[0];
1660         c->ebx = 0;
1661         c->ecx = 0;
1662         c->edx = 0;
1663 
1664         c = &cpuid_ent[cpuid_i++];
1665         c->function = HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
1666         c->eax = HV_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
1667         c->ebx = 0;
1668         c->ecx = 0;
1669         c->edx = 0;
1670     }
1671 
1672     return cpuid_i;
1673 }
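/*
 * Rough layout of the leaves filled in by hyperv_fill_cpuids() (a sketch;
 * the exact set depends on the enabled features):
 *
 *   0x40000000  vendor and max function
 *   0x40000001  interface id
 *   0x40000002  version
 *   0x40000003  features
 *   0x40000004  enlightenment info
 *   0x40000005  implementation limits
 *   0x40000006..0x40000009  zeroed filler (only when nested features exist)
 *   0x4000000a  nested features (when exposed)
 *   0x40000080..0x40000082  syndbg leaves (only with hv-syndbg)
 */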
1674 
1675 static Error *hv_passthrough_mig_blocker;
1676 static Error *hv_no_nonarch_cs_mig_blocker;
1677 
1678 /* Checks that the exposed eVMCS version range is supported by KVM */
1679 static bool evmcs_version_supported(uint16_t evmcs_version,
1680                                     uint16_t supported_evmcs_version)
1681 {
1682     uint8_t min_version = evmcs_version & 0xff;
1683     uint8_t max_version = evmcs_version >> 8;
1684     uint8_t min_supported_version = supported_evmcs_version & 0xff;
1685     uint8_t max_supported_version = supported_evmcs_version >> 8;
1686 
1687     return (min_version >= min_supported_version) &&
1688         (max_version <= max_supported_version);
1689 }
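/*
 * Worked example (illustrative): the 16-bit version word encodes a
 * [min..max] range as (max << 8) | min. With evmcs_version = 0x0101
 * (QEMU exposes [1..1]):
 *
 *   supported_evmcs_version = 0x0201 -> KVM supports [1..2] -> true
 *   supported_evmcs_version = 0x0202 -> KVM supports [2..2] -> false
 */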
1690 
1691 static int hyperv_init_vcpu(X86CPU *cpu)
1692 {
1693     CPUState *cs = CPU(cpu);
1694     Error *local_err = NULL;
1695     int ret;
1696 
1697     if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
1698         error_setg(&hv_passthrough_mig_blocker,
1699                    "'hv-passthrough' CPU flag prevents migration, use explicit"
1700                    " set of hv-* flags instead");
1701         ret = migrate_add_blocker(&hv_passthrough_mig_blocker, &local_err);
1702         if (ret < 0) {
1703             error_report_err(local_err);
1704             return ret;
1705         }
1706     }
1707 
1708     if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO &&
1709         hv_no_nonarch_cs_mig_blocker == NULL) {
1710         error_setg(&hv_no_nonarch_cs_mig_blocker,
1711                    "'hv-no-nonarch-coresharing=auto' CPU flag prevents migration"
1712                    " use explicit 'hv-no-nonarch-coresharing=on' instead (but"
1713                    " make sure SMT is disabled and/or that vCPUs are properly"
1714                    " pinned)");
1715         ret = migrate_add_blocker(&hv_no_nonarch_cs_mig_blocker, &local_err);
1716         if (ret < 0) {
1717             error_report_err(local_err);
1718             return ret;
1719         }
1720     }
1721 
1722     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
1723         /*
1724          * the kernel doesn't support setting vp_index; assert that its value
1725          * is in sync
1726          */
1727         uint64_t value;
1728 
1729         ret = kvm_get_one_msr(cpu, HV_X64_MSR_VP_INDEX, &value);
1730         if (ret < 0) {
1731             return ret;
1732         }
1733 
1734         if (value != hyperv_vp_index(CPU(cpu))) {
1735             error_report("kernel's vp_index != QEMU's vp_index");
1736             return -ENXIO;
1737         }
1738     }
1739 
1740     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
1741         uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
1742             KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
1743         ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
1744         if (ret < 0) {
1745             error_report("failed to turn on HyperV SynIC in KVM: %s",
1746                          strerror(-ret));
1747             return ret;
1748         }
1749 
1750         if (!cpu->hyperv_synic_kvm_only) {
1751             ret = hyperv_x86_synic_add(cpu);
1752             if (ret < 0) {
1753                 error_report("failed to create HyperV SynIC: %s",
1754                              strerror(-ret));
1755                 return ret;
1756             }
1757         }
1758     }
1759 
1760     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
1761         uint16_t evmcs_version = DEFAULT_EVMCS_VERSION;
1762         uint16_t supported_evmcs_version;
1763 
1764         ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
1765                                   (uintptr_t)&supported_evmcs_version);
1766 
1767         /*
1768          * KVM is required to support eVMCS ver.1, as that's what the
1769          * 'hv-evmcs' option sets. Note: we hardcode the maximum supported
1770          * eVMCS version to '1' as well, so the 'hv-evmcs' feature stays
1771          * migratable even when (and if) ver.2 is implemented. A new option
1772          * (e.g. 'hv-evmcs=2') will then have to be added.
1773          */
1774         if (ret < 0) {
1775             error_report("Hyper-V %s is not supported by kernel",
1776                          kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
1777             return ret;
1778         }
1779 
1780         if (!evmcs_version_supported(evmcs_version, supported_evmcs_version)) {
1781             error_report("eVMCS version range [%d..%d] is not supported by "
1782                          "kernel (supported: [%d..%d])", evmcs_version & 0xff,
1783                          evmcs_version >> 8, supported_evmcs_version & 0xff,
1784                          supported_evmcs_version >> 8);
1785             return -ENOTSUP;
1786         }
1787     }
1788 
1789     if (cpu->hyperv_enforce_cpuid) {
1790         ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENFORCE_CPUID, 0, 1);
1791         if (ret < 0) {
1792             error_report("failed to enable KVM_CAP_HYPERV_ENFORCE_CPUID: %s",
1793                          strerror(-ret));
1794             return ret;
1795         }
1796     }
1797 
1798     /* Skip SynIC and VP_INDEX since they are hard deps already */
1799     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_STIMER) &&
1800         hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC) &&
1801         hyperv_feat_enabled(cpu, HYPERV_FEAT_RUNTIME)) {
1802         hyperv_x86_set_vmbus_recommended_features_enabled();
1803     }
1804 
1805     return 0;
1806 }
1807 
1808 static Error *invtsc_mig_blocker;
1809 
1810 static void kvm_init_xsave(CPUX86State *env)
1811 {
1812     if (has_xsave2) {
1813         env->xsave_buf_len = QEMU_ALIGN_UP(has_xsave2, 4096);
1814     } else {
1815         env->xsave_buf_len = sizeof(struct kvm_xsave);
1816     }
1817 
1818     env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
1819     memset(env->xsave_buf, 0, env->xsave_buf_len);
1820     /*
1821      * The allocated storage must be large enough for all of the
1822      * possible XSAVE state components.
1823      */
1824     assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX) <=
1825            env->xsave_buf_len);
1826 }
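/*
 * Sizing example (hypothetical numbers): if KVM_CAP_XSAVE2 reports that
 * 2696 bytes are needed for this vCPU's state, the buffer is rounded up
 * to 4096; without the capability the fixed-size struct kvm_xsave (4K)
 * is used. The assert above verifies the buffer covers the size that
 * CPUID leaf 0xD reports in ECX for all supported state components.
 */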
1827 
1828 static void kvm_init_nested_state(CPUX86State *env)
1829 {
1830     struct kvm_vmx_nested_state_hdr *vmx_hdr;
1831     uint32_t size;
1832 
1833     if (!env->nested_state) {
1834         return;
1835     }
1836 
1837     size = env->nested_state->size;
1838 
1839     memset(env->nested_state, 0, size);
1840     env->nested_state->size = size;
1841 
1842     if (cpu_has_vmx(env)) {
1843         env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
1844         vmx_hdr = &env->nested_state->hdr.vmx;
1845         vmx_hdr->vmxon_pa = -1ull;
1846         vmx_hdr->vmcs12_pa = -1ull;
1847     } else if (cpu_has_svm(env)) {
1848         env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM;
1849     }
1850 }
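/*
 * Note: for VMX the -1ull physical addresses mark the VMXON region and
 * current VMCS as absent, i.e. the freshly initialized vCPU is not in
 * VMX operation.
 */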
1851 
1852 uint32_t kvm_x86_build_cpuid(CPUX86State *env, struct kvm_cpuid_entry2 *entries,
1853                              uint32_t cpuid_i)
1854 {
1855     uint32_t limit, i, j;
1856     uint32_t unused;
1857     struct kvm_cpuid_entry2 *c;
1858 
1859     cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
1860 
1861     for (i = 0; i <= limit; i++) {
1862         j = 0;
1863         if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1864             goto full;
1865         }
1866         c = &entries[cpuid_i++];
1867         switch (i) {
1868         case 2: {
1869             /* Keep reading function 2 till all the input is received */
1870             int times;
1871 
1872             c->function = i;
1873             cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1874             times = c->eax & 0xff;
1875             if (times > 1) {
1876                 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
1877                            KVM_CPUID_FLAG_STATE_READ_NEXT;
1878             }
1879 
1880             for (j = 1; j < times; ++j) {
1881                 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1882                     goto full;
1883                 }
1884                 c = &entries[cpuid_i++];
1885                 c->function = i;
1886                 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
1887                 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1888             }
1889             break;
1890         }
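        /*
         * Example (illustrative): if CPUID(2).EAX reports AL = 3, the leaf
         * must be read three times in total, so the loop above appends two
         * more stateful entries after the first one.
         */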
1891         case 0x1f:
1892             if (!x86_has_cpuid_0x1f(env_archcpu(env))) {
1893                 cpuid_i--;
1894                 break;
1895             }
1896             /* fallthrough */
1897         case 4:
1898         case 0xb:
1899         case 0xd:
1900             for (j = 0; ; j++) {
1901                 c->function = i;
1902                 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1903                 c->index = j;
1904                 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
1905 
1906                 if (i == 4 && c->eax == 0) {
1907                     break;
1908                 }
1909                 if (i == 0xb && !(c->ecx & 0xff00)) {
1910                     break;
1911                 }
1912                 if (i == 0x1f && !(c->ecx & 0xff00)) {
1913                     break;
1914                 }
1915                 if (i == 0xd && c->eax == 0) {
1916                     if (j < 63) {
1917                         continue;
1918                     } else {
1919                         cpuid_i--;
1920                         break;
1921                     }
1922                 }
1923                 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1924                     goto full;
1925                 }
1926                 c = &entries[cpuid_i++];
1927             }
1928             break;
1929         case 0x12:
1930             for (j = 0; ; j++) {
1931                 c->function = i;
1932                 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1933                 c->index = j;
1934                 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
1935 
1936                 if (j > 1 && (c->eax & 0xf) != 1) {
1937                     break;
1938                 }
1939 
1940                 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1941                     goto full;
1942                 }
1943                 c = &entries[cpuid_i++];
1944             }
1945             break;
1946         case 0x7:
1947         case 0x14:
1948         case 0x1d:
1949         case 0x1e:
1950         case 0x24: {
1951             uint32_t times;
1952 
1953             c->function = i;
1954             c->index = 0;
1955             c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1956             cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1957             times = c->eax;
1958 
1959             for (j = 1; j <= times; ++j) {
1960                 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1961                     goto full;
1962                 }
1963                 c = &entries[cpuid_i++];
1964                 c->function = i;
1965                 c->index = j;
1966                 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1967                 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
1968             }
1969             break;
1970         }
1971         default:
1972             c->function = i;
1973             c->flags = 0;
1974             cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1975             if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
1976                 /*
1977                  * KVM already returns all zeroes if a CPUID entry is missing,
1978                  * so we can omit it and avoid hitting KVM's 80-entry limit.
1979                  */
1980                 cpuid_i--;
1981             }
1982             break;
1983         }
1984     }
1985 
1986     if (limit >= 0x0a) {
1987         uint32_t eax, edx;
1988 
1989         cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
1990 
1991         has_architectural_pmu_version = eax & 0xff;
1992         if (has_architectural_pmu_version > 0) {
1993             num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
1994 
1995             /* Shouldn't be more than 32, since that's the number of bits
1996              * available in EBX to tell us _which_ counters are available.
1997              * Play it safe.
1998              */
1999             if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
2000                 num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
2001             }
2002 
2003             if (has_architectural_pmu_version > 1) {
2004                 num_architectural_pmu_fixed_counters = edx & 0x1f;
2005 
2006                 if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
2007                     num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
2008                 }
2009             }
2010         }
2011     }
2012 
2013     cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
2014 
2015     for (i = 0x80000000; i <= limit; i++) {
2016         j = 0;
2017         if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
2018             goto full;
2019         }
2020         c = &entries[cpuid_i++];
2021 
2022         switch (i) {
2023         case 0x8000001d:
2024             /* Query for all AMD cache information leaves */
2025             for (j = 0; ; j++) {
2026                 c->function = i;
2027                 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2028                 c->index = j;
2029                 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
2030 
2031                 if (c->eax == 0) {
2032                     break;
2033                 }
2034                 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
2035                     goto full;
2036                 }
2037                 c = &entries[cpuid_i++];
2038             }
2039             break;
2040         default:
2041             c->function = i;
2042             c->flags = 0;
2043             cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
2044             if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
2045                 /*
2046                  * KVM already returns all zeroes if a CPUID entry is missing,
2047                  * so we can omit it and avoid hitting KVM's 80-entry limit.
2048                  */
2049                 cpuid_i--;
2050             }
2051             break;
2052         }
2053     }
2054 
2055     /* Call Centaur's CPUID instructions if they are supported. */
2056     if (env->cpuid_xlevel2 > 0) {
2057         cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
2058 
2059         for (i = 0xC0000000; i <= limit; i++) {
2060             j = 0;
2061             if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
2062                 goto full;
2063             }
2064             c = &entries[cpuid_i++];
2065 
2066             c->function = i;
2067             c->flags = 0;
2068             cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
2069         }
2070     }
2071 
2072     return cpuid_i;
2073 
2074 full:
2075     fprintf(stderr, "cpuid_data is full, no space for "
2076             "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
2077     abort();
2078 }
2079 
2080 int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
2081 {
2082     if (is_tdx_vm()) {
2083         return tdx_pre_create_vcpu(cpu, errp);
2084     }
2085 
2086     return 0;
2087 }
2088 
2089 int kvm_arch_init_vcpu(CPUState *cs)
2090 {
2091     struct {
2092         struct kvm_cpuid2 cpuid;
2093         struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
2094     } cpuid_data;
2095     /*
2096      * The kernel defines these structs with padding fields so there
2097      * should be no extra padding in our cpuid_data struct.
2098      */
2099     QEMU_BUILD_BUG_ON(sizeof(cpuid_data) !=
2100                       sizeof(struct kvm_cpuid2) +
2101                       sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);
2102 
2103     X86CPU *cpu = X86_CPU(cs);
2104     CPUX86State *env = &cpu->env;
2105     uint32_t cpuid_i;
2106     struct kvm_cpuid_entry2 *c;
2107     uint32_t signature[3];
2108     int kvm_base = KVM_CPUID_SIGNATURE;
2109     int max_nested_state_len;
2110     int r;
2111     Error *local_err = NULL;
2112 
2113     if (current_machine->cgs) {
2114         r = x86_confidential_guest_check_features(
2115                 X86_CONFIDENTIAL_GUEST(current_machine->cgs), cs);
2116         if (r < 0) {
2117             return r;
2118         }
2119     }
2120 
2121     memset(&cpuid_data, 0, sizeof(cpuid_data));
2122 
2123     cpuid_i = 0;
2124 
2125     has_xsave2 = kvm_check_extension(cs->kvm_state, KVM_CAP_XSAVE2);
2126 
2127     r = kvm_arch_set_tsc_khz(cs);
2128     if (r < 0) {
2129         return r;
2130     }
2131 
2132     /* The vCPU's TSC frequency is either specified by the user or follows
2133      * the value used by KVM if the former is not present. In the
2134      * latter case, we query it from KVM and record it in env->tsc_khz,
2135      * so that the vCPU's TSC frequency can be migrated later via this field.
2136      */
2137     if (!env->tsc_khz) {
2138         r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
2139             kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
2140             -ENOTSUP;
2141         if (r > 0) {
2142             env->tsc_khz = r;
2143         }
2144     }
2145 
2146     env->apic_bus_freq = KVM_APIC_BUS_FREQUENCY;
2147 
2148     /*
2149      * kvm_hyperv_expand_features() is called here for the second time in case
2150      * KVM_CAP_SYS_HYPERV_CPUID is not supported. While we can't possibly handle
2151      * 'query-cpu-model-expansion' in this case as we don't have a KVM vCPU to
2152      * check which Hyper-V enlightenments are supported and which are not, we
2153      * can still proceed and check/expand Hyper-V enlightenments here so legacy
2154      * behavior is preserved.
2155      */
2156     if (!kvm_hyperv_expand_features(cpu, &local_err)) {
2157         error_report_err(local_err);
2158         return -ENOSYS;
2159     }
2160 
2161     if (hyperv_enabled(cpu)) {
2162         r = hyperv_init_vcpu(cpu);
2163         if (r) {
2164             return r;
2165         }
2166 
2167         cpuid_i = hyperv_fill_cpuids(cs, cpuid_data.entries);
2168         kvm_base = KVM_CPUID_SIGNATURE_NEXT;
2169         has_msr_hv_hypercall = true;
2170     }
2171 
2172     if (cs->kvm_state->xen_version) {
2173 #ifdef CONFIG_XEN_EMU
2174         struct kvm_cpuid_entry2 *xen_max_leaf;
2175 
2176         memcpy(signature, "XenVMMXenVMM", 12);
2177 
2178         xen_max_leaf = c = &cpuid_data.entries[cpuid_i++];
2179         c->function = kvm_base + XEN_CPUID_SIGNATURE;
2180         c->eax = kvm_base + XEN_CPUID_TIME;
2181         c->ebx = signature[0];
2182         c->ecx = signature[1];
2183         c->edx = signature[2];
2184 
2185         c = &cpuid_data.entries[cpuid_i++];
2186         c->function = kvm_base + XEN_CPUID_VENDOR;
2187         c->eax = cs->kvm_state->xen_version;
2188         c->ebx = 0;
2189         c->ecx = 0;
2190         c->edx = 0;
2191 
2192         c = &cpuid_data.entries[cpuid_i++];
2193         c->function = kvm_base + XEN_CPUID_HVM_MSR;
2194         /* Number of hypercall-transfer pages */
2195         c->eax = 1;
2196         /* Hypercall MSR base address */
2197         if (hyperv_enabled(cpu)) {
2198             c->ebx = XEN_HYPERCALL_MSR_HYPERV;
2199             kvm_xen_init(cs->kvm_state, c->ebx);
2200         } else {
2201             c->ebx = XEN_HYPERCALL_MSR;
2202         }
2203         c->ecx = 0;
2204         c->edx = 0;
2205 
2206         c = &cpuid_data.entries[cpuid_i++];
2207         c->function = kvm_base + XEN_CPUID_TIME;
2208         c->eax = ((!!tsc_is_stable_and_known(env) << 1) |
2209             (!!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP) << 2));
2210         /* default=0 (emulate if necessary) */
2211         c->ebx = 0;
2212         /* guest tsc frequency */
2213         c->ecx = env->user_tsc_khz;
2214         /* guest tsc incarnation (migration count) */
2215         c->edx = 0;
2216 
2217         c = &cpuid_data.entries[cpuid_i++];
2218         c->function = kvm_base + XEN_CPUID_HVM;
2219         xen_max_leaf->eax = kvm_base + XEN_CPUID_HVM;
2220         if (cs->kvm_state->xen_version >= XEN_VERSION(4, 5)) {
2221             c->function = kvm_base + XEN_CPUID_HVM;
2222 
2223             if (cpu->xen_vapic) {
2224                 c->eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT;
2225                 c->eax |= XEN_HVM_CPUID_X2APIC_VIRT;
2226             }
2227 
2228             c->eax |= XEN_HVM_CPUID_IOMMU_MAPPINGS;
2229 
2230             if (cs->kvm_state->xen_version >= XEN_VERSION(4, 6)) {
2231                 c->eax |= XEN_HVM_CPUID_VCPU_ID_PRESENT;
2232                 c->ebx = cs->cpu_index;
2233             }
2234 
2235             if (cs->kvm_state->xen_version >= XEN_VERSION(4, 17)) {
2236                 c->eax |= XEN_HVM_CPUID_UPCALL_VECTOR;
2237             }
2238         }
2239 
2240         r = kvm_xen_init_vcpu(cs);
2241         if (r) {
2242             return r;
2243         }
2244 
2245         kvm_base += 0x100;
2246 #else /* CONFIG_XEN_EMU */
2247         /* This should never happen as kvm_arch_init() would have died first. */
2248         fprintf(stderr, "Cannot enable Xen CPUID without Xen support\n");
2249         abort();
2250 #endif
2251     } else if (cpu->expose_kvm) {
2252         memcpy(signature, "KVMKVMKVM\0\0\0", 12);
2253         c = &cpuid_data.entries[cpuid_i++];
2254         c->function = KVM_CPUID_SIGNATURE | kvm_base;
2255         c->eax = KVM_CPUID_FEATURES | kvm_base;
2256         c->ebx = signature[0];
2257         c->ecx = signature[1];
2258         c->edx = signature[2];
2259 
2260         c = &cpuid_data.entries[cpuid_i++];
2261         c->function = KVM_CPUID_FEATURES | kvm_base;
2262         c->eax = env->features[FEAT_KVM];
2263         c->edx = env->features[FEAT_KVM_HINTS];
2264     }
2265 
2266     if (cpu->kvm_pv_enforce_cpuid) {
2267         r = kvm_vcpu_enable_cap(cs, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 0, 1);
2268         if (r < 0) {
2269             fprintf(stderr,
2270                     "failed to enable KVM_CAP_ENFORCE_PV_FEATURE_CPUID: %s",
2271                     strerror(-r));
2272             abort();
2273         }
2274     }
2275 
2276     cpuid_i = kvm_x86_build_cpuid(env, cpuid_data.entries, cpuid_i);
2277     cpuid_data.cpuid.nent = cpuid_i;
2278 
2279     if (x86_cpu_family(env->cpuid_version) >= 6
2280         && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2281            (CPUID_MCE | CPUID_MCA)) {
2282         uint64_t mcg_cap, unsupported_caps;
2283         int banks;
2284         int ret;
2285 
2286         ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
2287         if (ret < 0) {
2288             fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
2289             return ret;
2290         }
2291 
2292         if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
2293             error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
2294                          (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
2295             return -ENOTSUP;
2296         }
2297 
2298         unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
2299         if (unsupported_caps) {
2300             if (unsupported_caps & MCG_LMCE_P) {
2301                 error_report("kvm: LMCE not supported");
2302                 return -ENOTSUP;
2303             }
2304             warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64,
2305                         unsupported_caps);
2306         }
2307 
2308         env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
2309         ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
2310         if (ret < 0) {
2311             fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
2312             return ret;
2313         }
2314     }
2315 
2316     cpu->vmsentry = qemu_add_vm_change_state_handler(cpu_update_state, env);
2317 
2318     c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
2319     if (c) {
2320         has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
2321                                   !!(c->ecx & CPUID_EXT_SMX);
2322     }
2323 
2324     c = cpuid_find_entry(&cpuid_data.cpuid, 7, 0);
2325     if (c && (c->ebx & CPUID_7_0_EBX_SGX)) {
2326         has_msr_feature_control = true;
2327     }
2328 
2329     if (env->mcg_cap & MCG_LMCE_P) {
2330         has_msr_mcg_ext_ctl = has_msr_feature_control = true;
2331     }
2332 
2333     if (!env->user_tsc_khz) {
2334         if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
2335             invtsc_mig_blocker == NULL) {
2336             error_setg(&invtsc_mig_blocker,
2337                        "State blocked by non-migratable CPU device"
2338                        " (invtsc flag)");
2339             r = migrate_add_blocker(&invtsc_mig_blocker, &local_err);
2340             if (r < 0) {
2341                 error_report_err(local_err);
2342                 return r;
2343             }
2344         }
2345     }
2346 
2347     if (cpu->vmware_cpuid_freq
2348         /* Guests depend on 0x40000000 to detect this feature, so only expose
2349          * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
2350         && cpu->expose_kvm
2351         && kvm_base == KVM_CPUID_SIGNATURE
2352         /* TSC clock must be stable and known for this feature. */
2353         && tsc_is_stable_and_known(env)) {
2354 
2355         c = &cpuid_data.entries[cpuid_i++];
2356         c->function = KVM_CPUID_SIGNATURE | 0x10;
2357         c->eax = env->tsc_khz;
2358         c->ebx = env->apic_bus_freq / 1000; /* Hz to KHz */
2359         c->ecx = c->edx = 0;
2360 
2361         c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
2362         c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10);
2363     }
2364 
2365     cpuid_data.cpuid.nent = cpuid_i;
2366 
2367     cpuid_data.cpuid.padding = 0;
2368     r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
2369     if (r) {
2370         goto fail;
2371     }
2372     kvm_init_xsave(env);
2373 
2374     max_nested_state_len = kvm_max_nested_state_length();
2375     if (max_nested_state_len > 0) {
2376         assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
2377 
2378         if (cpu_has_vmx(env) || cpu_has_svm(env)) {
2379             env->nested_state = g_malloc0(max_nested_state_len);
2380             env->nested_state->size = max_nested_state_len;
2381 
2382             kvm_init_nested_state(env);
2383         }
2384     }
2385 
2386     cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
2387 
2388     if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
2389         has_msr_tsc_aux = false;
2390     }
2391 
2392     kvm_init_msrs(cpu);
2393 
2394     return 0;
2395 
2396  fail:
2397     migrate_del_blocker(&invtsc_mig_blocker);
2398 
2399     return r;
2400 }
2401 
2402 int kvm_arch_destroy_vcpu(CPUState *cs)
2403 {
2404     X86CPU *cpu = X86_CPU(cs);
2405     CPUX86State *env = &cpu->env;
2406 
2407     g_free(env->xsave_buf);
2408 
2409     g_free(cpu->kvm_msr_buf);
2410     cpu->kvm_msr_buf = NULL;
2411 
2412     g_free(env->nested_state);
2413     env->nested_state = NULL;
2414 
2415     qemu_del_vm_change_state_handler(cpu->vmsentry);
2416 
2417     return 0;
2418 }
2419 
2420 void kvm_arch_reset_vcpu(X86CPU *cpu)
2421 {
2422     CPUX86State *env = &cpu->env;
2423 
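    /* XCR0 bit 0 (x87 state) is architecturally always set after reset. */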
2424     env->xcr0 = 1;
2425     if (kvm_irqchip_in_kernel()) {
2426         env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
2427                                           KVM_MP_STATE_UNINITIALIZED;
2428     } else {
2429         env->mp_state = KVM_MP_STATE_RUNNABLE;
2430     }
2431 
2432     /* enabled by default */
2433     env->poll_control_msr = 1;
2434 
2435     kvm_init_nested_state(env);
2436 
2437     sev_es_set_reset_vector(CPU(cpu));
2438 }
2439 
2440 void kvm_arch_after_reset_vcpu(X86CPU *cpu)
2441 {
2442     CPUX86State *env = &cpu->env;
2443     int i;
2444 
2445     /*
2446      * Reset SynIC after all other devices have been reset to let them remove
2447      * their SINT routes first.
2448      */
2449     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
2450         for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
2451             env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
2452         }
2453 
2454         hyperv_x86_synic_reset(cpu);
2455     }
2456 }
2457 
2458 void kvm_arch_reset_parked_vcpu(unsigned long vcpu_id, int kvm_fd)
2459 {
2460     g_autofree struct kvm_msrs *msrs = NULL;
2461 
2462     msrs = g_malloc0(sizeof(*msrs) + sizeof(msrs->entries[0]));
2463     msrs->entries[0].index = MSR_IA32_TSC;
2464     msrs->entries[0].data = 1; /* match the value in x86_cpu_reset() */
2465     msrs->nmsrs++;
2466 
2467     if (ioctl(kvm_fd, KVM_SET_MSRS, msrs) != 1) {
2468         warn_report("parked vCPU %lu TSC reset failed: %d",
2469                     vcpu_id, errno);
2470     }
2471 }
2472 
2473 void kvm_arch_do_init_vcpu(X86CPU *cpu)
2474 {
2475     CPUX86State *env = &cpu->env;
2476 
2477     /* APs get directly into wait-for-SIPI state.  */
2478     if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
2479         env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
2480     }
2481 }
2482 
2483 static int kvm_get_supported_feature_msrs(KVMState *s)
2484 {
2485     int ret = 0;
2486 
2487     if (kvm_feature_msrs != NULL) {
2488         return 0;
2489     }
2490 
2491     if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) {
2492         return 0;
2493     }
2494 
2495     struct kvm_msr_list msr_list;
2496 
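    /*
     * Standard two-call pattern: with nmsrs == 0 the ioctl fails with
     * E2BIG but fills in the number of MSRs, which is then used to size
     * the buffer for the second call below.
     */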
2497     msr_list.nmsrs = 0;
2498     ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list);
2499     if (ret < 0 && ret != -E2BIG) {
2500         error_report("Fetch KVM feature MSR list failed: %s",
2501             strerror(-ret));
2502         return ret;
2503     }
2504 
2505     assert(msr_list.nmsrs > 0);
2506     kvm_feature_msrs = g_malloc0(sizeof(msr_list) +
2507                  msr_list.nmsrs * sizeof(msr_list.indices[0]));
2508 
2509     kvm_feature_msrs->nmsrs = msr_list.nmsrs;
2510     ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs);
2511 
2512     if (ret < 0) {
2513         error_report("Fetch KVM feature MSR list failed: %s",
2514             strerror(-ret));
2515         g_free(kvm_feature_msrs);
2516         kvm_feature_msrs = NULL;
2517         return ret;
2518     }
2519 
2520     return 0;
2521 }
2522 
2523 static int kvm_get_supported_msrs(KVMState *s)
2524 {
2525     int ret = 0;
2526     struct kvm_msr_list msr_list, *kvm_msr_list;
2527 
2528     /*
2529      *  Obtain MSR list from KVM.  These are the MSRs that we must
2530      *  save/restore.
2531      */
2532     msr_list.nmsrs = 0;
2533     ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
2534     if (ret < 0 && ret != -E2BIG) {
2535         return ret;
2536     }
2537     /*
2538      * Old kernel modules had a bug and could write beyond the provided
2539      * memory. Allocate at least 1K to be safe.
2540      */
2541     kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
2542                                           msr_list.nmsrs *
2543                                           sizeof(msr_list.indices[0])));
2544 
2545     kvm_msr_list->nmsrs = msr_list.nmsrs;
2546     ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
2547     if (ret >= 0) {
2548         int i;
2549 
2550         for (i = 0; i < kvm_msr_list->nmsrs; i++) {
2551             switch (kvm_msr_list->indices[i]) {
2552             case MSR_STAR:
2553                 has_msr_star = true;
2554                 break;
2555             case MSR_VM_HSAVE_PA:
2556                 has_msr_hsave_pa = true;
2557                 break;
2558             case MSR_TSC_AUX:
2559                 has_msr_tsc_aux = true;
2560                 break;
2561             case MSR_TSC_ADJUST:
2562                 has_msr_tsc_adjust = true;
2563                 break;
2564             case MSR_IA32_TSCDEADLINE:
2565                 has_msr_tsc_deadline = true;
2566                 break;
2567             case MSR_IA32_SMBASE:
2568                 has_msr_smbase = true;
2569                 break;
2570             case MSR_SMI_COUNT:
2571                 has_msr_smi_count = true;
2572                 break;
2573             case MSR_IA32_MISC_ENABLE:
2574                 has_msr_misc_enable = true;
2575                 break;
2576             case MSR_IA32_BNDCFGS:
2577                 has_msr_bndcfgs = true;
2578                 break;
2579             case MSR_IA32_XSS:
2580                 has_msr_xss = true;
2581                 break;
2582             case MSR_IA32_UMWAIT_CONTROL:
2583                 has_msr_umwait = true;
2584                 break;
2585             case HV_X64_MSR_CRASH_CTL:
2586                 has_msr_hv_crash = true;
2587                 break;
2588             case HV_X64_MSR_RESET:
2589                 has_msr_hv_reset = true;
2590                 break;
2591             case HV_X64_MSR_VP_INDEX:
2592                 has_msr_hv_vpindex = true;
2593                 break;
2594             case HV_X64_MSR_VP_RUNTIME:
2595                 has_msr_hv_runtime = true;
2596                 break;
2597             case HV_X64_MSR_SCONTROL:
2598                 has_msr_hv_synic = true;
2599                 break;
2600             case HV_X64_MSR_STIMER0_CONFIG:
2601                 has_msr_hv_stimer = true;
2602                 break;
2603             case HV_X64_MSR_TSC_FREQUENCY:
2604                 has_msr_hv_frequencies = true;
2605                 break;
2606             case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
2607                 has_msr_hv_reenlightenment = true;
2608                 break;
2609             case HV_X64_MSR_SYNDBG_OPTIONS:
2610                 has_msr_hv_syndbg_options = true;
2611                 break;
2612             case MSR_IA32_SPEC_CTRL:
2613                 has_msr_spec_ctrl = true;
2614                 break;
2615             case MSR_AMD64_TSC_RATIO:
2616                 has_tsc_scale_msr = true;
2617                 break;
2618             case MSR_IA32_TSX_CTRL:
2619                 has_msr_tsx_ctrl = true;
2620                 break;
2621             case MSR_VIRT_SSBD:
2622                 has_msr_virt_ssbd = true;
2623                 break;
2624             case MSR_IA32_ARCH_CAPABILITIES:
2625                 has_msr_arch_capabs = true;
2626                 break;
2627             case MSR_IA32_CORE_CAPABILITY:
2628                 has_msr_core_capabs = true;
2629                 break;
2630             case MSR_IA32_PERF_CAPABILITIES:
2631                 has_msr_perf_capabs = true;
2632                 break;
2633             case MSR_IA32_VMX_VMFUNC:
2634                 has_msr_vmx_vmfunc = true;
2635                 break;
2636             case MSR_IA32_UCODE_REV:
2637                 has_msr_ucode_rev = true;
2638                 break;
2639             case MSR_IA32_VMX_PROCBASED_CTLS2:
2640                 has_msr_vmx_procbased_ctls2 = true;
2641                 break;
2642             case MSR_IA32_PKRS:
2643                 has_msr_pkrs = true;
2644                 break;
2645             case MSR_K7_HWCR:
2646                 has_msr_hwcr = true;
2647             }
2648         }
2649     }
2650 
2651     g_free(kvm_msr_list);
2652 
2653     return ret;
2654 }
2655 
2656 static bool kvm_rdmsr_core_thread_count(X86CPU *cpu,
2657                                         uint32_t msr,
2658                                         uint64_t *val)
2659 {
2660     *val = cpu_x86_get_msr_core_thread_count(cpu);
2661 
2662     return true;
2663 }
2664 
2665 static bool kvm_rdmsr_rapl_power_unit(X86CPU *cpu,
2666                                       uint32_t msr,
2667                                       uint64_t *val)
2668 {
2669 
2670     CPUState *cs = CPU(cpu);
2671 
2672     *val = cs->kvm_state->msr_energy.msr_unit;
2673 
2674     return true;
2675 }
2676 
2677 static bool kvm_rdmsr_pkg_power_limit(X86CPU *cpu,
2678                                       uint32_t msr,
2679                                       uint64_t *val)
2680 {
2681 
2682     CPUState *cs = CPU(cpu);
2683 
2684     *val = cs->kvm_state->msr_energy.msr_limit;
2685 
2686     return true;
2687 }
2688 
2689 static bool kvm_rdmsr_pkg_power_info(X86CPU *cpu,
2690                                      uint32_t msr,
2691                                      uint64_t *val)
2692 {
2693 
2694     CPUState *cs = CPU(cpu);
2695 
2696     *val = cs->kvm_state->msr_energy.msr_info;
2697 
2698     return true;
2699 }
2700 
2701 static bool kvm_rdmsr_pkg_energy_status(X86CPU *cpu,
2702                                         uint32_t msr,
2703                                         uint64_t *val)
2704 {
2705 
2706     CPUState *cs = CPU(cpu);
2707     *val = cs->kvm_state->msr_energy.msr_value[cs->cpu_index];
2708 
2709     return true;
2710 }
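/*
 * The rdmsr handlers above satisfy guest reads of the RAPL MSRs from the
 * per-vCPU values computed by the kvm-msr energy thread below; they are
 * registered through the MSR filtering machinery elsewhere in this file.
 */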
2711 
2712 static Notifier smram_machine_done;
2713 static KVMMemoryListener smram_listener;
2714 static AddressSpace smram_address_space;
2715 static MemoryRegion smram_as_root;
2716 static MemoryRegion smram_as_mem;
2717 
2718 static void register_smram_listener(Notifier *n, void *unused)
2719 {
2720     CPUState *cpu;
2721     MemoryRegion *smram =
2722         (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2723 
2724     /* Outer container... */
2725     memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
2726     memory_region_set_enabled(&smram_as_root, true);
2727 
2728     /* ... with two regions inside: normal system memory with low
2729      * priority, and...
2730      */
2731     memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
2732                              get_system_memory(), 0, ~0ull);
2733     memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
2734     memory_region_set_enabled(&smram_as_mem, true);
2735 
2736     if (smram) {
2737         /* ... SMRAM with higher priority */
2738         memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
2739         memory_region_set_enabled(smram, true);
2740     }
2741 
2742     address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
2743     kvm_memory_listener_register(kvm_state, &smram_listener,
2744                                  &smram_address_space, X86ASIdx_SMM, "kvm-smram");
2745 
2746     CPU_FOREACH(cpu) {
2747         cpu_address_space_init(cpu, X86ASIdx_SMM, "cpu-smm", &smram_as_root);
2748     }
2749 }
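/*
 * Priority note: both aliases cover the whole address space, but SMRAM is
 * added at priority 10 versus 0 for regular system memory, so wherever
 * SMRAM is enabled it shadows system memory in the KVM-SMRAM address space.
 */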
2750 
2751 /* It should only be called from the CPU hotplug callback */
2752 void kvm_smm_cpu_address_space_init(X86CPU *cpu)
2753 {
2754     cpu_address_space_init(CPU(cpu), X86ASIdx_SMM, "cpu-smm", &smram_as_root);
2755 }
2756 
2757 static void *kvm_msr_energy_thread(void *data)
2758 {
2759     KVMState *s = data;
2760     struct KVMMsrEnergy *vmsr = &s->msr_energy;
2761 
2762     g_autofree vmsr_package_energy_stat *pkg_stat = NULL;
2763     g_autofree vmsr_thread_stat *thd_stat = NULL;
2764     g_autofree CPUState *cpu = NULL;
2765     g_autofree unsigned int *vpkgs_energy_stat = NULL;
2766     unsigned int num_threads = 0;
2767 
2768     X86CPUTopoIDs topo_ids;
2769 
2770     rcu_register_thread();
2771 
2772     /* Allocate memory for each package energy status */
2773     pkg_stat = g_new0(vmsr_package_energy_stat, vmsr->host_topo.maxpkgs);
2774 
2775     /* Allocate memory for thread stats */
2776     thd_stat = g_new0(vmsr_thread_stat, 1);
2777 
2778     /* Allocate memory for holding virtual package energy counter */
2779     vpkgs_energy_stat = g_new0(unsigned int, vmsr->guest_vsockets);
2780 
2781     /* Populate the max ticks of each package */
2782     for (int i = 0; i < vmsr->host_topo.maxpkgs; i++) {
2783         /*
2784          * Max number of ticks per package:
2785          * time in seconds * number of ticks/second * number of cores/package
2786          * e.g. 100 ticks/second/CPU and 12 CPUs per package give 1200 ticks max
2787          */
2788         vmsr->host_topo.maxticks[i] = (MSR_ENERGY_THREAD_SLEEP_US / 1000000)
2789                         * sysconf(_SC_CLK_TCK)
2790                         * vmsr->host_topo.pkg_cpu_count[i];
2791     }
2792 
2793     while (true) {
2794         /* Get all QEMU thread ids */
2795         g_autofree pid_t *thread_ids
2796             = vmsr_get_thread_ids(vmsr->pid, &num_threads);
2797 
2798         if (thread_ids == NULL) {
2799             goto clean;
2800         }
2801 
2802         thd_stat = g_renew(vmsr_thread_stat, thd_stat, num_threads);
2803         /* Unlike g_new0, a g_renew0 function doesn't exist yet... */
2804         memset(thd_stat, 0, num_threads * sizeof(vmsr_thread_stat));
2805 
2806         /* Populate all the thread stats */
2807         for (int i = 0; i < num_threads; i++) {
2808             thd_stat[i].utime = g_new0(unsigned long long, 2);
2809             thd_stat[i].stime = g_new0(unsigned long long, 2);
2810             thd_stat[i].thread_id = thread_ids[i];
2811             vmsr_read_thread_stat(vmsr->pid,
2812                                   thd_stat[i].thread_id,
2813                                   &thd_stat[i].utime[0],
2814                                   &thd_stat[i].stime[0],
2815                                   &thd_stat[i].cpu_id);
2816             thd_stat[i].pkg_id =
2817                 vmsr_get_physical_package_id(thd_stat[i].cpu_id);
2818         }
2819 
2820         /* Retrieve all packages' power plane energy counters */
2821         for (int i = 0; i < vmsr->host_topo.maxpkgs; i++) {
2822             for (int j = 0; j < num_threads; j++) {
2823                 /*
2824                  * Use the first thread we found that ran on the CPU
2825                  * of the package to read the package's energy counter
2826                  */
2827                 if (thd_stat[j].pkg_id == i) {
2828                     pkg_stat[i].e_start =
2829                     vmsr_read_msr(MSR_PKG_ENERGY_STATUS,
2830                                   thd_stat[j].cpu_id,
2831                                   thd_stat[j].thread_id,
2832                                   s->msr_energy.sioc);
2833                     break;
2834                 }
2835             }
2836         }
2837 
2838         /* Sleep a short period while the other threads are working */
2839         usleep(MSR_ENERGY_THREAD_SLEEP_US);
2840 
2841         /*
2842          * Retrieve all packages' power plane energy counters
2843          * Calculate the delta of all packages
2844          */
2845         for (int i = 0; i < vmsr->host_topo.maxpkgs; i++) {
2846             for (int j = 0; j < num_threads; j++) {
2847                 /*
2848                  * Use the first thread we found that ran on the CPU
2849                  * of the package to read the package's energy counter
2850                  */
2851                 if (thd_stat[j].pkg_id == i) {
2852                     pkg_stat[i].e_end =
2853                     vmsr_read_msr(MSR_PKG_ENERGY_STATUS,
2854                                   thd_stat[j].cpu_id,
2855                                   thd_stat[j].thread_id,
2856                                   s->msr_energy.sioc);
2857                     /*
2858                      * Prevent the case where we have migrated the VM
2859                      * during the sleep period, or any other case
2860                      * where the energy counter might be lower after
2861                      * the sleep period.
2862                      */
2863                     if (pkg_stat[i].e_end > pkg_stat[i].e_start) {
2864                         pkg_stat[i].e_delta =
2865                             pkg_stat[i].e_end - pkg_stat[i].e_start;
2866                     } else {
2867                         pkg_stat[i].e_delta = 0;
2868                     }
2869                     break;
2870                 }
2871             }
2872         }
2873 
2874         /* Delta of ticks spent by each thread between the samples */
2875         for (int i = 0; i < num_threads; i++) {
2876             vmsr_read_thread_stat(vmsr->pid,
2877                                   thd_stat[i].thread_id,
2878                                   &thd_stat[i].utime[1],
2879                                   &thd_stat[i].stime[1],
2880                                   &thd_stat[i].cpu_id);
2881 
2882             if (vmsr->pid < 0) {
2883                 /*
2884                  * We don't count dead threads,
2885                  * i.e. threads that existed before the sleep
2886                  * and no longer do
2887                  */
2888                 thd_stat[i].delta_ticks = 0;
2889             } else {
2890                 vmsr_delta_ticks(thd_stat, i);
2891             }
2892         }
2893 
2894         /*
2895          * Identify the vCPU threads
2896          * Calculate the number of vCPUs per package
2897          */
2898         CPU_FOREACH(cpu) {
2899             for (int i = 0; i < num_threads; i++) {
2900                 if (cpu->thread_id == thd_stat[i].thread_id) {
2901                     thd_stat[i].is_vcpu = true;
2902                     thd_stat[i].vcpu_id = cpu->cpu_index;
2903                     pkg_stat[thd_stat[i].pkg_id].nb_vcpu++;
2904                     thd_stat[i].acpi_id = kvm_arch_vcpu_id(cpu);
2905                     break;
2906                 }
2907             }
2908         }
2909 
2910         /* Retrieve the virtual package number of each vCPU */
2911         for (int i = 0; i < vmsr->guest_cpu_list->len; i++) {
2912             for (int j = 0; j < num_threads; j++) {
2913                 if ((thd_stat[j].acpi_id ==
2914                         vmsr->guest_cpu_list->cpus[i].arch_id)
2915                     && (thd_stat[j].is_vcpu == true)) {
2916                     x86_topo_ids_from_apicid(thd_stat[j].acpi_id,
2917                         &vmsr->guest_topo_info, &topo_ids);
2918                     thd_stat[j].vpkg_id = topo_ids.pkg_id;
2919                 }
2920             }
2921         }
2922 
2923         /* Calculate the total energy of all non-vCPU threads */
2924         for (int i = 0; i < num_threads; i++) {
2925             if ((thd_stat[i].is_vcpu != true) &&
2926                 (thd_stat[i].delta_ticks > 0)) {
2927                 double temp;
2928                 temp = vmsr_get_ratio(pkg_stat[thd_stat[i].pkg_id].e_delta,
2929                     thd_stat[i].delta_ticks,
2930                     vmsr->host_topo.maxticks[thd_stat[i].pkg_id]);
2931                 pkg_stat[thd_stat[i].pkg_id].e_ratio
2932                     += (uint64_t)lround(temp);
2933             }
2934         }
2935 
2936         /* Calculate the ratio per non-vCPU thread of each package */
2937         for (int i = 0; i < vmsr->host_topo.maxpkgs; i++) {
2938             if (pkg_stat[i].nb_vcpu > 0) {
2939                 pkg_stat[i].e_ratio = pkg_stat[i].e_ratio / pkg_stat[i].nb_vcpu;
2940             }
2941         }
2942 
2943         /*
2944          * Calculate the energy for each package:
2945          * package energy = sum of the energy of each vCPU belonging to it
2946          */
2947         for (int i = 0; i < num_threads; i++) {
2948             if ((thd_stat[i].is_vcpu == true) && \
2949                     (thd_stat[i].delta_ticks > 0)) {
2950                 double temp;
2951                 temp = vmsr_get_ratio(pkg_stat[thd_stat[i].pkg_id].e_delta,
2952                     thd_stat[i].delta_ticks,
2953                     vmsr->host_topo.maxticks[thd_stat[i].pkg_id]);
2954                 vpkgs_energy_stat[thd_stat[i].vpkg_id] +=
2955                     (uint64_t)lround(temp);
2956                 vpkgs_energy_stat[thd_stat[i].vpkg_id] +=
2957                     pkg_stat[thd_stat[i].pkg_id].e_ratio;
2958             }
2959         }
2960 
2961         /*
2962          * Finally populate the vmsr register of each vCPU with the total
2963          * package value, emulating real hardware where each CPU returns the
2964          * value of the package it belongs to.
2965          */
2966         for (int i = 0; i < num_threads; i++) {
2967             if ((thd_stat[i].is_vcpu == true) && \
2968                     (thd_stat[i].delta_ticks > 0)) {
2969                 vmsr->msr_value[thd_stat[i].vcpu_id] = \
2970                                         vpkgs_energy_stat[thd_stat[i].vpkg_id];
2971             }
2972         }
2973 
2974         /* Free the per-thread arrays before they are zeroed on the next pass */
2975         for (int i = 0; i < num_threads; i++) {
2976             g_free(thd_stat[i].utime);
2977             g_free(thd_stat[i].stime);
2978         }
2979     }
2980 
2981 clean:
2982     rcu_unregister_thread();
2983     return NULL;
2984 }
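/*
 * Rough shape of the attribution above (a sketch, assuming vmsr_get_ratio()
 * scales the package energy delta by a thread's share of the package's
 * maximum ticks):
 *
 *   vcpu_energy = pkg_delta * vcpu_ticks / max_pkg_ticks
 *   vpkg_energy = sum of vcpu_energy over the virtual package
 *                 + a per-vCPU share of non-vCPU thread energy
 */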
2985 
2986 static int kvm_msr_energy_thread_init(KVMState *s, MachineState *ms)
2987 {
2988     MachineClass *mc = MACHINE_GET_CLASS(ms);
2989     struct KVMMsrEnergy *r = &s->msr_energy;
2990 
2991     /*
2992      * Sanity check
2993      * 1. The host CPU must be an Intel CPU
2994      * 2. RAPL must be enabled on the host
2995      */
2996     if (!is_host_cpu_intel()) {
2997         error_report("The RAPL feature can only be enabled on hosts "
2998                      "with Intel CPU models");
2999         return -1;
3000     }
3001 
3002     if (!is_rapl_enabled()) {
3003         return -1;
3004     }
3005 
3006     /* Retrieve the virtual topology */
3007     vmsr_init_topo_info(&r->guest_topo_info, ms);
3008 
3009     /* Retrieve the number of vCPUs */
3010     r->guest_vcpus = ms->smp.cpus;
3011 
3012     /* Retrieve the number of virtual sockets */
3013     r->guest_vsockets = ms->smp.sockets;
3014 
3015     /* Allocate register memory (MSR_PKG_STATUS) for each vcpu */
3016     r->msr_value = g_new0(uint64_t, r->guest_vcpus);
3017 
3018     /* Retrieve the CPUArchIdList */
3019     r->guest_cpu_list = mc->possible_cpu_arch_ids(ms);
3020 
3021     /* Max number of CPUs on the host */
3022     r->host_topo.maxcpus = vmsr_get_maxcpus();
3023     if (r->host_topo.maxcpus == 0) {
3024         error_report("host max cpus = 0");
3025         return -1;
3026     }
3027 
3028     /* Max number of packages on the host */
3029     r->host_topo.maxpkgs = vmsr_get_max_physical_package(r->host_topo.maxcpus);
3030     if (r->host_topo.maxpkgs == 0) {
3031         error_report("host max pkgs = 0");
3032         return -1;
3033     }
3034 
3035     /* Allocate memory for each package on the host */
3036     r->host_topo.pkg_cpu_count = g_new0(unsigned int, r->host_topo.maxpkgs);
3037     r->host_topo.maxticks = g_new0(unsigned int, r->host_topo.maxpkgs);
3038 
3039     vmsr_count_cpus_per_package(r->host_topo.pkg_cpu_count,
3040                                 r->host_topo.maxpkgs);
3041     for (int i = 0; i < r->host_topo.maxpkgs; i++) {
3042         if (r->host_topo.pkg_cpu_count[i] == 0) {
3043             error_report("cpu per packages = 0 on package_%d", i);
3044             return -1;
3045         }
3046     }
3047 
3048     /* Get QEMU PID */
3049     r->pid = getpid();
3050 
3051     /* Compute the socket path if necessary */
3052     if (s->msr_energy.socket_path == NULL) {
3053         s->msr_energy.socket_path = vmsr_compute_default_paths();
3054     }
3055 
3056     /* Open socket with vmsr helper */
3057     s->msr_energy.sioc = vmsr_open_socket(s->msr_energy.socket_path);
3058 
3059     if (s->msr_energy.sioc == NULL) {
3060         error_report("vmsr socket opening failed");
3061         return -1;
3062     }
3063 
3064     /* Those MSR values should not change */
3065     r->msr_unit  = vmsr_read_msr(MSR_RAPL_POWER_UNIT, 0, r->pid,
3066                                     s->msr_energy.sioc);
3067     r->msr_limit = vmsr_read_msr(MSR_PKG_POWER_LIMIT, 0, r->pid,
3068                                     s->msr_energy.sioc);
3069     r->msr_info  = vmsr_read_msr(MSR_PKG_POWER_INFO, 0, r->pid,
3070                                     s->msr_energy.sioc);
3071     if (r->msr_unit == 0 || r->msr_limit == 0 || r->msr_info == 0) {
3072         error_report("can't read any virtual msr");
3073         return -1;
3074     }
3075 
3076     qemu_thread_create(&r->msr_thr, "kvm-msr",
3077                        kvm_msr_energy_thread,
3078                        s, QEMU_THREAD_JOINABLE);
3079     return 0;
3080 }
3081 
3082 int kvm_arch_get_default_type(MachineState *ms)
3083 {
3084     return 0;
3085 }
3086 
3087 static int kvm_vm_enable_exception_payload(KVMState *s)
3088 {
3089     int ret = 0;
3090     has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD);
3091     if (has_exception_payload) {
3092         ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true);
3093         if (ret < 0) {
3094             error_report("kvm: Failed to enable exception payload cap: %s",
3095                          strerror(-ret));
3096         }
3097     }
3098 
3099     return ret;
3100 }
3101 
3102 static int kvm_vm_enable_triple_fault_event(KVMState *s)
3103 {
3104     int ret = 0;
3105     has_triple_fault_event =
3106         kvm_check_extension(s,
3107                             KVM_CAP_X86_TRIPLE_FAULT_EVENT);
3108     if (has_triple_fault_event) {
3109         ret = kvm_vm_enable_cap(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 0, true);
3110         if (ret < 0) {
3111             error_report("kvm: Failed to enable triple fault event cap: %s",
3112                          strerror(-ret));
3113         }
3114     }
3115     return ret;
3116 }
3117 
3118 static int kvm_vm_set_identity_map_addr(KVMState *s, uint64_t identity_base)
3119 {
3120     return kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
3121 }
3122 
3123 static int kvm_vm_set_nr_mmu_pages(KVMState *s)
3124 {
3125     uint64_t shadow_mem;
3126     int ret = 0;
3127     shadow_mem = object_property_get_int(OBJECT(s),
3128                                          "kvm-shadow-mem",
3129                                          &error_abort);
3130     if (shadow_mem != -1) {
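        /* "kvm-shadow-mem" is given in bytes; KVM expects 4KiB pages. */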
3131         shadow_mem /= 4096;
3132         ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
3133     }
3134     return ret;
3135 }
3136 
3137 static int kvm_vm_set_tss_addr(KVMState *s, uint64_t tss_base)
3138 {
3139     return kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, tss_base);
3140 }
3141 
3142 static int kvm_vm_enable_disable_exits(KVMState *s)
3143 {
3144     int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
3145 
3146     if (disable_exits) {
3147         disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
3148                           KVM_X86_DISABLE_EXITS_HLT |
3149                           KVM_X86_DISABLE_EXITS_PAUSE |
3150                           KVM_X86_DISABLE_EXITS_CSTATE);
3151     }
3152 
3153     return kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
3154                              disable_exits);
3155 }
3156 
3157 static int kvm_vm_enable_bus_lock_exit(KVMState *s)
3158 {
3159     int ret = 0;
3160     ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT);
3161     if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) {
3162         error_report("kvm: bus lock detection unsupported");
3163         return -ENOTSUP;
3164     }
3165     ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0,
3166                             KVM_BUS_LOCK_DETECTION_EXIT);
3167     if (ret < 0) {
3168         error_report("kvm: Failed to enable bus lock detection cap: %s",
3169                      strerror(-ret));
3170     }
3171 
3172     return ret;
3173 }
3174 
3175 static int kvm_vm_enable_notify_vmexit(KVMState *s)
3176 {
3177     int ret = 0;
3178     if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE) {
3179         uint64_t notify_window_flags =
3180             ((uint64_t)s->notify_window << 32) |
3181             KVM_X86_NOTIFY_VMEXIT_ENABLED |
3182             KVM_X86_NOTIFY_VMEXIT_USER;
3183         ret = kvm_vm_enable_cap(s, KVM_CAP_X86_NOTIFY_VMEXIT, 0,
3184                                 notify_window_flags);
3185         if (ret < 0) {
3186             error_report("kvm: Failed to enable notify vmexit cap: %s",
3187                          strerror(-ret));
3188         }
3189     }
3190     return ret;
3191 }
3192 
3193 static int kvm_vm_enable_userspace_msr(KVMState *s)
3194 {
3195     int ret;
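    /*
     * With KVM_MSR_EXIT_REASON_FILTER, only accesses to MSRs matched by an
     * installed filter exit to userspace; all other guest MSR accesses are
     * still handled inside KVM.
     */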
3196 
3197     ret = kvm_vm_enable_cap(s, KVM_CAP_X86_USER_SPACE_MSR, 0,
3198                             KVM_MSR_EXIT_REASON_FILTER);
3199     if (ret < 0) {
3200         error_report("Could not enable user space MSRs: %s",
3201                      strerror(-ret));
3202         exit(1);
3203     }
3204 
3205     ret = kvm_filter_msr(s, MSR_CORE_THREAD_COUNT,
3206                          kvm_rdmsr_core_thread_count, NULL);
3207     if (ret < 0) {
3208         error_report("Could not install MSR_CORE_THREAD_COUNT handler: %s",
3209                      strerror(-ret));
3210         exit(1);
3211     }
3212 
3213     return 0;
3214 }
3215 
3216 static int kvm_vm_enable_energy_msrs(KVMState *s)
3217 {
3218     int ret;
3219 
3220     if (s->msr_energy.enable == true) {
3221         ret = kvm_filter_msr(s, MSR_RAPL_POWER_UNIT,
3222                              kvm_rdmsr_rapl_power_unit, NULL);
3223         if (ret < 0) {
3224             error_report("Could not install MSR_RAPL_POWER_UNIT handler: %s",
3225                          strerror(-ret));
3226             return ret;
3227         }
3228 
3229         ret = kvm_filter_msr(s, MSR_PKG_POWER_LIMIT,
3230                              kvm_rdmsr_pkg_power_limit, NULL);
3231         if (ret < 0) {
3232             error_report("Could not install MSR_PKG_POWER_LIMIT handler: %s",
3233                          strerror(-ret));
3234             return ret;
3235         }
3236 
3237         ret = kvm_filter_msr(s, MSR_PKG_POWER_INFO,
3238                              kvm_rdmsr_pkg_power_info, NULL);
3239         if (ret < 0) {
3240             error_report("Could not install MSR_PKG_POWER_INFO handler: %s",
3241                          strerror(-ret));
3242             return ret;
3243         }
3244         ret = kvm_filter_msr(s, MSR_PKG_ENERGY_STATUS,
3245                              kvm_rdmsr_pkg_energy_status, NULL);
3246         if (ret < 0) {
3247             error_report("Could not install MSR_PKG_ENERGY_STATUS handler: %s",
3248                          strerror(-ret));
3249             return ret;
3250         }
3251     }
3252     return 0;
3253 }
3254 
3255 int kvm_arch_init(MachineState *ms, KVMState *s)
3256 {
3257     int ret;
3258     struct utsname utsname;
3259     Error *local_err = NULL;
3260 
3261     /*
3262      * Initialize confidential guest (SEV/TDX) context, if required
3263      */
3264     if (ms->cgs) {
3265         ret = confidential_guest_kvm_init(ms->cgs, &local_err);
3266         if (ret < 0) {
3267             error_report_err(local_err);
3268             return ret;
3269         }
3270     }
3271 
3272     has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
3273     has_sregs2 = kvm_check_extension(s, KVM_CAP_SREGS2) > 0;
3274 
3275     hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);
3276 
3277     ret = kvm_vm_enable_exception_payload(s);
3278     if (ret < 0) {
3279         return ret;
3280     }
3281 
3282     ret = kvm_vm_enable_triple_fault_event(s);
3283     if (ret < 0) {
3284         return ret;
3285     }
3286 
3287     if (s->xen_version) {
3288 #ifdef CONFIG_XEN_EMU
3289         if (!object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE)) {
3290             error_report("kvm: Xen support only available in PC machine");
3291             return -ENOTSUP;
3292         }
3293         /* hyperv_enabled() doesn't work yet. */
3294         uint32_t msr = XEN_HYPERCALL_MSR;
3295         ret = kvm_xen_init(s, msr);
3296         if (ret < 0) {
3297             return ret;
3298         }
3299 #else
3300         error_report("kvm: Xen support not enabled in qemu");
3301         return -ENOTSUP;
3302 #endif
3303     }
3304 
3305     ret = kvm_get_supported_msrs(s);
3306     if (ret < 0) {
3307         return ret;
3308     }
3309 
3310     ret = kvm_get_supported_feature_msrs(s);
3311     if (ret < 0) {
3312         return ret;
3313     }
3314 
3315     uname(&utsname);
3316     lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
3317 
3318     ret = kvm_vm_set_identity_map_addr(s, KVM_IDENTITY_BASE);
3319     if (ret < 0) {
3320         return ret;
3321     }
3322 
3323     /* Set TSS base one page after EPT identity map. */
3324     ret = kvm_vm_set_tss_addr(s, KVM_IDENTITY_BASE + 0x1000);
3325     if (ret < 0) {
3326         return ret;
3327     }
3328 
3329     /* Tell fw_cfg to notify the BIOS to reserve the range. */
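    /* One page for the identity map plus three pages for the vm86 TSS. */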
3330     e820_add_entry(KVM_IDENTITY_BASE, 0x4000, E820_RESERVED);
3331 
3332     ret = kvm_vm_set_nr_mmu_pages(s);
3333     if (ret < 0) {
3334         return ret;
3335     }
3336 
3337     if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
3338         object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE) &&
3339         x86_machine_is_smm_enabled(X86_MACHINE(ms))) {
3340         smram_machine_done.notify = register_smram_listener;
3341         qemu_add_machine_init_done_notifier(&smram_machine_done);
3342     }
3343 
3344     if (enable_cpu_pm) {
3345         ret = kvm_vm_enable_disable_exits(s);
3346         if (ret < 0) {
3347             error_report("kvm: guest stopping CPU not supported: %s",
3348                          strerror(-ret));
3349             return ret;
3350         }
3351     }
3352 
3353     if (object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE)) {
3354         X86MachineState *x86ms = X86_MACHINE(ms);
3355 
3356         if (x86ms->bus_lock_ratelimit > 0) {
3357             ret = kvm_vm_enable_bus_lock_exit(s);
3358             if (ret < 0) {
3359                 return ret;
3360             }
3361             ratelimit_init(&bus_lock_ratelimit_ctrl);
3362             ratelimit_set_speed(&bus_lock_ratelimit_ctrl,
3363                                 x86ms->bus_lock_ratelimit, BUS_LOCK_SLICE_TIME);
3364         }
3365     }
3366 
3367     if (kvm_check_extension(s, KVM_CAP_X86_NOTIFY_VMEXIT)) {
3368         ret = kvm_vm_enable_notify_vmexit(s);
3369         if (ret < 0) {
3370             return ret;
3371         }
3372     }
3373 
3374     if (kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) {
3375         ret = kvm_vm_enable_userspace_msr(s);
3376         if (ret < 0) {
3377             return ret;
3378         }
3379 
3380         if (s->msr_energy.enable == true) {
3381             ret = kvm_vm_enable_energy_msrs(s);
3382             if (ret < 0) {
3383                 return ret;
3384             }
3385 
3386             ret = kvm_msr_energy_thread_init(s, ms);
3387             if (ret < 0) {
3388                 error_report("kvm : error RAPL feature requirement not met");
3389                 return ret;
3390             }
3391         }
3392     }
3393 
3394     return 0;
3395 }
3396 
3397 static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
3398 {
3399     lhs->selector = rhs->selector;
3400     lhs->base = rhs->base;
3401     lhs->limit = rhs->limit;
3402     lhs->type = 3;
3403     lhs->present = 1;
3404     lhs->dpl = 3;
3405     lhs->db = 0;
3406     lhs->s = 1;
3407     lhs->l = 0;
3408     lhs->g = 0;
3409     lhs->avl = 0;
3410     lhs->unusable = 0;
3411 }
3412 
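/*
 * set_seg()/get_seg() translate between QEMU's packed descriptor-flag word
 * (DESC_*_MASK / DESC_*_SHIFT layout) and KVM's unpacked struct kvm_segment
 * fields.
 */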
3413 static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
3414 {
3415     unsigned flags = rhs->flags;
3416     lhs->selector = rhs->selector;
3417     lhs->base = rhs->base;
3418     lhs->limit = rhs->limit;
3419     lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
3420     lhs->present = (flags & DESC_P_MASK) != 0;
3421     lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
3422     lhs->db = (flags >> DESC_B_SHIFT) & 1;
3423     lhs->s = (flags & DESC_S_MASK) != 0;
3424     lhs->l = (flags >> DESC_L_SHIFT) & 1;
3425     lhs->g = (flags & DESC_G_MASK) != 0;
3426     lhs->avl = (flags & DESC_AVL_MASK) != 0;
3427     lhs->unusable = !lhs->present;
3428     lhs->padding = 0;
3429 }
3430 
3431 static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
3432 {
3433     lhs->selector = rhs->selector;
3434     lhs->base = rhs->base;
3435     lhs->limit = rhs->limit;
3436     lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
3437                  ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
3438                  (rhs->dpl << DESC_DPL_SHIFT) |
3439                  (rhs->db << DESC_B_SHIFT) |
3440                  (rhs->s * DESC_S_MASK) |
3441                  (rhs->l << DESC_L_SHIFT) |
3442                  (rhs->g * DESC_G_MASK) |
3443                  (rhs->avl * DESC_AVL_MASK);
3444 }
3445 
3446 static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
3447 {
3448     if (set) {
3449         *kvm_reg = *qemu_reg;
3450     } else {
3451         *qemu_reg = *kvm_reg;
3452     }
3453 }
3454 
3455 static int kvm_getput_regs(X86CPU *cpu, int set)
3456 {
3457     CPUX86State *env = &cpu->env;
3458     struct kvm_regs regs;
3459     int ret = 0;
3460 
3461     if (!set) {
3462         ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
3463         if (ret < 0) {
3464             return ret;
3465         }
3466     }
3467 
3468     kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
3469     kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
3470     kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
3471     kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
3472     kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
3473     kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
3474     kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
3475     kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
3476 #ifdef TARGET_X86_64
3477     kvm_getput_reg(&regs.r8, &env->regs[8], set);
3478     kvm_getput_reg(&regs.r9, &env->regs[9], set);
3479     kvm_getput_reg(&regs.r10, &env->regs[10], set);
3480     kvm_getput_reg(&regs.r11, &env->regs[11], set);
3481     kvm_getput_reg(&regs.r12, &env->regs[12], set);
3482     kvm_getput_reg(&regs.r13, &env->regs[13], set);
3483     kvm_getput_reg(&regs.r14, &env->regs[14], set);
3484     kvm_getput_reg(&regs.r15, &env->regs[15], set);
3485 #endif
3486 
3487     kvm_getput_reg(&regs.rflags, &env->eflags, set);
3488     kvm_getput_reg(&regs.rip, &env->eip, set);
3489 
3490     if (set) {
3491         ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
3492     }
3493 
3494     return ret;
3495 }
3496 
3497 static int kvm_put_xsave(X86CPU *cpu)
3498 {
3499     CPUX86State *env = &cpu->env;
3500     void *xsave = env->xsave_buf;
3501 
3502     x86_cpu_xsave_all_areas(cpu, xsave, env->xsave_buf_len);
3503 
3504     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
3505 }
3506 
3507 static int kvm_put_xcrs(X86CPU *cpu)
3508 {
3509     CPUX86State *env = &cpu->env;
3510     struct kvm_xcrs xcrs = {};
3511 
3512     if (!has_xcrs) {
3513         return 0;
3514     }
3515 
3516     xcrs.nr_xcrs = 1;
3517     xcrs.flags = 0;
3518     xcrs.xcrs[0].xcr = 0;
3519     xcrs.xcrs[0].value = env->xcr0;
3520     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
3521 }
3522 
3523 static int kvm_put_sregs(X86CPU *cpu)
3524 {
3525     CPUX86State *env = &cpu->env;
3526     struct kvm_sregs sregs;
3527 
3528     /*
3529      * The interrupt_bitmap is ignored because KVM_SET_SREGS is
3530      * always followed by KVM_SET_VCPU_EVENTS.
3531      */
3532     memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
3533 
3534     if ((env->eflags & VM_MASK)) {
3535         set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
3536         set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
3537         set_v8086_seg(&sregs.es, &env->segs[R_ES]);
3538         set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
3539         set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
3540         set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
3541     } else {
3542         set_seg(&sregs.cs, &env->segs[R_CS]);
3543         set_seg(&sregs.ds, &env->segs[R_DS]);
3544         set_seg(&sregs.es, &env->segs[R_ES]);
3545         set_seg(&sregs.fs, &env->segs[R_FS]);
3546         set_seg(&sregs.gs, &env->segs[R_GS]);
3547         set_seg(&sregs.ss, &env->segs[R_SS]);
3548     }
3549 
3550     set_seg(&sregs.tr, &env->tr);
3551     set_seg(&sregs.ldt, &env->ldt);
3552 
3553     sregs.idt.limit = env->idt.limit;
3554     sregs.idt.base = env->idt.base;
3555     memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
3556     sregs.gdt.limit = env->gdt.limit;
3557     sregs.gdt.base = env->gdt.base;
3558     memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
3559 
3560     sregs.cr0 = env->cr[0];
3561     sregs.cr2 = env->cr[2];
3562     sregs.cr3 = env->cr[3];
3563     sregs.cr4 = env->cr[4];
3564 
3565     sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
3566     sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
3567 
3568     sregs.efer = env->efer;
3569 
3570     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
3571 }
3572 
3573 static int kvm_put_sregs2(X86CPU *cpu)
3574 {
3575     CPUX86State *env = &cpu->env;
3576     struct kvm_sregs2 sregs;
3577     int i;
3578 
3579     sregs.flags = 0;
3580 
3581     if ((env->eflags & VM_MASK)) {
3582         set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
3583         set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
3584         set_v8086_seg(&sregs.es, &env->segs[R_ES]);
3585         set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
3586         set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
3587         set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
3588     } else {
3589         set_seg(&sregs.cs, &env->segs[R_CS]);
3590         set_seg(&sregs.ds, &env->segs[R_DS]);
3591         set_seg(&sregs.es, &env->segs[R_ES]);
3592         set_seg(&sregs.fs, &env->segs[R_FS]);
3593         set_seg(&sregs.gs, &env->segs[R_GS]);
3594         set_seg(&sregs.ss, &env->segs[R_SS]);
3595     }
3596 
3597     set_seg(&sregs.tr, &env->tr);
3598     set_seg(&sregs.ldt, &env->ldt);
3599 
3600     sregs.idt.limit = env->idt.limit;
3601     sregs.idt.base = env->idt.base;
3602     memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
3603     sregs.gdt.limit = env->gdt.limit;
3604     sregs.gdt.base = env->gdt.base;
3605     memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
3606 
3607     sregs.cr0 = env->cr[0];
3608     sregs.cr2 = env->cr[2];
3609     sregs.cr3 = env->cr[3];
3610     sregs.cr4 = env->cr[4];
3611 
3612     sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
3613     sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
3614 
3615     sregs.efer = env->efer;
3616 
3617     if (env->pdptrs_valid) {
3618         for (i = 0; i < 4; i++) {
3619             sregs.pdptrs[i] = env->pdptrs[i];
3620         }
3621         sregs.flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID;
3622     }
3623 
3624     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS2, &sregs);
3625 }
3626 
3627 
3628 static void kvm_msr_buf_reset(X86CPU *cpu)
3629 {
3630     memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
3631 }
3632 
3633 static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
3634 {
3635     struct kvm_msrs *msrs = cpu->kvm_msr_buf;
3636     void *limit = ((void *)msrs) + MSR_BUF_SIZE;
3637     struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];
3638 
3639     assert((void *)(entry + 1) <= limit);
3640 
3641     entry->index = index;
3642     entry->reserved = 0;
3643     entry->data = value;
3644     msrs->nmsrs++;
3645 }
3646 
3647 static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
3648 {
3649     kvm_msr_buf_reset(cpu);
3650     kvm_msr_entry_add(cpu, index, value);
3651 
3652     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
3653 }
3654 
3655 static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value)
3656 {
3657     int ret;
3658     struct {
3659         struct kvm_msrs info;
3660         struct kvm_msr_entry entries[1];
3661     } msr_data = {
3662         .info.nmsrs = 1,
3663         .entries[0].index = index,
3664     };
3665 
3666     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
3667     if (ret < 0) {
3668         return ret;
3669     }
3670     assert(ret == 1);
3671     *value = msr_data.entries[0].data;
3672     return ret;
3673 }
3674 void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
3675 {
3676     int ret;
3677 
3678     ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
3679     assert(ret == 1);
3680 }
3681 
3682 static int kvm_put_tscdeadline_msr(X86CPU *cpu)
3683 {
3684     CPUX86State *env = &cpu->env;
3685     int ret;
3686 
3687     if (!has_msr_tsc_deadline) {
3688         return 0;
3689     }
3690 
3691     ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
3692     if (ret < 0) {
3693         return ret;
3694     }
3695 
3696     assert(ret == 1);
3697     return 0;
3698 }
3699 
3700 /*
3701  * Provide a separate write service for the feature control MSR in order to
3702  * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
3703  * before writing any other state because forcibly leaving nested mode
3704  * invalidates the VCPU state.
3705  */
3706 static int kvm_put_msr_feature_control(X86CPU *cpu)
3707 {
3708     int ret;
3709 
3710     if (!has_msr_feature_control) {
3711         return 0;
3712     }
3713 
3714     ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
3715                           cpu->env.msr_ia32_feature_control);
3716     if (ret < 0) {
3717         return ret;
3718     }
3719 
3720     assert(ret == 1);
3721     return 0;
3722 }
3723 
3724 static uint64_t make_vmx_msr_value(uint32_t index, uint32_t features)
3725 {
3726     uint32_t default1, can_be_one, can_be_zero;
3727     uint32_t must_be_one;
3728 
3729     switch (index) {
3730     case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3731         default1 = 0x00000016;
3732         break;
3733     case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3734         default1 = 0x0401e172;
3735         break;
3736     case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3737         default1 = 0x000011ff;
3738         break;
3739     case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3740         default1 = 0x00036dff;
3741         break;
3742     case MSR_IA32_VMX_PROCBASED_CTLS2:
3743         default1 = 0;
3744         break;
3745     default:
3746         abort();
3747     }
3748 
3749     /* If a feature bit is set, the control can be either set or clear.
3750      * Otherwise the value is limited to either 0 or 1 by default1.
3751      */
3752     can_be_one = features | default1;
3753     can_be_zero = features | ~default1;
3754     must_be_one = ~can_be_zero;
3755 
3756     /*
3757      * Bit 0:31 -> 0 if the control bit can be zero (i.e. 1 if it must be one).
3758      * Bit 32:63 -> 1 if the control bit can be one.
3759      */
3760     return must_be_one | (((uint64_t)can_be_one) << 32);
3761 }
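/*
 * Worked example with made-up inputs: if default1 == 0x16 and
 * features == 0x08, then can_be_one == 0x1e, can_be_zero == ~0x16 and
 * must_be_one == 0x16, so the returned value is 0x0000001e00000016:
 * bits 1, 2 and 4 must be one, while bits 1, 2, 3 and 4 may be one.
 */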
3762 
3763 static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f)
3764 {
3765     uint64_t kvm_vmx_basic =
3766         kvm_arch_get_supported_msr_feature(kvm_state,
3767                                            MSR_IA32_VMX_BASIC);
3768 
3769     if (!kvm_vmx_basic) {
3770         /* If the kernel doesn't support VMX feature (kvm_intel.nested=0),
3771          * then kvm_vmx_basic will be 0 and KVM_SET_MSR will fail.
3772          */
3773         return;
3774     }
3775 
3776     uint64_t kvm_vmx_misc =
3777         kvm_arch_get_supported_msr_feature(kvm_state,
3778                                            MSR_IA32_VMX_MISC);
3779     uint64_t kvm_vmx_ept_vpid =
3780         kvm_arch_get_supported_msr_feature(kvm_state,
3781                                            MSR_IA32_VMX_EPT_VPID_CAP);
3782 
3783     /*
3784      * If the guest is 64-bit, a value of 1 is allowed for the host address
3785      * space size vmexit control.
3786      */
3787     uint64_t fixed_vmx_exit = f[FEAT_8000_0001_EDX] & CPUID_EXT2_LM
3788         ? (uint64_t)VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE << 32 : 0;
3789 
3790     /*
3791      * Bits 0-30, 32-44 and 50-53 come from the host.  KVM should
3792      * not change them for backwards compatibility.
3793      */
3794     uint64_t fixed_vmx_basic = kvm_vmx_basic &
3795         (MSR_VMX_BASIC_VMCS_REVISION_MASK |
3796          MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK |
3797          MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK);
3798 
3799     /*
3800      * Same for bits 0-4 and 25-27.  Bits 16-24 (CR3 target count) can
3801      * change in the future but are always zero for now, clear them to be
3802      * future proof.  Bits 32-63 in theory could change, though KVM does
3803      * not support dual-monitor treatment and probably never will; mask
3804      * them out as well.
3805      */
3806     uint64_t fixed_vmx_misc = kvm_vmx_misc &
3807         (MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK |
3808          MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK);
3809 
3810     /*
3811      * EPT memory types should not change either, so we do not bother
3812      * adding features for them.
3813      */
3814     uint64_t fixed_vmx_ept_mask =
3815             (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_ENABLE_EPT ?
3816              MSR_VMX_EPT_UC | MSR_VMX_EPT_WB : 0);
3817     uint64_t fixed_vmx_ept_vpid = kvm_vmx_ept_vpid & fixed_vmx_ept_mask;
3818 
3819     kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
3820                       make_vmx_msr_value(MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
3821                                          f[FEAT_VMX_PROCBASED_CTLS]));
3822     kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
3823                       make_vmx_msr_value(MSR_IA32_VMX_TRUE_PINBASED_CTLS,
3824                                          f[FEAT_VMX_PINBASED_CTLS]));
3825     kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_EXIT_CTLS,
3826                       make_vmx_msr_value(MSR_IA32_VMX_TRUE_EXIT_CTLS,
3827                                          f[FEAT_VMX_EXIT_CTLS]) | fixed_vmx_exit);
3828     kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
3829                       make_vmx_msr_value(MSR_IA32_VMX_TRUE_ENTRY_CTLS,
3830                                          f[FEAT_VMX_ENTRY_CTLS]));
3831     kvm_msr_entry_add(cpu, MSR_IA32_VMX_PROCBASED_CTLS2,
3832                       make_vmx_msr_value(MSR_IA32_VMX_PROCBASED_CTLS2,
3833                                          f[FEAT_VMX_SECONDARY_CTLS]));
3834     kvm_msr_entry_add(cpu, MSR_IA32_VMX_EPT_VPID_CAP,
3835                       f[FEAT_VMX_EPT_VPID_CAPS] | fixed_vmx_ept_vpid);
3836     kvm_msr_entry_add(cpu, MSR_IA32_VMX_BASIC,
3837                       f[FEAT_VMX_BASIC] | fixed_vmx_basic);
3838     kvm_msr_entry_add(cpu, MSR_IA32_VMX_MISC,
3839                       f[FEAT_VMX_MISC] | fixed_vmx_misc);
3840     if (has_msr_vmx_vmfunc) {
3841         kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMFUNC, f[FEAT_VMX_VMFUNC]);
3842     }
3843 
3844     /*
3845      * Just to be safe, write these with constant values.  The CRn_FIXED1
3846      * MSRs are generated by KVM based on the vCPU's CPUID.
3847      */
3848     kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR0_FIXED0,
3849                       CR0_PE_MASK | CR0_PG_MASK | CR0_NE_MASK);
3850     kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0,
3851                       CR4_VMXE_MASK);
3852 
3853     if (f[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED) {
3854         /* FRED injected-event data (0x2052).  */
3855         kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x52);
3856     } else if (f[FEAT_VMX_EXIT_CTLS] &
3857                VMX_VM_EXIT_ACTIVATE_SECONDARY_CONTROLS) {
3858         /* Secondary VM-exit controls (0x2044).  */
3859         kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x44);
3860     } else if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) {
3861         /* TSC multiplier (0x2032).  */
3862         kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x32);
3863     } else {
3864         /* Preemption timer (0x482E).  */
3865         kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x2E);
3866     }
3867 }
3868 
3869 static void kvm_msr_entry_add_perf(X86CPU *cpu, FeatureWordArray f)
3870 {
3871     uint64_t kvm_perf_cap =
3872         kvm_arch_get_supported_msr_feature(kvm_state,
3873                                            MSR_IA32_PERF_CAPABILITIES);
3874 
3875     if (kvm_perf_cap) {
3876         kvm_msr_entry_add(cpu, MSR_IA32_PERF_CAPABILITIES,
3877                         kvm_perf_cap & f[FEAT_PERF_CAPABILITIES]);
3878     }
3879 }
3880 
3881 static int kvm_buf_set_msrs(X86CPU *cpu)
3882 {
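    /*
     * KVM_SET_MSRS returns the number of MSRs written successfully, so a
     * short count identifies the first entry the kernel rejected.
     */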
3883     int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
3884     if (ret < 0) {
3885         return ret;
3886     }
3887 
3888     if (ret < cpu->kvm_msr_buf->nmsrs) {
3889         struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
3890         error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
3891                      (uint32_t)e->index, (uint64_t)e->data);
3892     }
3893 
3894     assert(ret == cpu->kvm_msr_buf->nmsrs);
3895     return 0;
3896 }
3897 
3898 static void kvm_init_msrs(X86CPU *cpu)
3899 {
3900     CPUX86State *env = &cpu->env;
3901 
3902     kvm_msr_buf_reset(cpu);
3903 
3904     if (!is_tdx_vm()) {
3905         if (has_msr_arch_capabs) {
3906             kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
3907                                 env->features[FEAT_ARCH_CAPABILITIES]);
3908         }
3909 
3910         if (has_msr_core_capabs) {
3911             kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
3912                                 env->features[FEAT_CORE_CAPABILITY]);
3913         }
3914 
3915         if (has_msr_perf_capabs && cpu->enable_pmu) {
3916             kvm_msr_entry_add_perf(cpu, env->features);
3917         }
3918 
3919         /*
3920          * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
3921          * all kernels with MSR features should have them.
3922          */
3923         if (kvm_feature_msrs && cpu_has_vmx(env)) {
3924             kvm_msr_entry_add_vmx(cpu, env->features);
3925         }
3926     }
3927 
3928     if (has_msr_ucode_rev) {
3929         kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev);
3930     }
3931     assert(kvm_buf_set_msrs(cpu) == 0);
3932 }
3933 
3934 static int kvm_put_msrs(X86CPU *cpu, int level)
3935 {
3936     CPUX86State *env = &cpu->env;
3937     int i;
3938 
3939     kvm_msr_buf_reset(cpu);
3940 
3941     kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
3942     kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
3943     kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
3944     kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
3945     if (has_msr_star) {
3946         kvm_msr_entry_add(cpu, MSR_STAR, env->star);
3947     }
3948     if (has_msr_hsave_pa) {
3949         kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
3950     }
3951     if (has_msr_tsc_aux) {
3952         kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
3953     }
3954     if (has_msr_tsc_adjust) {
3955         kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
3956     }
3957     if (has_msr_misc_enable) {
3958         kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
3959                           env->msr_ia32_misc_enable);
3960     }
3961     if (has_msr_smbase) {
3962         kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
3963     }
3964     if (has_msr_smi_count) {
3965         kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count);
3966     }
3967     if (has_msr_pkrs) {
3968         kvm_msr_entry_add(cpu, MSR_IA32_PKRS, env->pkrs);
3969     }
3970     if (has_msr_bndcfgs) {
3971         kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
3972     }
3973     if (has_msr_xss) {
3974         kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
3975     }
3976     if (has_msr_umwait) {
3977         kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, env->umwait);
3978     }
3979     if (has_msr_spec_ctrl) {
3980         kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
3981     }
3982     if (has_tsc_scale_msr) {
3983         kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, env->amd_tsc_scale_msr);
3984     }
3985 
3986     if (has_msr_tsx_ctrl) {
3987         kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, env->tsx_ctrl);
3988     }
3989     if (has_msr_virt_ssbd) {
3990         kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
3991     }
3992     if (has_msr_hwcr) {
3993         kvm_msr_entry_add(cpu, MSR_K7_HWCR, env->msr_hwcr);
3994     }
3995 
3996 #ifdef TARGET_X86_64
3997     if (lm_capable_kernel) {
3998         kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
3999         kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
4000         kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
4001         kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
4002         if (env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED) {
4003             kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP0, env->fred_rsp0);
4004             kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP1, env->fred_rsp1);
4005             kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP2, env->fred_rsp2);
4006             kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP3, env->fred_rsp3);
4007             kvm_msr_entry_add(cpu, MSR_IA32_FRED_STKLVLS, env->fred_stklvls);
4008             kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP1, env->fred_ssp1);
4009             kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP2, env->fred_ssp2);
4010             kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP3, env->fred_ssp3);
4011             kvm_msr_entry_add(cpu, MSR_IA32_FRED_CONFIG, env->fred_config);
4012         }
4013     }
4014 #endif
4015 
4016     /*
4017      * The following MSRs have side effects on the guest or are too heavy
4018      * for normal writeback. Limit them to reset or full state updates.
4019      */
4020     if (level >= KVM_PUT_RESET_STATE) {
4021         kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
4022         if (env->features[FEAT_KVM] & (CPUID_KVM_CLOCK | CPUID_KVM_CLOCK2)) {
4023             kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
4024             kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
4025         }
4026         if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF_INT) {
4027             kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, env->async_pf_int_msr);
4028         }
4029         if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF) {
4030             kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
4031         }
4032         if (env->features[FEAT_KVM] & CPUID_KVM_PV_EOI) {
4033             kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
4034         }
4035         if (env->features[FEAT_KVM] & CPUID_KVM_STEAL_TIME) {
4036             kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
4037         }
4038 
4039         if (env->features[FEAT_KVM] & CPUID_KVM_POLL_CONTROL) {
4040             kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr);
4041         }
4042 
4043         if (has_architectural_pmu_version > 0) {
4044             if (has_architectural_pmu_version > 1) {
4045                 /* Stop the counter.  */
4046                 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
4047                 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
4048             }
4049 
4050             /* Set the counter values.  */
4051             for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
4052                 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
4053                                   env->msr_fixed_counters[i]);
4054             }
4055             for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
4056                 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
4057                                   env->msr_gp_counters[i]);
4058                 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
4059                                   env->msr_gp_evtsel[i]);
4060             }
4061             if (has_architectural_pmu_version > 1) {
4062                 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
4063                                   env->msr_global_status);
4064                 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
4065                                   env->msr_global_ovf_ctrl);
4066 
4067                 /* Now start the PMU.  */
4068                 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
4069                                   env->msr_fixed_ctr_ctrl);
4070                 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
4071                                   env->msr_global_ctrl);
4072             }
4073         }
4074         /*
4075          * Hyper-V partition-wide MSRs: to avoid clearing them on CPU hot-add,
4076          * only sync them to KVM on the first CPU.
4077          */
4078         if (current_cpu == first_cpu) {
4079             if (has_msr_hv_hypercall) {
4080                 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
4081                                   env->msr_hv_guest_os_id);
4082                 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
4083                                   env->msr_hv_hypercall);
4084             }
4085             if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
4086                 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
4087                                   env->msr_hv_tsc);
4088             }
4089             if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
4090                 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL,
4091                                   env->msr_hv_reenlightenment_control);
4092                 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL,
4093                                   env->msr_hv_tsc_emulation_control);
4094                 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS,
4095                                   env->msr_hv_tsc_emulation_status);
4096             }
4097             if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG) &&
4098                 has_msr_hv_syndbg_options) {
4099                 kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS,
4100                                   hyperv_syndbg_query_options());
4101             }
4102         }
4103         if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
4104             kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
4105                               env->msr_hv_vapic);
4106         }
4107         if (has_msr_hv_crash) {
4108             int j;
4109 
4110             for (j = 0; j < HV_CRASH_PARAMS; j++)
4111                 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
4112                                   env->msr_hv_crash_params[j]);
4113 
4114             kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY);
4115         }
4116         if (has_msr_hv_runtime) {
4117             kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
4118         }
4119         if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)
4120             && hv_vpindex_settable) {
4121             kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX,
4122                               hyperv_vp_index(CPU(cpu)));
4123         }
4124         if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
4125             int j;
4126 
4127             kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);
4128 
4129             kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
4130                               env->msr_hv_synic_control);
4131             kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
4132                               env->msr_hv_synic_evt_page);
4133             kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
4134                               env->msr_hv_synic_msg_page);
4135 
4136             for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
4137                 kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
4138                                   env->msr_hv_synic_sint[j]);
4139             }
4140         }
4141         if (has_msr_hv_stimer) {
4142             int j;
4143 
4144             for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
4145                 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
4146                                 env->msr_hv_stimer_config[j]);
4147             }
4148 
4149             for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
4150                 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
4151                                 env->msr_hv_stimer_count[j]);
4152             }
4153         }
4154         if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
4155             uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);
4156 
4157             kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
4158             kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
4159             kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
4160             kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
4161             kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
4162             kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
4163             kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
4164             kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
4165             kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
4166             kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
4167             kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
4168             kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
4169             for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
4170                 /* The CPU GPs if we write to a bit above the physical limit of
4171                  * the host CPU (and KVM emulates that)
4172                  */
4173                 uint64_t mask = env->mtrr_var[i].mask;
4174                 mask &= phys_mask;
4175 
4176                 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
4177                                   env->mtrr_var[i].base);
4178                 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
4179             }
4180         }
4181         if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
4182             int addr_num = kvm_arch_get_supported_cpuid(kvm_state,
4183                                                     0x14, 1, R_EAX) & 0x7;
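            /*
             * CPUID.(EAX=14H, ECX=1):EAX[2:0] enumerates how many Intel PT
             * address ranges are configurable.
             */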
4184 
4185             kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL,
4186                             env->msr_rtit_ctrl);
4187             kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS,
4188                             env->msr_rtit_status);
4189             kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE,
4190                             env->msr_rtit_output_base);
4191             kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK,
4192                             env->msr_rtit_output_mask);
4193             kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH,
4194                             env->msr_rtit_cr3_match);
4195             for (i = 0; i < addr_num; i++) {
4196                 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i,
4197                             env->msr_rtit_addrs[i]);
4198             }
4199         }
4200 
4201         if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) {
4202             kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0,
4203                               env->msr_ia32_sgxlepubkeyhash[0]);
4204             kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1,
4205                               env->msr_ia32_sgxlepubkeyhash[1]);
4206             kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2,
4207                               env->msr_ia32_sgxlepubkeyhash[2]);
4208             kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3,
4209                               env->msr_ia32_sgxlepubkeyhash[3]);
4210         }
4211 
4212         if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) {
4213             kvm_msr_entry_add(cpu, MSR_IA32_XFD,
4214                               env->msr_xfd);
4215             kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR,
4216                               env->msr_xfd_err);
4217         }
4218 
4219         if (kvm_enabled() && cpu->enable_pmu &&
4220             (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
4221             uint64_t depth;
4222             int ret;
4223 
4224             /*
4225              * Only migrate Arch LBR state when the host Arch LBR depth
4226              * equals the source guest's; this avoids a guest/host config
4227              * mismatch for the MSR and hence the unexpected misbehavior it
4228              * could cause.
4229              */
4230             ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
4231 
4232             if (ret == 1 && !!depth && depth == env->msr_lbr_depth) {
4233                 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, env->msr_lbr_ctl);
4234                 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, env->msr_lbr_depth);
4235 
4236                 for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
4237                     if (!env->lbr_records[i].from) {
4238                         continue;
4239                     }
4240                     kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i,
4241                                       env->lbr_records[i].from);
4242                     kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i,
4243                                       env->lbr_records[i].to);
4244                     kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i,
4245                                       env->lbr_records[i].info);
4246                 }
4247             }
4248         }
4249 
4250         /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
4251          *       kvm_put_msr_feature_control. */
4252     }
4253 
4254     if (env->mcg_cap) {
4255         kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
4256         kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
4257         if (has_msr_mcg_ext_ctl) {
4258             kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
4259         }
4260         for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
4261             kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
4262         }
4263     }
4264 
4265     return kvm_buf_set_msrs(cpu);
4266 }
4267 
4268 
4269 static int kvm_get_xsave(X86CPU *cpu)
4270 {
4271     CPUX86State *env = &cpu->env;
4272     void *xsave = env->xsave_buf;
4273     unsigned long type;
4274     int ret;
4275 
4276     type = has_xsave2 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE;
4277     ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave);
4278     if (ret < 0) {
4279         return ret;
4280     }
4281     x86_cpu_xrstor_all_areas(cpu, xsave, env->xsave_buf_len);
4282 
4283     return 0;
4284 }
4285 
4286 static int kvm_get_xcrs(X86CPU *cpu)
4287 {
4288     CPUX86State *env = &cpu->env;
4289     int i, ret;
4290     struct kvm_xcrs xcrs;
4291 
4292     if (!has_xcrs) {
4293         return 0;
4294     }
4295 
4296     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
4297     if (ret < 0) {
4298         return ret;
4299     }
4300 
4301     for (i = 0; i < xcrs.nr_xcrs; i++) {
4302         /* Only support xcr0 now */
4303         if (xcrs.xcrs[i].xcr == 0) {
4304             env->xcr0 = xcrs.xcrs[i].value;
4305             break;
4306         }
4307     }
4308     return 0;
4309 }
4310 
4311 static int kvm_get_sregs(X86CPU *cpu)
4312 {
4313     CPUX86State *env = &cpu->env;
4314     struct kvm_sregs sregs;
4315     int ret;
4316 
4317     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
4318     if (ret < 0) {
4319         return ret;
4320     }
4321 
4322     /*
4323      * The interrupt_bitmap is ignored because KVM_GET_SREGS is
4324      * always preceded by KVM_GET_VCPU_EVENTS.
4325      */
4326 
4327     get_seg(&env->segs[R_CS], &sregs.cs);
4328     get_seg(&env->segs[R_DS], &sregs.ds);
4329     get_seg(&env->segs[R_ES], &sregs.es);
4330     get_seg(&env->segs[R_FS], &sregs.fs);
4331     get_seg(&env->segs[R_GS], &sregs.gs);
4332     get_seg(&env->segs[R_SS], &sregs.ss);
4333 
4334     get_seg(&env->tr, &sregs.tr);
4335     get_seg(&env->ldt, &sregs.ldt);
4336 
4337     env->idt.limit = sregs.idt.limit;
4338     env->idt.base = sregs.idt.base;
4339     env->gdt.limit = sregs.gdt.limit;
4340     env->gdt.base = sregs.gdt.base;
4341 
4342     env->cr[0] = sregs.cr0;
4343     env->cr[2] = sregs.cr2;
4344     env->cr[3] = sregs.cr3;
4345     env->cr[4] = sregs.cr4;
4346 
4347     env->efer = sregs.efer;
4348     if (sev_es_enabled() && env->efer & MSR_EFER_LME &&
4349         env->cr[0] & CR0_PG_MASK) {
4350         env->efer |= MSR_EFER_LMA;
4351     }
4352 
4353     /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
4354     x86_update_hflags(env);
4355 
4356     return 0;
4357 }
4358 
4359 static int kvm_get_sregs2(X86CPU *cpu)
4360 {
4361     CPUX86State *env = &cpu->env;
4362     struct kvm_sregs2 sregs;
4363     int i, ret;
4364 
4365     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS2, &sregs);
4366     if (ret < 0) {
4367         return ret;
4368     }
4369 
4370     get_seg(&env->segs[R_CS], &sregs.cs);
4371     get_seg(&env->segs[R_DS], &sregs.ds);
4372     get_seg(&env->segs[R_ES], &sregs.es);
4373     get_seg(&env->segs[R_FS], &sregs.fs);
4374     get_seg(&env->segs[R_GS], &sregs.gs);
4375     get_seg(&env->segs[R_SS], &sregs.ss);
4376 
4377     get_seg(&env->tr, &sregs.tr);
4378     get_seg(&env->ldt, &sregs.ldt);
4379 
4380     env->idt.limit = sregs.idt.limit;
4381     env->idt.base = sregs.idt.base;
4382     env->gdt.limit = sregs.gdt.limit;
4383     env->gdt.base = sregs.gdt.base;
4384 
4385     env->cr[0] = sregs.cr0;
4386     env->cr[2] = sregs.cr2;
4387     env->cr[3] = sregs.cr3;
4388     env->cr[4] = sregs.cr4;
4389 
4390     env->efer = sregs.efer;
4391     if (sev_es_enabled() && env->efer & MSR_EFER_LME &&
4392         env->cr[0] & CR0_PG_MASK) {
4393         env->efer |= MSR_EFER_LMA;
4394     }
4395 
4396     env->pdptrs_valid = sregs.flags & KVM_SREGS2_FLAGS_PDPTRS_VALID;
4397 
4398     if (env->pdptrs_valid) {
4399         for (i = 0; i < 4; i++) {
4400             env->pdptrs[i] = sregs.pdptrs[i];
4401         }
4402     }
4403 
4404     /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
4405     x86_update_hflags(env);
4406 
4407     return 0;
4408 }
4409 
4410 static int kvm_get_msrs(X86CPU *cpu)
4411 {
4412     CPUX86State *env = &cpu->env;
4413     struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
4414     int ret, i;
4415     uint64_t mtrr_top_bits;
4416 
4417     kvm_msr_buf_reset(cpu);
4418 
4419     kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
4420     kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
4421     kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
4422     kvm_msr_entry_add(cpu, MSR_PAT, 0);
4423     if (has_msr_star) {
4424         kvm_msr_entry_add(cpu, MSR_STAR, 0);
4425     }
4426     if (has_msr_hsave_pa) {
4427         kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
4428     }
4429     if (has_msr_tsc_aux) {
4430         kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
4431     }
4432     if (has_msr_tsc_adjust) {
4433         kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
4434     }
4435     if (has_msr_tsc_deadline) {
4436         kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
4437     }
4438     if (has_msr_misc_enable) {
4439         kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
4440     }
4441     if (has_msr_smbase) {
4442         kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
4443     }
4444     if (has_msr_smi_count) {
4445         kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
4446     }
4447     if (has_msr_feature_control) {
4448         kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
4449     }
4450     if (has_msr_pkrs) {
4451         kvm_msr_entry_add(cpu, MSR_IA32_PKRS, 0);
4452     }
4453     if (has_msr_bndcfgs) {
4454         kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
4455     }
4456     if (has_msr_xss) {
4457         kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
4458     }
4459     if (has_msr_umwait) {
4460         kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, 0);
4461     }
4462     if (has_msr_spec_ctrl) {
4463         kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
4464     }
4465     if (has_tsc_scale_msr) {
4466         kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, 0);
4467     }
4468 
4469     if (has_msr_tsx_ctrl) {
4470         kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, 0);
4471     }
4472     if (has_msr_virt_ssbd) {
4473         kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
4474     }
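    /*
     * Re-read the TSC only while the VM is running; once it is stopped,
     * the last value read stays valid until the next run.
     */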
4475     if (!env->tsc_valid) {
4476         kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
4477         env->tsc_valid = !runstate_is_running();
4478     }
4479     if (has_msr_hwcr) {
4480         kvm_msr_entry_add(cpu, MSR_K7_HWCR, 0);
4481     }
4482 
4483 #ifdef TARGET_X86_64
4484     if (lm_capable_kernel) {
4485         kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
4486         kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
4487         kvm_msr_entry_add(cpu, MSR_FMASK, 0);
4488         kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
4489         if (env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED) {
4490             kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP0, 0);
4491             kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP1, 0);
4492             kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP2, 0);
4493             kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP3, 0);
4494             kvm_msr_entry_add(cpu, MSR_IA32_FRED_STKLVLS, 0);
4495             kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP1, 0);
4496             kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP2, 0);
4497             kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP3, 0);
4498             kvm_msr_entry_add(cpu, MSR_IA32_FRED_CONFIG, 0);
4499         }
4500     }
4501 #endif
4502     if (env->features[FEAT_KVM] & (CPUID_KVM_CLOCK | CPUID_KVM_CLOCK2)) {
4503         kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
4504         kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
4505     }
4506     if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF_INT) {
4507         kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, 0);
4508     }
4509     if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF) {
4510         kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
4511     }
4512     if (env->features[FEAT_KVM] & CPUID_KVM_PV_EOI) {
4513         kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
4514     }
4515     if (env->features[FEAT_KVM] & CPUID_KVM_STEAL_TIME) {
4516         kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
4517     }
4518     if (env->features[FEAT_KVM] & CPUID_KVM_POLL_CONTROL) {
4519         kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
4520     }
4521     if (has_architectural_pmu_version > 0) {
4522         if (has_architectural_pmu_version > 1) {
4523             kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
4524             kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
4525             kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
4526             kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
4527         }
4528         for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
4529             kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
4530         }
4531         for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
4532             kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
4533             kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
4534         }
4535     }
4536 
4537     if (env->mcg_cap) {
4538         kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
4539         kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
4540         if (has_msr_mcg_ext_ctl) {
4541             kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
4542         }
4543         for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
4544             kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
4545         }
4546     }
4547 
4548     if (has_msr_hv_hypercall) {
4549         kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
4550         kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
4551     }
4552     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
4553         kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
4554     }
4555     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
4556         kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
4557     }
4558     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
4559         kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
4560         kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
4561         kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0);
4562     }
4563     if (has_msr_hv_syndbg_options) {
4564         kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS, 0);
4565     }
4566     if (has_msr_hv_crash) {
4567         int j;
4568 
4569         for (j = 0; j < HV_CRASH_PARAMS; j++) {
4570             kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
4571         }
4572     }
4573     if (has_msr_hv_runtime) {
4574         kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
4575     }
4576     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
4577         uint32_t msr;
4578 
4579         kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
4580         kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
4581         kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
4582         for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
4583             kvm_msr_entry_add(cpu, msr, 0);
4584         }
4585     }
4586     if (has_msr_hv_stimer) {
4587         uint32_t msr;
4588 
4589         for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
4590              msr++) {
4591             kvm_msr_entry_add(cpu, msr, 0);
4592         }
4593     }
4594     if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
4595         kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
4596         kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
4597         kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
4598         kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
4599         kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
4600         kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
4601         kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
4602         kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
4603         kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
4604         kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
4605         kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
4606         kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
4607         for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
4608             kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
4609             kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
4610         }
4611     }
4612 
4613     if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
4614         int addr_num =
4615             kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;
4616 
4617         kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
4618         kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
4619         kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
4620         kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
4621         kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
4622         for (i = 0; i < addr_num; i++) {
4623             kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
4624         }
4625     }
4626 
4627     if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) {
4628         kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0, 0);
4629         kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1, 0);
4630         kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2, 0);
4631         kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, 0);
4632     }
4633 
4634     if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) {
4635         kvm_msr_entry_add(cpu, MSR_IA32_XFD, 0);
4636         kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0);
4637     }
4638 
4639     if (kvm_enabled() && cpu->enable_pmu &&
4640         (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
4641         uint64_t depth;
4642 
4643         ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
4644         if (ret == 1 && depth == ARCH_LBR_NR_ENTRIES) {
4645             kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, 0);
4646             kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, 0);
4647 
4648             for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
4649                 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i, 0);
4650                 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i, 0);
4651                 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i, 0);
4652             }
4653         }
4654     }
4655 
4656     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
4657     if (ret < 0) {
4658         return ret;
4659     }
4660 
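    /*
     * KVM_GET_MSRS returns the number of MSRs actually processed; on a
     * partial read, entry 'ret' is the first MSR the kernel could not read.
     */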
4661     if (ret < cpu->kvm_msr_buf->nmsrs) {
4662         struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
4663         error_report("error: failed to get MSR 0x%" PRIx32,
4664                      (uint32_t)e->index);
4665     }
4666 
4667     assert(ret == cpu->kvm_msr_buf->nmsrs);
4668     /*
4669      * MTRR masks: Each mask consists of 5 parts
4670      * a  10..0 : must be zero
4671      * b  11    : valid bit
4672      * c n-1..12: actual mask bits
4673      * d  51..n : reserved, must be zero
4674      * e  63..52: reserved, must be zero
4675      *
4676      * 'n' is the number of physical bits supported by the CPU and is
4677      * apparently always <= 52.  We know our 'n' but don't know what
4678      * the destination's 'n' is; it might be smaller, in which case
4679      * it masks (c) on loading. It might be larger, in which case
4680      * we fill 'd' so that d..c is consistent irrespective of the 'n'
4681      * we're migrating to.
4682      */
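    /*
     * Worked example (hypothetical phys_bits of 46): MAKE_64BIT_MASK(46, 6)
     * sets bits 51..46, so OR-ing mtrr_top_bits into each mask read back
     * from KVM fills part (d) up to the architectural limit of bit 51.
     */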
4683 
4684     if (cpu->fill_mtrr_mask) {
4685         QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
4686         assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
4687         mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
4688     } else {
4689         mtrr_top_bits = 0;
4690     }
4691 
4692     for (i = 0; i < ret; i++) {
4693         uint32_t index = msrs[i].index;
4694         switch (index) {
4695         case MSR_IA32_SYSENTER_CS:
4696             env->sysenter_cs = msrs[i].data;
4697             break;
4698         case MSR_IA32_SYSENTER_ESP:
4699             env->sysenter_esp = msrs[i].data;
4700             break;
4701         case MSR_IA32_SYSENTER_EIP:
4702             env->sysenter_eip = msrs[i].data;
4703             break;
4704         case MSR_PAT:
4705             env->pat = msrs[i].data;
4706             break;
4707         case MSR_STAR:
4708             env->star = msrs[i].data;
4709             break;
4710 #ifdef TARGET_X86_64
4711         case MSR_CSTAR:
4712             env->cstar = msrs[i].data;
4713             break;
4714         case MSR_KERNELGSBASE:
4715             env->kernelgsbase = msrs[i].data;
4716             break;
4717         case MSR_FMASK:
4718             env->fmask = msrs[i].data;
4719             break;
4720         case MSR_LSTAR:
4721             env->lstar = msrs[i].data;
4722             break;
4723         case MSR_IA32_FRED_RSP0:
4724             env->fred_rsp0 = msrs[i].data;
4725             break;
4726         case MSR_IA32_FRED_RSP1:
4727             env->fred_rsp1 = msrs[i].data;
4728             break;
4729         case MSR_IA32_FRED_RSP2:
4730             env->fred_rsp2 = msrs[i].data;
4731             break;
4732         case MSR_IA32_FRED_RSP3:
4733             env->fred_rsp3 = msrs[i].data;
4734             break;
4735         case MSR_IA32_FRED_STKLVLS:
4736             env->fred_stklvls = msrs[i].data;
4737             break;
4738         case MSR_IA32_FRED_SSP1:
4739             env->fred_ssp1 = msrs[i].data;
4740             break;
4741         case MSR_IA32_FRED_SSP2:
4742             env->fred_ssp2 = msrs[i].data;
4743             break;
4744         case MSR_IA32_FRED_SSP3:
4745             env->fred_ssp3 = msrs[i].data;
4746             break;
4747         case MSR_IA32_FRED_CONFIG:
4748             env->fred_config = msrs[i].data;
4749             break;
4750 #endif
4751         case MSR_IA32_TSC:
4752             env->tsc = msrs[i].data;
4753             break;
4754         case MSR_TSC_AUX:
4755             env->tsc_aux = msrs[i].data;
4756             break;
4757         case MSR_TSC_ADJUST:
4758             env->tsc_adjust = msrs[i].data;
4759             break;
4760         case MSR_IA32_TSCDEADLINE:
4761             env->tsc_deadline = msrs[i].data;
4762             break;
4763         case MSR_VM_HSAVE_PA:
4764             env->vm_hsave = msrs[i].data;
4765             break;
4766         case MSR_KVM_SYSTEM_TIME:
4767             env->system_time_msr = msrs[i].data;
4768             break;
4769         case MSR_KVM_WALL_CLOCK:
4770             env->wall_clock_msr = msrs[i].data;
4771             break;
4772         case MSR_MCG_STATUS:
4773             env->mcg_status = msrs[i].data;
4774             break;
4775         case MSR_MCG_CTL:
4776             env->mcg_ctl = msrs[i].data;
4777             break;
4778         case MSR_MCG_EXT_CTL:
4779             env->mcg_ext_ctl = msrs[i].data;
4780             break;
4781         case MSR_IA32_MISC_ENABLE:
4782             env->msr_ia32_misc_enable = msrs[i].data;
4783             break;
4784         case MSR_IA32_SMBASE:
4785             env->smbase = msrs[i].data;
4786             break;
4787         case MSR_SMI_COUNT:
4788             env->msr_smi_count = msrs[i].data;
4789             break;
4790         case MSR_IA32_FEATURE_CONTROL:
4791             env->msr_ia32_feature_control = msrs[i].data;
4792             break;
4793         case MSR_IA32_BNDCFGS:
4794             env->msr_bndcfgs = msrs[i].data;
4795             break;
4796         case MSR_IA32_XSS:
4797             env->xss = msrs[i].data;
4798             break;
4799         case MSR_IA32_UMWAIT_CONTROL:
4800             env->umwait = msrs[i].data;
4801             break;
4802         case MSR_IA32_PKRS:
4803             env->pkrs = msrs[i].data;
4804             break;
4805         default:
4806             if (msrs[i].index >= MSR_MC0_CTL &&
4807                 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
4808                 env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
4809             }
4810             break;
4811         case MSR_KVM_ASYNC_PF_EN:
4812             env->async_pf_en_msr = msrs[i].data;
4813             break;
4814         case MSR_KVM_ASYNC_PF_INT:
4815             env->async_pf_int_msr = msrs[i].data;
4816             break;
4817         case MSR_KVM_PV_EOI_EN:
4818             env->pv_eoi_en_msr = msrs[i].data;
4819             break;
4820         case MSR_KVM_STEAL_TIME:
4821             env->steal_time_msr = msrs[i].data;
4822             break;
4823         case MSR_KVM_POLL_CONTROL: {
4824             env->poll_control_msr = msrs[i].data;
4825             break;
4826         }
4827         case MSR_CORE_PERF_FIXED_CTR_CTRL:
4828             env->msr_fixed_ctr_ctrl = msrs[i].data;
4829             break;
4830         case MSR_CORE_PERF_GLOBAL_CTRL:
4831             env->msr_global_ctrl = msrs[i].data;
4832             break;
4833         case MSR_CORE_PERF_GLOBAL_STATUS:
4834             env->msr_global_status = msrs[i].data;
4835             break;
4836         case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
4837             env->msr_global_ovf_ctrl = msrs[i].data;
4838             break;
4839         case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
4840             env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
4841             break;
4842         case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
4843             env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
4844             break;
4845         case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
4846             env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
4847             break;
4848         case HV_X64_MSR_HYPERCALL:
4849             env->msr_hv_hypercall = msrs[i].data;
4850             break;
4851         case HV_X64_MSR_GUEST_OS_ID:
4852             env->msr_hv_guest_os_id = msrs[i].data;
4853             break;
4854         case HV_X64_MSR_APIC_ASSIST_PAGE:
4855             env->msr_hv_vapic = msrs[i].data;
4856             break;
4857         case HV_X64_MSR_REFERENCE_TSC:
4858             env->msr_hv_tsc = msrs[i].data;
4859             break;
4860         case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
4861             env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
4862             break;
4863         case HV_X64_MSR_VP_RUNTIME:
4864             env->msr_hv_runtime = msrs[i].data;
4865             break;
4866         case HV_X64_MSR_SCONTROL:
4867             env->msr_hv_synic_control = msrs[i].data;
4868             break;
4869         case HV_X64_MSR_SIEFP:
4870             env->msr_hv_synic_evt_page = msrs[i].data;
4871             break;
4872         case HV_X64_MSR_SIMP:
4873             env->msr_hv_synic_msg_page = msrs[i].data;
4874             break;
4875         case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
4876             env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
4877             break;
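        /*
         * Synthetic timer MSRs interleave as CONFIG/COUNT pairs, hence
         * the division by 2 to recover the timer index.
         */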
4878         case HV_X64_MSR_STIMER0_CONFIG:
4879         case HV_X64_MSR_STIMER1_CONFIG:
4880         case HV_X64_MSR_STIMER2_CONFIG:
4881         case HV_X64_MSR_STIMER3_CONFIG:
4882             env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
4883                                 msrs[i].data;
4884             break;
4885         case HV_X64_MSR_STIMER0_COUNT:
4886         case HV_X64_MSR_STIMER1_COUNT:
4887         case HV_X64_MSR_STIMER2_COUNT:
4888         case HV_X64_MSR_STIMER3_COUNT:
4889             env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
4890                                 msrs[i].data;
4891             break;
4892         case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
4893             env->msr_hv_reenlightenment_control = msrs[i].data;
4894             break;
4895         case HV_X64_MSR_TSC_EMULATION_CONTROL:
4896             env->msr_hv_tsc_emulation_control = msrs[i].data;
4897             break;
4898         case HV_X64_MSR_TSC_EMULATION_STATUS:
4899             env->msr_hv_tsc_emulation_status = msrs[i].data;
4900             break;
4901         case HV_X64_MSR_SYNDBG_OPTIONS:
4902             env->msr_hv_syndbg_options = msrs[i].data;
4903             break;
4904         case MSR_MTRRdefType:
4905             env->mtrr_deftype = msrs[i].data;
4906             break;
4907         case MSR_MTRRfix64K_00000:
4908             env->mtrr_fixed[0] = msrs[i].data;
4909             break;
4910         case MSR_MTRRfix16K_80000:
4911             env->mtrr_fixed[1] = msrs[i].data;
4912             break;
4913         case MSR_MTRRfix16K_A0000:
4914             env->mtrr_fixed[2] = msrs[i].data;
4915             break;
4916         case MSR_MTRRfix4K_C0000:
4917             env->mtrr_fixed[3] = msrs[i].data;
4918             break;
4919         case MSR_MTRRfix4K_C8000:
4920             env->mtrr_fixed[4] = msrs[i].data;
4921             break;
4922         case MSR_MTRRfix4K_D0000:
4923             env->mtrr_fixed[5] = msrs[i].data;
4924             break;
4925         case MSR_MTRRfix4K_D8000:
4926             env->mtrr_fixed[6] = msrs[i].data;
4927             break;
4928         case MSR_MTRRfix4K_E0000:
4929             env->mtrr_fixed[7] = msrs[i].data;
4930             break;
4931         case MSR_MTRRfix4K_E8000:
4932             env->mtrr_fixed[8] = msrs[i].data;
4933             break;
4934         case MSR_MTRRfix4K_F0000:
4935             env->mtrr_fixed[9] = msrs[i].data;
4936             break;
4937         case MSR_MTRRfix4K_F8000:
4938             env->mtrr_fixed[10] = msrs[i].data;
4939             break;
4940         case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
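            /*
             * Variable-range MTRR MSRs interleave: even index is
             * PhysBase(n), odd index is PhysMask(n).
             */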
4941             if (index & 1) {
4942                 env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
4943                                                                mtrr_top_bits;
4944             } else {
4945                 env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
4946             }
4947             break;
4948         case MSR_IA32_SPEC_CTRL:
4949             env->spec_ctrl = msrs[i].data;
4950             break;
4951         case MSR_AMD64_TSC_RATIO:
4952             env->amd_tsc_scale_msr = msrs[i].data;
4953             break;
4954         case MSR_IA32_TSX_CTRL:
4955             env->tsx_ctrl = msrs[i].data;
4956             break;
4957         case MSR_VIRT_SSBD:
4958             env->virt_ssbd = msrs[i].data;
4959             break;
4960         case MSR_IA32_RTIT_CTL:
4961             env->msr_rtit_ctrl = msrs[i].data;
4962             break;
4963         case MSR_IA32_RTIT_STATUS:
4964             env->msr_rtit_status = msrs[i].data;
4965             break;
4966         case MSR_IA32_RTIT_OUTPUT_BASE:
4967             env->msr_rtit_output_base = msrs[i].data;
4968             break;
4969         case MSR_IA32_RTIT_OUTPUT_MASK:
4970             env->msr_rtit_output_mask = msrs[i].data;
4971             break;
4972         case MSR_IA32_RTIT_CR3_MATCH:
4973             env->msr_rtit_cr3_match = msrs[i].data;
4974             break;
4975         case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
4976             env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
4977             break;
4978         case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
4979             env->msr_ia32_sgxlepubkeyhash[index - MSR_IA32_SGXLEPUBKEYHASH0] =
4980                            msrs[i].data;
4981             break;
4982         case MSR_IA32_XFD:
4983             env->msr_xfd = msrs[i].data;
4984             break;
4985         case MSR_IA32_XFD_ERR:
4986             env->msr_xfd_err = msrs[i].data;
4987             break;
4988         case MSR_ARCH_LBR_CTL:
4989             env->msr_lbr_ctl = msrs[i].data;
4990             break;
4991         case MSR_ARCH_LBR_DEPTH:
4992             env->msr_lbr_depth = msrs[i].data;
4993             break;
4994         case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
4995             env->lbr_records[index - MSR_ARCH_LBR_FROM_0].from = msrs[i].data;
4996             break;
4997         case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
4998             env->lbr_records[index - MSR_ARCH_LBR_TO_0].to = msrs[i].data;
4999             break;
5000         case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
5001             env->lbr_records[index - MSR_ARCH_LBR_INFO_0].info = msrs[i].data;
5002             break;
5003         case MSR_K7_HWCR:
5004             env->msr_hwcr = msrs[i].data;
5005             break;
5006         }
5007     }
5008 
5009     return 0;
5010 }
5011 
5012 static int kvm_put_mp_state(X86CPU *cpu)
5013 {
5014     struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };
5015 
5016     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
5017 }
5018 
5019 static int kvm_get_mp_state(X86CPU *cpu)
5020 {
5021     CPUState *cs = CPU(cpu);
5022     CPUX86State *env = &cpu->env;
5023     struct kvm_mp_state mp_state;
5024     int ret;
5025 
5026     ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
5027     if (ret < 0) {
5028         return ret;
5029     }
5030     env->mp_state = mp_state.mp_state;
5031     if (kvm_irqchip_in_kernel()) {
5032         cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
5033     }
5034     return 0;
5035 }
5036 
5037 static int kvm_get_apic(X86CPU *cpu)
5038 {
5039     DeviceState *apic = cpu->apic_state;
5040     struct kvm_lapic_state kapic;
5041     int ret;
5042 
5043     if (apic && kvm_irqchip_in_kernel()) {
5044         ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
5045         if (ret < 0) {
5046             return ret;
5047         }
5048 
5049         kvm_get_apic_state(apic, &kapic);
5050     }
5051     return 0;
5052 }
5053 
5054 static int kvm_put_vcpu_events(X86CPU *cpu, int level)
5055 {
5056     CPUState *cs = CPU(cpu);
5057     CPUX86State *env = &cpu->env;
5058     struct kvm_vcpu_events events = {};
5059 
5060     events.flags = 0;
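    /*
     * The KVM_VCPUEVENT_VALID_* bits in events.flags tell KVM which of
     * the optional fields below it may consume; fields whose valid bit
     * is not set are ignored by the kernel.
     */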
5061 
5062     if (has_exception_payload) {
5063         events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
5064         events.exception.pending = env->exception_pending;
5065         events.exception_has_payload = env->exception_has_payload;
5066         events.exception_payload = env->exception_payload;
5067     }
5068     events.exception.nr = env->exception_nr;
5069     events.exception.injected = env->exception_injected;
5070     events.exception.has_error_code = env->has_error_code;
5071     events.exception.error_code = env->error_code;
5072 
5073     events.interrupt.injected = (env->interrupt_injected >= 0);
5074     events.interrupt.nr = env->interrupt_injected;
5075     events.interrupt.soft = env->soft_interrupt;
5076 
5077     events.nmi.injected = env->nmi_injected;
5078     events.nmi.pending = env->nmi_pending;
5079     events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
5080 
5081     events.sipi_vector = env->sipi_vector;
5082 
5083     if (has_msr_smbase) {
5084         events.flags |= KVM_VCPUEVENT_VALID_SMM;
5085         events.smi.smm = !!(env->hflags & HF_SMM_MASK);
5086         events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
5087         if (kvm_irqchip_in_kernel()) {
5088             /* As soon as these are moved to the kernel, remove them
5089              * from cs->interrupt_request.
5090              */
5091             events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
5092             events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
5093             cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
5094         } else {
5095             /* Keep these in cs->interrupt_request.  */
5096             events.smi.pending = 0;
5097             events.smi.latched_init = 0;
5098         }
5099     }
5100 
5101     if (level >= KVM_PUT_RESET_STATE) {
5102         events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
5103         if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
5104             events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
5105         }
5106     }
5107 
5108     if (has_triple_fault_event) {
5109         events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
5110         events.triple_fault.pending = env->triple_fault_pending;
5111     }
5112 
5113     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
5114 }
5115 
5116 static int kvm_get_vcpu_events(X86CPU *cpu)
5117 {
5118     CPUX86State *env = &cpu->env;
5119     struct kvm_vcpu_events events;
5120     int ret;
5121 
5122     memset(&events, 0, sizeof(events));
5123     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
5124     if (ret < 0) {
5125         return ret;
5126     }
5127 
5128     if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
5129         env->exception_pending = events.exception.pending;
5130         env->exception_has_payload = events.exception_has_payload;
5131         env->exception_payload = events.exception_payload;
5132     } else {
5133         env->exception_pending = 0;
5134         env->exception_has_payload = false;
5135     }
5136     env->exception_injected = events.exception.injected;
5137     env->exception_nr =
5138         (env->exception_pending || env->exception_injected) ?
5139         events.exception.nr : -1;
5140     env->has_error_code = events.exception.has_error_code;
5141     env->error_code = events.exception.error_code;
5142 
5143     env->interrupt_injected =
5144         events.interrupt.injected ? events.interrupt.nr : -1;
5145     env->soft_interrupt = events.interrupt.soft;
5146 
5147     env->nmi_injected = events.nmi.injected;
5148     env->nmi_pending = events.nmi.pending;
5149     if (events.nmi.masked) {
5150         env->hflags2 |= HF2_NMI_MASK;
5151     } else {
5152         env->hflags2 &= ~HF2_NMI_MASK;
5153     }
5154 
5155     if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
5156         if (events.smi.smm) {
5157             env->hflags |= HF_SMM_MASK;
5158         } else {
5159             env->hflags &= ~HF_SMM_MASK;
5160         }
5161         if (events.smi.pending) {
5162             cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
5163         } else {
5164             cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
5165         }
5166         if (events.smi.smm_inside_nmi) {
5167             env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
5168         } else {
5169             env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
5170         }
5171         if (events.smi.latched_init) {
5172             cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
5173         } else {
5174             cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
5175         }
5176     }
5177 
5178     if (events.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) {
5179         env->triple_fault_pending = events.triple_fault.pending;
5180     }
5181 
5182     env->sipi_vector = events.sipi_vector;
5183 
5184     return 0;
5185 }
5186 
5187 static int kvm_put_debugregs(X86CPU *cpu)
5188 {
5189     CPUX86State *env = &cpu->env;
5190     struct kvm_debugregs dbgregs;
5191     int i;
5192 
5193     memset(&dbgregs, 0, sizeof(dbgregs));
5194     for (i = 0; i < 4; i++) {
5195         dbgregs.db[i] = env->dr[i];
5196     }
5197     dbgregs.dr6 = env->dr[6];
5198     dbgregs.dr7 = env->dr[7];
5199     dbgregs.flags = 0;
5200 
5201     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
5202 }
5203 
5204 static int kvm_get_debugregs(X86CPU *cpu)
5205 {
5206     CPUX86State *env = &cpu->env;
5207     struct kvm_debugregs dbgregs;
5208     int i, ret;
5209 
5210     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
5211     if (ret < 0) {
5212         return ret;
5213     }
5214     for (i = 0; i < 4; i++) {
5215         env->dr[i] = dbgregs.db[i];
5216     }
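    /* DR4 and DR5 are architectural aliases of DR6 and DR7 (CR4.DE clear). */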
5217     env->dr[4] = env->dr[6] = dbgregs.dr6;
5218     env->dr[5] = env->dr[7] = dbgregs.dr7;
5219 
5220     return 0;
5221 }
5222 
5223 static int kvm_put_nested_state(X86CPU *cpu)
5224 {
5225     CPUX86State *env = &cpu->env;
5226     int max_nested_state_len = kvm_max_nested_state_length();
5227 
5228     if (!env->nested_state) {
5229         return 0;
5230     }
5231 
5232     /*
5233      * Copy flags that are affected by reset from env->hflags and env->hflags2.
5234      */
5235     if (env->hflags & HF_GUEST_MASK) {
5236         env->nested_state->flags |= KVM_STATE_NESTED_GUEST_MODE;
5237     } else {
5238         env->nested_state->flags &= ~KVM_STATE_NESTED_GUEST_MODE;
5239     }
5240 
5241     /* Don't set KVM_STATE_NESTED_GIF_SET on VMX as it is illegal */
5242     if (cpu_has_svm(env) && (env->hflags2 & HF2_GIF_MASK)) {
5243         env->nested_state->flags |= KVM_STATE_NESTED_GIF_SET;
5244     } else {
5245         env->nested_state->flags &= ~KVM_STATE_NESTED_GIF_SET;
5246     }
5247 
5248     assert(env->nested_state->size <= max_nested_state_len);
5249     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
5250 }
5251 
5252 static int kvm_get_nested_state(X86CPU *cpu)
5253 {
5254     CPUX86State *env = &cpu->env;
5255     int max_nested_state_len = kvm_max_nested_state_length();
5256     int ret;
5257 
5258     if (!env->nested_state) {
5259         return 0;
5260     }
5261 
5262     /*
5263      * It is possible that migration restored a smaller size into
5264      * nested_state->hdr.size than what our kernel supports.
5265      * We preserve the migration origin's nested_state->hdr.size for
5266      * the call to KVM_SET_NESTED_STATE, but want our next call to
5267      * KVM_GET_NESTED_STATE to use the maximum size our kernel supports.
5268      */
5269     env->nested_state->size = max_nested_state_len;
5270 
5271     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state);
5272     if (ret < 0) {
5273         return ret;
5274     }
5275 
5276     /*
5277      * Copy flags that are affected by reset to env->hflags and env->hflags2.
5278      */
5279     if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
5280         env->hflags |= HF_GUEST_MASK;
5281     } else {
5282         env->hflags &= ~HF_GUEST_MASK;
5283     }
5284 
5285     /* Keep HF2_GIF_MASK set on !SVM as x86_cpu_pending_interrupt() needs it */
5286     if (cpu_has_svm(env)) {
5287         if (env->nested_state->flags & KVM_STATE_NESTED_GIF_SET) {
5288             env->hflags2 |= HF2_GIF_MASK;
5289         } else {
5290             env->hflags2 &= ~HF2_GIF_MASK;
5291         }
5292     }
5293 
5294     return ret;
5295 }
5296 
5297 int kvm_arch_put_registers(CPUState *cpu, int level, Error **errp)
5298 {
5299     X86CPU *x86_cpu = X86_CPU(cpu);
5300     int ret;
5301 
5302     assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
5303 
5304     /*
5305      * Put MSR_IA32_FEATURE_CONTROL first; this ensures the VM gets out of VMX
5306      * root operation upon vCPU reset. kvm_put_msr_feature_control() should also
5307      * precede kvm_put_nested_state() when 'real' nested state is set.
5308      */
5309     if (level >= KVM_PUT_RESET_STATE) {
5310         ret = kvm_put_msr_feature_control(x86_cpu);
5311         if (ret < 0) {
5312             error_setg_errno(errp, -ret, "Failed to set feature control MSR");
5313             return ret;
5314         }
5315     }
5316 
5317     /* must be before kvm_put_nested_state so that EFER.SVME is set */
5318     ret = has_sregs2 ? kvm_put_sregs2(x86_cpu) : kvm_put_sregs(x86_cpu);
5319     if (ret < 0) {
5320         error_setg_errno(errp, -ret, "Failed to set special registers");
5321         return ret;
5322     }
5323 
5324     if (level >= KVM_PUT_RESET_STATE) {
5325         ret = kvm_put_nested_state(x86_cpu);
5326         if (ret < 0) {
5327             error_setg_errno(errp, -ret, "Failed to set nested state");
5328             return ret;
5329         }
5330     }
5331 
5332     if (level == KVM_PUT_FULL_STATE) {
5333         /* We don't check for kvm_arch_set_tsc_khz() errors here,
5334          * because TSC frequency mismatch shouldn't abort migration,
5335          * unless the user explicitly asked for a more strict TSC
5336          * setting (e.g. using an explicit "tsc-freq" option).
5337          */
5338         kvm_arch_set_tsc_khz(cpu);
5339     }
5340 
5341 #ifdef CONFIG_XEN_EMU
5342     if (xen_mode == XEN_EMULATE && level == KVM_PUT_FULL_STATE) {
5343         ret = kvm_put_xen_state(cpu);
5344         if (ret < 0) {
5345             error_setg_errno(errp, -ret, "Failed to set Xen state");
5346             return ret;
5347         }
5348     }
5349 #endif
5350 
5351     ret = kvm_getput_regs(x86_cpu, 1);
5352     if (ret < 0) {
5353         error_setg_errno(errp, -ret, "Failed to set general purpose registers");
5354         return ret;
5355     }
5356     ret = kvm_put_xsave(x86_cpu);
5357     if (ret < 0) {
5358         error_setg_errno(errp, -ret, "Failed to set XSAVE");
5359         return ret;
5360     }
5361     ret = kvm_put_xcrs(x86_cpu);
5362     if (ret < 0) {
5363         error_setg_errno(errp, -ret, "Failed to set XCRs");
5364         return ret;
5365     }
5366     ret = kvm_put_msrs(x86_cpu, level);
5367     if (ret < 0) {
5368         error_setg_errno(errp, -ret, "Failed to set MSRs");
5369         return ret;
5370     }
5371     ret = kvm_put_vcpu_events(x86_cpu, level);
5372     if (ret < 0) {
5373         error_setg_errno(errp, -ret, "Failed to set vCPU events");
5374         return ret;
5375     }
5376     if (level >= KVM_PUT_RESET_STATE) {
5377         ret = kvm_put_mp_state(x86_cpu);
5378         if (ret < 0) {
5379             error_setg_errno(errp, -ret, "Failed to set MP state");
5380             return ret;
5381         }
5382     }
5383 
5384     ret = kvm_put_tscdeadline_msr(x86_cpu);
5385     if (ret < 0) {
5386         error_setg_errno(errp, -ret, "Failed to set TSC deadline MSR");
5387         return ret;
5388     }
5389     ret = kvm_put_debugregs(x86_cpu);
5390     if (ret < 0) {
5391         error_setg_errno(errp, -ret, "Failed to set debug registers");
5392         return ret;
5393     }
5394     return 0;
5395 }
5396 
5397 int kvm_arch_get_registers(CPUState *cs, Error **errp)
5398 {
5399     X86CPU *cpu = X86_CPU(cs);
5400     int ret;
5401 
5402     assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
5403 
5404     ret = kvm_get_vcpu_events(cpu);
5405     if (ret < 0) {
5406         error_setg_errno(errp, -ret, "Failed to get vCPU events");
5407         goto out;
5408     }
5409     /*
5410      * KVM_GET_MP_STATE can modify CS and RIP, call it before
5411      * KVM_GET_REGS and KVM_GET_SREGS.
5412      */
5413     ret = kvm_get_mp_state(cpu);
5414     if (ret < 0) {
5415         error_setg_errno(errp, -ret, "Failed to get MP state");
5416         goto out;
5417     }
5418     ret = kvm_getput_regs(cpu, 0);
5419     if (ret < 0) {
5420         error_setg_errno(errp, -ret, "Failed to get general purpose registers");
5421         goto out;
5422     }
5423     ret = kvm_get_xsave(cpu);
5424     if (ret < 0) {
5425         error_setg_errno(errp, -ret, "Failed to get XSAVE");
5426         goto out;
5427     }
5428     ret = kvm_get_xcrs(cpu);
5429     if (ret < 0) {
5430         error_setg_errno(errp, -ret, "Failed to get XCRs");
5431         goto out;
5432     }
5433     ret = has_sregs2 ? kvm_get_sregs2(cpu) : kvm_get_sregs(cpu);
5434     if (ret < 0) {
5435         error_setg_errno(errp, -ret, "Failed to get special registers");
5436         goto out;
5437     }
5438     ret = kvm_get_msrs(cpu);
5439     if (ret < 0) {
5440         error_setg_errno(errp, -ret, "Failed to get MSRs");
5441         goto out;
5442     }
5443     ret = kvm_get_apic(cpu);
5444     if (ret < 0) {
5445         error_setg_errno(errp, -ret, "Failed to get APIC");
5446         goto out;
5447     }
5448     ret = kvm_get_debugregs(cpu);
5449     if (ret < 0) {
5450         error_setg_errno(errp, -ret, "Failed to get debug registers");
5451         goto out;
5452     }
5453     ret = kvm_get_nested_state(cpu);
5454     if (ret < 0) {
5455         error_setg_errno(errp, -ret, "Failed to get nested state");
5456         goto out;
5457     }
5458 #ifdef CONFIG_XEN_EMU
5459     if (xen_mode == XEN_EMULATE) {
5460         ret = kvm_get_xen_state(cs);
5461         if (ret < 0) {
5462             error_setg_errno(errp, -ret, "Failed to get Xen state");
5463             goto out;
5464         }
5465     }
5466 #endif
5467     ret = 0;
5468  out:
5469     cpu_sync_bndcs_hflags(&cpu->env);
5470     return ret;
5471 }
5472 
5473 void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
5474 {
5475     X86CPU *x86_cpu = X86_CPU(cpu);
5476     CPUX86State *env = &x86_cpu->env;
5477     int ret;
5478 
5479     /* Inject NMI */
5480     if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
5481         if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
5482             bql_lock();
5483             cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
5484             bql_unlock();
5485             DPRINTF("injected NMI\n");
5486             ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
5487             if (ret < 0) {
5488                 fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
5489                         strerror(-ret));
5490             }
5491         }
5492         if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
5493             bql_lock();
5494             cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
5495             bql_unlock();
5496             DPRINTF("injected SMI\n");
5497             ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
5498             if (ret < 0) {
5499                 fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
5500                         strerror(-ret));
5501             }
5502         }
5503     }
5504 
5505     if (!kvm_pic_in_kernel()) {
5506         bql_lock();
5507     }
5508 
5509     /* Force the VCPU out of its inner loop to process any INIT requests
5510      * or (for userspace APIC, but it is cheap to combine the checks here)
5511      * pending TPR access reports.
5512      */
5513     if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
5514         if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
5515             !(env->hflags & HF_SMM_MASK)) {
5516             cpu->exit_request = 1;
5517         }
5518         if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
5519             cpu->exit_request = 1;
5520         }
5521     }
5522 
5523     if (!kvm_pic_in_kernel()) {
5524         /* Try to inject an interrupt if the guest can accept it */
5525         if (run->ready_for_interrupt_injection &&
5526             (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
5527             (env->eflags & IF_MASK)) {
5528             int irq;
5529 
5530             cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
5531             irq = cpu_get_pic_interrupt(env);
5532             if (irq >= 0) {
5533                 struct kvm_interrupt intr;
5534 
5535                 intr.irq = irq;
5536                 DPRINTF("injected interrupt %d\n", irq);
5537                 ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
5538                 if (ret < 0) {
5539                     fprintf(stderr,
5540                             "KVM: injection failed, interrupt lost (%s)\n",
5541                             strerror(-ret));
5542                 }
5543             }
5544         }
5545 
5546         /* If we have an interrupt but the guest is not ready to receive an
5547          * interrupt, request an interrupt window exit.  This will
5548          * cause a return to userspace as soon as the guest is ready to
5549          * receive interrupts. */
5550         if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
5551             run->request_interrupt_window = 1;
5552         } else {
5553             run->request_interrupt_window = 0;
5554         }
5555 
5556         DPRINTF("setting tpr\n");
5557         run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
5558 
5559         bql_unlock();
5560     }
5561 }
5562 
5563 static void kvm_rate_limit_on_bus_lock(void)
5564 {
5565     uint64_t delay_ns = ratelimit_calculate_delay(&bus_lock_ratelimit_ctrl, 1);
5566 
5567     if (delay_ns) {
5568         g_usleep(delay_ns / SCALE_US);
5569     }
5570 }
5571 
5572 MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
5573 {
5574     X86CPU *x86_cpu = X86_CPU(cpu);
5575     CPUX86State *env = &x86_cpu->env;
5576 
5577     if (run->flags & KVM_RUN_X86_SMM) {
5578         env->hflags |= HF_SMM_MASK;
5579     } else {
5580         env->hflags &= ~HF_SMM_MASK;
5581     }
5582     if (run->if_flag) {
5583         env->eflags |= IF_MASK;
5584     } else {
5585         env->eflags &= ~IF_MASK;
5586     }
5587     if (run->flags & KVM_RUN_X86_BUS_LOCK) {
5588         kvm_rate_limit_on_bus_lock();
5589     }
5590 
5591 #ifdef CONFIG_XEN_EMU
5592     /*
5593      * If the callback is asserted as a GSI (or PCI INTx) then check if
5594      * vcpu_info->evtchn_upcall_pending has been cleared, and deassert
5595      * the callback IRQ if so. Ideally we could hook into the PIC/IOAPIC
5596      * EOI and only resample then, exactly how the VFIO eventfd pairs
5597      * are designed to work for level triggered interrupts.
5598      */
5599     if (x86_cpu->env.xen_callback_asserted) {
5600         kvm_xen_maybe_deassert_callback(cpu);
5601     }
5602 #endif
5603 
5604     /* We need to protect the apic state against concurrent accesses from
5605      * different threads in case the userspace irqchip is used. */
5606     if (!kvm_irqchip_in_kernel()) {
5607         bql_lock();
5608     }
5609     cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
5610     cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
5611     if (!kvm_irqchip_in_kernel()) {
5612         bql_unlock();
5613     }
5614     return cpu_get_mem_attrs(env);
5615 }
5616 
5617 int kvm_arch_process_async_events(CPUState *cs)
5618 {
5619     X86CPU *cpu = X86_CPU(cs);
5620     CPUX86State *env = &cpu->env;
5621 
5622     if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
5623         /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
5624         assert(env->mcg_cap);
5625 
5626         cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
5627 
5628         kvm_cpu_synchronize_state(cs);
5629 
5630         if (env->exception_nr == EXCP08_DBLE) {
5631             /* this means triple fault */
5632             qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
5633             cs->exit_request = 1;
5634             return 0;
5635         }
5636         kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
5637         env->has_error_code = 0;
5638 
5639         cs->halted = 0;
5640         if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
5641             env->mp_state = KVM_MP_STATE_RUNNABLE;
5642         }
5643     }
5644 
5645     if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
5646         !(env->hflags & HF_SMM_MASK)) {
5647         kvm_cpu_synchronize_state(cs);
5648         do_cpu_init(cpu);
5649     }
5650 
5651     if (kvm_irqchip_in_kernel()) {
5652         return 0;
5653     }
5654 
5655     if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
5656         cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
5657         apic_poll_irq(cpu->apic_state);
5658     }
5659     if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
5660          (env->eflags & IF_MASK)) ||
5661         (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
5662         cs->halted = 0;
5663     }
5664     if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
5665         cpu_reset_interrupt(cs, CPU_INTERRUPT_SIPI);
5666         kvm_cpu_synchronize_state(cs);
5667         do_cpu_sipi(cpu);
5668     }
5669     if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
5670         cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
5671         kvm_cpu_synchronize_state(cs);
5672         apic_handle_tpr_access_report(cpu->apic_state, env->eip,
5673                                       env->tpr_access_type);
5674     }
5675 
5676     return cs->halted;
5677 }
5678 
5679 static int kvm_handle_halt(X86CPU *cpu)
5680 {
5681     CPUState *cs = CPU(cpu);
5682     CPUX86State *env = &cpu->env;
5683 
5684     if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
5685           (env->eflags & IF_MASK)) &&
5686         !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
5687         cs->halted = 1;
5688         return EXCP_HLT;
5689     }
5690 
5691     return 0;
5692 }
5693 
5694 static int kvm_handle_tpr_access(X86CPU *cpu)
5695 {
5696     CPUState *cs = CPU(cpu);
5697     struct kvm_run *run = cs->kvm_run;
5698 
5699     apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
5700                                   run->tpr_access.is_write ? TPR_ACCESS_WRITE
5701                                                            : TPR_ACCESS_READ);
5702     return 1;
5703 }
5704 
5705 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
5706 {
5707     static const uint8_t int3 = 0xcc;
5708 
5709     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
5710         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
5711         return -EINVAL;
5712     }
5713     return 0;
5714 }
5715 
5716 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
5717 {
5718     uint8_t int3;
5719 
5720     if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0)) {
5721         return -EINVAL;
5722     }
5723     if (int3 != 0xcc) {
5724         return 0;
5725     }
5726     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
5727         return -EINVAL;
5728     }
5729     return 0;
5730 }
5731 
5732 static struct {
5733     target_ulong addr;
5734     int len;
5735     int type;
5736 } hw_breakpoint[4];
5737 
5738 static int nb_hw_breakpoint;
5739 
5740 static int find_hw_breakpoint(target_ulong addr, int len, int type)
5741 {
5742     int n;
5743 
5744     for (n = 0; n < nb_hw_breakpoint; n++) {
5745         if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
5746             (hw_breakpoint[n].len == len || len == -1)) {
5747             return n;
5748         }
5749     }
5750     return -1;
5751 }
5752 
5753 int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
5754 {
5755     switch (type) {
5756     case GDB_BREAKPOINT_HW:
5757         len = 1;
5758         break;
5759     case GDB_WATCHPOINT_WRITE:
5760     case GDB_WATCHPOINT_ACCESS:
5761         switch (len) {
5762         case 1:
5763             break;
5764         case 2:
5765         case 4:
5766         case 8:
5767             if (addr & (len - 1)) {
5768                 return -EINVAL;
5769             }
5770             break;
5771         default:
5772             return -EINVAL;
5773         }
5774         break;
5775     default:
5776         return -ENOSYS;
5777     }
5778 
5779     if (nb_hw_breakpoint == 4) {
5780         return -ENOBUFS;
5781     }
5782     if (find_hw_breakpoint(addr, len, type) >= 0) {
5783         return -EEXIST;
5784     }
5785     hw_breakpoint[nb_hw_breakpoint].addr = addr;
5786     hw_breakpoint[nb_hw_breakpoint].len = len;
5787     hw_breakpoint[nb_hw_breakpoint].type = type;
5788     nb_hw_breakpoint++;
5789 
5790     return 0;
5791 }
5792 
5793 int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
5794 {
5795     int n;
5796 
5797     n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
5798     if (n < 0) {
5799         return -ENOENT;
5800     }
5801     nb_hw_breakpoint--;
5802     hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
5803 
5804     return 0;
5805 }
5806 
5807 void kvm_arch_remove_all_hw_breakpoints(void)
5808 {
5809     nb_hw_breakpoint = 0;
5810 }
5811 
5812 static CPUWatchpoint hw_watchpoint;
5813 
5814 static int kvm_handle_debug(X86CPU *cpu,
5815                             struct kvm_debug_exit_arch *arch_info)
5816 {
5817     CPUState *cs = CPU(cpu);
5818     CPUX86State *env = &cpu->env;
5819     int ret = 0;
5820     int n;
5821 
5822     if (arch_info->exception == EXCP01_DB) {
5823         if (arch_info->dr6 & DR6_BS) {
5824             if (cs->singlestep_enabled) {
5825                 ret = EXCP_DEBUG;
5826             }
5827         } else {
5828             for (n = 0; n < 4; n++) {
5829                 if (arch_info->dr6 & (1 << n)) {
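                    /* DR7 R/Wn field: 0 = execute, 1 = write, 3 = read/write. */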
5830                     switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
5831                     case 0x0:
5832                         ret = EXCP_DEBUG;
5833                         break;
5834                     case 0x1:
5835                         ret = EXCP_DEBUG;
5836                         cs->watchpoint_hit = &hw_watchpoint;
5837                         hw_watchpoint.vaddr = hw_breakpoint[n].addr;
5838                         hw_watchpoint.flags = BP_MEM_WRITE;
5839                         break;
5840                     case 0x3:
5841                         ret = EXCP_DEBUG;
5842                         cs->watchpoint_hit = &hw_watchpoint;
5843                         hw_watchpoint.vaddr = hw_breakpoint[n].addr;
5844                         hw_watchpoint.flags = BP_MEM_ACCESS;
5845                         break;
5846                     }
5847                 }
5848             }
5849         }
5850     } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
5851         ret = EXCP_DEBUG;
5852     }
5853     if (ret == 0) {
5854         cpu_synchronize_state(cs);
5855         assert(env->exception_nr == -1);
5856 
5857         /* pass to guest */
5858         kvm_queue_exception(env, arch_info->exception,
5859                             arch_info->exception == EXCP01_DB,
5860                             arch_info->dr6);
5861         env->has_error_code = 0;
5862     }
5863 
5864     return ret;
5865 }
5866 
5867 void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
5868 {
5869     const uint8_t type_code[] = {
5870         [GDB_BREAKPOINT_HW] = 0x0,
5871         [GDB_WATCHPOINT_WRITE] = 0x1,
5872         [GDB_WATCHPOINT_ACCESS] = 0x3
5873     };
5874     const uint8_t len_code[] = {
5875         [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
5876     };
5877     int n;
5878 
5879     if (kvm_sw_breakpoints_active(cpu)) {
5880         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
5881     }
5882     if (nb_hw_breakpoint > 0) {
5883         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
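        /*
         * Assemble DR7 by hand: 0x0600 sets GE plus the always-one
         * reserved bit 10; (2 << (n * 2)) sets the Gn global-enable bit,
         * and the type/len codes land in the R/Wn and LENn fields at
         * bits 16+4n and 18+4n.
         */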
5884         dbg->arch.debugreg[7] = 0x0600;
5885         for (n = 0; n < nb_hw_breakpoint; n++) {
5886             dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
5887             dbg->arch.debugreg[7] |= (2 << (n * 2)) |
5888                 (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
5889                 ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
5890         }
5891     }
5892 }
5893 
5894 static int kvm_install_msr_filters(KVMState *s)
5895 {
5896     uint64_t zero = 0;
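    /*
     * With KVM_MSR_FILTER_DEFAULT_ALLOW, a range whose bitmap bit is
     * clear is denied by KVM, so a one-MSR range pointing at an all-zero
     * bitmap routes that MSR's accesses out to userspace, where
     * kvm_handle_rdmsr()/kvm_handle_wrmsr() dispatch to the handler.
     */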
5897     struct kvm_msr_filter filter = {
5898         .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
5899     };
5900     int i, j = 0;
5901 
5902     QEMU_BUILD_BUG_ON(ARRAY_SIZE(msr_handlers) != ARRAY_SIZE(filter.ranges));
5903     for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
5904         KVMMSRHandlers *handler = &msr_handlers[i];
5905         if (handler->msr) {
5906             struct kvm_msr_filter_range *range = &filter.ranges[j++];
5907 
5908             *range = (struct kvm_msr_filter_range) {
5909                 .flags = 0,
5910                 .nmsrs = 1,
5911                 .base = handler->msr,
5912                 .bitmap = (__u8 *)&zero,
5913             };
5914 
5915             if (handler->rdmsr) {
5916                 range->flags |= KVM_MSR_FILTER_READ;
5917             }
5918 
5919             if (handler->wrmsr) {
5920                 range->flags |= KVM_MSR_FILTER_WRITE;
5921             }
5922         }
5923     }
5924 
5925     return kvm_vm_ioctl(s, KVM_X86_SET_MSR_FILTER, &filter);
5926 }
5927 
5928 static int kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr,
5929                           QEMUWRMSRHandler *wrmsr)
5930 {
5931     int i, ret;
5932 
5933     for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
5934         if (!msr_handlers[i].msr) {
5935             msr_handlers[i] = (KVMMSRHandlers) {
5936                 .msr = msr,
5937                 .rdmsr = rdmsr,
5938                 .wrmsr = wrmsr,
5939             };
5940 
5941             ret = kvm_install_msr_filters(s);
5942             if (ret) {
5943                 msr_handlers[i] = (KVMMSRHandlers) { };
5944                 return ret;
5945             }
5946 
5947             return 0;
5948         }
5949     }
5950 
5951     return -EINVAL;
5952 }
5953 
5954 static int kvm_handle_rdmsr(X86CPU *cpu, struct kvm_run *run)
5955 {
5956     int i;
5957     bool r;
5958 
5959     for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
5960         KVMMSRHandlers *handler = &msr_handlers[i];
5961         if (run->msr.index == handler->msr) {
5962             if (handler->rdmsr) {
5963                 r = handler->rdmsr(cpu, handler->msr,
5964                                    (uint64_t *)&run->msr.data);
5965                 run->msr.error = r ? 0 : 1;
5966                 return 0;
5967             }
5968         }
5969     }
5970 
5971     g_assert_not_reached();
5972 }
5973 
5974 static int kvm_handle_wrmsr(X86CPU *cpu, struct kvm_run *run)
5975 {
5976     int i;
5977     bool r;
5978 
5979     for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
5980         KVMMSRHandlers *handler = &msr_handlers[i];
5981         if (run->msr.index == handler->msr) {
5982             if (handler->wrmsr) {
5983                 r = handler->wrmsr(cpu, handler->msr, run->msr.data);
5984                 run->msr.error = r ? 0 : 1;
5985                 return 0;
5986             }
5987         }
5988     }
5989 
5990     g_assert_not_reached();
5991 }
5992 
5993 static bool has_sgx_provisioning;
5994 
5995 static bool __kvm_enable_sgx_provisioning(KVMState *s)
5996 {
5997     int fd, ret;
5998 
5999     if (!kvm_vm_check_extension(s, KVM_CAP_SGX_ATTRIBUTE)) {
6000         return false;
6001     }
6002 
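    /*
     * KVM_CAP_SGX_ATTRIBUTE takes an open fd for /dev/sgx_provision as
     * proof that userspace may grant enclaves the PROVISIONKEY attribute.
     */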
6003     fd = qemu_open_old("/dev/sgx_provision", O_RDONLY);
6004     if (fd < 0) {
6005         return false;
6006     }
6007 
6008     ret = kvm_vm_enable_cap(s, KVM_CAP_SGX_ATTRIBUTE, 0, fd);
6009     if (ret) {
6010         error_report("Could not enable SGX PROVISIONKEY: %s", strerror(-ret));
6011         exit(1);
6012     }
6013     close(fd);
6014     return true;
6015 }
6016 
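/* Memoized: the probe (including the /dev/sgx_provision open) runs once. */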
6017 bool kvm_enable_sgx_provisioning(KVMState *s)
6018 {
6019     return MEMORIZE(__kvm_enable_sgx_provisioning(s), has_sgx_provisioning);
6020 }
6021 
6022 static bool host_supports_vmx(void)
6023 {
6024     uint32_t ecx, unused;
6025 
6026     host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
6027     return ecx & CPUID_EXT_VMX;
6028 }
6029 
6030 /*
6031  * Currently the handling here only supports use of KVM_HC_MAP_GPA_RANGE
6032  * to service guest-initiated memory attribute update requests so that
6033  * KVM_SET_MEMORY_ATTRIBUTES can update whether or not a page should be
6034  * backed by the private memory pool provided by guest_memfd, and as such
6035  * is only applicable to guest_memfd-backed guests (e.g. SNP/TDX).
6036  *
6037  * Other use-cases for KVM_HC_MAP_GPA_RANGE, such as SEV live migration,
6038  * are not currently implemented here.
6039  *
6040  * For the guest_memfd use-case, these exits will generally be synthesized
6041  * by KVM based on platform-specific hypercalls, like GHCB requests in the
6042  * case of SEV-SNP, and not issued directly within the guest through the
6043  * KVM_HC_MAP_GPA_RANGE hypercall. So in this case, KVM_HC_MAP_GPA_RANGE is
6044  * not actually advertised to guests via the KVM CPUID feature bit, as
6045  * opposed to SEV live migration where it would be. Since the SEV live
6046  * migration use-case is unlikely to be useful for guest_memfd-backed
6047  * guests, because private/shared page tracking is already provided through
6048  * other means, these two use-cases are treated as mutually exclusive.
6049  */
6050 static int kvm_handle_hc_map_gpa_range(X86CPU *cpu, struct kvm_run *run)
6051 {
6052     struct kvm_pre_fault_memory mem;
6053     uint64_t gpa, size, attributes;
6054     int ret;
6055 
6056     if (!machine_require_guest_memfd(current_machine)) {
6057         return -EINVAL;
         }
6058 
6059     gpa = run->hypercall.args[0];
6060     size = run->hypercall.args[1] * TARGET_PAGE_SIZE;
6061     attributes = run->hypercall.args[2];
6062 
6063     trace_kvm_hc_map_gpa_range(gpa, size, attributes, run->hypercall.flags);
6064 
6065     ret = kvm_convert_memory(gpa, size, attributes & KVM_MAP_GPA_RANGE_ENCRYPTED);
6066     if (ret || !kvm_pre_fault_memory_supported) {
6067         return ret;
6068     }
6069 
6070     /*
6071      * Opportunistically pre-fault memory in. Failures are ignored so that
6072      * any errors in faulting in the memory will be captured in the KVM
6073      * page fault path when the guest first accesses the page.
6074      */
6075     memset(&mem, 0, sizeof(mem));
6076     mem.gpa = gpa;
6077     mem.size = size;
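    /*
     * KVM_PRE_FAULT_MEMORY advances mem.gpa and decrements mem.size by the
     * amount it managed to process before returning, so this loop ends
     * once the whole range is faulted in or on the first failure.
     */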
6078     while (mem.size) {
6079         if (kvm_vcpu_ioctl(CPU(cpu), KVM_PRE_FAULT_MEMORY, &mem)) {
6080             break;
6081         }
6082     }
6083 
6084     return 0;
6085 }
6086 
6087 static int kvm_handle_hypercall(X86CPU *cpu, struct kvm_run *run)
6088 {
6089     if (run->hypercall.nr == KVM_HC_MAP_GPA_RANGE) {
6090         return kvm_handle_hc_map_gpa_range(cpu, run);
         }
6091 
6092     return -EINVAL;
6093 }
6094 
6095 #define VMX_INVALID_GUEST_STATE 0x80000021
6096 
6097 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
6098 {
6099     X86CPU *cpu = X86_CPU(cs);
6100     uint64_t code;
6101     int ret;
6102     bool ctx_invalid;
6103     KVMState *state;
6104 
6105     switch (run->exit_reason) {
6106     case KVM_EXIT_HLT:
6107         DPRINTF("handle_hlt\n");
6108         bql_lock();
6109         ret = kvm_handle_halt(cpu);
6110         bql_unlock();
6111         break;
6112     case KVM_EXIT_SET_TPR:
6113         ret = 0;
6114         break;
6115     case KVM_EXIT_TPR_ACCESS:
6116         bql_lock();
6117         ret = kvm_handle_tpr_access(cpu);
6118         bql_unlock();
6119         break;
6120     case KVM_EXIT_FAIL_ENTRY:
6121         code = run->fail_entry.hardware_entry_failure_reason;
6122         fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
6123                 code);
6124         if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
6125             fprintf(stderr,
6126                     "\nIf you're running a guest on an Intel machine without "
6127                         "unrestricted mode\n"
6128                     "support, the failure can be most likely due to the guest "
6129                         "entering an invalid\n"
6130                     "state for Intel VT. For example, the guest maybe running "
6131                         "in big real mode\n"
6132                     "which is not supported on less recent Intel processors."
6133                         "\n\n");
6134         }
6135         ret = -1;
6136         break;
6137     case KVM_EXIT_EXCEPTION:
6138         fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
6139                 run->ex.exception, run->ex.error_code);
6140         ret = -1;
6141         break;
6142     case KVM_EXIT_DEBUG:
6143         DPRINTF("kvm_exit_debug\n");
6144         bql_lock();
6145         ret = kvm_handle_debug(cpu, &run->debug.arch);
6146         bql_unlock();
6147         break;
6148     case KVM_EXIT_HYPERV:
6149         ret = kvm_hv_handle_exit(cpu, &run->hyperv);
6150         break;
6151     case KVM_EXIT_IOAPIC_EOI:
6152         ioapic_eoi_broadcast(run->eoi.vector);
6153         ret = 0;
6154         break;
6155     case KVM_EXIT_X86_BUS_LOCK:
6156         /* already handled in kvm_arch_post_run */
6157         ret = 0;
6158         break;
6159     case KVM_EXIT_NOTIFY:
6160         ctx_invalid = !!(run->notify.flags & KVM_NOTIFY_CONTEXT_INVALID);
6161         state = KVM_STATE(current_accel());
6162         if (ctx_invalid ||
6163             state->notify_vmexit == NOTIFY_VMEXIT_OPTION_INTERNAL_ERROR) {
6164             warn_report("KVM internal error: Encountered a notify exit "
6165                         "with invalid context in guest.");
6166             ret = -1;
6167         } else {
6168             warn_report_once("KVM: Encountered a notify exit with valid "
6169                              "context in guest. "
6170                              "The guest could be misbehaving.");
6171             ret = 0;
6172         }
6173         break;
6174     case KVM_EXIT_X86_RDMSR:
6175         /* We only enable MSR filtering; any other exit is bogus. */
6176         assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
6177         ret = kvm_handle_rdmsr(cpu, run);
6178         break;
6179     case KVM_EXIT_X86_WRMSR:
6180         /* We only enable MSR filtering; any other exit is bogus. */
6181         assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
6182         ret = kvm_handle_wrmsr(cpu, run);
6183         break;
6184 #ifdef CONFIG_XEN_EMU
6185     case KVM_EXIT_XEN:
6186         ret = kvm_xen_handle_exit(cpu, &run->xen);
6187         break;
6188 #endif
6189     case KVM_EXIT_HYPERCALL:
6190         ret = kvm_handle_hypercall(cpu, run);
6191         break;
6192     case KVM_EXIT_SYSTEM_EVENT:
6193         switch (run->system_event.type) {
6194         case KVM_SYSTEM_EVENT_TDX_FATAL:
6195             ret = tdx_handle_report_fatal_error(cpu, run);
6196             break;
6197         default:
6198             ret = -1;
6199             break;
6200         }
6201         break;
6202     case KVM_EXIT_TDX:
6203         /*
6204          * run->tdx is already set up for the case where userspace
6205          * does not handle the TDVMCALL.
6206          */
6207         switch (run->tdx.nr) {
6208         case TDVMCALL_GET_QUOTE:
6209             tdx_handle_get_quote(cpu, run);
6210             break;
6211         case TDVMCALL_GET_TD_VM_CALL_INFO:
6212             tdx_handle_get_tdvmcall_info(cpu, run);
6213             break;
6214         case TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT:
6215             tdx_handle_setup_event_notify_interrupt(cpu, run);
6216             break;
6217         }
6218         ret = 0;
6219         break;
6220     default:
6221         fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
6222         ret = -1;
6223         break;
6224     }
6225 
6226     return ret;
6227 }
6228 
6229 bool kvm_arch_stop_on_emulation_error(CPUState *cs)
6230 {
6231     X86CPU *cpu = X86_CPU(cs);
6232     CPUX86State *env = &cpu->env;
6233 
6234     kvm_cpu_synchronize_state(cs);
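    /*
     * Only stop for emulation errors hit in real mode or at CPL != 3;
     * failures in guest user space can be handled by the guest OS itself,
     * so execution may continue.
     */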
6235     return !(env->cr[0] & CR0_PE_MASK) ||
6236            ((env->segs[R_CS].selector  & 3) != 3);
6237 }
6238 
6239 void kvm_arch_init_irq_routing(KVMState *s)
6240 {
6241     /* We know at this point that we're using the in-kernel
6242      * irqchip, so we can use irqfds, and on x86 we know
6243      * we can use msi via irqfd and GSI routing.
6244      */
6245     kvm_msi_via_irqfd_allowed = true;
6246     kvm_gsi_routing_allowed = true;
6247 
6248     if (kvm_irqchip_is_split()) {
6249         KVMRouteChange c = kvm_irqchip_begin_route_changes(s);
6250         int i;
6251 
6252         /* If the ioapic is in QEMU and the lapics are in KVM, reserve
6253            MSI routes for signaling interrupts to the local apics. */
6254         for (i = 0; i < IOAPIC_NUM_PINS; i++) {
6255             if (kvm_irqchip_add_msi_route(&c, 0, NULL) < 0) {
6256                 error_report("Could not enable split IRQ mode.");
6257                 exit(1);
6258             }
6259         }
6260         kvm_irqchip_commit_route_changes(&c);
6261     }
6262 }
6263 
6264 int kvm_arch_irqchip_create(KVMState *s)
6265 {
6266     int ret;
6267     if (kvm_kernel_irqchip_split()) {
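        /*
         * The capability argument (24) is the number of IOAPIC pins, i.e.
         * how many GSI routes KVM reserves for the userspace IOAPIC in
         * split mode.
         */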
6268         ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
6269         if (ret) {
6270             error_report("Could not enable split irqchip mode: %s",
6271                          strerror(-ret));
6272             exit(1);
6273         } else {
6274             DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
6275             kvm_split_irqchip = true;
6276             return 1;
6277         }
6278     } else {
6279         return 0;
6280     }
6281 }
6282 
6283 uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address)
6284 {
6285     CPUX86State *env;
6286     uint64_t ext_id;
6287 
6288     if (!first_cpu) {
6289         return address;
6290     }
6291     env = &X86_CPU(first_cpu)->env;
6292     if (!(env->features[FEAT_KVM] & CPUID_KVM_MSI_EXT_DEST_ID)) {
6293         return address;
6294     }
6295 
6296     /*
6297      * If the remappable format bit is set, or the upper bits are
6298      * already set in address_hi, or the low extended bits aren't
6299      * there anyway, do nothing.
6300      */
6301     ext_id = address & (0xff << MSI_ADDR_DEST_IDX_SHIFT);
6302     if (!ext_id || (ext_id & (1 << MSI_ADDR_DEST_IDX_SHIFT)) || (address >> 32)) {
6303         return address;
6304     }
6305 
6306     address &= ~ext_id;
6307     address |= ext_id << 35;
6308     return address;
6309 }
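
/*
 * Worked example, assuming the extended destination ID layout advertised
 * via CPUID_KVM_MSI_EXT_DEST_ID: for APIC ID 0x123, the low 8 bits (0x23)
 * sit in the regular destination field and destination bit 8 is carried
 * in MSI address bit 5. "ext_id << 35" moves address bits 11-5 up to bits
 * 46-40 of the 64-bit address, i.e. bits 14-8 of address_hi, matching the
 * x2APIC MSI encoding in which address_hi[31:8] holds destination bits
 * 31:8.
 */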
6310 
6311 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
6312                              uint64_t address, uint32_t data, PCIDevice *dev)
6313 {
6314     X86IOMMUState *iommu = x86_iommu_get_default();
6315 
6316     if (iommu) {
6317         X86IOMMUClass *class = X86_IOMMU_DEVICE_GET_CLASS(iommu);
6318 
6319         if (class->int_remap) {
6320             int ret;
6321             MSIMessage src, dst;
6322 
6323             src.address = route->u.msi.address_hi;
6324             src.address <<= VTD_MSI_ADDR_HI_SHIFT;
6325             src.address |= route->u.msi.address_lo;
6326             src.data = route->u.msi.data;
6327 
6328             ret = class->int_remap(iommu, &src, &dst,
6329                                    dev ? pci_requester_id(dev) :
6330                                          X86_IOMMU_SID_INVALID);
6331             if (ret) {
6332                 trace_kvm_x86_fixup_msi_error(route->gsi);
6333                 return 1;
6334             }
6335 
6336             /*
6337              * Handle untranslated compatibility format interrupt with
6338              * extended destination ID in the low bits 11-5. */
6339             dst.address = kvm_swizzle_msi_ext_dest_id(dst.address);
6340 
6341             route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
6342             route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
6343             route->u.msi.data = dst.data;
6344             return 0;
6345         }
6346     }
6347 
6348 #ifdef CONFIG_XEN_EMU
6349     if (xen_mode == XEN_EMULATE) {
6350         int handled = xen_evtchn_translate_pirq_msi(route, address, data);
6351 
6352         /*
6353          * If it was a PIRQ and successfully routed (handled == 0) or it was
6354          * an error (handled < 0), return. If it wasn't a PIRQ, keep going.
6355          */
6356         if (handled <= 0) {
6357             return handled;
6358         }
6359     }
6360 #endif
6361 
6362     address = kvm_swizzle_msi_ext_dest_id(address);
6363     route->u.msi.address_hi = address >> VTD_MSI_ADDR_HI_SHIFT;
6364     route->u.msi.address_lo = address & VTD_MSI_ADDR_LO_MASK;
6365     return 0;
6366 }
6367 
6368 typedef struct MSIRouteEntry MSIRouteEntry;
6369 
6370 struct MSIRouteEntry {
6371     PCIDevice *dev;             /* Device pointer */
6372     int vector;                 /* MSI/MSIX vector index */
6373     int virq;                   /* Virtual IRQ index */
6374     QLIST_ENTRY(MSIRouteEntry) list;
6375 };
6376 
6377 /* List of used GSI routes */
6378 static QLIST_HEAD(, MSIRouteEntry) msi_route_list =
6379     QLIST_HEAD_INITIALIZER(msi_route_list);
6380 
6381 void kvm_update_msi_routes_all(void *private, bool global,
6382                                uint32_t index, uint32_t mask)
6383 {
6384     int cnt = 0, vector;
6385     MSIRouteEntry *entry;
6386     MSIMessage msg;
6387     PCIDevice *dev;
6388 
6389     /* TODO: explicit route update */
6390     QLIST_FOREACH(entry, &msi_route_list, list) {
6391         cnt++;
6392         vector = entry->vector;
6393         dev = entry->dev;
6394         if (msix_enabled(dev) && !msix_is_masked(dev, vector)) {
6395             msg = msix_get_message(dev, vector);
6396         } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) {
6397             msg = msi_get_message(dev, vector);
6398         } else {
6399             /*
6400              * Either MSI/MSIX is disabled for the device, or the
6401              * specific message was masked out.  Skip this one.
6402              */
6403             continue;
6404         }
6405         kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
6406     }
6407     kvm_irqchip_commit_routes(kvm_state);
6408     trace_kvm_x86_update_msi_routes(cnt);
6409 }
6410 
6411 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
6412                                 int vector, PCIDevice *dev)
6413 {
6414     static bool notify_list_inited = false;
6415     MSIRouteEntry *entry;
6416 
6417     if (!dev) {
6418         /* These are (possibly) IOAPIC routes, only used in split
6419          * kernel irqchip mode; we only keep track of PCI devices
6420          * here. */
6421         return 0;
6422     }
6423 
6424     entry = g_new0(MSIRouteEntry, 1);
6425     entry->dev = dev;
6426     entry->vector = vector;
6427     entry->virq = route->gsi;
6428     QLIST_INSERT_HEAD(&msi_route_list, entry, list);
6429 
6430     trace_kvm_x86_add_msi_route(route->gsi);
6431 
6432     if (!notify_list_inited) {
6433         /* The first time we add a route, register ourselves in the
6434          * IOMMU's IEC notifier list if needed. */
6435         X86IOMMUState *iommu = x86_iommu_get_default();
6436         if (iommu) {
6437             x86_iommu_iec_register_notifier(iommu,
6438                                             kvm_update_msi_routes_all,
6439                                             NULL);
6440         }
6441         notify_list_inited = true;
6442     }
6443     return 0;
6444 }
6445 
6446 int kvm_arch_release_virq_post(int virq)
6447 {
6448     MSIRouteEntry *entry, *next;
6449     QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
6450         if (entry->virq == virq) {
6451             trace_kvm_x86_remove_msi_route(virq);
6452             QLIST_REMOVE(entry, list);
6453             g_free(entry);
6454             break;
6455         }
6456     }
6457     return 0;
6458 }
6459 
6460 int kvm_arch_msi_data_to_gsi(uint32_t data)
6461 {
6462     abort();
6463 }
6464 
6465 bool kvm_has_waitpkg(void)
6466 {
6467     return has_msr_umwait;
6468 }
6469 
6470 #define ARCH_REQ_XCOMP_GUEST_PERM       0x1025
6471 
6472 void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask)
6473 {
6474     KVMState *s = kvm_state;
6475     uint64_t supported;
6476 
6477     mask &= XSTATE_DYNAMIC_MASK;
6478     if (!mask) {
6479         return;
6480     }
6481     /*
6482      * Just ignore bits that are not in CPUID[EAX=0xD,ECX=0].
6483      * ARCH_REQ_XCOMP_GUEST_PERM would fail, and QEMU has warned
6484      * about them already because they are not supported features.
6485      */
6486     supported = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
6487     supported |= (uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32;
6488     mask &= supported;
6489 
6490     while (mask) {
6491         int bit = ctz64(mask);
6492         int rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit);
6493         if (rc) {
6494             /*
6495              * Older kernel versions (<5.17) do not support
6496              * ARCH_REQ_XCOMP_GUEST_PERM, but also do not return
6497              * any dynamic feature from kvm_arch_get_supported_cpuid.
6498              */
6499             warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure "
6500                         "for feature bit %d", bit);
6501         }
6502         mask &= ~BIT_ULL(bit);
6503     }
6504 }
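
/*
 * Example: on an AMX-capable host, the only dynamic XSAVE component at
 * present is XTILEDATA (XSTATE bit 18), so enabling an AMX CPU model makes
 * this function issue arch_prctl(ARCH_REQ_XCOMP_GUEST_PERM, 18) before
 * vCPU creation, letting the kernel size the guest FPU state accordingly.
 */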
6505 
6506 static int kvm_arch_get_notify_vmexit(Object *obj, Error **errp)
6507 {
6508     KVMState *s = KVM_STATE(obj);
6509     return s->notify_vmexit;
6510 }
6511 
6512 static void kvm_arch_set_notify_vmexit(Object *obj, int value, Error **errp)
6513 {
6514     KVMState *s = KVM_STATE(obj);
6515 
6516     if (s->fd != -1) {
6517         error_setg(errp, "Cannot set properties after the accelerator has been initialized");
6518         return;
6519     }
6520 
6521     s->notify_vmexit = value;
6522 }
6523 
6524 static void kvm_arch_get_notify_window(Object *obj, Visitor *v,
6525                                        const char *name, void *opaque,
6526                                        Error **errp)
6527 {
6528     KVMState *s = KVM_STATE(obj);
6529     uint32_t value = s->notify_window;
6530 
6531     visit_type_uint32(v, name, &value, errp);
6532 }
6533 
6534 static void kvm_arch_set_notify_window(Object *obj, Visitor *v,
6535                                        const char *name, void *opaque,
6536                                        Error **errp)
6537 {
6538     KVMState *s = KVM_STATE(obj);
6539     uint32_t value;
6540 
6541     if (s->fd != -1) {
6542         error_setg(errp, "Cannot set properties after the accelerator has been initialized");
6543         return;
6544     }
6545 
6546     if (!visit_type_uint32(v, name, &value, errp)) {
6547         return;
6548     }
6549 
6550     s->notify_window = value;
6551 }
6552 
6553 static void kvm_arch_get_xen_version(Object *obj, Visitor *v,
6554                                      const char *name, void *opaque,
6555                                      Error **errp)
6556 {
6557     KVMState *s = KVM_STATE(obj);
6558     uint32_t value = s->xen_version;
6559 
6560     visit_type_uint32(v, name, &value, errp);
6561 }
6562 
6563 static void kvm_arch_set_xen_version(Object *obj, Visitor *v,
6564                                      const char *name, void *opaque,
6565                                      Error **errp)
6566 {
6567     KVMState *s = KVM_STATE(obj);
6568     Error *error = NULL;
6569     uint32_t value;
6570 
6571     visit_type_uint32(v, name, &value, &error);
6572     if (error) {
6573         error_propagate(errp, error);
6574         return;
6575     }
6576 
6577     s->xen_version = value;
6578     if (value && xen_mode == XEN_DISABLED) {
6579         xen_mode = XEN_EMULATE;
6580     }
6581 }
6582 
6583 static void kvm_arch_get_xen_gnttab_max_frames(Object *obj, Visitor *v,
6584                                                const char *name, void *opaque,
6585                                                Error **errp)
6586 {
6587     KVMState *s = KVM_STATE(obj);
6588     uint16_t value = s->xen_gnttab_max_frames;
6589 
6590     visit_type_uint16(v, name, &value, errp);
6591 }
6592 
6593 static void kvm_arch_set_xen_gnttab_max_frames(Object *obj, Visitor *v,
6594                                                const char *name, void *opaque,
6595                                                Error **errp)
6596 {
6597     KVMState *s = KVM_STATE(obj);
6598     Error *error = NULL;
6599     uint16_t value;
6600 
6601     visit_type_uint16(v, name, &value, &error);
6602     if (error) {
6603         error_propagate(errp, error);
6604         return;
6605     }
6606 
6607     s->xen_gnttab_max_frames = value;
6608 }
6609 
6610 static void kvm_arch_get_xen_evtchn_max_pirq(Object *obj, Visitor *v,
6611                                              const char *name, void *opaque,
6612                                              Error **errp)
6613 {
6614     KVMState *s = KVM_STATE(obj);
6615     uint16_t value = s->xen_evtchn_max_pirq;
6616 
6617     visit_type_uint16(v, name, &value, errp);
6618 }
6619 
6620 static void kvm_arch_set_xen_evtchn_max_pirq(Object *obj, Visitor *v,
6621                                              const char *name, void *opaque,
6622                                              Error **errp)
6623 {
6624     KVMState *s = KVM_STATE(obj);
6625     Error *error = NULL;
6626     uint16_t value;
6627 
6628     visit_type_uint16(v, name, &value, &error);
6629     if (error) {
6630         error_propagate(errp, error);
6631         return;
6632     }
6633 
6634     s->xen_evtchn_max_pirq = value;
6635 }
6636 
6637 void kvm_arch_accel_class_init(ObjectClass *oc)
6638 {
6639     object_class_property_add_enum(oc, "notify-vmexit", "NotifyVMexitOption",
6640                                    &NotifyVmexitOption_lookup,
6641                                    kvm_arch_get_notify_vmexit,
6642                                    kvm_arch_set_notify_vmexit);
6643     object_class_property_set_description(oc, "notify-vmexit",
6644                                           "Enable notify VM exit");
6645 
6646     object_class_property_add(oc, "notify-window", "uint32",
6647                               kvm_arch_get_notify_window,
6648                               kvm_arch_set_notify_window,
6649                               NULL, NULL);
6650     object_class_property_set_description(oc, "notify-window",
6651                                           "Clock cycles without an event window "
6652                                           "after which a notification VM exit occurs");
6653 
6654     object_class_property_add(oc, "xen-version", "uint32",
6655                               kvm_arch_get_xen_version,
6656                               kvm_arch_set_xen_version,
6657                               NULL, NULL);
6658     object_class_property_set_description(oc, "xen-version",
6659                                           "Xen version to be emulated "
6660                                           "(in XENVER_version form "
6661                                           "e.g. 0x4000a for 4.10)");
6662 
6663     object_class_property_add(oc, "xen-gnttab-max-frames", "uint16",
6664                               kvm_arch_get_xen_gnttab_max_frames,
6665                               kvm_arch_set_xen_gnttab_max_frames,
6666                               NULL, NULL);
6667     object_class_property_set_description(oc, "xen-gnttab-max-frames",
6668                                           "Maximum number of grant table frames");
6669 
6670     object_class_property_add(oc, "xen-evtchn-max-pirq", "uint16",
6671                               kvm_arch_get_xen_evtchn_max_pirq,
6672                               kvm_arch_set_xen_evtchn_max_pirq,
6673                               NULL, NULL);
6674     object_class_property_set_description(oc, "xen-evtchn-max-pirq",
6675                                           "Maximum number of Xen PIRQs");
6676 }
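
/*
 * These properties are consumed from the -accel option; illustrative
 * invocations (values are examples, not recommendations):
 *
 *     -accel kvm,notify-vmexit=internal-error,notify-window=0
 *     -accel kvm,xen-version=0x4000a,kernel-irqchip=split
 *
 * The setters above reject changes once the accelerator fd is open, i.e.
 * after initialization.
 */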
6677 
6678 void kvm_set_max_apic_id(uint32_t max_apic_id)
6679 {
6680     kvm_vm_enable_cap(kvm_state, KVM_CAP_MAX_VCPU_ID, 0, max_apic_id);
6681 }
6682