xref: /openbmc/qemu/target/i386/kvm/kvm.c (revision 4cf41e80)
1 /*
2  * QEMU KVM support
3  *
4  * Copyright (C) 2006-2008 Qumranet Technologies
5  * Copyright IBM, Corp. 2008
6  *
7  * Authors:
8  *  Anthony Liguori   <aliguori@us.ibm.com>
9  *
10  * This work is licensed under the terms of the GNU GPL, version 2 or later.
11  * See the COPYING file in the top-level directory.
12  *
13  */
14 
15 #include "qemu/osdep.h"
16 #include "qapi/qapi-events-run-state.h"
17 #include "qapi/error.h"
18 #include "qapi/visitor.h"
19 #include <sys/ioctl.h>
20 #include <sys/utsname.h>
21 #include <sys/syscall.h>
22 
23 #include <linux/kvm.h>
24 #include "standard-headers/asm-x86/kvm_para.h"
25 
26 #include "cpu.h"
27 #include "host-cpu.h"
28 #include "sysemu/sysemu.h"
29 #include "sysemu/hw_accel.h"
30 #include "sysemu/kvm_int.h"
31 #include "sysemu/runstate.h"
32 #include "kvm_i386.h"
33 #include "sev.h"
34 #include "hyperv.h"
35 #include "hyperv-proto.h"
36 
37 #include "exec/gdbstub.h"
38 #include "qemu/host-utils.h"
39 #include "qemu/main-loop.h"
40 #include "qemu/ratelimit.h"
41 #include "qemu/config-file.h"
42 #include "qemu/error-report.h"
43 #include "qemu/memalign.h"
44 #include "hw/i386/x86.h"
45 #include "hw/i386/apic.h"
46 #include "hw/i386/apic_internal.h"
47 #include "hw/i386/apic-msidef.h"
48 #include "hw/i386/intel_iommu.h"
49 #include "hw/i386/x86-iommu.h"
50 #include "hw/i386/e820_memory_layout.h"
51 
52 #include "hw/pci/pci.h"
53 #include "hw/pci/msi.h"
54 #include "hw/pci/msix.h"
55 #include "migration/blocker.h"
56 #include "exec/memattrs.h"
57 #include "trace.h"
58 
59 #include CONFIG_DEVICES
60 
61 //#define DEBUG_KVM
62 
63 #ifdef DEBUG_KVM
64 #define DPRINTF(fmt, ...) \
65     do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
66 #else
67 #define DPRINTF(fmt, ...) \
68     do { } while (0)
69 #endif
70 
71 /* From arch/x86/kvm/lapic.h */
72 #define KVM_APIC_BUS_CYCLE_NS       1
73 #define KVM_APIC_BUS_FREQUENCY      (1000000000ULL / KVM_APIC_BUS_CYCLE_NS)
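/*
 * With KVM_APIC_BUS_CYCLE_NS == 1 the expression above evaluates to
 * 1,000,000,000 Hz, i.e. KVM models the APIC bus as a 1 GHz clock.
 */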
74 
75 #define MSR_KVM_WALL_CLOCK  0x11
76 #define MSR_KVM_SYSTEM_TIME 0x12
77 
78 /* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
79  * 255 kvm_msr_entry structs */
80 #define MSR_BUF_SIZE 4096
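/*
 * Size check for the comment above, assuming the usual layout of the KVM
 * UAPI structs (8-byte struct kvm_msrs header, 16-byte struct kvm_msr_entry):
 *
 *     8 + 255 * 16 = 4088 bytes <= MSR_BUF_SIZE (4096)
 *
 * A hypothetical compile-time assertion for this could look like:
 *
 *     QEMU_BUILD_BUG_ON(sizeof(struct kvm_msrs) +
 *                       255 * sizeof(struct kvm_msr_entry) > MSR_BUF_SIZE);
 */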
81 
82 static void kvm_init_msrs(X86CPU *cpu);
83 
84 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
85     KVM_CAP_INFO(SET_TSS_ADDR),
86     KVM_CAP_INFO(EXT_CPUID),
87     KVM_CAP_INFO(MP_STATE),
88     KVM_CAP_LAST_INFO
89 };
90 
91 static bool has_msr_star;
92 static bool has_msr_hsave_pa;
93 static bool has_msr_tsc_aux;
94 static bool has_msr_tsc_adjust;
95 static bool has_msr_tsc_deadline;
96 static bool has_msr_feature_control;
97 static bool has_msr_misc_enable;
98 static bool has_msr_smbase;
99 static bool has_msr_bndcfgs;
100 static int lm_capable_kernel;
101 static bool has_msr_hv_hypercall;
102 static bool has_msr_hv_crash;
103 static bool has_msr_hv_reset;
104 static bool has_msr_hv_vpindex;
105 static bool hv_vpindex_settable;
106 static bool has_msr_hv_runtime;
107 static bool has_msr_hv_synic;
108 static bool has_msr_hv_stimer;
109 static bool has_msr_hv_frequencies;
110 static bool has_msr_hv_reenlightenment;
111 static bool has_msr_hv_syndbg_options;
112 static bool has_msr_xss;
113 static bool has_msr_umwait;
114 static bool has_msr_spec_ctrl;
115 static bool has_tsc_scale_msr;
116 static bool has_msr_tsx_ctrl;
117 static bool has_msr_virt_ssbd;
118 static bool has_msr_smi_count;
119 static bool has_msr_arch_capabs;
120 static bool has_msr_core_capabs;
121 static bool has_msr_vmx_vmfunc;
122 static bool has_msr_ucode_rev;
123 static bool has_msr_vmx_procbased_ctls2;
124 static bool has_msr_perf_capabs;
125 static bool has_msr_pkrs;
126 
127 static uint32_t has_architectural_pmu_version;
128 static uint32_t num_architectural_pmu_gp_counters;
129 static uint32_t num_architectural_pmu_fixed_counters;
130 
131 static int has_xsave;
132 static int has_xsave2;
133 static int has_xcrs;
134 static int has_pit_state2;
135 static int has_sregs2;
136 static int has_exception_payload;
137 static int has_triple_fault_event;
138 
139 static bool has_msr_mcg_ext_ctl;
140 
141 static struct kvm_cpuid2 *cpuid_cache;
142 static struct kvm_cpuid2 *hv_cpuid_cache;
143 static struct kvm_msr_list *kvm_feature_msrs;
144 
145 static KVMMSRHandlers msr_handlers[KVM_MSR_FILTER_MAX_RANGES];
146 
147 #define BUS_LOCK_SLICE_TIME 1000000000ULL /* ns */
148 static RateLimit bus_lock_ratelimit_ctrl;
149 static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value);
150 
151 int kvm_has_pit_state2(void)
152 {
153     return has_pit_state2;
154 }
155 
156 bool kvm_has_smm(void)
157 {
158     return kvm_vm_check_extension(kvm_state, KVM_CAP_X86_SMM);
159 }
160 
161 bool kvm_has_adjust_clock_stable(void)
162 {
163     int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);
164 
165     return (ret & KVM_CLOCK_TSC_STABLE);
166 }
167 
168 bool kvm_has_adjust_clock(void)
169 {
170     return kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);
171 }
172 
173 bool kvm_has_exception_payload(void)
174 {
175     return has_exception_payload;
176 }
177 
178 static bool kvm_x2apic_api_set_flags(uint64_t flags)
179 {
180     KVMState *s = KVM_STATE(current_accel());
181 
182     return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
183 }
184 
185 #define MEMORIZE(fn, _result) \
186     ({ \
187         static bool _memorized; \
188         \
189         if (_memorized) { \
190             return _result; \
191         } \
192         _memorized = true; \
193         _result = fn; \
194     })
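
/*
 * MEMORIZE() is a GNU statement expression: on the first call it evaluates
 * 'fn', stores the result in the caller-provided static variable and yields
 * that value; on later calls the embedded 'return' exits the *calling*
 * function with the cached value, so 'fn' is evaluated at most once.  A usage
 * sketch with a hypothetical probe function (mirroring kvm_enable_x2apic()
 * below):
 *
 *     static bool cached_result;
 *
 *     bool query_feature_once(void)
 *     {
 *         return MEMORIZE(expensive_probe(), cached_result);
 *     }
 */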
195 
196 static bool has_x2apic_api;
197 
198 bool kvm_has_x2apic_api(void)
199 {
200     return has_x2apic_api;
201 }
202 
203 bool kvm_enable_x2apic(void)
204 {
205     return MEMORIZE(
206              kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
207                                       KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
208              has_x2apic_api);
209 }
210 
211 bool kvm_hv_vpindex_settable(void)
212 {
213     return hv_vpindex_settable;
214 }
215 
216 static int kvm_get_tsc(CPUState *cs)
217 {
218     X86CPU *cpu = X86_CPU(cs);
219     CPUX86State *env = &cpu->env;
220     uint64_t value;
221     int ret;
222 
223     if (env->tsc_valid) {
224         return 0;
225     }
226 
227     env->tsc_valid = !runstate_is_running();
228 
229     ret = kvm_get_one_msr(cpu, MSR_IA32_TSC, &value);
230     if (ret < 0) {
231         return ret;
232     }
233 
234     env->tsc = value;
235     return 0;
236 }
237 
238 static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
239 {
240     kvm_get_tsc(cpu);
241 }
242 
243 void kvm_synchronize_all_tsc(void)
244 {
245     CPUState *cpu;
246 
247     if (kvm_enabled()) {
248         CPU_FOREACH(cpu) {
249             run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
250         }
251     }
252 }
253 
254 static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
255 {
256     struct kvm_cpuid2 *cpuid;
257     int r, size;
258 
259     size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
260     cpuid = g_malloc0(size);
261     cpuid->nent = max;
262     r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
263     if (r == 0 && cpuid->nent >= max) {
264         r = -E2BIG;
265     }
266     if (r < 0) {
267         if (r == -E2BIG) {
268             g_free(cpuid);
269             return NULL;
270         } else {
271             fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
272                     strerror(-r));
273             exit(1);
274         }
275     }
276     return cpuid;
277 }
278 
279 /* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
280  * for all entries.
281  */
282 static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
283 {
284     struct kvm_cpuid2 *cpuid;
285     int max = 1;
286 
287     if (cpuid_cache != NULL) {
288         return cpuid_cache;
289     }
290     while ((cpuid = try_get_cpuid(s, max)) == NULL) {
291         max *= 2;
292     }
293     cpuid_cache = cpuid;
294     return cpuid;
295 }
296 
297 static bool host_tsx_broken(void)
298 {
299     int family, model, stepping;
300     char vendor[CPUID_VENDOR_SZ + 1];
301 
302     host_cpu_vendor_fms(vendor, &family, &model, &stepping);
303 
304     /* Check if we are running on a Haswell host known to have broken TSX */
305     return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
306            (family == 6) &&
307            ((model == 63 && stepping < 4) ||
308             model == 60 || model == 69 || model == 70);
309 }
310 
311 /* Returns the value of a specific register of the cpuid entry
312  */
313 static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
314 {
315     uint32_t ret = 0;
316     switch (reg) {
317     case R_EAX:
318         ret = entry->eax;
319         break;
320     case R_EBX:
321         ret = entry->ebx;
322         break;
323     case R_ECX:
324         ret = entry->ecx;
325         break;
326     case R_EDX:
327         ret = entry->edx;
328         break;
329     }
330     return ret;
331 }
332 
333 /* Find matching entry for function/index in the kvm_cpuid2 struct
334  */
335 static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
336                                                  uint32_t function,
337                                                  uint32_t index)
338 {
339     int i;
340     for (i = 0; i < cpuid->nent; ++i) {
341         if (cpuid->entries[i].function == function &&
342             cpuid->entries[i].index == index) {
343             return &cpuid->entries[i];
344         }
345     }
346     /* not found: */
347     return NULL;
348 }
349 
350 uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
351                                       uint32_t index, int reg)
352 {
353     struct kvm_cpuid2 *cpuid;
354     uint32_t ret = 0;
355     uint32_t cpuid_1_edx, unused;
356     uint64_t bitmask;
357 
358     cpuid = get_supported_cpuid(s);
359 
360     struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
361     if (entry) {
362         ret = cpuid_entry_get_reg(entry, reg);
363     }
364 
365     /* Fixups for the data returned by KVM, below */
366 
367     if (function == 1 && reg == R_EDX) {
368         /* KVM before 2.6.30 misreports the following features */
369         ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
370     } else if (function == 1 && reg == R_ECX) {
371         /* We can set the hypervisor flag, even if KVM does not return it on
372          * GET_SUPPORTED_CPUID
373          */
374         ret |= CPUID_EXT_HYPERVISOR;
375         /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
376          * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
377          * and the irqchip is in the kernel.
378          */
379         if (kvm_irqchip_in_kernel() &&
380                 kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
381             ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
382         }
383 
384         /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
385          * without the in-kernel irqchip
386          */
387         if (!kvm_irqchip_in_kernel()) {
388             ret &= ~CPUID_EXT_X2APIC;
389         }
390 
391         if (enable_cpu_pm) {
392             int disable_exits = kvm_check_extension(s,
393                                                     KVM_CAP_X86_DISABLE_EXITS);
394 
395             if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
396                 ret |= CPUID_EXT_MONITOR;
397             }
398         }
399     } else if (function == 6 && reg == R_EAX) {
400         ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
401     } else if (function == 7 && index == 0 && reg == R_EBX) {
402         /* Not new instructions, just an optimization.  */
403         uint32_t ebx;
404         host_cpuid(7, 0, &unused, &ebx, &unused, &unused);
405         ret |= ebx & CPUID_7_0_EBX_ERMS;
406 
407         if (host_tsx_broken()) {
408             ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
409         }
410     } else if (function == 7 && index == 0 && reg == R_EDX) {
411         /* Not new instructions, just an optimization.  */
412         uint32_t edx;
413         host_cpuid(7, 0, &unused, &unused, &unused, &edx);
414         ret |= edx & CPUID_7_0_EDX_FSRM;
415 
416         /*
417          * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
418          * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is
419          * returned by KVM_GET_MSR_INDEX_LIST.
420          */
421         if (!has_msr_arch_capabs) {
422             ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
423         }
424     } else if (function == 7 && index == 1 && reg == R_EAX) {
425         /* Not new instructions, just an optimization.  */
426         uint32_t eax;
427         host_cpuid(7, 1, &eax, &unused, &unused, &unused);
428         ret |= eax & (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_FSRC);
429     } else if (function == 0xd && index == 0 &&
430                (reg == R_EAX || reg == R_EDX)) {
431         /*
432          * The value returned by KVM_GET_SUPPORTED_CPUID does not include
433          * features that still have to be enabled with the arch_prctl
434          * system call.  QEMU needs the full value, which is retrieved
435          * with KVM_GET_DEVICE_ATTR.
436          */
437         struct kvm_device_attr attr = {
438             .group = 0,
439             .attr = KVM_X86_XCOMP_GUEST_SUPP,
440             .addr = (unsigned long) &bitmask
441         };
442 
443         bool sys_attr = kvm_check_extension(s, KVM_CAP_SYS_ATTRIBUTES);
444         if (!sys_attr) {
445             return ret;
446         }
447 
448         int rc = kvm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
449         if (rc < 0) {
450             if (rc != -ENXIO) {
451                 warn_report("KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) "
452                             "error: %d", rc);
453             }
454             return ret;
455         }
456         ret = (reg == R_EAX) ? bitmask : bitmask >> 32;
457     } else if (function == 0x80000001 && reg == R_ECX) {
458         /*
459          * It's safe to enable TOPOEXT even if it's not returned by
460          * GET_SUPPORTED_CPUID.  Unconditionally enabling TOPOEXT here allows
461          * us to keep CPU models including TOPOEXT runnable on older kernels.
462          */
463         ret |= CPUID_EXT3_TOPOEXT;
464     } else if (function == 0x80000001 && reg == R_EDX) {
465         /* On Intel, kvm returns cpuid according to the Intel spec,
466          * so add missing bits according to the AMD spec:
467          */
468         cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
469         ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
470     } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
471         /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
472          * be enabled without the in-kernel irqchip
473          */
474         if (!kvm_irqchip_in_kernel()) {
475             ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
476         }
477         if (kvm_irqchip_is_split()) {
478             ret |= 1U << KVM_FEATURE_MSI_EXT_DEST_ID;
479         }
480     } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
481         ret |= 1U << KVM_HINTS_REALTIME;
482     }
483 
484     return ret;
485 }
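
/*
 * Typical (illustrative) use of kvm_arch_get_supported_cpuid(): callers mask
 * the returned register against the feature bit they care about, e.g. to see
 * whether the TSC deadline timer may be exposed to the guest:
 *
 *     if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) &
 *         CPUID_EXT_TSC_DEADLINE_TIMER) {
 *         ... the feature can be offered to the guest ...
 *     }
 */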
486 
487 uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
488 {
489     struct {
490         struct kvm_msrs info;
491         struct kvm_msr_entry entries[1];
492     } msr_data = {};
493     uint64_t value;
494     uint32_t ret, can_be_one, must_be_one;
495 
496     if (kvm_feature_msrs == NULL) { /* Host doesn't support feature MSRs */
497         return 0;
498     }
499 
500     /* Check if requested MSR is supported feature MSR */
501     int i;
502     for (i = 0; i < kvm_feature_msrs->nmsrs; i++)
503         if (kvm_feature_msrs->indices[i] == index) {
504             break;
505         }
506     if (i == kvm_feature_msrs->nmsrs) {
507         return 0; /* if the feature MSR is not supported, simply return 0 */
508     }
509 
510     msr_data.info.nmsrs = 1;
511     msr_data.entries[0].index = index;
512 
513     ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data);
514     if (ret != 1) {
515         error_report("KVM get MSR (index=0x%x) feature failed, %s",
516             index, strerror(-ret));
517         exit(1);
518     }
519 
520     value = msr_data.entries[0].data;
521     switch (index) {
522     case MSR_IA32_VMX_PROCBASED_CTLS2:
523         if (!has_msr_vmx_procbased_ctls2) {
524             /* KVM did not report these bits for some time; add them ourselves. */
525             if (kvm_arch_get_supported_cpuid(s, 0xD, 1, R_ECX) &
526                 CPUID_XSAVE_XSAVES) {
527                 value |= (uint64_t)VMX_SECONDARY_EXEC_XSAVES << 32;
528             }
529             if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) &
530                 CPUID_EXT_RDRAND) {
531                 value |= (uint64_t)VMX_SECONDARY_EXEC_RDRAND_EXITING << 32;
532             }
533             if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
534                 CPUID_7_0_EBX_INVPCID) {
535                 value |= (uint64_t)VMX_SECONDARY_EXEC_ENABLE_INVPCID << 32;
536             }
537             if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
538                 CPUID_7_0_EBX_RDSEED) {
539                 value |= (uint64_t)VMX_SECONDARY_EXEC_RDSEED_EXITING << 32;
540             }
541             if (kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX) &
542                 CPUID_EXT2_RDTSCP) {
543                 value |= (uint64_t)VMX_SECONDARY_EXEC_RDTSCP << 32;
544             }
545         }
546         /* fall through */
547     case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
548     case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
549     case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
550     case MSR_IA32_VMX_TRUE_EXIT_CTLS:
551         /*
552          * Return true for bits that can be one, but do not have to be one.
553          * The SDM tells us which bits could have a "must be one" setting,
554          * so we can do the opposite transformation in make_vmx_msr_value.
555          */
556         must_be_one = (uint32_t)value;
557         can_be_one = (uint32_t)(value >> 32);
558         return can_be_one & ~must_be_one;
559 
560     default:
561         return value;
562     }
563 }
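
/*
 * Worked example for the TRUE_*_CTLS transformation above (made-up value):
 * if KVM reports value == 0x000000ff00000016ULL, then
 *
 *     must_be_one = 0x00000016   (low 32 bits: bits that have to stay 1)
 *     can_be_one  = 0x000000ff   (high 32 bits: bits allowed to be 1)
 *     returned    = 0xff & ~0x16 = 0xe9
 *
 * i.e. only the bits that may be either 0 or 1 are reported as supported,
 * and make_vmx_msr_value() later re-adds the mandatory bits.
 */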
564 
565 static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
566                                      int *max_banks)
567 {
568     int r;
569 
570     r = kvm_check_extension(s, KVM_CAP_MCE);
571     if (r > 0) {
572         *max_banks = r;
573         return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
574     }
575     return -ENOSYS;
576 }
577 
578 static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
579 {
580     CPUState *cs = CPU(cpu);
581     CPUX86State *env = &cpu->env;
582     uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
583                       MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
584     uint64_t mcg_status = MCG_STATUS_MCIP;
585     int flags = 0;
586 
587     if (code == BUS_MCEERR_AR) {
588         status |= MCI_STATUS_AR | 0x134;
589         mcg_status |= MCG_STATUS_RIPV | MCG_STATUS_EIPV;
590     } else {
591         status |= 0xc0;
592         mcg_status |= MCG_STATUS_RIPV;
593     }
594 
595     flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
596     /* We need to read the value of MSR_EXT_MCG_CTL that was set by the
597      * guest kernel back into env->mcg_ext_ctl.
598      */
599     cpu_synchronize_state(cs);
600     if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
601         mcg_status |= MCG_STATUS_LMCE;
602         flags = 0;
603     }
604 
605     cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
606                        (MCM_ADDR_PHYS << 6) | 0xc, flags);
607 }
608 
609 static void emit_hypervisor_memory_failure(MemoryFailureAction action, bool ar)
610 {
611     MemoryFailureFlags mff = {.action_required = ar, .recursive = false};
612 
613     qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_HYPERVISOR, action,
614                                    &mff);
615 }
616 
617 static void hardware_memory_error(void *host_addr)
618 {
619     emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_FATAL, true);
620     error_report("QEMU got Hardware memory error at addr %p", host_addr);
621     exit(1);
622 }
623 
624 void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
625 {
626     X86CPU *cpu = X86_CPU(c);
627     CPUX86State *env = &cpu->env;
628     ram_addr_t ram_addr;
629     hwaddr paddr;
630 
631     /* If we get an action required MCE, it has been injected by KVM
632      * while the VM was running.  An action optional MCE instead should
633      * be coming from the main thread, which qemu_init_sigbus identifies
634      * as the "early kill" thread.
635      */
636     assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);
637 
638     if ((env->mcg_cap & MCG_SER_P) && addr) {
639         ram_addr = qemu_ram_addr_from_host(addr);
640         if (ram_addr != RAM_ADDR_INVALID &&
641             kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
642             kvm_hwpoison_page_add(ram_addr);
643             kvm_mce_inject(cpu, paddr, code);
644 
645             /*
646              * Use different logging severity based on error type.
647              * If there is additional MCE reporting on the hypervisor, QEMU VA
648              * could be another source to identify the PA and MCE details.
649              */
650             if (code == BUS_MCEERR_AR) {
651                 error_report("Guest MCE Memory Error at QEMU addr %p and "
652                     "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
653                     addr, paddr, "BUS_MCEERR_AR");
654             } else {
655                  warn_report("Guest MCE Memory Error at QEMU addr %p and "
656                      "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
657                      addr, paddr, "BUS_MCEERR_AO");
658             }
659 
660             return;
661         }
662 
663         if (code == BUS_MCEERR_AO) {
664             warn_report("Hardware memory error at addr %p of type %s "
665                 "for memory used by QEMU itself instead of guest system!",
666                  addr, "BUS_MCEERR_AO");
667         }
668     }
669 
670     if (code == BUS_MCEERR_AR) {
671         hardware_memory_error(addr);
672     }
673 
674     /* Hope we are lucky for AO MCE; just emit an event */
675     emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, false);
676 }
677 
678 static void kvm_reset_exception(CPUX86State *env)
679 {
680     env->exception_nr = -1;
681     env->exception_pending = 0;
682     env->exception_injected = 0;
683     env->exception_has_payload = false;
684     env->exception_payload = 0;
685 }
686 
687 static void kvm_queue_exception(CPUX86State *env,
688                                 int32_t exception_nr,
689                                 uint8_t exception_has_payload,
690                                 uint64_t exception_payload)
691 {
692     assert(env->exception_nr == -1);
693     assert(!env->exception_pending);
694     assert(!env->exception_injected);
695     assert(!env->exception_has_payload);
696 
697     env->exception_nr = exception_nr;
698 
699     if (has_exception_payload) {
700         env->exception_pending = 1;
701 
702         env->exception_has_payload = exception_has_payload;
703         env->exception_payload = exception_payload;
704     } else {
705         env->exception_injected = 1;
706 
707         if (exception_nr == EXCP01_DB) {
708             assert(exception_has_payload);
709             env->dr[6] = exception_payload;
710         } else if (exception_nr == EXCP0E_PAGE) {
711             assert(exception_has_payload);
712             env->cr[2] = exception_payload;
713         } else {
714             assert(!exception_has_payload);
715         }
716     }
717 }
718 
719 static int kvm_inject_mce_oldstyle(X86CPU *cpu)
720 {
721     CPUX86State *env = &cpu->env;
722 
723     if (!kvm_has_vcpu_events() && env->exception_nr == EXCP12_MCHK) {
724         unsigned int bank, bank_num = env->mcg_cap & 0xff;
725         struct kvm_x86_mce mce;
726 
727         kvm_reset_exception(env);
728 
729         /*
730          * There must be at least one bank in use if an MCE is pending.
731          * Find it and use its values for the event injection.
732          */
733         for (bank = 0; bank < bank_num; bank++) {
734             if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
735                 break;
736             }
737         }
738         assert(bank < bank_num);
739 
740         mce.bank = bank;
741         mce.status = env->mce_banks[bank * 4 + 1];
742         mce.mcg_status = env->mcg_status;
743         mce.addr = env->mce_banks[bank * 4 + 2];
744         mce.misc = env->mce_banks[bank * 4 + 3];
745 
746         return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
747     }
748     return 0;
749 }
750 
751 static void cpu_update_state(void *opaque, bool running, RunState state)
752 {
753     CPUX86State *env = opaque;
754 
755     if (running) {
756         env->tsc_valid = false;
757     }
758 }
759 
760 unsigned long kvm_arch_vcpu_id(CPUState *cs)
761 {
762     X86CPU *cpu = X86_CPU(cs);
763     return cpu->apic_id;
764 }
765 
766 #ifndef KVM_CPUID_SIGNATURE_NEXT
767 #define KVM_CPUID_SIGNATURE_NEXT                0x40000100
768 #endif
769 
770 static bool hyperv_enabled(X86CPU *cpu)
771 {
772     return kvm_check_extension(kvm_state, KVM_CAP_HYPERV) > 0 &&
773         ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_NOTIFY) ||
774          cpu->hyperv_features || cpu->hyperv_passthrough);
775 }
776 
777 /*
778  * Check whether target_freq is within conservative
779  * ntp correctable bounds (250ppm) of freq
780  */
781 static inline bool freq_within_bounds(int freq, int target_freq)
782 {
783     int max_freq = freq + (freq * 250 / 1000000);
784     int min_freq = freq - (freq * 250 / 1000000);
785 
786     if (target_freq >= min_freq && target_freq <= max_freq) {
787         return true;
788     }
789 
790     return false;
791 }
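
/*
 * Example of the 250 ppm bound (values for illustration only): with a host
 * TSC of freq = 2,500,000 kHz the tolerance is 2,500,000 * 250 / 1,000,000 =
 * 625 kHz, so any target_freq in [2,499,375 .. 2,500,625] kHz is considered
 * NTP-correctable and accepted.
 */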
792 
793 static int kvm_arch_set_tsc_khz(CPUState *cs)
794 {
795     X86CPU *cpu = X86_CPU(cs);
796     CPUX86State *env = &cpu->env;
797     int r, cur_freq;
798     bool set_ioctl = false;
799 
800     if (!env->tsc_khz) {
801         return 0;
802     }
803 
804     cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
805                kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : -ENOTSUP;
806 
807     /*
808      * If TSC scaling is supported, attempt to set TSC frequency.
809      */
810     if (kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL)) {
811         set_ioctl = true;
812     }
813 
814     /*
815      * If desired TSC frequency is within bounds of NTP correction,
816      * attempt to set TSC frequency.
817      */
818     if (cur_freq != -ENOTSUP && freq_within_bounds(cur_freq, env->tsc_khz)) {
819         set_ioctl = true;
820     }
821 
822     r = set_ioctl ?
823         kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
824         -ENOTSUP;
825 
826     if (r < 0) {
827         /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
828          * TSC frequency doesn't match the one we want.
829          */
830         cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
831                    kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
832                    -ENOTSUP;
833         if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
834             warn_report("TSC frequency mismatch between "
835                         "VM (%" PRId64 " kHz) and host (%d kHz), "
836                         "and TSC scaling unavailable",
837                         env->tsc_khz, cur_freq);
838             return r;
839         }
840     }
841 
842     return 0;
843 }
844 
845 static bool tsc_is_stable_and_known(CPUX86State *env)
846 {
847     if (!env->tsc_khz) {
848         return false;
849     }
850     return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
851         || env->user_tsc_khz;
852 }
853 
854 #define DEFAULT_EVMCS_VERSION ((1 << 8) | 1)
855 
856 static struct {
857     const char *desc;
858     struct {
859         uint32_t func;
860         int reg;
861         uint32_t bits;
862     } flags[2];
863     uint64_t dependencies;
864 } kvm_hyperv_properties[] = {
865     [HYPERV_FEAT_RELAXED] = {
866         .desc = "relaxed timing (hv-relaxed)",
867         .flags = {
868             {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
869              .bits = HV_RELAXED_TIMING_RECOMMENDED}
870         }
871     },
872     [HYPERV_FEAT_VAPIC] = {
873         .desc = "virtual APIC (hv-vapic)",
874         .flags = {
875             {.func = HV_CPUID_FEATURES, .reg = R_EAX,
876              .bits = HV_APIC_ACCESS_AVAILABLE}
877         }
878     },
879     [HYPERV_FEAT_TIME] = {
880         .desc = "clocksources (hv-time)",
881         .flags = {
882             {.func = HV_CPUID_FEATURES, .reg = R_EAX,
883              .bits = HV_TIME_REF_COUNT_AVAILABLE | HV_REFERENCE_TSC_AVAILABLE}
884         }
885     },
886     [HYPERV_FEAT_CRASH] = {
887         .desc = "crash MSRs (hv-crash)",
888         .flags = {
889             {.func = HV_CPUID_FEATURES, .reg = R_EDX,
890              .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
891         }
892     },
893     [HYPERV_FEAT_RESET] = {
894         .desc = "reset MSR (hv-reset)",
895         .flags = {
896             {.func = HV_CPUID_FEATURES, .reg = R_EAX,
897              .bits = HV_RESET_AVAILABLE}
898         }
899     },
900     [HYPERV_FEAT_VPINDEX] = {
901         .desc = "VP_INDEX MSR (hv-vpindex)",
902         .flags = {
903             {.func = HV_CPUID_FEATURES, .reg = R_EAX,
904              .bits = HV_VP_INDEX_AVAILABLE}
905         }
906     },
907     [HYPERV_FEAT_RUNTIME] = {
908         .desc = "VP_RUNTIME MSR (hv-runtime)",
909         .flags = {
910             {.func = HV_CPUID_FEATURES, .reg = R_EAX,
911              .bits = HV_VP_RUNTIME_AVAILABLE}
912         }
913     },
914     [HYPERV_FEAT_SYNIC] = {
915         .desc = "synthetic interrupt controller (hv-synic)",
916         .flags = {
917             {.func = HV_CPUID_FEATURES, .reg = R_EAX,
918              .bits = HV_SYNIC_AVAILABLE}
919         }
920     },
921     [HYPERV_FEAT_STIMER] = {
922         .desc = "synthetic timers (hv-stimer)",
923         .flags = {
924             {.func = HV_CPUID_FEATURES, .reg = R_EAX,
925              .bits = HV_SYNTIMERS_AVAILABLE}
926         },
927         .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
928     },
929     [HYPERV_FEAT_FREQUENCIES] = {
930         .desc = "frequency MSRs (hv-frequencies)",
931         .flags = {
932             {.func = HV_CPUID_FEATURES, .reg = R_EAX,
933              .bits = HV_ACCESS_FREQUENCY_MSRS},
934             {.func = HV_CPUID_FEATURES, .reg = R_EDX,
935              .bits = HV_FREQUENCY_MSRS_AVAILABLE}
936         }
937     },
938     [HYPERV_FEAT_REENLIGHTENMENT] = {
939         .desc = "reenlightenment MSRs (hv-reenlightenment)",
940         .flags = {
941             {.func = HV_CPUID_FEATURES, .reg = R_EAX,
942              .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
943         }
944     },
945     [HYPERV_FEAT_TLBFLUSH] = {
946         .desc = "paravirtualized TLB flush (hv-tlbflush)",
947         .flags = {
948             {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
949              .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
950              HV_EX_PROCESSOR_MASKS_RECOMMENDED}
951         },
952         .dependencies = BIT(HYPERV_FEAT_VPINDEX)
953     },
954     [HYPERV_FEAT_EVMCS] = {
955         .desc = "enlightened VMCS (hv-evmcs)",
956         .flags = {
957             {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
958              .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED}
959         },
960         .dependencies = BIT(HYPERV_FEAT_VAPIC)
961     },
962     [HYPERV_FEAT_IPI] = {
963         .desc = "paravirtualized IPI (hv-ipi)",
964         .flags = {
965             {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
966              .bits = HV_CLUSTER_IPI_RECOMMENDED |
967              HV_EX_PROCESSOR_MASKS_RECOMMENDED}
968         },
969         .dependencies = BIT(HYPERV_FEAT_VPINDEX)
970     },
971     [HYPERV_FEAT_STIMER_DIRECT] = {
972         .desc = "direct mode synthetic timers (hv-stimer-direct)",
973         .flags = {
974             {.func = HV_CPUID_FEATURES, .reg = R_EDX,
975              .bits = HV_STIMER_DIRECT_MODE_AVAILABLE}
976         },
977         .dependencies = BIT(HYPERV_FEAT_STIMER)
978     },
979     [HYPERV_FEAT_AVIC] = {
980         .desc = "AVIC/APICv support (hv-avic/hv-apicv)",
981         .flags = {
982             {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
983              .bits = HV_DEPRECATING_AEOI_RECOMMENDED}
984         }
985     },
986 #ifdef CONFIG_SYNDBG
987     [HYPERV_FEAT_SYNDBG] = {
988         .desc = "Enable synthetic kernel debugger channel (hv-syndbg)",
989         .flags = {
990             {.func = HV_CPUID_FEATURES, .reg = R_EDX,
991              .bits = HV_FEATURE_DEBUG_MSRS_AVAILABLE}
992         },
993         .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_RELAXED)
994     },
995 #endif
996     [HYPERV_FEAT_MSR_BITMAP] = {
997         .desc = "enlightened MSR-Bitmap (hv-emsr-bitmap)",
998         .flags = {
999             {.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX,
1000              .bits = HV_NESTED_MSR_BITMAP}
1001         }
1002     },
1003     [HYPERV_FEAT_XMM_INPUT] = {
1004         .desc = "XMM fast hypercall input (hv-xmm-input)",
1005         .flags = {
1006             {.func = HV_CPUID_FEATURES, .reg = R_EDX,
1007              .bits = HV_HYPERCALL_XMM_INPUT_AVAILABLE}
1008         }
1009     },
1010     [HYPERV_FEAT_TLBFLUSH_EXT] = {
1011         .desc = "Extended gva ranges for TLB flush hypercalls (hv-tlbflush-ext)",
1012         .flags = {
1013             {.func = HV_CPUID_FEATURES, .reg = R_EDX,
1014              .bits = HV_EXT_GVA_RANGES_FLUSH_AVAILABLE}
1015         },
1016         .dependencies = BIT(HYPERV_FEAT_TLBFLUSH)
1017     },
1018     [HYPERV_FEAT_TLBFLUSH_DIRECT] = {
1019         .desc = "direct TLB flush (hv-tlbflush-direct)",
1020         .flags = {
1021             {.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX,
1022              .bits = HV_NESTED_DIRECT_FLUSH}
1023         },
1024         .dependencies = BIT(HYPERV_FEAT_VAPIC)
1025     },
1026 };
1027 
1028 static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max,
1029                                            bool do_sys_ioctl)
1030 {
1031     struct kvm_cpuid2 *cpuid;
1032     int r, size;
1033 
1034     size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
1035     cpuid = g_malloc0(size);
1036     cpuid->nent = max;
1037 
1038     if (do_sys_ioctl) {
1039         r = kvm_ioctl(kvm_state, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
1040     } else {
1041         r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
1042     }
1043     if (r == 0 && cpuid->nent >= max) {
1044         r = -E2BIG;
1045     }
1046     if (r < 0) {
1047         if (r == -E2BIG) {
1048             g_free(cpuid);
1049             return NULL;
1050         } else {
1051             fprintf(stderr, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n",
1052                     strerror(-r));
1053             exit(1);
1054         }
1055     }
1056     return cpuid;
1057 }
1058 
1059 /*
1060  * Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough
1061  * for all entries.
1062  */
1063 static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs)
1064 {
1065     struct kvm_cpuid2 *cpuid;
1066     /* 0x40000000..0x40000005, 0x4000000A, 0x40000080..0x40000082 leaves */
1067     int max = 11;
1068     int i;
1069     bool do_sys_ioctl;
1070 
1071     do_sys_ioctl =
1072         kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID) > 0;
1073 
1074     /*
1075      * A non-empty KVM context is needed when KVM_CAP_SYS_HYPERV_CPUID is
1076      * unsupported; kvm_hyperv_expand_features() checks for that.
1077      */
1078     assert(do_sys_ioctl || cs->kvm_state);
1079 
1080     /*
1081      * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with
1082      * -E2BIG, however, it doesn't report back the right size. Keep increasing
1083      * it and re-trying until we succeed.
1084      */
1085     while ((cpuid = try_get_hv_cpuid(cs, max, do_sys_ioctl)) == NULL) {
1086         max++;
1087     }
1088 
1089     /*
1090      * KVM_GET_SUPPORTED_HV_CPUID does not set the EVMCS CPUID bit before
1091      * KVM_CAP_HYPERV_ENLIGHTENED_VMCS is enabled, but we want to get the
1092      * information early, so just check for the capability and set the bit
1093      * manually.
1094      */
1095     if (!do_sys_ioctl && kvm_check_extension(cs->kvm_state,
1096                             KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
1097         for (i = 0; i < cpuid->nent; i++) {
1098             if (cpuid->entries[i].function == HV_CPUID_ENLIGHTMENT_INFO) {
1099                 cpuid->entries[i].eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
1100             }
1101         }
1102     }
1103 
1104     return cpuid;
1105 }
1106 
1107 /*
1108  * When KVM_GET_SUPPORTED_HV_CPUID is not supported, we fill the CPUID feature
1109  * leaves from the KVM_CAP_HYPERV* extensions and the MSRs that are present.
1110  */
1111 static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
1112 {
1113     X86CPU *cpu = X86_CPU(cs);
1114     struct kvm_cpuid2 *cpuid;
1115     struct kvm_cpuid_entry2 *entry_feat, *entry_recomm;
1116 
1117     /* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */
1118     cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries));
1119     cpuid->nent = 2;
1120 
1121     /* HV_CPUID_VENDOR_AND_MAX_FUNCTIONS */
1122     entry_feat = &cpuid->entries[0];
1123     entry_feat->function = HV_CPUID_FEATURES;
1124 
1125     entry_recomm = &cpuid->entries[1];
1126     entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
1127     entry_recomm->ebx = cpu->hyperv_spinlock_attempts;
1128 
1129     if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
1130         entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
1131         entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
1132         entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
1133         entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED;
1134         entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
1135     }
1136 
1137     if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
1138         entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
1139         entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
1140     }
1141 
1142     if (has_msr_hv_frequencies) {
1143         entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
1144         entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE;
1145     }
1146 
1147     if (has_msr_hv_crash) {
1148         entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE;
1149     }
1150 
1151     if (has_msr_hv_reenlightenment) {
1152         entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
1153     }
1154 
1155     if (has_msr_hv_reset) {
1156         entry_feat->eax |= HV_RESET_AVAILABLE;
1157     }
1158 
1159     if (has_msr_hv_vpindex) {
1160         entry_feat->eax |= HV_VP_INDEX_AVAILABLE;
1161     }
1162 
1163     if (has_msr_hv_runtime) {
1164         entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE;
1165     }
1166 
1167     if (has_msr_hv_synic) {
1168         unsigned int cap = cpu->hyperv_synic_kvm_only ?
1169             KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
1170 
1171         if (kvm_check_extension(cs->kvm_state, cap) > 0) {
1172             entry_feat->eax |= HV_SYNIC_AVAILABLE;
1173         }
1174     }
1175 
1176     if (has_msr_hv_stimer) {
1177         entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
1178     }
1179 
1180     if (has_msr_hv_syndbg_options) {
1181         entry_feat->edx |= HV_GUEST_DEBUGGING_AVAILABLE;
1182         entry_feat->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
1183         entry_feat->ebx |= HV_PARTITION_DEBUGGING_ALLOWED;
1184     }
1185 
1186     if (kvm_check_extension(cs->kvm_state,
1187                             KVM_CAP_HYPERV_TLBFLUSH) > 0) {
1188         entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
1189         entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
1190     }
1191 
1192     if (kvm_check_extension(cs->kvm_state,
1193                             KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
1194         entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
1195     }
1196 
1197     if (kvm_check_extension(cs->kvm_state,
1198                             KVM_CAP_HYPERV_SEND_IPI) > 0) {
1199         entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
1200         entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
1201     }
1202 
1203     return cpuid;
1204 }
1205 
1206 static uint32_t hv_cpuid_get_host(CPUState *cs, uint32_t func, int reg)
1207 {
1208     struct kvm_cpuid_entry2 *entry;
1209     struct kvm_cpuid2 *cpuid;
1210 
1211     if (hv_cpuid_cache) {
1212         cpuid = hv_cpuid_cache;
1213     } else {
1214         if (kvm_check_extension(kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
1215             cpuid = get_supported_hv_cpuid(cs);
1216         } else {
1217             /*
1218              * 'cs->kvm_state' may be NULL when Hyper-V features are expanded
1219              * before the KVM context is created, but this is only done when
1220              * KVM_CAP_SYS_HYPERV_CPUID is supported, and that implies
1221              * KVM_CAP_HYPERV_CPUID.
1222              */
1223             assert(cs->kvm_state);
1224 
1225             cpuid = get_supported_hv_cpuid_legacy(cs);
1226         }
1227         hv_cpuid_cache = cpuid;
1228     }
1229 
1230     if (!cpuid) {
1231         return 0;
1232     }
1233 
1234     entry = cpuid_find_entry(cpuid, func, 0);
1235     if (!entry) {
1236         return 0;
1237     }
1238 
1239     return cpuid_entry_get_reg(entry, reg);
1240 }
1241 
1242 static bool hyperv_feature_supported(CPUState *cs, int feature)
1243 {
1244     uint32_t func, bits;
1245     int i, reg;
1246 
1247     for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {
1248 
1249         func = kvm_hyperv_properties[feature].flags[i].func;
1250         reg = kvm_hyperv_properties[feature].flags[i].reg;
1251         bits = kvm_hyperv_properties[feature].flags[i].bits;
1252 
1253         if (!func) {
1254             continue;
1255         }
1256 
1257         if ((hv_cpuid_get_host(cs, func, reg) & bits) != bits) {
1258             return false;
1259         }
1260     }
1261 
1262     return true;
1263 }
1264 
1265 /* Checks that all feature dependencies are enabled */
1266 static bool hv_feature_check_deps(X86CPU *cpu, int feature, Error **errp)
1267 {
1268     uint64_t deps;
1269     int dep_feat;
1270 
1271     deps = kvm_hyperv_properties[feature].dependencies;
1272     while (deps) {
1273         dep_feat = ctz64(deps);
1274         if (!(hyperv_feat_enabled(cpu, dep_feat))) {
1275             error_setg(errp, "Hyper-V %s requires Hyper-V %s",
1276                        kvm_hyperv_properties[feature].desc,
1277                        kvm_hyperv_properties[dep_feat].desc);
1278             return false;
1279         }
1280         deps &= ~(1ull << dep_feat);
1281     }
1282 
1283     return true;
1284 }
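
/*
 * Sketch of how the dependency walk above consumes a mask (bit positions are
 * illustrative, not the real HYPERV_FEAT_* values): with deps == 0b1010,
 * ctz64() first yields 1, that feature is checked and bit 1 is cleared
 * (deps == 0b1000); ctz64() then yields 3, bit 3 is cleared and the loop
 * terminates with deps == 0, having verified every required feature exactly
 * once.
 */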
1285 
1286 static uint32_t hv_build_cpuid_leaf(CPUState *cs, uint32_t func, int reg)
1287 {
1288     X86CPU *cpu = X86_CPU(cs);
1289     uint32_t r = 0;
1290     int i, j;
1291 
1292     for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties); i++) {
1293         if (!hyperv_feat_enabled(cpu, i)) {
1294             continue;
1295         }
1296 
1297         for (j = 0; j < ARRAY_SIZE(kvm_hyperv_properties[i].flags); j++) {
1298             if (kvm_hyperv_properties[i].flags[j].func != func) {
1299                 continue;
1300             }
1301             if (kvm_hyperv_properties[i].flags[j].reg != reg) {
1302                 continue;
1303             }
1304 
1305             r |= kvm_hyperv_properties[i].flags[j].bits;
1306         }
1307     }
1308 
1309     /* HV_CPUID_NESTED_FEATURES.EAX also encodes the supported eVMCS range */
1310     if (func == HV_CPUID_NESTED_FEATURES && reg == R_EAX) {
1311         if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
1312             r |= DEFAULT_EVMCS_VERSION;
1313         }
1314     }
1315 
1316     return r;
1317 }
1318 
1319 /*
1320  * Expand Hyper-V CPU features. In particular, check that all the requested
1321  * features are supported by the host and that the configuration is sane
1322  * (all the required dependencies are included). Also, this takes care
1323  * of 'hv_passthrough' mode and fills the environment with all supported
1324  * Hyper-V features.
1325  */
1326 bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp)
1327 {
1328     CPUState *cs = CPU(cpu);
1329     Error *local_err = NULL;
1330     int feat;
1331 
1332     if (!hyperv_enabled(cpu))
1333         return true;
1334 
1335     /*
1336      * When kvm_hyperv_expand_features is called at CPU feature expansion
1337      * time, per-CPU kvm_state is not available yet, so we can only proceed
1338      * when KVM_CAP_SYS_HYPERV_CPUID is supported.
1339      */
1340     if (!cs->kvm_state &&
1341         !kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID))
1342         return true;
1343 
1344     if (cpu->hyperv_passthrough) {
1345         cpu->hyperv_vendor_id[0] =
1346             hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EBX);
1347         cpu->hyperv_vendor_id[1] =
1348             hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_ECX);
1349         cpu->hyperv_vendor_id[2] =
1350             hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EDX);
1351         cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor,
1352                                        sizeof(cpu->hyperv_vendor_id) + 1);
1353         memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id,
1354                sizeof(cpu->hyperv_vendor_id));
1355         cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0;
1356 
1357         cpu->hyperv_interface_id[0] =
1358             hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EAX);
1359         cpu->hyperv_interface_id[1] =
1360             hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EBX);
1361         cpu->hyperv_interface_id[2] =
1362             hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_ECX);
1363         cpu->hyperv_interface_id[3] =
1364             hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EDX);
1365 
1366         cpu->hyperv_ver_id_build =
1367             hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EAX);
1368         cpu->hyperv_ver_id_major =
1369             hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) >> 16;
1370         cpu->hyperv_ver_id_minor =
1371             hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) & 0xffff;
1372         cpu->hyperv_ver_id_sp =
1373             hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_ECX);
1374         cpu->hyperv_ver_id_sb =
1375             hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) >> 24;
1376         cpu->hyperv_ver_id_sn =
1377             hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) & 0xffffff;
1378 
1379         cpu->hv_max_vps = hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS,
1380                                             R_EAX);
1381         cpu->hyperv_limits[0] =
1382             hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EBX);
1383         cpu->hyperv_limits[1] =
1384             hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_ECX);
1385         cpu->hyperv_limits[2] =
1386             hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EDX);
1387 
1388         cpu->hyperv_spinlock_attempts =
1389             hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EBX);
1390 
1391         /*
1392          * Mark feature as enabled in 'cpu->hyperv_features' as
1393          * hv_build_cpuid_leaf() uses this info to build guest CPUIDs.
1394          */
1395         for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
1396             if (hyperv_feature_supported(cs, feat)) {
1397                 cpu->hyperv_features |= BIT(feat);
1398             }
1399         }
1400     } else {
1401         /* Check features availability and dependencies */
1402         for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
1403             /* If the feature was not requested, skip it. */
1404             if (!hyperv_feat_enabled(cpu, feat)) {
1405                 continue;
1406             }
1407 
1408             /* Check if the feature is supported by KVM */
1409             if (!hyperv_feature_supported(cs, feat)) {
1410                 error_setg(errp, "Hyper-V %s is not supported by kernel",
1411                            kvm_hyperv_properties[feat].desc);
1412                 return false;
1413             }
1414 
1415             /* Check dependencies */
1416             if (!hv_feature_check_deps(cpu, feat, &local_err)) {
1417                 error_propagate(errp, local_err);
1418                 return false;
1419             }
1420         }
1421     }
1422 
1423     /* Additional dependencies not covered by kvm_hyperv_properties[] */
1424     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
1425         !cpu->hyperv_synic_kvm_only &&
1426         !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
1427         error_setg(errp, "Hyper-V %s requires Hyper-V %s",
1428                    kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
1429                    kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
1430         return false;
1431     }
1432 
1433     return true;
1434 }
1435 
1436 /*
1437  * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent.
1438  */
1439 static int hyperv_fill_cpuids(CPUState *cs,
1440                               struct kvm_cpuid_entry2 *cpuid_ent)
1441 {
1442     X86CPU *cpu = X86_CPU(cs);
1443     struct kvm_cpuid_entry2 *c;
1444     uint32_t signature[3];
1445     uint32_t cpuid_i = 0, max_cpuid_leaf = 0;
1446     uint32_t nested_eax =
1447         hv_build_cpuid_leaf(cs, HV_CPUID_NESTED_FEATURES, R_EAX);
1448 
1449     max_cpuid_leaf = nested_eax ? HV_CPUID_NESTED_FEATURES :
1450         HV_CPUID_IMPLEMENT_LIMITS;
1451 
1452     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) {
1453         max_cpuid_leaf =
1454             MAX(max_cpuid_leaf, HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
1455     }
1456 
1457     c = &cpuid_ent[cpuid_i++];
1458     c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
1459     c->eax = max_cpuid_leaf;
1460     c->ebx = cpu->hyperv_vendor_id[0];
1461     c->ecx = cpu->hyperv_vendor_id[1];
1462     c->edx = cpu->hyperv_vendor_id[2];
1463 
1464     c = &cpuid_ent[cpuid_i++];
1465     c->function = HV_CPUID_INTERFACE;
1466     c->eax = cpu->hyperv_interface_id[0];
1467     c->ebx = cpu->hyperv_interface_id[1];
1468     c->ecx = cpu->hyperv_interface_id[2];
1469     c->edx = cpu->hyperv_interface_id[3];
1470 
1471     c = &cpuid_ent[cpuid_i++];
1472     c->function = HV_CPUID_VERSION;
1473     c->eax = cpu->hyperv_ver_id_build;
1474     c->ebx = (uint32_t)cpu->hyperv_ver_id_major << 16 |
1475         cpu->hyperv_ver_id_minor;
1476     c->ecx = cpu->hyperv_ver_id_sp;
1477     c->edx = (uint32_t)cpu->hyperv_ver_id_sb << 24 |
1478         (cpu->hyperv_ver_id_sn & 0xffffff);
1479 
1480     c = &cpuid_ent[cpuid_i++];
1481     c->function = HV_CPUID_FEATURES;
1482     c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EAX);
1483     c->ebx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EBX);
1484     c->edx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EDX);
1485 
1486     /* Unconditionally required with any Hyper-V enlightenment */
1487     c->eax |= HV_HYPERCALL_AVAILABLE;
1488 
1489     /* SynIC and Vmbus devices require messages/signals hypercalls */
1490     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
1491         !cpu->hyperv_synic_kvm_only) {
1492         c->ebx |= HV_POST_MESSAGES | HV_SIGNAL_EVENTS;
1493     }
1494 
1495 
1496     /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
1497     c->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
1498 
1499     c = &cpuid_ent[cpuid_i++];
1500     c->function = HV_CPUID_ENLIGHTMENT_INFO;
1501     c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX);
1502     c->ebx = cpu->hyperv_spinlock_attempts;
1503 
1504     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC) &&
1505         !hyperv_feat_enabled(cpu, HYPERV_FEAT_AVIC)) {
1506         c->eax |= HV_APIC_ACCESS_RECOMMENDED;
1507     }
1508 
1509     if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
1510         c->eax |= HV_NO_NONARCH_CORESHARING;
1511     } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
1512         c->eax |= hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX) &
1513             HV_NO_NONARCH_CORESHARING;
1514     }
1515 
1516     c = &cpuid_ent[cpuid_i++];
1517     c->function = HV_CPUID_IMPLEMENT_LIMITS;
1518     c->eax = cpu->hv_max_vps;
1519     c->ebx = cpu->hyperv_limits[0];
1520     c->ecx = cpu->hyperv_limits[1];
1521     c->edx = cpu->hyperv_limits[2];
1522 
1523     if (nested_eax) {
1524         uint32_t function;
1525 
1526         /* Create zeroed 0x40000006..0x40000009 leaves */
1527         for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
1528              function < HV_CPUID_NESTED_FEATURES; function++) {
1529             c = &cpuid_ent[cpuid_i++];
1530             c->function = function;
1531         }
1532 
1533         c = &cpuid_ent[cpuid_i++];
1534         c->function = HV_CPUID_NESTED_FEATURES;
1535         c->eax = nested_eax;
1536     }
1537 
1538     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) {
1539         c = &cpuid_ent[cpuid_i++];
1540         c->function = HV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS;
1541         c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ?
1542             HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
1543         memcpy(signature, "Microsoft VS", 12);
1544         c->eax = 0;
1545         c->ebx = signature[0];
1546         c->ecx = signature[1];
1547         c->edx = signature[2];
1548 
1549         c = &cpuid_ent[cpuid_i++];
1550         c->function = HV_CPUID_SYNDBG_INTERFACE;
1551         memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
1552         c->eax = signature[0];
1553         c->ebx = 0;
1554         c->ecx = 0;
1555         c->edx = 0;
1556 
1557         c = &cpuid_ent[cpuid_i++];
1558         c->function = HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
1559         c->eax = HV_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
1560         c->ebx = 0;
1561         c->ecx = 0;
1562         c->edx = 0;
1563     }
1564 
1565     return cpuid_i;
1566 }
1567 
1568 static Error *hv_passthrough_mig_blocker;
1569 static Error *hv_no_nonarch_cs_mig_blocker;
1570 
1571 /* Checks that the exposed eVMCS version range is supported by KVM */
1572 static bool evmcs_version_supported(uint16_t evmcs_version,
1573                                     uint16_t supported_evmcs_version)
1574 {
1575     uint8_t min_version = evmcs_version & 0xff;
1576     uint8_t max_version = evmcs_version >> 8;
1577     uint8_t min_supported_version = supported_evmcs_version & 0xff;
1578     uint8_t max_supported_version = supported_evmcs_version >> 8;
1579 
1580     return (min_version >= min_supported_version) &&
1581         (max_version <= max_supported_version);
1582 }
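
/*
 * Example: DEFAULT_EVMCS_VERSION is (1 << 8) | 1, i.e. the low byte encodes
 * the minimum version (1) and the high byte the maximum version (1).  If KVM
 * reported, say, supported_evmcs_version == 0x0201 (min 1, max 2), the check
 * above passes because 1 >= 1 and 1 <= 2; a host that only supported a
 * hypothetical version 2 and above (0x0202) would be rejected.
 */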
1583 
1584 static int hyperv_init_vcpu(X86CPU *cpu)
1585 {
1586     CPUState *cs = CPU(cpu);
1587     Error *local_err = NULL;
1588     int ret;
1589 
1590     if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
1591         error_setg(&hv_passthrough_mig_blocker,
1592                    "'hv-passthrough' CPU flag prevents migration, use explicit"
1593                    " set of hv-* flags instead");
1594         ret = migrate_add_blocker(hv_passthrough_mig_blocker, &local_err);
1595         if (ret < 0) {
1596             error_report_err(local_err);
1597             return ret;
1598         }
1599     }
1600 
1601     if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO &&
1602         hv_no_nonarch_cs_mig_blocker == NULL) {
1603         error_setg(&hv_no_nonarch_cs_mig_blocker,
1604                    "'hv-no-nonarch-coresharing=auto' CPU flag prevents migration"
1605                    " use explicit 'hv-no-nonarch-coresharing=on' instead (but"
1606                    " make sure SMT is disabled and/or that vCPUs are properly"
1607                    " pinned)");
1608         ret = migrate_add_blocker(hv_no_nonarch_cs_mig_blocker, &local_err);
1609         if (ret < 0) {
1610             error_report_err(local_err);
1611             return ret;
1612         }
1613     }
1614 
1615     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
1616         /*
1617          * The kernel doesn't support setting vp_index; assert that its value
1618          * is in sync with QEMU's.
1619          */
1620         uint64_t value;
1621 
1622         ret = kvm_get_one_msr(cpu, HV_X64_MSR_VP_INDEX, &value);
1623         if (ret < 0) {
1624             return ret;
1625         }
1626 
1627         if (value != hyperv_vp_index(CPU(cpu))) {
1628             error_report("kernel's vp_index != QEMU's vp_index");
1629             return -ENXIO;
1630         }
1631     }
1632 
1633     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
1634         uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
1635             KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
1636         ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
1637         if (ret < 0) {
1638             error_report("failed to turn on HyperV SynIC in KVM: %s",
1639                          strerror(-ret));
1640             return ret;
1641         }
1642 
1643         if (!cpu->hyperv_synic_kvm_only) {
1644             ret = hyperv_x86_synic_add(cpu);
1645             if (ret < 0) {
1646                 error_report("failed to create HyperV SynIC: %s",
1647                              strerror(-ret));
1648                 return ret;
1649             }
1650         }
1651     }
1652 
1653     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
1654         uint16_t evmcs_version = DEFAULT_EVMCS_VERSION;
1655         uint16_t supported_evmcs_version;
1656 
1657         ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
1658                                   (uintptr_t)&supported_evmcs_version);
1659 
1660         /*
1661          * KVM is required to support EVMCS ver.1, as that's what the 'hv-evmcs'
1662          * option sets. Note: we hardcode the maximum supported eVMCS version
1663          * to '1' as well, so the 'hv-evmcs' feature stays migratable even when
1664          * (and if) ver.2 is implemented. A new option (e.g. 'hv-evmcs=2') will
1665          * then have to be added.
1666          */
1667         if (ret < 0) {
1668             error_report("Hyper-V %s is not supported by kernel",
1669                          kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
1670             return ret;
1671         }
1672 
1673         if (!evmcs_version_supported(evmcs_version, supported_evmcs_version)) {
1674             error_report("eVMCS version range [%d..%d] is not supported by "
1675                          "kernel (supported: [%d..%d])", evmcs_version & 0xff,
1676                          evmcs_version >> 8, supported_evmcs_version & 0xff,
1677                          supported_evmcs_version >> 8);
1678             return -ENOTSUP;
1679         }
1680     }
1681 
1682     if (cpu->hyperv_enforce_cpuid) {
1683         ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENFORCE_CPUID, 0, 1);
1684         if (ret < 0) {
1685             error_report("failed to enable KVM_CAP_HYPERV_ENFORCE_CPUID: %s",
1686                          strerror(-ret));
1687             return ret;
1688         }
1689     }
1690 
1691     return 0;
1692 }
1693 
1694 static Error *invtsc_mig_blocker;
1695 
1696 #define KVM_MAX_CPUID_ENTRIES  100
1697 
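/*
 * Size and allocate the per-vCPU XSAVE buffer: KVM_CAP_XSAVE2 reports the
 * size needed for all enabled XSAVE components; otherwise fall back to the
 * fixed-size struct kvm_xsave, or skip entirely if XSAVE is unsupported.
 */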
1698 static void kvm_init_xsave(CPUX86State *env)
1699 {
1700     if (has_xsave2) {
1701         env->xsave_buf_len = QEMU_ALIGN_UP(has_xsave2, 4096);
1702     } else if (has_xsave) {
1703         env->xsave_buf_len = sizeof(struct kvm_xsave);
1704     } else {
1705         return;
1706     }
1707 
1708     env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
1709     memset(env->xsave_buf, 0, env->xsave_buf_len);
1710     /*
1711      * The allocated storage must be large enough for all of the
1712      * possible XSAVE state components.
1713      */
1714     assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX) <=
1715            env->xsave_buf_len);
1716 }
1717 
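/*
 * (Re)initialize the KVM_{GET,SET}_NESTED_STATE buffer: zero it while
 * preserving its size and, for VMX, mark vmxon/vmcs12 as absent (-1) so the
 * vCPU starts outside of nested operation.
 */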
1718 static void kvm_init_nested_state(CPUX86State *env)
1719 {
1720     struct kvm_vmx_nested_state_hdr *vmx_hdr;
1721     uint32_t size;
1722 
1723     if (!env->nested_state) {
1724         return;
1725     }
1726 
1727     size = env->nested_state->size;
1728 
1729     memset(env->nested_state, 0, size);
1730     env->nested_state->size = size;
1731 
1732     if (cpu_has_vmx(env)) {
1733         env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
1734         vmx_hdr = &env->nested_state->hdr.vmx;
1735         vmx_hdr->vmxon_pa = -1ull;
1736         vmx_hdr->vmcs12_pa = -1ull;
1737     } else if (cpu_has_svm(env)) {
1738         env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM;
1739     }
1740 }
1741 
1742 int kvm_arch_init_vcpu(CPUState *cs)
1743 {
1744     struct {
1745         struct kvm_cpuid2 cpuid;
1746         struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
1747     } cpuid_data;
1748     /*
1749      * The kernel defines these structs with padding fields so there
1750      * should be no extra padding in our cpuid_data struct.
1751      */
1752     QEMU_BUILD_BUG_ON(sizeof(cpuid_data) !=
1753                       sizeof(struct kvm_cpuid2) +
1754                       sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);
1755 
1756     X86CPU *cpu = X86_CPU(cs);
1757     CPUX86State *env = &cpu->env;
1758     uint32_t limit, i, j, cpuid_i;
1759     uint32_t unused;
1760     struct kvm_cpuid_entry2 *c;
1761     uint32_t signature[3];
1762     int kvm_base = KVM_CPUID_SIGNATURE;
1763     int max_nested_state_len;
1764     int r;
1765     Error *local_err = NULL;
1766 
1767     memset(&cpuid_data, 0, sizeof(cpuid_data));
1768 
1769     cpuid_i = 0;
1770 
1771     has_xsave2 = kvm_check_extension(cs->kvm_state, KVM_CAP_XSAVE2);
1772 
1773     r = kvm_arch_set_tsc_khz(cs);
1774     if (r < 0) {
1775         return r;
1776     }
1777 
1778     /* The vCPU's TSC frequency is either specified by the user or follows
1779      * the value used by KVM if the former is not present. In the latter
1780      * case, we query it from KVM and record it in env->tsc_khz, so that the
1781      * vCPU's TSC frequency can be migrated later via this field.
1782      */
1783     if (!env->tsc_khz) {
1784         r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
1785             kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
1786             -ENOTSUP;
1787         if (r > 0) {
1788             env->tsc_khz = r;
1789         }
1790     }
1791 
1792     env->apic_bus_freq = KVM_APIC_BUS_FREQUENCY;
1793 
1794     /*
1795      * kvm_hyperv_expand_features() is called here for the second time in case
1796      * KVM_CAP_SYS_HYPERV_CPUID is not supported. While we can't possibly handle
1797      * 'query-cpu-model-expansion' in this case as we don't have a KVM vCPU to
1798      * check which Hyper-V enlightenments are supported and which are not, we
1799      * can still proceed and check/expand Hyper-V enlightenments here so legacy
1800      * behavior is preserved.
1801      */
1802     if (!kvm_hyperv_expand_features(cpu, &local_err)) {
1803         error_report_err(local_err);
1804         return -ENOSYS;
1805     }
1806 
1807     if (hyperv_enabled(cpu)) {
1808         r = hyperv_init_vcpu(cpu);
1809         if (r) {
1810             return r;
1811         }
1812 
1813         cpuid_i = hyperv_fill_cpuids(cs, cpuid_data.entries);
1814         kvm_base = KVM_CPUID_SIGNATURE_NEXT;
1815         has_msr_hv_hypercall = true;
1816     }
1817 
1818     if (cpu->expose_kvm) {
1819         memcpy(signature, "KVMKVMKVM\0\0\0", 12);
1820         c = &cpuid_data.entries[cpuid_i++];
1821         c->function = KVM_CPUID_SIGNATURE | kvm_base;
1822         c->eax = KVM_CPUID_FEATURES | kvm_base;
1823         c->ebx = signature[0];
1824         c->ecx = signature[1];
1825         c->edx = signature[2];
1826 
1827         c = &cpuid_data.entries[cpuid_i++];
1828         c->function = KVM_CPUID_FEATURES | kvm_base;
1829         c->eax = env->features[FEAT_KVM];
1830         c->edx = env->features[FEAT_KVM_HINTS];
1831     }
1832 
1833     cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
1834 
1835     if (cpu->kvm_pv_enforce_cpuid) {
1836         r = kvm_vcpu_enable_cap(cs, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 0, 1);
1837         if (r < 0) {
1838             fprintf(stderr,
1839                     "failed to enable KVM_CAP_ENFORCE_PV_FEATURE_CPUID: %s\n",
1840                     strerror(-r));
1841             abort();
1842         }
1843     }
1844 
1845     for (i = 0; i <= limit; i++) {
1846         if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1847             fprintf(stderr, "unsupported level value: 0x%x\n", limit);
1848             abort();
1849         }
1850         c = &cpuid_data.entries[cpuid_i++];
1851 
1852         switch (i) {
1853         case 2: {
1854             /* Keep reading function 2 until all descriptors are retrieved (count in EAX[7:0]) */
1855             int times;
1856 
1857             c->function = i;
1858             c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
1859                        KVM_CPUID_FLAG_STATE_READ_NEXT;
1860             cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1861             times = c->eax & 0xff;
1862 
1863             for (j = 1; j < times; ++j) {
1864                 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1865                     fprintf(stderr, "cpuid_data is full, no space for "
1866                             "cpuid(eax:2):eax & 0xff = 0x%x\n", times);
1867                     abort();
1868                 }
1869                 c = &cpuid_data.entries[cpuid_i++];
1870                 c->function = i;
1871                 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
1872                 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1873             }
1874             break;
1875         }
1876         case 0x1f:
1877             if (env->nr_dies < 2) {
1878                 break;
1879             }
1880             /* fallthrough */
1881         case 4:
1882         case 0xb:
1883         case 0xd:
1884             for (j = 0; ; j++) {
1885                 if (i == 0xd && j == 64) {
1886                     break;
1887                 }
1888 
1889                 if (i == 0x1f && j == 64) {
1890                     break;
1891                 }
1892 
1893                 c->function = i;
1894                 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1895                 c->index = j;
1896                 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
1897 
1898                 if (i == 4 && c->eax == 0) {
1899                     break;
1900                 }
1901                 if (i == 0xb && !(c->ecx & 0xff00)) {
1902                     break;
1903                 }
1904                 if (i == 0x1f && !(c->ecx & 0xff00)) {
1905                     break;
1906                 }
1907                 if (i == 0xd && c->eax == 0) {
1908                     continue;
1909                 }
1910                 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1911                     fprintf(stderr, "cpuid_data is full, no space for "
1912                             "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
1913                     abort();
1914                 }
1915                 c = &cpuid_data.entries[cpuid_i++];
1916             }
1917             break;
1918         case 0x7:
1919         case 0x12:
1920             for (j = 0; ; j++) {
1921                 c->function = i;
1922                 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1923                 c->index = j;
1924                 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
1925 
1926                 if (j > 1 && (c->eax & 0xf) != 1) {
1927                     break;
1928                 }
1929 
1930                 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1931                     fprintf(stderr, "cpuid_data is full, no space for "
1932                                 "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
1933                     abort();
1934                 }
1935                 c = &cpuid_data.entries[cpuid_i++];
1936             }
1937             break;
1938         case 0x14:
1939         case 0x1d:
1940         case 0x1e: {
1941             uint32_t times;
1942 
1943             c->function = i;
1944             c->index = 0;
1945             c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1946             cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1947             times = c->eax;
1948 
1949             for (j = 1; j <= times; ++j) {
1950                 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1951                     fprintf(stderr, "cpuid_data is full, no space for "
1952                                 "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
1953                     abort();
1954                 }
1955                 c = &cpuid_data.entries[cpuid_i++];
1956                 c->function = i;
1957                 c->index = j;
1958                 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1959                 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
1960             }
1961             break;
1962         }
1963         default:
1964             c->function = i;
1965             c->flags = 0;
1966             cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1967             if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
1968                 /*
1969                  * KVM already returns all zeroes if a CPUID entry is missing,
1970                  * so we can omit it and avoid hitting KVM's 80-entry limit.
1971                  */
1972                 cpuid_i--;
1973             }
1974             break;
1975         }
1976     }
1977 
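    /*
     * CPUID leaf 0x0A describes the architectural PMU: version in EAX[7:0],
     * number of GP counters in EAX[15:8], number of fixed counters in EDX[4:0].
     */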
1978     if (limit >= 0x0a) {
1979         uint32_t eax, edx;
1980 
1981         cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
1982 
1983         has_architectural_pmu_version = eax & 0xff;
1984         if (has_architectural_pmu_version > 0) {
1985             num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
1986 
1987             /* Shouldn't be more than 32, since that's the number of bits
1988              * available in EBX to tell us _which_ counters are available.
1989              * Play it safe.
1990              */
1991             if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
1992                 num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
1993             }
1994 
1995             if (has_architectural_pmu_version > 1) {
1996                 num_architectural_pmu_fixed_counters = edx & 0x1f;
1997 
1998                 if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
1999                     num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
2000                 }
2001             }
2002         }
2003     }
2004 
2005     cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
2006 
2007     for (i = 0x80000000; i <= limit; i++) {
2008         if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
2009             fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
2010             abort();
2011         }
2012         c = &cpuid_data.entries[cpuid_i++];
2013 
2014         switch (i) {
2015         case 0x8000001d:
2016             /* Query for all AMD cache information leaves */
2017             for (j = 0; ; j++) {
2018                 c->function = i;
2019                 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2020                 c->index = j;
2021                 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
2022 
2023                 if (c->eax == 0) {
2024                     break;
2025                 }
2026                 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
2027                     fprintf(stderr, "cpuid_data is full, no space for "
2028                             "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
2029                     abort();
2030                 }
2031                 c = &cpuid_data.entries[cpuid_i++];
2032             }
2033             break;
2034         default:
2035             c->function = i;
2036             c->flags = 0;
2037             cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
2038             if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
2039                 /*
2040                  * KVM already returns all zeroes if a CPUID entry is missing,
2041                  * so we can omit it and avoid hitting KVM's 80-entry limit.
2042                  */
2043                 cpuid_i--;
2044             }
2045             break;
2046         }
2047     }
2048 
2049     /* Query Centaur's CPUID leaves if they are supported. */
2050     if (env->cpuid_xlevel2 > 0) {
2051         cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
2052 
2053         for (i = 0xC0000000; i <= limit; i++) {
2054             if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
2055                 fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
2056                 abort();
2057             }
2058             c = &cpuid_data.entries[cpuid_i++];
2059 
2060             c->function = i;
2061             c->flags = 0;
2062             cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
2063         }
2064     }
2065 
2066     cpuid_data.cpuid.nent = cpuid_i;
2067 
2068     if (((env->cpuid_version >> 8)&0xF) >= 6
2069         && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2070            (CPUID_MCE | CPUID_MCA)
2071         && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
2072         uint64_t mcg_cap, unsupported_caps;
2073         int banks;
2074         int ret;
2075 
2076         ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
2077         if (ret < 0) {
2078             fprintf(stderr, "kvm_get_mce_cap_supported: %s\n", strerror(-ret));
2079             return ret;
2080         }
2081 
2082         if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
2083             error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
2084                          (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
2085             return -ENOTSUP;
2086         }
2087 
2088         unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
2089         if (unsupported_caps) {
2090             if (unsupported_caps & MCG_LMCE_P) {
2091                 error_report("kvm: LMCE not supported");
2092                 return -ENOTSUP;
2093             }
2094             warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64,
2095                         unsupported_caps);
2096         }
2097 
2098         env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
2099         ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
2100         if (ret < 0) {
2101             fprintf(stderr, "KVM_X86_SETUP_MCE: %s\n", strerror(-ret));
2102             return ret;
2103         }
2104     }
2105 
2106     cpu->vmsentry = qemu_add_vm_change_state_handler(cpu_update_state, env);
2107 
2108     c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
2109     if (c) {
2110         has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
2111                                   !!(c->ecx & CPUID_EXT_SMX);
2112     }
2113 
2114     c = cpuid_find_entry(&cpuid_data.cpuid, 7, 0);
2115     if (c && (c->ebx & CPUID_7_0_EBX_SGX)) {
2116         has_msr_feature_control = true;
2117     }
2118 
2119     if (env->mcg_cap & MCG_LMCE_P) {
2120         has_msr_mcg_ext_ctl = has_msr_feature_control = true;
2121     }
2122 
2123     if (!env->user_tsc_khz) {
2124         if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
2125             invtsc_mig_blocker == NULL) {
2126             error_setg(&invtsc_mig_blocker,
2127                        "State blocked by non-migratable CPU device"
2128                        " (invtsc flag)");
2129             r = migrate_add_blocker(invtsc_mig_blocker, &local_err);
2130             if (r < 0) {
2131                 error_report_err(local_err);
2132                 return r;
2133             }
2134         }
2135     }
2136 
2137     if (cpu->vmware_cpuid_freq
2138         /* Guests depend on 0x40000000 to detect this feature, so only expose
2139          * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
2140         && cpu->expose_kvm
2141         && kvm_base == KVM_CPUID_SIGNATURE
2142         /* TSC clock must be stable and known for this feature. */
2143         && tsc_is_stable_and_known(env)) {
2144 
2145         c = &cpuid_data.entries[cpuid_i++];
2146         c->function = KVM_CPUID_SIGNATURE | 0x10;
2147         c->eax = env->tsc_khz;
2148         c->ebx = env->apic_bus_freq / 1000; /* Hz to KHz */
2149         c->ecx = c->edx = 0;
2150 
2151         c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
2152         c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10);
2153     }
2154 
2155     cpuid_data.cpuid.nent = cpuid_i;
2156 
2157     cpuid_data.cpuid.padding = 0;
2158     r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
2159     if (r) {
2160         goto fail;
2161     }
2162     kvm_init_xsave(env);
2163 
2164     max_nested_state_len = kvm_max_nested_state_length();
2165     if (max_nested_state_len > 0) {
2166         assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
2167 
2168         if (cpu_has_vmx(env) || cpu_has_svm(env)) {
2169             env->nested_state = g_malloc0(max_nested_state_len);
2170             env->nested_state->size = max_nested_state_len;
2171 
2172             kvm_init_nested_state(env);
2173         }
2174     }
2175 
2176     cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
2177 
2178     if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
2179         has_msr_tsc_aux = false;
2180     }
2181 
2182     kvm_init_msrs(cpu);
2183 
2184     return 0;
2185 
2186  fail:
2187     migrate_del_blocker(invtsc_mig_blocker);
2188 
2189     return r;
2190 }
2191 
2192 int kvm_arch_destroy_vcpu(CPUState *cs)
2193 {
2194     X86CPU *cpu = X86_CPU(cs);
2195     CPUX86State *env = &cpu->env;
2196 
2197     g_free(env->xsave_buf);
2198 
2199     g_free(cpu->kvm_msr_buf);
2200     cpu->kvm_msr_buf = NULL;
2201 
2202     g_free(env->nested_state);
2203     env->nested_state = NULL;
2204 
2205     qemu_del_vm_change_state_handler(cpu->vmsentry);
2206 
2207     return 0;
2208 }
2209 
2210 void kvm_arch_reset_vcpu(X86CPU *cpu)
2211 {
2212     CPUX86State *env = &cpu->env;
2213 
2214     env->xcr0 = 1;
2215     if (kvm_irqchip_in_kernel()) {
2216         env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
2217                                           KVM_MP_STATE_UNINITIALIZED;
2218     } else {
2219         env->mp_state = KVM_MP_STATE_RUNNABLE;
2220     }
2221 
2222     /* enabled by default */
2223     env->poll_control_msr = 1;
2224 
2225     kvm_init_nested_state(env);
2226 
2227     sev_es_set_reset_vector(CPU(cpu));
2228 }
2229 
2230 void kvm_arch_after_reset_vcpu(X86CPU *cpu)
2231 {
2232     CPUX86State *env = &cpu->env;
2233     int i;
2234 
2235     /*
2236      * Reset SynIC after all other devices have been reset to let them remove
2237      * their SINT routes first.
2238      */
2239     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
2240         for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
2241             env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
2242         }
2243 
2244         hyperv_x86_synic_reset(cpu);
2245     }
2246 }
2247 
2248 void kvm_arch_do_init_vcpu(X86CPU *cpu)
2249 {
2250     CPUX86State *env = &cpu->env;
2251 
2252     /* APs get directly into wait-for-SIPI state.  */
2253     if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
2254         env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
2255     }
2256 }
2257 
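/*
 * Query and cache the list of feature MSRs (KVM_GET_MSR_FEATURE_INDEX_LIST).
 * The first call with nmsrs = 0 fails with -E2BIG but fills in the number of
 * entries needed for the second, real call.
 */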
2258 static int kvm_get_supported_feature_msrs(KVMState *s)
2259 {
2260     int ret = 0;
2261 
2262     if (kvm_feature_msrs != NULL) {
2263         return 0;
2264     }
2265 
2266     if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) {
2267         return 0;
2268     }
2269 
2270     struct kvm_msr_list msr_list;
2271 
2272     msr_list.nmsrs = 0;
2273     ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list);
2274     if (ret < 0 && ret != -E2BIG) {
2275         error_report("Fetch KVM feature MSR list failed: %s",
2276             strerror(-ret));
2277         return ret;
2278     }
2279 
2280     assert(msr_list.nmsrs > 0);
2281     kvm_feature_msrs = g_malloc0(sizeof(msr_list) +
2282                  msr_list.nmsrs * sizeof(msr_list.indices[0]));
2283 
2284     kvm_feature_msrs->nmsrs = msr_list.nmsrs;
2285     ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs);
2286 
2287     if (ret < 0) {
2288         error_report("Fetch KVM feature MSR list failed: %s",
2289             strerror(-ret));
2290         g_free(kvm_feature_msrs);
2291         kvm_feature_msrs = NULL;
2292         return ret;
2293     }
2294 
2295     return 0;
2296 }
2297 
2298 static int kvm_get_supported_msrs(KVMState *s)
2299 {
2300     int ret = 0;
2301     struct kvm_msr_list msr_list, *kvm_msr_list;
2302 
2303     /*
2304      *  Obtain MSR list from KVM.  These are the MSRs that we must
2305      *  save/restore.
2306      */
2307     msr_list.nmsrs = 0;
2308     ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
2309     if (ret < 0 && ret != -E2BIG) {
2310         return ret;
2311     }
2312     /*
2313      * Old kernel modules had a bug and could write beyond the provided
2314      * memory. Allocate at least 1K to be safe.
2315      */
2316     kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
2317                                           msr_list.nmsrs *
2318                                           sizeof(msr_list.indices[0])));
2319 
2320     kvm_msr_list->nmsrs = msr_list.nmsrs;
2321     ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
2322     if (ret >= 0) {
2323         int i;
2324 
2325         for (i = 0; i < kvm_msr_list->nmsrs; i++) {
2326             switch (kvm_msr_list->indices[i]) {
2327             case MSR_STAR:
2328                 has_msr_star = true;
2329                 break;
2330             case MSR_VM_HSAVE_PA:
2331                 has_msr_hsave_pa = true;
2332                 break;
2333             case MSR_TSC_AUX:
2334                 has_msr_tsc_aux = true;
2335                 break;
2336             case MSR_TSC_ADJUST:
2337                 has_msr_tsc_adjust = true;
2338                 break;
2339             case MSR_IA32_TSCDEADLINE:
2340                 has_msr_tsc_deadline = true;
2341                 break;
2342             case MSR_IA32_SMBASE:
2343                 has_msr_smbase = true;
2344                 break;
2345             case MSR_SMI_COUNT:
2346                 has_msr_smi_count = true;
2347                 break;
2348             case MSR_IA32_MISC_ENABLE:
2349                 has_msr_misc_enable = true;
2350                 break;
2351             case MSR_IA32_BNDCFGS:
2352                 has_msr_bndcfgs = true;
2353                 break;
2354             case MSR_IA32_XSS:
2355                 has_msr_xss = true;
2356                 break;
2357             case MSR_IA32_UMWAIT_CONTROL:
2358                 has_msr_umwait = true;
2359                 break;
2360             case HV_X64_MSR_CRASH_CTL:
2361                 has_msr_hv_crash = true;
2362                 break;
2363             case HV_X64_MSR_RESET:
2364                 has_msr_hv_reset = true;
2365                 break;
2366             case HV_X64_MSR_VP_INDEX:
2367                 has_msr_hv_vpindex = true;
2368                 break;
2369             case HV_X64_MSR_VP_RUNTIME:
2370                 has_msr_hv_runtime = true;
2371                 break;
2372             case HV_X64_MSR_SCONTROL:
2373                 has_msr_hv_synic = true;
2374                 break;
2375             case HV_X64_MSR_STIMER0_CONFIG:
2376                 has_msr_hv_stimer = true;
2377                 break;
2378             case HV_X64_MSR_TSC_FREQUENCY:
2379                 has_msr_hv_frequencies = true;
2380                 break;
2381             case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
2382                 has_msr_hv_reenlightenment = true;
2383                 break;
2384             case HV_X64_MSR_SYNDBG_OPTIONS:
2385                 has_msr_hv_syndbg_options = true;
2386                 break;
2387             case MSR_IA32_SPEC_CTRL:
2388                 has_msr_spec_ctrl = true;
2389                 break;
2390             case MSR_AMD64_TSC_RATIO:
2391                 has_tsc_scale_msr = true;
2392                 break;
2393             case MSR_IA32_TSX_CTRL:
2394                 has_msr_tsx_ctrl = true;
2395                 break;
2396             case MSR_VIRT_SSBD:
2397                 has_msr_virt_ssbd = true;
2398                 break;
2399             case MSR_IA32_ARCH_CAPABILITIES:
2400                 has_msr_arch_capabs = true;
2401                 break;
2402             case MSR_IA32_CORE_CAPABILITY:
2403                 has_msr_core_capabs = true;
2404                 break;
2405             case MSR_IA32_PERF_CAPABILITIES:
2406                 has_msr_perf_capabs = true;
2407                 break;
2408             case MSR_IA32_VMX_VMFUNC:
2409                 has_msr_vmx_vmfunc = true;
2410                 break;
2411             case MSR_IA32_UCODE_REV:
2412                 has_msr_ucode_rev = true;
2413                 break;
2414             case MSR_IA32_VMX_PROCBASED_CTLS2:
2415                 has_msr_vmx_procbased_ctls2 = true;
2416                 break;
2417             case MSR_IA32_PKRS:
2418                 has_msr_pkrs = true;
2419                 break;
2420             }
2421         }
2422     }
2423 
2424     g_free(kvm_msr_list);
2425 
2426     return ret;
2427 }
2428 
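/*
 * Userspace rdmsr handler for MSR_CORE_THREAD_COUNT: synthesize the value
 * from the configured topology (total thread count in bits 15..0, core count
 * in bits 31..16).
 */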
2429 static bool kvm_rdmsr_core_thread_count(X86CPU *cpu, uint32_t msr,
2430                                         uint64_t *val)
2431 {
2432     CPUState *cs = CPU(cpu);
2433 
2434     *val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */
2435     *val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */
2436 
2437     return true;
2438 }
2439 
2440 static Notifier smram_machine_done;
2441 static KVMMemoryListener smram_listener;
2442 static AddressSpace smram_address_space;
2443 static MemoryRegion smram_as_root;
2444 static MemoryRegion smram_as_mem;
2445 
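/*
 * Build the separate SMRAM address space for KVM: normal system memory with
 * the machine's SMRAM region (if present) overlaid at higher priority, then
 * register it as KVM address space 1, which KVM uses while the vCPU is in SMM.
 */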
2446 static void register_smram_listener(Notifier *n, void *unused)
2447 {
2448     MemoryRegion *smram =
2449         (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2450 
2451     /* Outer container... */
2452     memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
2453     memory_region_set_enabled(&smram_as_root, true);
2454 
2455     /* ... with two regions inside: normal system memory with low
2456      * priority, and...
2457      */
2458     memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
2459                              get_system_memory(), 0, ~0ull);
2460     memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
2461     memory_region_set_enabled(&smram_as_mem, true);
2462 
2463     if (smram) {
2464         /* ... SMRAM with higher priority */
2465         memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
2466         memory_region_set_enabled(smram, true);
2467     }
2468 
2469     address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
2470     kvm_memory_listener_register(kvm_state, &smram_listener,
2471                                  &smram_address_space, 1, "kvm-smram");
2472 }
2473 
2474 int kvm_arch_init(MachineState *ms, KVMState *s)
2475 {
2476     uint64_t identity_base = 0xfffbc000;
2477     uint64_t shadow_mem;
2478     int ret;
2479     struct utsname utsname;
2480     Error *local_err = NULL;
2481 
2482     /*
2483      * Initialize SEV context, if required
2484      *
2485      * If no memory encryption is requested (ms->cgs == NULL) this is
2486      * a no-op.
2487      *
2488      * It's also a no-op if a non-SEV confidential guest support
2489      * mechanism is selected.  SEV is the only mechanism available to
2490      * select on x86 at present, so this doesn't arise, but if new
2491      * mechanisms are supported in future (e.g. TDX), they'll need
2492      * their own initialization either here or elsewhere.
2493      */
2494     ret = sev_kvm_init(ms->cgs, &local_err);
2495     if (ret < 0) {
2496         error_report_err(local_err);
2497         return ret;
2498     }
2499 
2500     if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
2501         error_report("kvm: KVM_CAP_IRQ_ROUTING not supported by KVM");
2502         return -ENOTSUP;
2503     }
2504 
2505     has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
2506     has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
2507     has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
2508     has_sregs2 = kvm_check_extension(s, KVM_CAP_SREGS2) > 0;
2509 
2510     hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);
2511 
2512     has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD);
2513     if (has_exception_payload) {
2514         ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true);
2515         if (ret < 0) {
2516             error_report("kvm: Failed to enable exception payload cap: %s",
2517                          strerror(-ret));
2518             return ret;
2519         }
2520     }
2521 
2522     has_triple_fault_event = kvm_check_extension(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT);
2523     if (has_triple_fault_event) {
2524         ret = kvm_vm_enable_cap(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 0, true);
2525         if (ret < 0) {
2526             error_report("kvm: Failed to enable triple fault event cap: %s",
2527                          strerror(-ret));
2528             return ret;
2529         }
2530     }
2531 
2532     ret = kvm_get_supported_msrs(s);
2533     if (ret < 0) {
2534         return ret;
2535     }
2536 
2537     kvm_get_supported_feature_msrs(s);
2538 
2539     uname(&utsname);
2540     lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
2541 
2542     /*
2543      * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
2544      * In order to use vm86 mode, an EPT identity map and a TSS are needed.
2545      * Since these must be part of guest physical memory, we need to allocate
2546      * them, both by setting their start addresses in the kernel and by
2547      * creating a corresponding e820 entry. We need 4 pages before the BIOS.
2548      *
2549      * Older KVM versions may not support setting the identity map base. In
2550      * that case we need to stick with the default, i.e. a 256K maximum BIOS
2551      * size.
2552      */
2553     if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
2554         /* Allows up to 16M BIOSes. */
2555         identity_base = 0xfeffc000;
2556 
2557         ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
2558         if (ret < 0) {
2559             return ret;
2560         }
2561     }
2562 
2563     /* Set TSS base one page after EPT identity map. */
2564     ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
2565     if (ret < 0) {
2566         return ret;
2567     }
2568 
2569     /* Tell fw_cfg to notify the BIOS to reserve the range. */
2570     ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
2571     if (ret < 0) {
2572         fprintf(stderr, "e820_add_entry() table is full\n");
2573         return ret;
2574     }
2575 
2576     shadow_mem = object_property_get_int(OBJECT(s), "kvm-shadow-mem", &error_abort);
2577     if (shadow_mem != -1) {
2578         shadow_mem /= 4096;
2579         ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
2580         if (ret < 0) {
2581             return ret;
2582         }
2583     }
2584 
2585     if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
2586         object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE) &&
2587         x86_machine_is_smm_enabled(X86_MACHINE(ms))) {
2588         smram_machine_done.notify = register_smram_listener;
2589         qemu_add_machine_init_done_notifier(&smram_machine_done);
2590     }
2591 
2592     if (enable_cpu_pm) {
2593         int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
2594         int ret;
2595 
2596 /* Workaround for a kernel header with a typo. TODO: fix header and drop. */
2597 #if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
2598 #define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
2599 #endif
2600         if (disable_exits) {
2601             disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
2602                               KVM_X86_DISABLE_EXITS_HLT |
2603                               KVM_X86_DISABLE_EXITS_PAUSE |
2604                               KVM_X86_DISABLE_EXITS_CSTATE);
2605         }
2606 
2607         ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
2608                                 disable_exits);
2609         if (ret < 0) {
2610             error_report("kvm: guest stopping CPU not supported: %s",
2611                          strerror(-ret));
2612         }
2613     }
2614 
2615     if (object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE)) {
2616         X86MachineState *x86ms = X86_MACHINE(ms);
2617 
2618         if (x86ms->bus_lock_ratelimit > 0) {
2619             ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT);
2620             if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) {
2621                 error_report("kvm: bus lock detection unsupported");
2622                 return -ENOTSUP;
2623             }
2624             ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0,
2625                                     KVM_BUS_LOCK_DETECTION_EXIT);
2626             if (ret < 0) {
2627                 error_report("kvm: Failed to enable bus lock detection cap: %s",
2628                              strerror(-ret));
2629                 return ret;
2630             }
2631             ratelimit_init(&bus_lock_ratelimit_ctrl);
2632             ratelimit_set_speed(&bus_lock_ratelimit_ctrl,
2633                                 x86ms->bus_lock_ratelimit, BUS_LOCK_SLICE_TIME);
2634         }
2635     }
2636 
2637     if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE &&
2638         kvm_check_extension(s, KVM_CAP_X86_NOTIFY_VMEXIT)) {
2639             uint64_t notify_window_flags =
2640                 ((uint64_t)s->notify_window << 32) |
2641                 KVM_X86_NOTIFY_VMEXIT_ENABLED |
2642                 KVM_X86_NOTIFY_VMEXIT_USER;
2643             ret = kvm_vm_enable_cap(s, KVM_CAP_X86_NOTIFY_VMEXIT, 0,
2644                                     notify_window_flags);
2645             if (ret < 0) {
2646                 error_report("kvm: Failed to enable notify vmexit cap: %s",
2647                              strerror(-ret));
2648                 return ret;
2649             }
2650     }
2651     if (kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) {
2652         bool r;
2653 
2654         ret = kvm_vm_enable_cap(s, KVM_CAP_X86_USER_SPACE_MSR, 0,
2655                                 KVM_MSR_EXIT_REASON_FILTER);
2656         if (ret) {
2657             error_report("Could not enable user space MSRs: %s",
2658                          strerror(-ret));
2659             exit(1);
2660         }
2661 
2662         r = kvm_filter_msr(s, MSR_CORE_THREAD_COUNT,
2663                            kvm_rdmsr_core_thread_count, NULL);
2664         if (!r) {
2665             /* kvm_filter_msr() returns a bool, so there is no errno to report */
2666             error_report("Could not install MSR_CORE_THREAD_COUNT handler!");
2667             exit(1);
2668         }
2669     }
2670 
2671     return 0;
2672 }
2673 
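/*
 * Segment conversion helpers: set_v8086_seg() builds the fixed descriptor
 * attributes used in virtual-8086 mode, while set_seg()/get_seg() translate
 * between QEMU's SegmentCache flags and KVM's struct kvm_segment fields.
 */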
2674 static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
2675 {
2676     lhs->selector = rhs->selector;
2677     lhs->base = rhs->base;
2678     lhs->limit = rhs->limit;
2679     lhs->type = 3;
2680     lhs->present = 1;
2681     lhs->dpl = 3;
2682     lhs->db = 0;
2683     lhs->s = 1;
2684     lhs->l = 0;
2685     lhs->g = 0;
2686     lhs->avl = 0;
2687     lhs->unusable = 0;
2688 }
2689 
2690 static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
2691 {
2692     unsigned flags = rhs->flags;
2693     lhs->selector = rhs->selector;
2694     lhs->base = rhs->base;
2695     lhs->limit = rhs->limit;
2696     lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
2697     lhs->present = (flags & DESC_P_MASK) != 0;
2698     lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
2699     lhs->db = (flags >> DESC_B_SHIFT) & 1;
2700     lhs->s = (flags & DESC_S_MASK) != 0;
2701     lhs->l = (flags >> DESC_L_SHIFT) & 1;
2702     lhs->g = (flags & DESC_G_MASK) != 0;
2703     lhs->avl = (flags & DESC_AVL_MASK) != 0;
2704     lhs->unusable = !lhs->present;
2705     lhs->padding = 0;
2706 }
2707 
2708 static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
2709 {
2710     lhs->selector = rhs->selector;
2711     lhs->base = rhs->base;
2712     lhs->limit = rhs->limit;
2713     lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
2714                  ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
2715                  (rhs->dpl << DESC_DPL_SHIFT) |
2716                  (rhs->db << DESC_B_SHIFT) |
2717                  (rhs->s * DESC_S_MASK) |
2718                  (rhs->l << DESC_L_SHIFT) |
2719                  (rhs->g * DESC_G_MASK) |
2720                  (rhs->avl * DESC_AVL_MASK);
2721 }
2722 
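/*
 * Copy a single register between KVM's struct kvm_regs and QEMU's CPU state;
 * 'set' selects the direction (QEMU -> KVM when true).
 */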
2723 static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
2724 {
2725     if (set) {
2726         *kvm_reg = *qemu_reg;
2727     } else {
2728         *qemu_reg = *kvm_reg;
2729     }
2730 }
2731 
2732 static int kvm_getput_regs(X86CPU *cpu, int set)
2733 {
2734     CPUX86State *env = &cpu->env;
2735     struct kvm_regs regs;
2736     int ret = 0;
2737 
2738     if (!set) {
2739         ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
2740         if (ret < 0) {
2741             return ret;
2742         }
2743     }
2744 
2745     kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
2746     kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
2747     kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
2748     kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
2749     kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
2750     kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
2751     kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
2752     kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
2753 #ifdef TARGET_X86_64
2754     kvm_getput_reg(&regs.r8, &env->regs[8], set);
2755     kvm_getput_reg(&regs.r9, &env->regs[9], set);
2756     kvm_getput_reg(&regs.r10, &env->regs[10], set);
2757     kvm_getput_reg(&regs.r11, &env->regs[11], set);
2758     kvm_getput_reg(&regs.r12, &env->regs[12], set);
2759     kvm_getput_reg(&regs.r13, &env->regs[13], set);
2760     kvm_getput_reg(&regs.r14, &env->regs[14], set);
2761     kvm_getput_reg(&regs.r15, &env->regs[15], set);
2762 #endif
2763 
2764     kvm_getput_reg(&regs.rflags, &env->eflags, set);
2765     kvm_getput_reg(&regs.rip, &env->eip, set);
2766 
2767     if (set) {
2768         ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
2769     }
2770 
2771     return ret;
2772 }
2773 
2774 static int kvm_put_fpu(X86CPU *cpu)
2775 {
2776     CPUX86State *env = &cpu->env;
2777     struct kvm_fpu fpu;
2778     int i;
2779 
2780     memset(&fpu, 0, sizeof fpu);
2781     fpu.fsw = env->fpus & ~(7 << 11);
2782     fpu.fsw |= (env->fpstt & 7) << 11;
2783     fpu.fcw = env->fpuc;
2784     fpu.last_opcode = env->fpop;
2785     fpu.last_ip = env->fpip;
2786     fpu.last_dp = env->fpdp;
2787     for (i = 0; i < 8; ++i) {
2788         fpu.ftwx |= (!env->fptags[i]) << i;
2789     }
2790     memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
2791     for (i = 0; i < CPU_NB_REGS; i++) {
2792         stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
2793         stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
2794     }
2795     fpu.mxcsr = env->mxcsr;
2796 
2797     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
2798 }
2799 
2800 static int kvm_put_xsave(X86CPU *cpu)
2801 {
2802     CPUX86State *env = &cpu->env;
2803     void *xsave = env->xsave_buf;
2804 
2805     if (!has_xsave) {
2806         return kvm_put_fpu(cpu);
2807     }
2808     x86_cpu_xsave_all_areas(cpu, xsave, env->xsave_buf_len);
2809 
2810     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
2811 }
2812 
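/* Write XCR0 to KVM when KVM_CAP_XCRS is available. */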
2813 static int kvm_put_xcrs(X86CPU *cpu)
2814 {
2815     CPUX86State *env = &cpu->env;
2816     struct kvm_xcrs xcrs = {};
2817 
2818     if (!has_xcrs) {
2819         return 0;
2820     }
2821 
2822     xcrs.nr_xcrs = 1;
2823     xcrs.flags = 0;
2824     xcrs.xcrs[0].xcr = 0;
2825     xcrs.xcrs[0].value = env->xcr0;
2826     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
2827 }
2828 
2829 static int kvm_put_sregs(X86CPU *cpu)
2830 {
2831     CPUX86State *env = &cpu->env;
2832     struct kvm_sregs sregs;
2833 
2834     /*
2835      * The interrupt_bitmap is ignored because KVM_SET_SREGS is
2836      * always followed by KVM_SET_VCPU_EVENTS.
2837      */
2838     memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
2839 
2840     if ((env->eflags & VM_MASK)) {
2841         set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
2842         set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
2843         set_v8086_seg(&sregs.es, &env->segs[R_ES]);
2844         set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
2845         set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
2846         set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
2847     } else {
2848         set_seg(&sregs.cs, &env->segs[R_CS]);
2849         set_seg(&sregs.ds, &env->segs[R_DS]);
2850         set_seg(&sregs.es, &env->segs[R_ES]);
2851         set_seg(&sregs.fs, &env->segs[R_FS]);
2852         set_seg(&sregs.gs, &env->segs[R_GS]);
2853         set_seg(&sregs.ss, &env->segs[R_SS]);
2854     }
2855 
2856     set_seg(&sregs.tr, &env->tr);
2857     set_seg(&sregs.ldt, &env->ldt);
2858 
2859     sregs.idt.limit = env->idt.limit;
2860     sregs.idt.base = env->idt.base;
2861     memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
2862     sregs.gdt.limit = env->gdt.limit;
2863     sregs.gdt.base = env->gdt.base;
2864     memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
2865 
2866     sregs.cr0 = env->cr[0];
2867     sregs.cr2 = env->cr[2];
2868     sregs.cr3 = env->cr[3];
2869     sregs.cr4 = env->cr[4];
2870 
2871     sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
2872     sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
2873 
2874     sregs.efer = env->efer;
2875 
2876     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
2877 }
2878 
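/*
 * KVM_SET_SREGS2 variant of kvm_put_sregs(): no interrupt_bitmap, and the
 * PDPTRs are passed explicitly when env->pdptrs_valid is set.
 */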
2879 static int kvm_put_sregs2(X86CPU *cpu)
2880 {
2881     CPUX86State *env = &cpu->env;
2882     struct kvm_sregs2 sregs;
2883     int i;
2884 
2885     sregs.flags = 0;
2886 
2887     if ((env->eflags & VM_MASK)) {
2888         set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
2889         set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
2890         set_v8086_seg(&sregs.es, &env->segs[R_ES]);
2891         set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
2892         set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
2893         set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
2894     } else {
2895         set_seg(&sregs.cs, &env->segs[R_CS]);
2896         set_seg(&sregs.ds, &env->segs[R_DS]);
2897         set_seg(&sregs.es, &env->segs[R_ES]);
2898         set_seg(&sregs.fs, &env->segs[R_FS]);
2899         set_seg(&sregs.gs, &env->segs[R_GS]);
2900         set_seg(&sregs.ss, &env->segs[R_SS]);
2901     }
2902 
2903     set_seg(&sregs.tr, &env->tr);
2904     set_seg(&sregs.ldt, &env->ldt);
2905 
2906     sregs.idt.limit = env->idt.limit;
2907     sregs.idt.base = env->idt.base;
2908     memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
2909     sregs.gdt.limit = env->gdt.limit;
2910     sregs.gdt.base = env->gdt.base;
2911     memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
2912 
2913     sregs.cr0 = env->cr[0];
2914     sregs.cr2 = env->cr[2];
2915     sregs.cr3 = env->cr[3];
2916     sregs.cr4 = env->cr[4];
2917 
2918     sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
2919     sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
2920 
2921     sregs.efer = env->efer;
2922 
2923     if (env->pdptrs_valid) {
2924         for (i = 0; i < 4; i++) {
2925             sregs.pdptrs[i] = env->pdptrs[i];
2926         }
2927         sregs.flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID;
2928     }
2929 
2930     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS2, &sregs);
2931 }
2932 
2933 
2934 static void kvm_msr_buf_reset(X86CPU *cpu)
2935 {
2936     memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
2937 }
2938 
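/*
 * Append one MSR index/value pair to the vCPU's scratch kvm_msrs buffer; the
 * assert below guarantees the new entry still fits within MSR_BUF_SIZE.
 */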
2939 static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
2940 {
2941     struct kvm_msrs *msrs = cpu->kvm_msr_buf;
2942     void *limit = ((void *)msrs) + MSR_BUF_SIZE;
2943     struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];
2944 
2945     assert((void *)(entry + 1) <= limit);
2946 
2947     entry->index = index;
2948     entry->reserved = 0;
2949     entry->data = value;
2950     msrs->nmsrs++;
2951 }
2952 
2953 static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
2954 {
2955     kvm_msr_buf_reset(cpu);
2956     kvm_msr_entry_add(cpu, index, value);
2957 
2958     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
2959 }
2960 
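/* Read a single MSR via KVM_GET_MSRS; returns the number of MSRs read (1) on success. */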
2961 static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value)
2962 {
2963     int ret;
2964     struct {
2965         struct kvm_msrs info;
2966         struct kvm_msr_entry entries[1];
2967     } msr_data = {
2968         .info.nmsrs = 1,
2969         .entries[0].index = index,
2970     };
2971 
2972     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
2973     if (ret < 0) {
2974         return ret;
2975     }
2976     assert(ret == 1);
2977     *value = msr_data.entries[0].data;
2978     return ret;
2979 }
2980 void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
2981 {
2982     int ret;
2983 
2984     ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
2985     assert(ret == 1);
2986 }
2987 
2988 static int kvm_put_tscdeadline_msr(X86CPU *cpu)
2989 {
2990     CPUX86State *env = &cpu->env;
2991     int ret;
2992 
2993     if (!has_msr_tsc_deadline) {
2994         return 0;
2995     }
2996 
2997     ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
2998     if (ret < 0) {
2999         return ret;
3000     }
3001 
3002     assert(ret == 1);
3003     return 0;
3004 }
3005 
3006 /*
3007  * Provide a separate write service for the feature control MSR in order to
3008  * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
3009  * before writing any other state because forcibly leaving nested mode
3010  * invalidates the VCPU state.
3011  */
3012 static int kvm_put_msr_feature_control(X86CPU *cpu)
3013 {
3014     int ret;
3015 
3016     if (!has_msr_feature_control) {
3017         return 0;
3018     }
3019 
3020     ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
3021                           cpu->env.msr_ia32_feature_control);
3022     if (ret < 0) {
3023         return ret;
3024     }
3025 
3026     assert(ret == 1);
3027     return 0;
3028 }
3029 
3030 static uint64_t make_vmx_msr_value(uint32_t index, uint32_t features)
3031 {
3032     uint32_t default1, can_be_one, can_be_zero;
3033     uint32_t must_be_one;
3034 
3035     switch (index) {
3036     case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3037         default1 = 0x00000016;
3038         break;
3039     case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3040         default1 = 0x0401e172;
3041         break;
3042     case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3043         default1 = 0x000011ff;
3044         break;
3045     case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3046         default1 = 0x00036dff;
3047         break;
3048     case MSR_IA32_VMX_PROCBASED_CTLS2:
3049         default1 = 0;
3050         break;
3051     default:
3052         abort();
3053     }
3054 
3055     /* If a feature bit is set, the control can be either set or clear.
3056      * Otherwise the control is fixed to the corresponding bit of default1.
3057      */
3058     can_be_one = features | default1;
3059     can_be_zero = features | ~default1;
3060     must_be_one = ~can_be_zero;
3061 
3062     /*
3063      * Bits 0:31 -> 0 if the control bit can be zero (i.e. 1 if it must be one).
3064      * Bits 32:63 -> 1 if the control bit can be one.
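     * E.g. for TRUE_PINBASED_CTLS with features = 0, default1 = 0x16 gives
     * can_be_one = must_be_one = 0x16, i.e. a result of 0x0000001600000016:
     * bits 1, 2 and 4 must be one and no other bit may be one.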
3065      */
3066     return must_be_one | (((uint64_t)can_be_one) << 32);
3067 }
3068 
3069 static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f)
3070 {
3071     uint64_t kvm_vmx_basic =
3072         kvm_arch_get_supported_msr_feature(kvm_state,
3073                                            MSR_IA32_VMX_BASIC);
3074 
3075     if (!kvm_vmx_basic) {
3076         /* If the kernel doesn't support the VMX feature (kvm_intel.nested=0),
3077          * then kvm_vmx_basic will be 0 and KVM_SET_MSR will fail.
3078          */
3079         return;
3080     }
3081 
3082     uint64_t kvm_vmx_misc =
3083         kvm_arch_get_supported_msr_feature(kvm_state,
3084                                            MSR_IA32_VMX_MISC);
3085     uint64_t kvm_vmx_ept_vpid =
3086         kvm_arch_get_supported_msr_feature(kvm_state,
3087                                            MSR_IA32_VMX_EPT_VPID_CAP);
3088 
3089     /*
3090      * If the guest is 64-bit, a value of 1 is allowed for the host address
3091      * space size vmexit control.
3092      */
3093     uint64_t fixed_vmx_exit = f[FEAT_8000_0001_EDX] & CPUID_EXT2_LM
3094         ? (uint64_t)VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE << 32 : 0;
3095 
3096     /*
3097      * Bits 0-30, 32-44 and 50-53 come from the host.  KVM should
3098      * not change them for backwards compatibility.
3099      */
3100     uint64_t fixed_vmx_basic = kvm_vmx_basic &
3101         (MSR_VMX_BASIC_VMCS_REVISION_MASK |
3102          MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK |
3103          MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK);
3104 
3105     /*
3106      * Same for bits 0-4 and 25-27.  Bits 16-24 (CR3 target count) can
3107      * change in the future but are always zero for now, clear them to be
3108      * future proof.  Bits 32-63 in theory could change, though KVM does
3109      * not support dual-monitor treatment and probably never will; mask
3110      * them out as well.
3111      */
3112     uint64_t fixed_vmx_misc = kvm_vmx_misc &
3113         (MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK |
3114          MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK);
3115 
3116     /*
3117      * EPT memory types should not change either, so we do not bother
3118      * adding features for them.
3119      */
3120     uint64_t fixed_vmx_ept_mask =
3121             (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_ENABLE_EPT ?
3122              MSR_VMX_EPT_UC | MSR_VMX_EPT_WB : 0);
3123     uint64_t fixed_vmx_ept_vpid = kvm_vmx_ept_vpid & fixed_vmx_ept_mask;
3124 
3125     kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
3126                       make_vmx_msr_value(MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
3127                                          f[FEAT_VMX_PROCBASED_CTLS]));
3128     kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
3129                       make_vmx_msr_value(MSR_IA32_VMX_TRUE_PINBASED_CTLS,
3130                                          f[FEAT_VMX_PINBASED_CTLS]));
3131     kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_EXIT_CTLS,
3132                       make_vmx_msr_value(MSR_IA32_VMX_TRUE_EXIT_CTLS,
3133                                          f[FEAT_VMX_EXIT_CTLS]) | fixed_vmx_exit);
3134     kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
3135                       make_vmx_msr_value(MSR_IA32_VMX_TRUE_ENTRY_CTLS,
3136                                          f[FEAT_VMX_ENTRY_CTLS]));
3137     kvm_msr_entry_add(cpu, MSR_IA32_VMX_PROCBASED_CTLS2,
3138                       make_vmx_msr_value(MSR_IA32_VMX_PROCBASED_CTLS2,
3139                                          f[FEAT_VMX_SECONDARY_CTLS]));
3140     kvm_msr_entry_add(cpu, MSR_IA32_VMX_EPT_VPID_CAP,
3141                       f[FEAT_VMX_EPT_VPID_CAPS] | fixed_vmx_ept_vpid);
3142     kvm_msr_entry_add(cpu, MSR_IA32_VMX_BASIC,
3143                       f[FEAT_VMX_BASIC] | fixed_vmx_basic);
3144     kvm_msr_entry_add(cpu, MSR_IA32_VMX_MISC,
3145                       f[FEAT_VMX_MISC] | fixed_vmx_misc);
3146     if (has_msr_vmx_vmfunc) {
3147         kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMFUNC, f[FEAT_VMX_VMFUNC]);
3148     }
3149 
3150     /*
3151      * Just to be safe, write these with constant values.  The CRn_FIXED1
3152      * MSRs are generated by KVM based on the vCPU's CPUID.
3153      */
3154     kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR0_FIXED0,
3155                       CR0_PE_MASK | CR0_PG_MASK | CR0_NE_MASK);
3156     kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0,
3157                       CR4_VMXE_MASK);
3158 
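    /*
     * MSR_IA32_VMX_VMCS_ENUM reports the highest VMCS field index the
     * guest may use; advertise the TSC multiplier field only when the
     * secondary controls actually expose TSC scaling.
     */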
3159     if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) {
3160         /* TSC multiplier (0x2032).  */
3161         kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x32);
3162     } else {
3163         /* Preemption timer (0x482E).  */
3164         kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x2E);
3165     }
3166 }
3167 
3168 static void kvm_msr_entry_add_perf(X86CPU *cpu, FeatureWordArray f)
3169 {
3170     uint64_t kvm_perf_cap =
3171         kvm_arch_get_supported_msr_feature(kvm_state,
3172                                            MSR_IA32_PERF_CAPABILITIES);
3173 
3174     if (kvm_perf_cap) {
3175         kvm_msr_entry_add(cpu, MSR_IA32_PERF_CAPABILITIES,
3176                         kvm_perf_cap & f[FEAT_PERF_CAPABILITIES]);
3177     }
3178 }
3179 
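/*
 * Flush the accumulated MSR buffer to KVM.  KVM_SET_MSRS returns the
 * number of entries it processed, so a short count identifies the
 * first MSR the kernel rejected; report it before asserting.
 */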
3180 static int kvm_buf_set_msrs(X86CPU *cpu)
3181 {
3182     int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
3183     if (ret < 0) {
3184         return ret;
3185     }
3186 
3187     if (ret < cpu->kvm_msr_buf->nmsrs) {
3188         struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
3189         error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
3190                      (uint32_t)e->index, (uint64_t)e->data);
3191     }
3192 
3193     assert(ret == cpu->kvm_msr_buf->nmsrs);
3194     return 0;
3195 }
3196 
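/*
 * Write MSRs that only need to be programmed once, at vCPU setup,
 * rather than on every state synchronization.
 */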
3197 static void kvm_init_msrs(X86CPU *cpu)
3198 {
3199     CPUX86State *env = &cpu->env;
3200 
3201     kvm_msr_buf_reset(cpu);
3202     if (has_msr_arch_capabs) {
3203         kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
3204                           env->features[FEAT_ARCH_CAPABILITIES]);
3205     }
3206 
3207     if (has_msr_core_capabs) {
3208         kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
3209                           env->features[FEAT_CORE_CAPABILITY]);
3210     }
3211 
3212     if (has_msr_perf_capabs && cpu->enable_pmu) {
3213         kvm_msr_entry_add_perf(cpu, env->features);
3214     }
3215 
3216     if (has_msr_ucode_rev) {
3217         kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev);
3218     }
3219 
3220     /*
3221      * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
3222      * all kernels with MSR features should have them.
3223      */
3224     if (kvm_feature_msrs && cpu_has_vmx(env)) {
3225         kvm_msr_entry_add_vmx(cpu, env->features);
3226     }
3227 
3228     assert(kvm_buf_set_msrs(cpu) == 0);
3229 }
3230 
3231 static int kvm_put_msrs(X86CPU *cpu, int level)
3232 {
3233     CPUX86State *env = &cpu->env;
3234     int i;
3235 
3236     kvm_msr_buf_reset(cpu);
3237 
3238     kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
3239     kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
3240     kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
3241     kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
3242     if (has_msr_star) {
3243         kvm_msr_entry_add(cpu, MSR_STAR, env->star);
3244     }
3245     if (has_msr_hsave_pa) {
3246         kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
3247     }
3248     if (has_msr_tsc_aux) {
3249         kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
3250     }
3251     if (has_msr_tsc_adjust) {
3252         kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
3253     }
3254     if (has_msr_misc_enable) {
3255         kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
3256                           env->msr_ia32_misc_enable);
3257     }
3258     if (has_msr_smbase) {
3259         kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
3260     }
3261     if (has_msr_smi_count) {
3262         kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count);
3263     }
3264     if (has_msr_pkrs) {
3265         kvm_msr_entry_add(cpu, MSR_IA32_PKRS, env->pkrs);
3266     }
3267     if (has_msr_bndcfgs) {
3268         kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
3269     }
3270     if (has_msr_xss) {
3271         kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
3272     }
3273     if (has_msr_umwait) {
3274         kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, env->umwait);
3275     }
3276     if (has_msr_spec_ctrl) {
3277         kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
3278     }
3279     if (has_tsc_scale_msr) {
3280         kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, env->amd_tsc_scale_msr);
3281     }
3282 
3283     if (has_msr_tsx_ctrl) {
3284         kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, env->tsx_ctrl);
3285     }
3286     if (has_msr_virt_ssbd) {
3287         kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
3288     }
3289 
3290 #ifdef TARGET_X86_64
3291     if (lm_capable_kernel) {
3292         kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
3293         kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
3294         kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
3295         kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
3296     }
3297 #endif
3298 
3299     /*
3300      * The following MSRs have side effects on the guest or are too heavy
3301      * for normal writeback. Limit them to reset or full state updates.
3302      */
3303     if (level >= KVM_PUT_RESET_STATE) {
3304         kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
3305         kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
3306         kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
3307         if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
3308             kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, env->async_pf_int_msr);
3309         }
3310         if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
3311             kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
3312         }
3313         if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
3314             kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
3315         }
3316         if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
3317             kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
3318         }
3319 
3320         if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
3321             kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr);
3322         }
3323 
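        /*
         * Restore the architectural PMU: zero the global controls first,
         * load the counter and event-select values, then put the
         * status/overflow registers and controls back.
         */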
3324         if (has_architectural_pmu_version > 0) {
3325             if (has_architectural_pmu_version > 1) {
3326                 /* Stop the counter.  */
3327                 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
3328                 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
3329             }
3330 
3331             /* Set the counter values.  */
3332             for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
3333                 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
3334                                   env->msr_fixed_counters[i]);
3335             }
3336             for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
3337                 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
3338                                   env->msr_gp_counters[i]);
3339                 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
3340                                   env->msr_gp_evtsel[i]);
3341             }
3342             if (has_architectural_pmu_version > 1) {
3343                 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
3344                                   env->msr_global_status);
3345                 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
3346                                   env->msr_global_ovf_ctrl);
3347 
3348                 /* Now start the PMU.  */
3349                 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
3350                                   env->msr_fixed_ctr_ctrl);
3351                 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
3352                                   env->msr_global_ctrl);
3353             }
3354         }
3355         /*
3356          * Hyper-V partition-wide MSRs: to avoid clearing them on CPU hot-add,
3357          * only sync them to KVM on the first CPU.
3358          */
3359         if (current_cpu == first_cpu) {
3360             if (has_msr_hv_hypercall) {
3361                 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
3362                                   env->msr_hv_guest_os_id);
3363                 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
3364                                   env->msr_hv_hypercall);
3365             }
3366             if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
3367                 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
3368                                   env->msr_hv_tsc);
3369             }
3370             if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
3371                 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL,
3372                                   env->msr_hv_reenlightenment_control);
3373                 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL,
3374                                   env->msr_hv_tsc_emulation_control);
3375                 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS,
3376                                   env->msr_hv_tsc_emulation_status);
3377             }
3378 #ifdef CONFIG_SYNDBG
3379             if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG) &&
3380                 has_msr_hv_syndbg_options) {
3381                 kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS,
3382                                   hyperv_syndbg_query_options());
3383             }
3384 #endif
3385         }
3386         if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
3387             kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
3388                               env->msr_hv_vapic);
3389         }
3390         if (has_msr_hv_crash) {
3391             int j;
3392 
3393             for (j = 0; j < HV_CRASH_PARAMS; j++)
3394                 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
3395                                   env->msr_hv_crash_params[j]);
3396 
3397             kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY);
3398         }
3399         if (has_msr_hv_runtime) {
3400             kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
3401         }
3402         if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)
3403             && hv_vpindex_settable) {
3404             kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX,
3405                               hyperv_vp_index(CPU(cpu)));
3406         }
3407         if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
3408             int j;
3409 
3410             kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);
3411 
3412             kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
3413                               env->msr_hv_synic_control);
3414             kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
3415                               env->msr_hv_synic_evt_page);
3416             kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
3417                               env->msr_hv_synic_msg_page);
3418 
3419             for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
3420                 kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
3421                                   env->msr_hv_synic_sint[j]);
3422             }
3423         }
3424         if (has_msr_hv_stimer) {
3425             int j;
3426 
3427             for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
3428                 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
3429                                 env->msr_hv_stimer_config[j]);
3430             }
3431 
3432             for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
3433                 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
3434                                 env->msr_hv_stimer_count[j]);
3435             }
3436         }
3437         if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
3438             uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);
3439 
3440             kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
3441             kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
3442             kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
3443             kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
3444             kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
3445             kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
3446             kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
3447             kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
3448             kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
3449             kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
3450             kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
3451             kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
3452             for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
3453                 /* The CPU GPs if we write to a bit above the physical limit of
3454                  * the host CPU (and KVM emulates that)
3455                  */
3456                 uint64_t mask = env->mtrr_var[i].mask;
3457                 mask &= phys_mask;
3458 
3459                 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
3460                                   env->mtrr_var[i].base);
3461                 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
3462             }
3463         }
3464         if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
3465             int addr_num = kvm_arch_get_supported_cpuid(kvm_state,
3466                                                     0x14, 1, R_EAX) & 0x7;
3467 
3468             kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL,
3469                             env->msr_rtit_ctrl);
3470             kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS,
3471                             env->msr_rtit_status);
3472             kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE,
3473                             env->msr_rtit_output_base);
3474             kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK,
3475                             env->msr_rtit_output_mask);
3476             kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH,
3477                             env->msr_rtit_cr3_match);
3478             for (i = 0; i < addr_num; i++) {
3479                 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i,
3480                             env->msr_rtit_addrs[i]);
3481             }
3482         }
3483 
3484         if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) {
3485             kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0,
3486                               env->msr_ia32_sgxlepubkeyhash[0]);
3487             kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1,
3488                               env->msr_ia32_sgxlepubkeyhash[1]);
3489             kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2,
3490                               env->msr_ia32_sgxlepubkeyhash[2]);
3491             kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3,
3492                               env->msr_ia32_sgxlepubkeyhash[3]);
3493         }
3494 
3495         if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) {
3496             kvm_msr_entry_add(cpu, MSR_IA32_XFD,
3497                               env->msr_xfd);
3498             kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR,
3499                               env->msr_xfd_err);
3500         }
3501 
3502         if (kvm_enabled() && cpu->enable_pmu &&
3503             (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
3504             uint64_t depth;
3505             int i, ret;
3506 
3507             /*
3508              * Only migrate Arch LBR state when the host Arch LBR depth
3509              * matches the source guest's; this avoids a guest/host
3510              * configuration mismatch for the MSR and the unexpected
3511              * behavior that would result.
3512              */
3513             ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
3514 
3515             if (ret == 1 && !!depth && depth == env->msr_lbr_depth) {
3516                 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, env->msr_lbr_ctl);
3517                 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, env->msr_lbr_depth);
3518 
3519                 for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
3520                     if (!env->lbr_records[i].from) {
3521                         continue;
3522                     }
3523                     kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i,
3524                                       env->lbr_records[i].from);
3525                     kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i,
3526                                       env->lbr_records[i].to);
3527                     kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i,
3528                                       env->lbr_records[i].info);
3529                 }
3530             }
3531         }
3532 
3533         /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
3534          *       kvm_put_msr_feature_control. */
3535     }
3536 
3537     if (env->mcg_cap) {
3538         int i;
3539 
3540         kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
3541         kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
3542         if (has_msr_mcg_ext_ctl) {
3543             kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
3544         }
3545         for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
3546             kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
3547         }
3548     }
3549 
3550     return kvm_buf_set_msrs(cpu);
3551 }
3552 
3553 
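/* Legacy FPU/SSE state read, used only when the host lacks XSAVE support. */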
3554 static int kvm_get_fpu(X86CPU *cpu)
3555 {
3556     CPUX86State *env = &cpu->env;
3557     struct kvm_fpu fpu;
3558     int i, ret;
3559 
3560     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
3561     if (ret < 0) {
3562         return ret;
3563     }
3564 
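    /* Bits 13..11 of the FPU status word hold the top-of-stack pointer. */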
3565     env->fpstt = (fpu.fsw >> 11) & 7;
3566     env->fpus = fpu.fsw;
3567     env->fpuc = fpu.fcw;
3568     env->fpop = fpu.last_opcode;
3569     env->fpip = fpu.last_ip;
3570     env->fpdp = fpu.last_dp;
3571     for (i = 0; i < 8; ++i) {
3572         env->fptags[i] = !((fpu.ftwx >> i) & 1);
3573     }
3574     memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
3575     for (i = 0; i < CPU_NB_REGS; i++) {
3576         env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
3577         env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
3578     }
3579     env->mxcsr = fpu.mxcsr;
3580 
3581     return 0;
3582 }
3583 
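/*
 * KVM_GET_XSAVE2 supports xsave areas larger than the legacy 4KiB
 * KVM_GET_XSAVE layout, which is needed for newer components such as
 * AMX tile data.
 */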
3584 static int kvm_get_xsave(X86CPU *cpu)
3585 {
3586     CPUX86State *env = &cpu->env;
3587     void *xsave = env->xsave_buf;
3588     int type, ret;
3589 
3590     if (!has_xsave) {
3591         return kvm_get_fpu(cpu);
3592     }
3593 
3594     type = has_xsave2 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE;
3595     ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave);
3596     if (ret < 0) {
3597         return ret;
3598     }
3599     x86_cpu_xrstor_all_areas(cpu, xsave, env->xsave_buf_len);
3600 
3601     return 0;
3602 }
3603 
3604 static int kvm_get_xcrs(X86CPU *cpu)
3605 {
3606     CPUX86State *env = &cpu->env;
3607     int i, ret;
3608     struct kvm_xcrs xcrs;
3609 
3610     if (!has_xcrs) {
3611         return 0;
3612     }
3613 
3614     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
3615     if (ret < 0) {
3616         return ret;
3617     }
3618 
3619     for (i = 0; i < xcrs.nr_xcrs; i++) {
3620         /* Only support xcr0 now */
3621         if (xcrs.xcrs[i].xcr == 0) {
3622             env->xcr0 = xcrs.xcrs[i].value;
3623             break;
3624         }
3625     }
3626     return 0;
3627 }
3628 
3629 static int kvm_get_sregs(X86CPU *cpu)
3630 {
3631     CPUX86State *env = &cpu->env;
3632     struct kvm_sregs sregs;
3633     int ret;
3634 
3635     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
3636     if (ret < 0) {
3637         return ret;
3638     }
3639 
3640     /*
3641      * The interrupt_bitmap is ignored because KVM_GET_SREGS is
3642      * always preceded by KVM_GET_VCPU_EVENTS.
3643      */
3644 
3645     get_seg(&env->segs[R_CS], &sregs.cs);
3646     get_seg(&env->segs[R_DS], &sregs.ds);
3647     get_seg(&env->segs[R_ES], &sregs.es);
3648     get_seg(&env->segs[R_FS], &sregs.fs);
3649     get_seg(&env->segs[R_GS], &sregs.gs);
3650     get_seg(&env->segs[R_SS], &sregs.ss);
3651 
3652     get_seg(&env->tr, &sregs.tr);
3653     get_seg(&env->ldt, &sregs.ldt);
3654 
3655     env->idt.limit = sregs.idt.limit;
3656     env->idt.base = sregs.idt.base;
3657     env->gdt.limit = sregs.gdt.limit;
3658     env->gdt.base = sregs.gdt.base;
3659 
3660     env->cr[0] = sregs.cr0;
3661     env->cr[2] = sregs.cr2;
3662     env->cr[3] = sregs.cr3;
3663     env->cr[4] = sregs.cr4;
3664 
3665     env->efer = sregs.efer;
3666 
3667     /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
3668     x86_update_hflags(env);
3669 
3670     return 0;
3671 }
3672 
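/*
 * KVM_GET_SREGS2 additionally returns the cached PDPTEs used by PAE
 * paging, so they can be preserved exactly rather than re-read from
 * guest memory.
 */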
3673 static int kvm_get_sregs2(X86CPU *cpu)
3674 {
3675     CPUX86State *env = &cpu->env;
3676     struct kvm_sregs2 sregs;
3677     int i, ret;
3678 
3679     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS2, &sregs);
3680     if (ret < 0) {
3681         return ret;
3682     }
3683 
3684     get_seg(&env->segs[R_CS], &sregs.cs);
3685     get_seg(&env->segs[R_DS], &sregs.ds);
3686     get_seg(&env->segs[R_ES], &sregs.es);
3687     get_seg(&env->segs[R_FS], &sregs.fs);
3688     get_seg(&env->segs[R_GS], &sregs.gs);
3689     get_seg(&env->segs[R_SS], &sregs.ss);
3690 
3691     get_seg(&env->tr, &sregs.tr);
3692     get_seg(&env->ldt, &sregs.ldt);
3693 
3694     env->idt.limit = sregs.idt.limit;
3695     env->idt.base = sregs.idt.base;
3696     env->gdt.limit = sregs.gdt.limit;
3697     env->gdt.base = sregs.gdt.base;
3698 
3699     env->cr[0] = sregs.cr0;
3700     env->cr[2] = sregs.cr2;
3701     env->cr[3] = sregs.cr3;
3702     env->cr[4] = sregs.cr4;
3703 
3704     env->efer = sregs.efer;
3705 
3706     env->pdptrs_valid = sregs.flags & KVM_SREGS2_FLAGS_PDPTRS_VALID;
3707 
3708     if (env->pdptrs_valid) {
3709         for (i = 0; i < 4; i++) {
3710             env->pdptrs[i] = sregs.pdptrs[i];
3711         }
3712     }
3713 
3714     /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
3715     x86_update_hflags(env);
3716 
3717     return 0;
3718 }
3719 
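/*
 * Read MSR state back from KVM.  The buffer is first filled with the
 * indexes to fetch (the data fields passed in are ignored) and
 * KVM_GET_MSRS then returns the current values.
 */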
3720 static int kvm_get_msrs(X86CPU *cpu)
3721 {
3722     CPUX86State *env = &cpu->env;
3723     struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
3724     int ret, i;
3725     uint64_t mtrr_top_bits;
3726 
3727     kvm_msr_buf_reset(cpu);
3728 
3729     kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
3730     kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
3731     kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
3732     kvm_msr_entry_add(cpu, MSR_PAT, 0);
3733     if (has_msr_star) {
3734         kvm_msr_entry_add(cpu, MSR_STAR, 0);
3735     }
3736     if (has_msr_hsave_pa) {
3737         kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
3738     }
3739     if (has_msr_tsc_aux) {
3740         kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
3741     }
3742     if (has_msr_tsc_adjust) {
3743         kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
3744     }
3745     if (has_msr_tsc_deadline) {
3746         kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
3747     }
3748     if (has_msr_misc_enable) {
3749         kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
3750     }
3751     if (has_msr_smbase) {
3752         kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
3753     }
3754     if (has_msr_smi_count) {
3755         kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
3756     }
3757     if (has_msr_feature_control) {
3758         kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
3759     }
3760     if (has_msr_pkrs) {
3761         kvm_msr_entry_add(cpu, MSR_IA32_PKRS, 0);
3762     }
3763     if (has_msr_bndcfgs) {
3764         kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
3765     }
3766     if (has_msr_xss) {
3767         kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
3768     }
3769     if (has_msr_umwait) {
3770         kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, 0);
3771     }
3772     if (has_msr_spec_ctrl) {
3773         kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
3774     }
3775     if (has_tsc_scale_msr) {
3776         kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, 0);
3777     }
3778 
3779     if (has_msr_tsx_ctrl) {
3780         kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, 0);
3781     }
3782     if (has_msr_virt_ssbd) {
3783         kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
3784     }
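    /*
     * MSR_IA32_TSC is only added while the cached value may be stale;
     * once the VM has stopped, the value just read stays valid for
     * subsequent syncs until the guest runs again.
     */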
3785     if (!env->tsc_valid) {
3786         kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
3787         env->tsc_valid = !runstate_is_running();
3788     }
3789 
3790 #ifdef TARGET_X86_64
3791     if (lm_capable_kernel) {
3792         kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
3793         kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
3794         kvm_msr_entry_add(cpu, MSR_FMASK, 0);
3795         kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
3796     }
3797 #endif
3798     kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
3799     kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
3800     if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
3801         kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, 0);
3802     }
3803     if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
3804         kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
3805     }
3806     if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
3807         kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
3808     }
3809     if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
3810         kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
3811     }
3812     if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
3813         kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
3814     }
3815     if (has_architectural_pmu_version > 0) {
3816         if (has_architectural_pmu_version > 1) {
3817             kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
3818             kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
3819             kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
3820             kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
3821         }
3822         for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
3823             kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
3824         }
3825         for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
3826             kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
3827             kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
3828         }
3829     }
3830 
3831     if (env->mcg_cap) {
3832         kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
3833         kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
3834         if (has_msr_mcg_ext_ctl) {
3835             kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
3836         }
3837         for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
3838             kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
3839         }
3840     }
3841 
3842     if (has_msr_hv_hypercall) {
3843         kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
3844         kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
3845     }
3846     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
3847         kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
3848     }
3849     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
3850         kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
3851     }
3852     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
3853         kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
3854         kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
3855         kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0);
3856     }
3857     if (has_msr_hv_syndbg_options) {
3858         kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS, 0);
3859     }
3860     if (has_msr_hv_crash) {
3861         int j;
3862 
3863         for (j = 0; j < HV_CRASH_PARAMS; j++) {
3864             kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
3865         }
3866     }
3867     if (has_msr_hv_runtime) {
3868         kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
3869     }
3870     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
3871         uint32_t msr;
3872 
3873         kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
3874         kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
3875         kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
3876         for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
3877             kvm_msr_entry_add(cpu, msr, 0);
3878         }
3879     }
3880     if (has_msr_hv_stimer) {
3881         uint32_t msr;
3882 
3883         for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
3884              msr++) {
3885             kvm_msr_entry_add(cpu, msr, 0);
3886         }
3887     }
3888     if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
3889         kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
3890         kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
3891         kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
3892         kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
3893         kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
3894         kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
3895         kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
3896         kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
3897         kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
3898         kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
3899         kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
3900         kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
3901         for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
3902             kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
3903             kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
3904         }
3905     }
3906 
3907     if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
3908         int addr_num =
3909             kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;
3910 
3911         kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
3912         kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
3913         kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
3914         kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
3915         kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
3916         for (i = 0; i < addr_num; i++) {
3917             kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
3918         }
3919     }
3920 
3921     if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) {
3922         kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0, 0);
3923         kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1, 0);
3924         kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2, 0);
3925         kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, 0);
3926     }
3927 
3928     if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) {
3929         kvm_msr_entry_add(cpu, MSR_IA32_XFD, 0);
3930         kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0);
3931     }
3932 
3933     if (kvm_enabled() && cpu->enable_pmu &&
3934         (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
3935         uint64_t depth;
3936         int i, ret;
3937 
3938         ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
3939         if (ret == 1 && depth == ARCH_LBR_NR_ENTRIES) {
3940             kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, 0);
3941             kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, 0);
3942 
3943             for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
3944                 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i, 0);
3945                 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i, 0);
3946                 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i, 0);
3947             }
3948         }
3949     }
3950 
3951     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
3952     if (ret < 0) {
3953         return ret;
3954     }
3955 
3956     if (ret < cpu->kvm_msr_buf->nmsrs) {
3957         struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
3958         error_report("error: failed to get MSR 0x%" PRIx32,
3959                      (uint32_t)e->index);
3960     }
3961 
3962     assert(ret == cpu->kvm_msr_buf->nmsrs);
3963     /*
3964      * MTRR masks: Each mask consists of 5 parts
3965      * a  10..0  : must be zero
3966      * b  11     : valid bit
3967      * c  n-1..12: actual mask bits
3968      * d  51..n  : reserved, must be zero
3969      * e  63..52 : reserved, must be zero
3970      *
3971      * 'n' is the number of physical bits supported by the CPU and is
3972      * apparently always <= 52.  We know our 'n' but don't know what
3973      * the destination's 'n' is; it might be smaller, in which case
3974      * it masks (c) on loading.  It might be larger, in which case
3975      * we fill 'd' so that d..c is consistent irrespective of the 'n'
3976      * we're migrating to.
3977      */
3978 
3979     if (cpu->fill_mtrr_mask) {
3980         QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
3981         assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
3982         mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
3983     } else {
3984         mtrr_top_bits = 0;
3985     }
3986 
3987     for (i = 0; i < ret; i++) {
3988         uint32_t index = msrs[i].index;
3989         switch (index) {
3990         case MSR_IA32_SYSENTER_CS:
3991             env->sysenter_cs = msrs[i].data;
3992             break;
3993         case MSR_IA32_SYSENTER_ESP:
3994             env->sysenter_esp = msrs[i].data;
3995             break;
3996         case MSR_IA32_SYSENTER_EIP:
3997             env->sysenter_eip = msrs[i].data;
3998             break;
3999         case MSR_PAT:
4000             env->pat = msrs[i].data;
4001             break;
4002         case MSR_STAR:
4003             env->star = msrs[i].data;
4004             break;
4005 #ifdef TARGET_X86_64
4006         case MSR_CSTAR:
4007             env->cstar = msrs[i].data;
4008             break;
4009         case MSR_KERNELGSBASE:
4010             env->kernelgsbase = msrs[i].data;
4011             break;
4012         case MSR_FMASK:
4013             env->fmask = msrs[i].data;
4014             break;
4015         case MSR_LSTAR:
4016             env->lstar = msrs[i].data;
4017             break;
4018 #endif
4019         case MSR_IA32_TSC:
4020             env->tsc = msrs[i].data;
4021             break;
4022         case MSR_TSC_AUX:
4023             env->tsc_aux = msrs[i].data;
4024             break;
4025         case MSR_TSC_ADJUST:
4026             env->tsc_adjust = msrs[i].data;
4027             break;
4028         case MSR_IA32_TSCDEADLINE:
4029             env->tsc_deadline = msrs[i].data;
4030             break;
4031         case MSR_VM_HSAVE_PA:
4032             env->vm_hsave = msrs[i].data;
4033             break;
4034         case MSR_KVM_SYSTEM_TIME:
4035             env->system_time_msr = msrs[i].data;
4036             break;
4037         case MSR_KVM_WALL_CLOCK:
4038             env->wall_clock_msr = msrs[i].data;
4039             break;
4040         case MSR_MCG_STATUS:
4041             env->mcg_status = msrs[i].data;
4042             break;
4043         case MSR_MCG_CTL:
4044             env->mcg_ctl = msrs[i].data;
4045             break;
4046         case MSR_MCG_EXT_CTL:
4047             env->mcg_ext_ctl = msrs[i].data;
4048             break;
4049         case MSR_IA32_MISC_ENABLE:
4050             env->msr_ia32_misc_enable = msrs[i].data;
4051             break;
4052         case MSR_IA32_SMBASE:
4053             env->smbase = msrs[i].data;
4054             break;
4055         case MSR_SMI_COUNT:
4056             env->msr_smi_count = msrs[i].data;
4057             break;
4058         case MSR_IA32_FEATURE_CONTROL:
4059             env->msr_ia32_feature_control = msrs[i].data;
4060             break;
4061         case MSR_IA32_BNDCFGS:
4062             env->msr_bndcfgs = msrs[i].data;
4063             break;
4064         case MSR_IA32_XSS:
4065             env->xss = msrs[i].data;
4066             break;
4067         case MSR_IA32_UMWAIT_CONTROL:
4068             env->umwait = msrs[i].data;
4069             break;
4070         case MSR_IA32_PKRS:
4071             env->pkrs = msrs[i].data;
4072             break;
4073         default:
4074             if (msrs[i].index >= MSR_MC0_CTL &&
4075                 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
4076                 env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
4077             }
4078             break;
4079         case MSR_KVM_ASYNC_PF_EN:
4080             env->async_pf_en_msr = msrs[i].data;
4081             break;
4082         case MSR_KVM_ASYNC_PF_INT:
4083             env->async_pf_int_msr = msrs[i].data;
4084             break;
4085         case MSR_KVM_PV_EOI_EN:
4086             env->pv_eoi_en_msr = msrs[i].data;
4087             break;
4088         case MSR_KVM_STEAL_TIME:
4089             env->steal_time_msr = msrs[i].data;
4090             break;
4091         case MSR_KVM_POLL_CONTROL: {
4092             env->poll_control_msr = msrs[i].data;
4093             break;
4094         }
4095         case MSR_CORE_PERF_FIXED_CTR_CTRL:
4096             env->msr_fixed_ctr_ctrl = msrs[i].data;
4097             break;
4098         case MSR_CORE_PERF_GLOBAL_CTRL:
4099             env->msr_global_ctrl = msrs[i].data;
4100             break;
4101         case MSR_CORE_PERF_GLOBAL_STATUS:
4102             env->msr_global_status = msrs[i].data;
4103             break;
4104         case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
4105             env->msr_global_ovf_ctrl = msrs[i].data;
4106             break;
4107         case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
4108             env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
4109             break;
4110         case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
4111             env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
4112             break;
4113         case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
4114             env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
4115             break;
4116         case HV_X64_MSR_HYPERCALL:
4117             env->msr_hv_hypercall = msrs[i].data;
4118             break;
4119         case HV_X64_MSR_GUEST_OS_ID:
4120             env->msr_hv_guest_os_id = msrs[i].data;
4121             break;
4122         case HV_X64_MSR_APIC_ASSIST_PAGE:
4123             env->msr_hv_vapic = msrs[i].data;
4124             break;
4125         case HV_X64_MSR_REFERENCE_TSC:
4126             env->msr_hv_tsc = msrs[i].data;
4127             break;
4128         case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
4129             env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
4130             break;
4131         case HV_X64_MSR_VP_RUNTIME:
4132             env->msr_hv_runtime = msrs[i].data;
4133             break;
4134         case HV_X64_MSR_SCONTROL:
4135             env->msr_hv_synic_control = msrs[i].data;
4136             break;
4137         case HV_X64_MSR_SIEFP:
4138             env->msr_hv_synic_evt_page = msrs[i].data;
4139             break;
4140         case HV_X64_MSR_SIMP:
4141             env->msr_hv_synic_msg_page = msrs[i].data;
4142             break;
4143         case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
4144             env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
4145             break;
4146         case HV_X64_MSR_STIMER0_CONFIG:
4147         case HV_X64_MSR_STIMER1_CONFIG:
4148         case HV_X64_MSR_STIMER2_CONFIG:
4149         case HV_X64_MSR_STIMER3_CONFIG:
4150             env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
4151                                 msrs[i].data;
4152             break;
4153         case HV_X64_MSR_STIMER0_COUNT:
4154         case HV_X64_MSR_STIMER1_COUNT:
4155         case HV_X64_MSR_STIMER2_COUNT:
4156         case HV_X64_MSR_STIMER3_COUNT:
4157             env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
4158                                 msrs[i].data;
4159             break;
4160         case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
4161             env->msr_hv_reenlightenment_control = msrs[i].data;
4162             break;
4163         case HV_X64_MSR_TSC_EMULATION_CONTROL:
4164             env->msr_hv_tsc_emulation_control = msrs[i].data;
4165             break;
4166         case HV_X64_MSR_TSC_EMULATION_STATUS:
4167             env->msr_hv_tsc_emulation_status = msrs[i].data;
4168             break;
4169         case HV_X64_MSR_SYNDBG_OPTIONS:
4170             env->msr_hv_syndbg_options = msrs[i].data;
4171             break;
4172         case MSR_MTRRdefType:
4173             env->mtrr_deftype = msrs[i].data;
4174             break;
4175         case MSR_MTRRfix64K_00000:
4176             env->mtrr_fixed[0] = msrs[i].data;
4177             break;
4178         case MSR_MTRRfix16K_80000:
4179             env->mtrr_fixed[1] = msrs[i].data;
4180             break;
4181         case MSR_MTRRfix16K_A0000:
4182             env->mtrr_fixed[2] = msrs[i].data;
4183             break;
4184         case MSR_MTRRfix4K_C0000:
4185             env->mtrr_fixed[3] = msrs[i].data;
4186             break;
4187         case MSR_MTRRfix4K_C8000:
4188             env->mtrr_fixed[4] = msrs[i].data;
4189             break;
4190         case MSR_MTRRfix4K_D0000:
4191             env->mtrr_fixed[5] = msrs[i].data;
4192             break;
4193         case MSR_MTRRfix4K_D8000:
4194             env->mtrr_fixed[6] = msrs[i].data;
4195             break;
4196         case MSR_MTRRfix4K_E0000:
4197             env->mtrr_fixed[7] = msrs[i].data;
4198             break;
4199         case MSR_MTRRfix4K_E8000:
4200             env->mtrr_fixed[8] = msrs[i].data;
4201             break;
4202         case MSR_MTRRfix4K_F0000:
4203             env->mtrr_fixed[9] = msrs[i].data;
4204             break;
4205         case MSR_MTRRfix4K_F8000:
4206             env->mtrr_fixed[10] = msrs[i].data;
4207             break;
4208         case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
4209             if (index & 1) {
4210                 env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
4211                                                                mtrr_top_bits;
4212             } else {
4213                 env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
4214             }
4215             break;
4216         case MSR_IA32_SPEC_CTRL:
4217             env->spec_ctrl = msrs[i].data;
4218             break;
4219         case MSR_AMD64_TSC_RATIO:
4220             env->amd_tsc_scale_msr = msrs[i].data;
4221             break;
4222         case MSR_IA32_TSX_CTRL:
4223             env->tsx_ctrl = msrs[i].data;
4224             break;
4225         case MSR_VIRT_SSBD:
4226             env->virt_ssbd = msrs[i].data;
4227             break;
4228         case MSR_IA32_RTIT_CTL:
4229             env->msr_rtit_ctrl = msrs[i].data;
4230             break;
4231         case MSR_IA32_RTIT_STATUS:
4232             env->msr_rtit_status = msrs[i].data;
4233             break;
4234         case MSR_IA32_RTIT_OUTPUT_BASE:
4235             env->msr_rtit_output_base = msrs[i].data;
4236             break;
4237         case MSR_IA32_RTIT_OUTPUT_MASK:
4238             env->msr_rtit_output_mask = msrs[i].data;
4239             break;
4240         case MSR_IA32_RTIT_CR3_MATCH:
4241             env->msr_rtit_cr3_match = msrs[i].data;
4242             break;
4243         case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
4244             env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
4245             break;
4246         case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
4247             env->msr_ia32_sgxlepubkeyhash[index - MSR_IA32_SGXLEPUBKEYHASH0] =
4248                            msrs[i].data;
4249             break;
4250         case MSR_IA32_XFD:
4251             env->msr_xfd = msrs[i].data;
4252             break;
4253         case MSR_IA32_XFD_ERR:
4254             env->msr_xfd_err = msrs[i].data;
4255             break;
4256         case MSR_ARCH_LBR_CTL:
4257             env->msr_lbr_ctl = msrs[i].data;
4258             break;
4259         case MSR_ARCH_LBR_DEPTH:
4260             env->msr_lbr_depth = msrs[i].data;
4261             break;
4262         case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
4263             env->lbr_records[index - MSR_ARCH_LBR_FROM_0].from = msrs[i].data;
4264             break;
4265         case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
4266             env->lbr_records[index - MSR_ARCH_LBR_TO_0].to = msrs[i].data;
4267             break;
4268         case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
4269             env->lbr_records[index - MSR_ARCH_LBR_INFO_0].info = msrs[i].data;
4270             break;
4271         }
4272     }
4273 
4274     return 0;
4275 }
4276 
4277 static int kvm_put_mp_state(X86CPU *cpu)
4278 {
4279     struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };
4280 
4281     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
4282 }
4283 
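/*
 * With an in-kernel irqchip the halted state lives in KVM, so mirror
 * it into cs->halted after reading the MP state.
 */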
4284 static int kvm_get_mp_state(X86CPU *cpu)
4285 {
4286     CPUState *cs = CPU(cpu);
4287     CPUX86State *env = &cpu->env;
4288     struct kvm_mp_state mp_state;
4289     int ret;
4290 
4291     ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
4292     if (ret < 0) {
4293         return ret;
4294     }
4295     env->mp_state = mp_state.mp_state;
4296     if (kvm_irqchip_in_kernel()) {
4297         cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
4298     }
4299     return 0;
4300 }
4301 
4302 static int kvm_get_apic(X86CPU *cpu)
4303 {
4304     DeviceState *apic = cpu->apic_state;
4305     struct kvm_lapic_state kapic;
4306     int ret;
4307 
4308     if (apic && kvm_irqchip_in_kernel()) {
4309         ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
4310         if (ret < 0) {
4311             return ret;
4312         }
4313 
4314         kvm_get_apic_state(apic, &kapic);
4315     }
4316     return 0;
4317 }
4318 
4319 static int kvm_put_vcpu_events(X86CPU *cpu, int level)
4320 {
4321     CPUState *cs = CPU(cpu);
4322     CPUX86State *env = &cpu->env;
4323     struct kvm_vcpu_events events = {};
4324 
4325     if (!kvm_has_vcpu_events()) {
4326         return 0;
4327     }
4328 
4329     events.flags = 0;
4330 
4331     if (has_exception_payload) {
4332         events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
4333         events.exception.pending = env->exception_pending;
4334         events.exception_has_payload = env->exception_has_payload;
4335         events.exception_payload = env->exception_payload;
4336     }
4337     events.exception.nr = env->exception_nr;
4338     events.exception.injected = env->exception_injected;
4339     events.exception.has_error_code = env->has_error_code;
4340     events.exception.error_code = env->error_code;
4341 
4342     events.interrupt.injected = (env->interrupt_injected >= 0);
4343     events.interrupt.nr = env->interrupt_injected;
4344     events.interrupt.soft = env->soft_interrupt;
4345 
4346     events.nmi.injected = env->nmi_injected;
4347     events.nmi.pending = env->nmi_pending;
4348     events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
4349 
4350     events.sipi_vector = env->sipi_vector;
4351 
4352     if (has_msr_smbase) {
4353         events.smi.smm = !!(env->hflags & HF_SMM_MASK);
4354         events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
4355         if (kvm_irqchip_in_kernel()) {
4356             /* As soon as these are moved to the kernel, remove them
4357              * from cs->interrupt_request.
4358              */
4359             events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
4360             events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
4361             cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
4362         } else {
4363             /* Keep these in cs->interrupt_request.  */
4364             events.smi.pending = 0;
4365             events.smi.latched_init = 0;
4366         }
4367         /* Stop SMI delivery on old machine types to avoid a reboot
4368          * on an inward migration of an old VM.
4369          */
4370         if (!cpu->kvm_no_smi_migration) {
4371             events.flags |= KVM_VCPUEVENT_VALID_SMM;
4372         }
4373     }
4374 
4375     if (level >= KVM_PUT_RESET_STATE) {
4376         events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
4377         if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
4378             events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
4379         }
4380     }
4381 
4382     if (has_triple_fault_event) {
4383         events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
4384         events.triple_fault.pending = env->triple_fault_pending;
4385     }
4386 
4387     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
4388 }
4389 
4390 static int kvm_get_vcpu_events(X86CPU *cpu)
4391 {
4392     CPUX86State *env = &cpu->env;
4393     struct kvm_vcpu_events events;
4394     int ret;
4395 
4396     if (!kvm_has_vcpu_events()) {
4397         return 0;
4398     }
4399 
4400     memset(&events, 0, sizeof(events));
4401     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
4402     if (ret < 0) {
4403        return ret;
4404     }
4405 
4406     if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
4407         env->exception_pending = events.exception.pending;
4408         env->exception_has_payload = events.exception_has_payload;
4409         env->exception_payload = events.exception_payload;
4410     } else {
4411         env->exception_pending = 0;
4412         env->exception_has_payload = false;
4413     }
4414     env->exception_injected = events.exception.injected;
4415     env->exception_nr =
4416         (env->exception_pending || env->exception_injected) ?
4417         events.exception.nr : -1;
4418     env->has_error_code = events.exception.has_error_code;
4419     env->error_code = events.exception.error_code;
4420 
4421     env->interrupt_injected =
4422         events.interrupt.injected ? events.interrupt.nr : -1;
4423     env->soft_interrupt = events.interrupt.soft;
4424 
4425     env->nmi_injected = events.nmi.injected;
4426     env->nmi_pending = events.nmi.pending;
4427     if (events.nmi.masked) {
4428         env->hflags2 |= HF2_NMI_MASK;
4429     } else {
4430         env->hflags2 &= ~HF2_NMI_MASK;
4431     }
4432 
4433     if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
4434         if (events.smi.smm) {
4435             env->hflags |= HF_SMM_MASK;
4436         } else {
4437             env->hflags &= ~HF_SMM_MASK;
4438         }
4439         if (events.smi.pending) {
4440             cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
4441         } else {
4442             cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
4443         }
4444         if (events.smi.smm_inside_nmi) {
4445             env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
4446         } else {
4447             env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
4448         }
4449         if (events.smi.latched_init) {
4450             cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
4451         } else {
4452             cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
4453         }
4454     }
4455 
4456     if (events.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) {
4457         env->triple_fault_pending = events.triple_fault.pending;
4458     }
4459 
4460     env->sipi_vector = events.sipi_vector;
4461 
4462     return 0;
4463 }
4464 
4465 static int kvm_guest_debug_workarounds(X86CPU *cpu)
4466 {
4467     CPUState *cs = CPU(cpu);
4468     CPUX86State *env = &cpu->env;
4469     int ret = 0;
4470     unsigned long reinject_trap = 0;
4471 
4472     if (!kvm_has_vcpu_events()) {
4473         if (env->exception_nr == EXCP01_DB) {
4474             reinject_trap = KVM_GUESTDBG_INJECT_DB;
4475         } else if (env->exception_injected == EXCP03_INT3) {
4476             reinject_trap = KVM_GUESTDBG_INJECT_BP;
4477         }
4478         kvm_reset_exception(env);
4479     }
4480 
4481     /*
4482      * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
4483      * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
4484      * by updating the debug state once again if single-stepping is on.
4485      * Another reason to call kvm_update_guest_debug here is a pending debug
4486      * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
4487      * reinject it via SET_GUEST_DEBUG.
4488      */
4489     if (reinject_trap ||
4490         (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
4491         ret = kvm_update_guest_debug(cs, reinject_trap);
4492     }
4493     return ret;
4494 }
4495 
4496 static int kvm_put_debugregs(X86CPU *cpu)
4497 {
4498     CPUX86State *env = &cpu->env;
4499     struct kvm_debugregs dbgregs;
4500     int i;
4501 
4502     if (!kvm_has_debugregs()) {
4503         return 0;
4504     }
4505 
4506     memset(&dbgregs, 0, sizeof(dbgregs));
4507     for (i = 0; i < 4; i++) {
4508         dbgregs.db[i] = env->dr[i];
4509     }
4510     dbgregs.dr6 = env->dr[6];
4511     dbgregs.dr7 = env->dr[7];
4512     dbgregs.flags = 0;
4513 
4514     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
4515 }
4516 
4517 static int kvm_get_debugregs(X86CPU *cpu)
4518 {
4519     CPUX86State *env = &cpu->env;
4520     struct kvm_debugregs dbgregs;
4521     int i, ret;
4522 
4523     if (!kvm_has_debugregs()) {
4524         return 0;
4525     }
4526 
4527     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
4528     if (ret < 0) {
4529         return ret;
4530     }
4531     for (i = 0; i < 4; i++) {
4532         env->dr[i] = dbgregs.db[i];
4533     }
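    /* DR4 and DR5 alias DR6 and DR7 (when CR4.DE is clear), so mirror them. */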
4534     env->dr[4] = env->dr[6] = dbgregs.dr6;
4535     env->dr[5] = env->dr[7] = dbgregs.dr7;
4536 
4537     return 0;
4538 }
4539 
4540 static int kvm_put_nested_state(X86CPU *cpu)
4541 {
4542     CPUX86State *env = &cpu->env;
4543     int max_nested_state_len = kvm_max_nested_state_length();
4544 
4545     if (!env->nested_state) {
4546         return 0;
4547     }
4548 
4549     /*
4550      * Copy flags that are affected by reset from env->hflags and env->hflags2.
4551      */
4552     if (env->hflags & HF_GUEST_MASK) {
4553         env->nested_state->flags |= KVM_STATE_NESTED_GUEST_MODE;
4554     } else {
4555         env->nested_state->flags &= ~KVM_STATE_NESTED_GUEST_MODE;
4556     }
4557 
4558     /* Don't set KVM_STATE_NESTED_GIF_SET on VMX as it is illegal */
4559     if (cpu_has_svm(env) && (env->hflags2 & HF2_GIF_MASK)) {
4560         env->nested_state->flags |= KVM_STATE_NESTED_GIF_SET;
4561     } else {
4562         env->nested_state->flags &= ~KVM_STATE_NESTED_GIF_SET;
4563     }
4564 
4565     assert(env->nested_state->size <= max_nested_state_len);
4566     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
4567 }
4568 
4569 static int kvm_get_nested_state(X86CPU *cpu)
4570 {
4571     CPUX86State *env = &cpu->env;
4572     int max_nested_state_len = kvm_max_nested_state_length();
4573     int ret;
4574 
4575     if (!env->nested_state) {
4576         return 0;
4577     }
4578 
4579     /*
4580      * It is possible that migration restored a smaller size into
4581      * nested_state->hdr.size than what our kernel supports.
4582      * We preserve the migration-origin nested_state->hdr.size for the
4583      * call to KVM_SET_NESTED_STATE, but want our next call to
4584      * KVM_GET_NESTED_STATE to use the maximum size our kernel supports.
4585      */
4586     env->nested_state->size = max_nested_state_len;
4587 
4588     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state);
4589     if (ret < 0) {
4590         return ret;
4591     }
4592 
4593     /*
4594      * Copy flags that are affected by reset to env->hflags and env->hflags2.
4595      */
4596     if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
4597         env->hflags |= HF_GUEST_MASK;
4598     } else {
4599         env->hflags &= ~HF_GUEST_MASK;
4600     }
4601 
4602     /* Keep HF2_GIF_MASK set on !SVM as x86_cpu_pending_interrupt() needs it */
4603     if (cpu_has_svm(env)) {
4604         if (env->nested_state->flags & KVM_STATE_NESTED_GIF_SET) {
4605             env->hflags2 |= HF2_GIF_MASK;
4606         } else {
4607             env->hflags2 &= ~HF2_GIF_MASK;
4608         }
4609     }
4610 
4611     return ret;
4612 }
4613 
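/*
 * Write the full vCPU state from QEMU into KVM. The order of the individual
 * puts matters; see the comments on the calls below.
 */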
4614 int kvm_arch_put_registers(CPUState *cpu, int level)
4615 {
4616     X86CPU *x86_cpu = X86_CPU(cpu);
4617     int ret;
4618 
4619     assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
4620 
4621     /*
4622      * Put MSR_IA32_FEATURE_CONTROL first; this ensures the VM gets out of VMX
4623      * root operation upon vCPU reset. kvm_put_msr_feature_control() should also
4624      * precede kvm_put_nested_state() when 'real' nested state is set.
4625      */
4626     if (level >= KVM_PUT_RESET_STATE) {
4627         ret = kvm_put_msr_feature_control(x86_cpu);
4628         if (ret < 0) {
4629             return ret;
4630         }
4631     }
4632 
4633     /* must be before kvm_put_nested_state so that EFER.SVME is set */
4634     ret = has_sregs2 ? kvm_put_sregs2(x86_cpu) : kvm_put_sregs(x86_cpu);
4635     if (ret < 0) {
4636         return ret;
4637     }
4638 
4639     if (level >= KVM_PUT_RESET_STATE) {
4640         ret = kvm_put_nested_state(x86_cpu);
4641         if (ret < 0) {
4642             return ret;
4643         }
4644     }
4645 
4646     if (level == KVM_PUT_FULL_STATE) {
4647         /* We don't check for kvm_arch_set_tsc_khz() errors here,
4648          * because TSC frequency mismatch shouldn't abort migration,
4649          * unless the user explicitly asked for a more strict TSC
4650          * setting (e.g. using an explicit "tsc-freq" option).
4651          */
4652         kvm_arch_set_tsc_khz(cpu);
4653     }
4654 
4655     ret = kvm_getput_regs(x86_cpu, 1);
4656     if (ret < 0) {
4657         return ret;
4658     }
4659     ret = kvm_put_xsave(x86_cpu);
4660     if (ret < 0) {
4661         return ret;
4662     }
4663     ret = kvm_put_xcrs(x86_cpu);
4664     if (ret < 0) {
4665         return ret;
4666     }
4667     /* must be before kvm_put_msrs */
4668     ret = kvm_inject_mce_oldstyle(x86_cpu);
4669     if (ret < 0) {
4670         return ret;
4671     }
4672     ret = kvm_put_msrs(x86_cpu, level);
4673     if (ret < 0) {
4674         return ret;
4675     }
4676     ret = kvm_put_vcpu_events(x86_cpu, level);
4677     if (ret < 0) {
4678         return ret;
4679     }
4680     if (level >= KVM_PUT_RESET_STATE) {
4681         ret = kvm_put_mp_state(x86_cpu);
4682         if (ret < 0) {
4683             return ret;
4684         }
4685     }
4686 
4687     ret = kvm_put_tscdeadline_msr(x86_cpu);
4688     if (ret < 0) {
4689         return ret;
4690     }
4691     ret = kvm_put_debugregs(x86_cpu);
4692     if (ret < 0) {
4693         return ret;
4694     }
4695     /* must be last */
4696     ret = kvm_guest_debug_workarounds(x86_cpu);
4697     if (ret < 0) {
4698         return ret;
4699     }
4700     return 0;
4701 }
4702 
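/* Read the full vCPU state from KVM back into QEMU. */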
4703 int kvm_arch_get_registers(CPUState *cs)
4704 {
4705     X86CPU *cpu = X86_CPU(cs);
4706     int ret;
4707 
4708     assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
4709 
4710     ret = kvm_get_vcpu_events(cpu);
4711     if (ret < 0) {
4712         goto out;
4713     }
4714     /*
4715      * KVM_GET_MPSTATE can modify CS and RIP, call it before
4716      * KVM_GET_REGS and KVM_GET_SREGS.
4717      */
4718     ret = kvm_get_mp_state(cpu);
4719     if (ret < 0) {
4720         goto out;
4721     }
4722     ret = kvm_getput_regs(cpu, 0);
4723     if (ret < 0) {
4724         goto out;
4725     }
4726     ret = kvm_get_xsave(cpu);
4727     if (ret < 0) {
4728         goto out;
4729     }
4730     ret = kvm_get_xcrs(cpu);
4731     if (ret < 0) {
4732         goto out;
4733     }
4734     ret = has_sregs2 ? kvm_get_sregs2(cpu) : kvm_get_sregs(cpu);
4735     if (ret < 0) {
4736         goto out;
4737     }
4738     ret = kvm_get_msrs(cpu);
4739     if (ret < 0) {
4740         goto out;
4741     }
4742     ret = kvm_get_apic(cpu);
4743     if (ret < 0) {
4744         goto out;
4745     }
4746     ret = kvm_get_debugregs(cpu);
4747     if (ret < 0) {
4748         goto out;
4749     }
4750     ret = kvm_get_nested_state(cpu);
4751     if (ret < 0) {
4752         goto out;
4753     }
4754     ret = 0;
4755  out:
4756     cpu_sync_bndcs_hflags(&cpu->env);
4757     return ret;
4758 }
4759 
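/*
 * Prepare the vCPU right before KVM_RUN: inject any pending NMI/SMI and,
 * with a userspace irqchip, inject pending interrupts or request an
 * interrupt-window exit.
 */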
4760 void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
4761 {
4762     X86CPU *x86_cpu = X86_CPU(cpu);
4763     CPUX86State *env = &x86_cpu->env;
4764     int ret;
4765 
4766     /* Inject NMI */
4767     if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
4768         if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
4769             qemu_mutex_lock_iothread();
4770             cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
4771             qemu_mutex_unlock_iothread();
4772             DPRINTF("injected NMI\n");
4773             ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
4774             if (ret < 0) {
4775                 fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
4776                         strerror(-ret));
4777             }
4778         }
4779         if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
4780             qemu_mutex_lock_iothread();
4781             cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
4782             qemu_mutex_unlock_iothread();
4783             DPRINTF("injected SMI\n");
4784             ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
4785             if (ret < 0) {
4786                 fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
4787                         strerror(-ret));
4788             }
4789         }
4790     }
4791 
4792     if (!kvm_pic_in_kernel()) {
4793         qemu_mutex_lock_iothread();
4794     }
4795 
4796     /* Force the VCPU out of its inner loop to process any INIT requests
4797      * or (for userspace APIC, but it is cheap to combine the checks here)
4798      * pending TPR access reports.
4799      */
4800     if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
4801         if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
4802             !(env->hflags & HF_SMM_MASK)) {
4803             cpu->exit_request = 1;
4804         }
4805         if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
4806             cpu->exit_request = 1;
4807         }
4808     }
4809 
4810     if (!kvm_pic_in_kernel()) {
4811         /* Try to inject an interrupt if the guest can accept it */
4812         if (run->ready_for_interrupt_injection &&
4813             (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
4814             (env->eflags & IF_MASK)) {
4815             int irq;
4816 
4817             cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
4818             irq = cpu_get_pic_interrupt(env);
4819             if (irq >= 0) {
4820                 struct kvm_interrupt intr;
4821 
4822                 intr.irq = irq;
4823                 DPRINTF("injected interrupt %d\n", irq);
4824                 ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
4825                 if (ret < 0) {
4826                     fprintf(stderr,
4827                             "KVM: injection failed, interrupt lost (%s)\n",
4828                             strerror(-ret));
4829                 }
4830             }
4831         }
4832 
4833         /* If we have an interrupt but the guest is not ready to receive an
4834          * interrupt, request an interrupt window exit.  This will
4835          * cause a return to userspace as soon as the guest is ready to
4836          * receive interrupts. */
4837         if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
4838             run->request_interrupt_window = 1;
4839         } else {
4840             run->request_interrupt_window = 0;
4841         }
4842 
4843         DPRINTF("setting tpr\n");
4844         run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
4845 
4846         qemu_mutex_unlock_iothread();
4847     }
4848 }
4849 
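/* Throttle the vCPU after a bus-lock exit according to the configured rate limit. */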
4850 static void kvm_rate_limit_on_bus_lock(void)
4851 {
4852     uint64_t delay_ns = ratelimit_calculate_delay(&bus_lock_ratelimit_ctrl, 1);
4853 
4854     if (delay_ns) {
4855         g_usleep(delay_ns / SCALE_US);
4856     }
4857 }
4858 
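/*
 * Mirror state reported by KVM_RUN back into QEMU (SMM mode, IF flag, TPR
 * and APIC base) and apply bus-lock rate limiting if requested.
 */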
4859 MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
4860 {
4861     X86CPU *x86_cpu = X86_CPU(cpu);
4862     CPUX86State *env = &x86_cpu->env;
4863 
4864     if (run->flags & KVM_RUN_X86_SMM) {
4865         env->hflags |= HF_SMM_MASK;
4866     } else {
4867         env->hflags &= ~HF_SMM_MASK;
4868     }
4869     if (run->if_flag) {
4870         env->eflags |= IF_MASK;
4871     } else {
4872         env->eflags &= ~IF_MASK;
4873     }
4874     if (run->flags & KVM_RUN_X86_BUS_LOCK) {
4875         kvm_rate_limit_on_bus_lock();
4876     }
4877 
4878     /* We need to protect the apic state against concurrent accesses from
4879      * different threads in case the userspace irqchip is used. */
4880     if (!kvm_irqchip_in_kernel()) {
4881         qemu_mutex_lock_iothread();
4882     }
4883     cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
4884     cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
4885     if (!kvm_irqchip_in_kernel()) {
4886         qemu_mutex_unlock_iothread();
4887     }
4888     return cpu_get_mem_attrs(env);
4889 }
4890 
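/*
 * Handle asynchronous events (MCE, INIT, SIPI, TPR access, pending
 * interrupts) outside of KVM_RUN; returns whether the vCPU remains halted.
 */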
4891 int kvm_arch_process_async_events(CPUState *cs)
4892 {
4893     X86CPU *cpu = X86_CPU(cs);
4894     CPUX86State *env = &cpu->env;
4895 
4896     if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
4897         /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
4898         assert(env->mcg_cap);
4899 
4900         cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
4901 
4902         kvm_cpu_synchronize_state(cs);
4903 
4904         if (env->exception_nr == EXCP08_DBLE) {
4905             /* this means triple fault */
4906             qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
4907             cs->exit_request = 1;
4908             return 0;
4909         }
4910         kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
4911         env->has_error_code = 0;
4912 
4913         cs->halted = 0;
4914         if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
4915             env->mp_state = KVM_MP_STATE_RUNNABLE;
4916         }
4917     }
4918 
4919     if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
4920         !(env->hflags & HF_SMM_MASK)) {
4921         kvm_cpu_synchronize_state(cs);
4922         do_cpu_init(cpu);
4923     }
4924 
4925     if (kvm_irqchip_in_kernel()) {
4926         return 0;
4927     }
4928 
4929     if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
4930         cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
4931         apic_poll_irq(cpu->apic_state);
4932     }
4933     if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
4934          (env->eflags & IF_MASK)) ||
4935         (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
4936         cs->halted = 0;
4937     }
4938     if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
4939         kvm_cpu_synchronize_state(cs);
4940         do_cpu_sipi(cpu);
4941     }
4942     if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
4943         cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
4944         kvm_cpu_synchronize_state(cs);
4945         apic_handle_tpr_access_report(cpu->apic_state, env->eip,
4946                                       env->tpr_access_type);
4947     }
4948 
4949     return cs->halted;
4950 }
4951 
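/* Handle a HLT exit: halt the vCPU unless an interrupt or NMI is already pending. */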
4952 static int kvm_handle_halt(X86CPU *cpu)
4953 {
4954     CPUState *cs = CPU(cpu);
4955     CPUX86State *env = &cpu->env;
4956 
4957     if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
4958           (env->eflags & IF_MASK)) &&
4959         !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
4960         cs->halted = 1;
4961         return EXCP_HLT;
4962     }
4963 
4964     return 0;
4965 }
4966 
4967 static int kvm_handle_tpr_access(X86CPU *cpu)
4968 {
4969     CPUState *cs = CPU(cpu);
4970     struct kvm_run *run = cs->kvm_run;
4971 
4972     apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
4973                                   run->tpr_access.is_write ? TPR_ACCESS_WRITE
4974                                                            : TPR_ACCESS_READ);
4975     return 1;
4976 }
4977 
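/* Software breakpoints: save the original byte and patch in an int3 (0xcc) opcode. */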
4978 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
4979 {
4980     static const uint8_t int3 = 0xcc;
4981 
4982     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
4983         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
4984         return -EINVAL;
4985     }
4986     return 0;
4987 }
4988 
4989 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
4990 {
4991     uint8_t int3;
4992 
4993     if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0)) {
4994         return -EINVAL;
4995     }
4996     if (int3 != 0xcc) {
4997         return 0;
4998     }
4999     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
5000         return -EINVAL;
5001     }
5002     return 0;
5003 }
5004 
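/* Up to four hardware breakpoints/watchpoints, matching the DR0-DR3 slots. */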
5005 static struct {
5006     target_ulong addr;
5007     int len;
5008     int type;
5009 } hw_breakpoint[4];
5010 
5011 static int nb_hw_breakpoint;
5012 
5013 static int find_hw_breakpoint(target_ulong addr, int len, int type)
5014 {
5015     int n;
5016 
5017     for (n = 0; n < nb_hw_breakpoint; n++) {
5018         if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
5019             (hw_breakpoint[n].len == len || len == -1)) {
5020             return n;
5021         }
5022     }
5023     return -1;
5024 }
5025 
5026 int kvm_arch_insert_hw_breakpoint(target_ulong addr,
5027                                   target_ulong len, int type)
5028 {
5029     switch (type) {
5030     case GDB_BREAKPOINT_HW:
5031         len = 1;
5032         break;
5033     case GDB_WATCHPOINT_WRITE:
5034     case GDB_WATCHPOINT_ACCESS:
5035         switch (len) {
5036         case 1:
5037             break;
5038         case 2:
5039         case 4:
5040         case 8:
5041             if (addr & (len - 1)) {
5042                 return -EINVAL;
5043             }
5044             break;
5045         default:
5046             return -EINVAL;
5047         }
5048         break;
5049     default:
5050         return -ENOSYS;
5051     }
5052 
5053     if (nb_hw_breakpoint == 4) {
5054         return -ENOBUFS;
5055     }
5056     if (find_hw_breakpoint(addr, len, type) >= 0) {
5057         return -EEXIST;
5058     }
5059     hw_breakpoint[nb_hw_breakpoint].addr = addr;
5060     hw_breakpoint[nb_hw_breakpoint].len = len;
5061     hw_breakpoint[nb_hw_breakpoint].type = type;
5062     nb_hw_breakpoint++;
5063 
5064     return 0;
5065 }
5066 
5067 int kvm_arch_remove_hw_breakpoint(target_ulong addr,
5068                                   target_ulong len, int type)
5069 {
5070     int n;
5071 
5072     n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
5073     if (n < 0) {
5074         return -ENOENT;
5075     }
5076     nb_hw_breakpoint--;
5077     hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
5078 
5079     return 0;
5080 }
5081 
5082 void kvm_arch_remove_all_hw_breakpoints(void)
5083 {
5084     nb_hw_breakpoint = 0;
5085 }
5086 
5087 static CPUWatchpoint hw_watchpoint;
5088 
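/*
 * Handle a KVM_EXIT_DEBUG exit: decode DR6/DR7 to see whether one of our
 * hardware breakpoints/watchpoints or single-stepping fired, or whether a
 * software breakpoint we own was hit; otherwise reinject the exception
 * into the guest.
 */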
5089 static int kvm_handle_debug(X86CPU *cpu,
5090                             struct kvm_debug_exit_arch *arch_info)
5091 {
5092     CPUState *cs = CPU(cpu);
5093     CPUX86State *env = &cpu->env;
5094     int ret = 0;
5095     int n;
5096 
5097     if (arch_info->exception == EXCP01_DB) {
5098         if (arch_info->dr6 & DR6_BS) {
5099             if (cs->singlestep_enabled) {
5100                 ret = EXCP_DEBUG;
5101             }
5102         } else {
5103             for (n = 0; n < 4; n++) {
5104                 if (arch_info->dr6 & (1 << n)) {
5105                     switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
5106                     case 0x0:
5107                         ret = EXCP_DEBUG;
5108                         break;
5109                     case 0x1:
5110                         ret = EXCP_DEBUG;
5111                         cs->watchpoint_hit = &hw_watchpoint;
5112                         hw_watchpoint.vaddr = hw_breakpoint[n].addr;
5113                         hw_watchpoint.flags = BP_MEM_WRITE;
5114                         break;
5115                     case 0x3:
5116                         ret = EXCP_DEBUG;
5117                         cs->watchpoint_hit = &hw_watchpoint;
5118                         hw_watchpoint.vaddr = hw_breakpoint[n].addr;
5119                         hw_watchpoint.flags = BP_MEM_ACCESS;
5120                         break;
5121                     }
5122                 }
5123             }
5124         }
5125     } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
5126         ret = EXCP_DEBUG;
5127     }
5128     if (ret == 0) {
5129         cpu_synchronize_state(cs);
5130         assert(env->exception_nr == -1);
5131 
5132         /* pass to guest */
5133         kvm_queue_exception(env, arch_info->exception,
5134                             arch_info->exception == EXCP01_DB,
5135                             arch_info->dr6);
5136         env->has_error_code = 0;
5137     }
5138 
5139     return ret;
5140 }
5141 
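/*
 * Translate QEMU's software/hardware breakpoint state into kvm_guest_debug
 * control flags and the DR7 encoding expected by KVM.
 */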
5142 void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
5143 {
5144     const uint8_t type_code[] = {
5145         [GDB_BREAKPOINT_HW] = 0x0,
5146         [GDB_WATCHPOINT_WRITE] = 0x1,
5147         [GDB_WATCHPOINT_ACCESS] = 0x3
5148     };
5149     const uint8_t len_code[] = {
5150         [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
5151     };
5152     int n;
5153 
5154     if (kvm_sw_breakpoints_active(cpu)) {
5155         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
5156     }
5157     if (nb_hw_breakpoint > 0) {
5158         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
5159         dbg->arch.debugreg[7] = 0x0600;
5160         for (n = 0; n < nb_hw_breakpoint; n++) {
5161             dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
5162             dbg->arch.debugreg[7] |= (2 << (n * 2)) |
5163                 (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
5164                 ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
5165         }
5166     }
5167 }
5168 
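/*
 * Build and install a KVM MSR filter covering every registered handler so
 * that guest accesses to those MSRs exit to userspace.
 */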
5169 static bool kvm_install_msr_filters(KVMState *s)
5170 {
5171     uint64_t zero = 0;
5172     struct kvm_msr_filter filter = {
5173         .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
5174     };
5175     int r, i, j = 0;
5176 
5177     for (i = 0; i < KVM_MSR_FILTER_MAX_RANGES; i++) {
5178         KVMMSRHandlers *handler = &msr_handlers[i];
5179         if (handler->msr) {
5180             struct kvm_msr_filter_range *range = &filter.ranges[j++];
5181 
5182             *range = (struct kvm_msr_filter_range) {
5183                 .flags = 0,
5184                 .nmsrs = 1,
5185                 .base = handler->msr,
5186                 .bitmap = (__u8 *)&zero,
5187             };
5188 
5189             if (handler->rdmsr) {
5190                 range->flags |= KVM_MSR_FILTER_READ;
5191             }
5192 
5193             if (handler->wrmsr) {
5194                 range->flags |= KVM_MSR_FILTER_WRITE;
5195             }
5196         }
5197     }
5198 
5199     r = kvm_vm_ioctl(s, KVM_X86_SET_MSR_FILTER, &filter);
5200     if (r) {
5201         return false;
5202     }
5203 
5204     return true;
5205 }
5206 
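/* Register rdmsr/wrmsr handlers for an MSR and (re)install the kernel MSR filter. */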
5207 bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr,
5208                     QEMUWRMSRHandler *wrmsr)
5209 {
5210     int i;
5211 
5212     for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
5213         if (!msr_handlers[i].msr) {
5214             msr_handlers[i] = (KVMMSRHandlers) {
5215                 .msr = msr,
5216                 .rdmsr = rdmsr,
5217                 .wrmsr = wrmsr,
5218             };
5219 
5220             if (!kvm_install_msr_filters(s)) {
5221                 msr_handlers[i] = (KVMMSRHandlers) { };
5222                 return false;
5223             }
5224 
5225             return true;
5226         }
5227     }
5228 
5229     return false;
5230 }
5231 
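/* Dispatch a filtered RDMSR exit to the handler registered for that MSR. */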
5232 static int kvm_handle_rdmsr(X86CPU *cpu, struct kvm_run *run)
5233 {
5234     int i;
5235     bool r;
5236 
5237     for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
5238         KVMMSRHandlers *handler = &msr_handlers[i];
5239         if (run->msr.index == handler->msr) {
5240             if (handler->rdmsr) {
5241                 r = handler->rdmsr(cpu, handler->msr,
5242                                    (uint64_t *)&run->msr.data);
5243                 run->msr.error = r ? 0 : 1;
5244                 return 0;
5245             }
5246         }
5247     }
5248 
5249     g_assert_not_reached();
5250 }
5251 
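/* Dispatch a filtered WRMSR exit to the handler registered for that MSR. */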
5252 static int kvm_handle_wrmsr(X86CPU *cpu, struct kvm_run *run)
5253 {
5254     int i;
5255     bool r;
5256 
5257     for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
5258         KVMMSRHandlers *handler = &msr_handlers[i];
5259         if (run->msr.index == handler->msr) {
5260             if (handler->wrmsr) {
5261                 r = handler->wrmsr(cpu, handler->msr, run->msr.data);
5262                 run->msr.error = r ? 0 : 1;
5263                 return 0;
5264             }
5265         }
5266     }
5267 
5268     g_assert_not_reached();
5269 }
5270 
5271 static bool has_sgx_provisioning;
5272 
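/*
 * Grant the VM access to the SGX PROVISIONKEY by passing an open
 * /dev/sgx_provision file descriptor to KVM_CAP_SGX_ATTRIBUTE.
 */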
5273 static bool __kvm_enable_sgx_provisioning(KVMState *s)
5274 {
5275     int fd, ret;
5276 
5277     if (!kvm_vm_check_extension(s, KVM_CAP_SGX_ATTRIBUTE)) {
5278         return false;
5279     }
5280 
5281     fd = qemu_open_old("/dev/sgx_provision", O_RDONLY);
5282     if (fd < 0) {
5283         return false;
5284     }
5285 
5286     ret = kvm_vm_enable_cap(s, KVM_CAP_SGX_ATTRIBUTE, 0, fd);
5287     if (ret) {
5288         error_report("Could not enable SGX PROVISIONKEY: %s", strerror(-ret));
5289         exit(1);
5290     }
5291     close(fd);
5292     return true;
5293 }
5294 
5295 bool kvm_enable_sgx_provisioning(KVMState *s)
5296 {
5297     return MEMORIZE(__kvm_enable_sgx_provisioning(s), has_sgx_provisioning);
5298 }
5299 
5300 static bool host_supports_vmx(void)
5301 {
5302     uint32_t ecx, unused;
5303 
5304     host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
5305     return ecx & CPUID_EXT_VMX;
5306 }
5307 
5308 #define VMX_INVALID_GUEST_STATE 0x80000021
5309 
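/* Dispatch x86-specific KVM exit reasons that are not handled by common code. */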
5310 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
5311 {
5312     X86CPU *cpu = X86_CPU(cs);
5313     uint64_t code;
5314     int ret;
5315     bool ctx_invalid;
5316     char str[256];
5317     KVMState *state;
5318 
5319     switch (run->exit_reason) {
5320     case KVM_EXIT_HLT:
5321         DPRINTF("handle_hlt\n");
5322         qemu_mutex_lock_iothread();
5323         ret = kvm_handle_halt(cpu);
5324         qemu_mutex_unlock_iothread();
5325         break;
5326     case KVM_EXIT_SET_TPR:
5327         ret = 0;
5328         break;
5329     case KVM_EXIT_TPR_ACCESS:
5330         qemu_mutex_lock_iothread();
5331         ret = kvm_handle_tpr_access(cpu);
5332         qemu_mutex_unlock_iothread();
5333         break;
5334     case KVM_EXIT_FAIL_ENTRY:
5335         code = run->fail_entry.hardware_entry_failure_reason;
5336         fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
5337                 code);
5338         if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
5339             fprintf(stderr,
5340                     "\nIf you're running a guest on an Intel machine without "
5341                         "unrestricted mode\n"
5342                     "support, the failure can be most likely due to the guest "
5343                         "entering an invalid\n"
5344                     "state for Intel VT. For example, the guest maybe running "
5345                         "in big real mode\n"
5346                     "which is not supported on less recent Intel processors."
5347                         "\n\n");
5348         }
5349         ret = -1;
5350         break;
5351     case KVM_EXIT_EXCEPTION:
5352         fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
5353                 run->ex.exception, run->ex.error_code);
5354         ret = -1;
5355         break;
5356     case KVM_EXIT_DEBUG:
5357         DPRINTF("kvm_exit_debug\n");
5358         qemu_mutex_lock_iothread();
5359         ret = kvm_handle_debug(cpu, &run->debug.arch);
5360         qemu_mutex_unlock_iothread();
5361         break;
5362     case KVM_EXIT_HYPERV:
5363         ret = kvm_hv_handle_exit(cpu, &run->hyperv);
5364         break;
5365     case KVM_EXIT_IOAPIC_EOI:
5366         ioapic_eoi_broadcast(run->eoi.vector);
5367         ret = 0;
5368         break;
5369     case KVM_EXIT_X86_BUS_LOCK:
5370         /* already handled in kvm_arch_post_run */
5371         ret = 0;
5372         break;
5373     case KVM_EXIT_NOTIFY:
5374         ctx_invalid = !!(run->notify.flags & KVM_NOTIFY_CONTEXT_INVALID);
5375         state = KVM_STATE(current_accel());
5376         sprintf(str, "Encountered a notify exit with %svalid context in"
5377                      " the guest. The guest may be misbehaving."
5378                      " Please have a look.", ctx_invalid ? "in" : "");
5379         if (ctx_invalid ||
5380             state->notify_vmexit == NOTIFY_VMEXIT_OPTION_INTERNAL_ERROR) {
5381             warn_report("KVM internal error: %s", str);
5382             ret = -1;
5383         } else {
5384             warn_report_once("KVM: %s", str);
5385             ret = 0;
5386         }
5387         break;
5388     case KVM_EXIT_X86_RDMSR:
5389         /* We only enable MSR filtering; any other exit is bogus */
5390         assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
5391         ret = kvm_handle_rdmsr(cpu, run);
5392         break;
5393     case KVM_EXIT_X86_WRMSR:
5394         /* We only enable MSR filtering; any other exit is bogus */
5395         assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
5396         ret = kvm_handle_wrmsr(cpu, run);
5397         break;
5398     default:
5399         fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
5400         ret = -1;
5401         break;
5402     }
5403 
5404     return ret;
5405 }
5406 
5407 bool kvm_arch_stop_on_emulation_error(CPUState *cs)
5408 {
5409     X86CPU *cpu = X86_CPU(cs);
5410     CPUX86State *env = &cpu->env;
5411 
5412     kvm_cpu_synchronize_state(cs);
5413     return !(env->cr[0] & CR0_PE_MASK) ||
5414            ((env->segs[R_CS].selector  & 3) != 3);
5415 }
5416 
5417 void kvm_arch_init_irq_routing(KVMState *s)
5418 {
5419     /* We know at this point that we're using the in-kernel
5420      * irqchip, so we can use irqfds, and on x86 we know
5421      * we can use msi via irqfd and GSI routing.
5422      */
5423     kvm_msi_via_irqfd_allowed = true;
5424     kvm_gsi_routing_allowed = true;
5425 
5426     if (kvm_irqchip_is_split()) {
5427         KVMRouteChange c = kvm_irqchip_begin_route_changes(s);
5428         int i;
5429 
5430         /* If the ioapic is in QEMU and the lapics are in KVM, reserve
5431            MSI routes for signaling interrupts to the local apics. */
5432         for (i = 0; i < IOAPIC_NUM_PINS; i++) {
5433             if (kvm_irqchip_add_msi_route(&c, 0, NULL) < 0) {
5434                 error_report("Could not enable split IRQ mode.");
5435                 exit(1);
5436             }
5437         }
5438         kvm_irqchip_commit_route_changes(&c);
5439     }
5440 }
5441 
5442 int kvm_arch_irqchip_create(KVMState *s)
5443 {
5444     int ret;
5445     if (kvm_kernel_irqchip_split()) {
5446         ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
5447         if (ret) {
5448             error_report("Could not enable split irqchip mode: %s",
5449                          strerror(-ret));
5450             exit(1);
5451         } else {
5452             DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
5453             kvm_split_irqchip = true;
5454             return 1;
5455         }
5456     } else {
5457         return 0;
5458     }
5459 }
5460 
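/*
 * If the guest has been told about KVM_FEATURE_MSI_EXT_DEST_ID, move the
 * extended destination ID bits from the low MSI address bits up into the
 * high address bits; remappable-format or already-extended addresses are
 * left untouched.
 */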
5461 uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address)
5462 {
5463     CPUX86State *env;
5464     uint64_t ext_id;
5465 
5466     if (!first_cpu) {
5467         return address;
5468     }
5469     env = &X86_CPU(first_cpu)->env;
5470     if (!(env->features[FEAT_KVM] & (1 << KVM_FEATURE_MSI_EXT_DEST_ID))) {
5471         return address;
5472     }
5473 
5474     /*
5475      * If the remappable format bit is set, or the upper bits are
5476      * already set in address_hi, or the low extended bits aren't
5477      * there anyway, do nothing.
5478      */
5479     ext_id = address & (0xff << MSI_ADDR_DEST_IDX_SHIFT);
5480     if (!ext_id || (ext_id & (1 << MSI_ADDR_DEST_IDX_SHIFT)) || (address >> 32)) {
5481         return address;
5482     }
5483 
5484     address &= ~ext_id;
5485     address |= ext_id << 35;
5486     return address;
5487 }
5488 
5489 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
5490                              uint64_t address, uint32_t data, PCIDevice *dev)
5491 {
5492     X86IOMMUState *iommu = x86_iommu_get_default();
5493 
5494     if (iommu) {
5495         X86IOMMUClass *class = X86_IOMMU_DEVICE_GET_CLASS(iommu);
5496 
5497         if (class->int_remap) {
5498             int ret;
5499             MSIMessage src, dst;
5500 
5501             src.address = route->u.msi.address_hi;
5502             src.address <<= VTD_MSI_ADDR_HI_SHIFT;
5503             src.address |= route->u.msi.address_lo;
5504             src.data = route->u.msi.data;
5505 
5506             ret = class->int_remap(iommu, &src, &dst, dev ?     \
5507                                    pci_requester_id(dev) :      \
5508                                    X86_IOMMU_SID_INVALID);
5509             if (ret) {
5510                 trace_kvm_x86_fixup_msi_error(route->gsi);
5511                 return 1;
5512             }
5513 
5514             /*
5515              * Handle an untranslated compatibility-format interrupt with an
5516              * extended destination ID in the low bits 11-5. */
5517             dst.address = kvm_swizzle_msi_ext_dest_id(dst.address);
5518 
5519             route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
5520             route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
5521             route->u.msi.data = dst.data;
5522             return 0;
5523         }
5524     }
5525 
5526     address = kvm_swizzle_msi_ext_dest_id(address);
5527     route->u.msi.address_hi = address >> VTD_MSI_ADDR_HI_SHIFT;
5528     route->u.msi.address_lo = address & VTD_MSI_ADDR_LO_MASK;
5529     return 0;
5530 }
5531 
5532 typedef struct MSIRouteEntry MSIRouteEntry;
5533 
5534 struct MSIRouteEntry {
5535     PCIDevice *dev;             /* Device pointer */
5536     int vector;                 /* MSI/MSIX vector index */
5537     int virq;                   /* Virtual IRQ index */
5538     QLIST_ENTRY(MSIRouteEntry) list;
5539 };
5540 
5541 /* List of used GSI routes */
5542 static QLIST_HEAD(, MSIRouteEntry) msi_route_list = \
5543     QLIST_HEAD_INITIALIZER(msi_route_list);
5544 
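/*
 * IEC invalidation callback: recompute and commit the KVM routing entry for
 * every tracked MSI/MSI-X vector that is currently enabled and unmasked.
 */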
5545 static void kvm_update_msi_routes_all(void *private, bool global,
5546                                       uint32_t index, uint32_t mask)
5547 {
5548     int cnt = 0, vector;
5549     MSIRouteEntry *entry;
5550     MSIMessage msg;
5551     PCIDevice *dev;
5552 
5553     /* TODO: explicit route update */
5554     QLIST_FOREACH(entry, &msi_route_list, list) {
5555         cnt++;
5556         vector = entry->vector;
5557         dev = entry->dev;
5558         if (msix_enabled(dev) && !msix_is_masked(dev, vector)) {
5559             msg = msix_get_message(dev, vector);
5560         } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) {
5561             msg = msi_get_message(dev, vector);
5562         } else {
5563             /*
5564              * Either MSI/MSIX is disabled for the device, or the
5565              * specific message was masked out.  Skip this one.
5566              */
5567             continue;
5568         }
5569         kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
5570     }
5571     kvm_irqchip_commit_routes(kvm_state);
5572     trace_kvm_x86_update_msi_routes(cnt);
5573 }
5574 
5575 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
5576                                 int vector, PCIDevice *dev)
5577 {
5578     static bool notify_list_inited = false;
5579     MSIRouteEntry *entry;
5580 
5581     if (!dev) {
5582         /* These are (possibly) IOAPIC routes, which are only used in
5583          * split kernel irqchip mode, while we only keep track of PCI
5584          * devices here. */
5585         return 0;
5586     }
5587 
5588     entry = g_new0(MSIRouteEntry, 1);
5589     entry->dev = dev;
5590     entry->vector = vector;
5591     entry->virq = route->gsi;
5592     QLIST_INSERT_HEAD(&msi_route_list, entry, list);
5593 
5594     trace_kvm_x86_add_msi_route(route->gsi);
5595 
5596     if (!notify_list_inited) {
5597         /* The first time we add a route, register ourselves on the
5598          * IOMMU's IEC notifier list if needed. */
5599         X86IOMMUState *iommu = x86_iommu_get_default();
5600         if (iommu) {
5601             x86_iommu_iec_register_notifier(iommu,
5602                                             kvm_update_msi_routes_all,
5603                                             NULL);
5604         }
5605         notify_list_inited = true;
5606     }
5607     return 0;
5608 }
5609 
5610 int kvm_arch_release_virq_post(int virq)
5611 {
5612     MSIRouteEntry *entry, *next;
5613     QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
5614         if (entry->virq == virq) {
5615             trace_kvm_x86_remove_msi_route(virq);
5616             QLIST_REMOVE(entry, list);
5617             g_free(entry);
5618             break;
5619         }
5620     }
5621     return 0;
5622 }
5623 
5624 int kvm_arch_msi_data_to_gsi(uint32_t data)
5625 {
5626     abort();
5627 }
5628 
5629 bool kvm_has_waitpkg(void)
5630 {
5631     return has_msr_umwait;
5632 }
5633 
5634 bool kvm_arch_cpu_check_are_resettable(void)
5635 {
5636     return !sev_es_enabled();
5637 }
5638 
5639 #define ARCH_REQ_XCOMP_GUEST_PERM       0x1025
5640 
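/*
 * Ask the kernel for permission to use the requested dynamically-enabled
 * XSAVE components (ARCH_REQ_XCOMP_GUEST_PERM), one feature bit at a time.
 */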
5641 void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask)
5642 {
5643     KVMState *s = kvm_state;
5644     uint64_t supported;
5645 
5646     mask &= XSTATE_DYNAMIC_MASK;
5647     if (!mask) {
5648         return;
5649     }
5650     /*
5651      * Just ignore bits that are not in CPUID[EAX=0xD,ECX=0].
5652      * ARCH_REQ_XCOMP_GUEST_PERM would fail, and QEMU has warned
5653      * about them already because they are not supported features.
5654      */
5655     supported = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
5656     supported |= (uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32;
5657     mask &= supported;
5658 
5659     while (mask) {
5660         int bit = ctz64(mask);
5661         int rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit);
5662         if (rc) {
5663             /*
5664              * Older kernel versions (<5.17) do not support
5665              * ARCH_REQ_XCOMP_GUEST_PERM, but they also do not return
5666              * any dynamic features from kvm_arch_get_supported_cpuid.
5667              */
5668             warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure "
5669                         "for feature bit %d", bit);
5670         }
5671         mask &= ~BIT_ULL(bit);
5672     }
5673 }
5674 
5675 static int kvm_arch_get_notify_vmexit(Object *obj, Error **errp)
5676 {
5677     KVMState *s = KVM_STATE(obj);
5678     return s->notify_vmexit;
5679 }
5680 
5681 static void kvm_arch_set_notify_vmexit(Object *obj, int value, Error **errp)
5682 {
5683     KVMState *s = KVM_STATE(obj);
5684 
5685     if (s->fd != -1) {
5686         error_setg(errp, "Cannot set properties after the accelerator has been initialized");
5687         return;
5688     }
5689 
5690     s->notify_vmexit = value;
5691 }
5692 
5693 static void kvm_arch_get_notify_window(Object *obj, Visitor *v,
5694                                        const char *name, void *opaque,
5695                                        Error **errp)
5696 {
5697     KVMState *s = KVM_STATE(obj);
5698     uint32_t value = s->notify_window;
5699 
5700     visit_type_uint32(v, name, &value, errp);
5701 }
5702 
5703 static void kvm_arch_set_notify_window(Object *obj, Visitor *v,
5704                                        const char *name, void *opaque,
5705                                        Error **errp)
5706 {
5707     KVMState *s = KVM_STATE(obj);
5708     uint32_t value;
5709 
5710     if (s->fd != -1) {
5711         error_setg(errp, "Cannot set properties after the accelerator has been initialized");
5712         return;
5713     }
5714 
5715     if (!visit_type_uint32(v, name, &value, errp)) {
5716         return;
5717     }
5718 
5719     s->notify_window = value;
5720 }
5721 
5722 void kvm_arch_accel_class_init(ObjectClass *oc)
5723 {
5724     object_class_property_add_enum(oc, "notify-vmexit", "NotifyVMexitOption",
5725                                    &NotifyVmexitOption_lookup,
5726                                    kvm_arch_get_notify_vmexit,
5727                                    kvm_arch_set_notify_vmexit);
5728     object_class_property_set_description(oc, "notify-vmexit",
5729                                           "Enable notify VM exit");
5730 
5731     object_class_property_add(oc, "notify-window", "uint32",
5732                               kvm_arch_get_notify_window,
5733                               kvm_arch_set_notify_window,
5734                               NULL, NULL);
5735     object_class_property_set_description(oc, "notify-window",
5736                                           "Clock cycles without an event window "
5737                                           "after which a notification VM exit occurs");
5738 }
5739 
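/* Tell KVM the largest APIC ID the VM will use (KVM_CAP_MAX_VCPU_ID). */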
5740 void kvm_set_max_apic_id(uint32_t max_apic_id)
5741 {
5742     kvm_vm_enable_cap(kvm_state, KVM_CAP_MAX_VCPU_ID, 0, max_apic_id);
5743 }
5744