#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "hw/boards.h"
#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
#include "migration/cpu.h"
#include "hyperv.h"
#include "kvm_i386.h"

#include "sysemu/kvm.h"
#include "sysemu/tcg.h"

#include "qemu/error-report.h"

static const VMStateDescription vmstate_segment = {
    .name = "segment",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(selector, SegmentCache),
        VMSTATE_UINTTL(base, SegmentCache),
        VMSTATE_UINT32(limit, SegmentCache),
        VMSTATE_UINT32(flags, SegmentCache),
        VMSTATE_END_OF_LIST()
    }
};

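/*
 * Inline a SegmentCache field into an outer VMStateDescription.  The
 * offsetof() + type_check() sum is a compile-time check that _field
 * really has type SegmentCache; type_check() evaluates to zero.
 */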
#define VMSTATE_SEGMENT(_field, _state) {                            \
    .name       = (stringify(_field)),                               \
    .size       = sizeof(SegmentCache),                              \
    .vmsd       = &vmstate_segment,                                  \
    .flags      = VMS_STRUCT,                                        \
    .offset     = offsetof(_state, _field)                           \
            + type_check(SegmentCache, typeof_field(_state, _field)) \
}

#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)

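/*
 * A 512-bit ZMM register is migrated in 64-bit slices for backward
 * compatibility: quads 0-1 travel as "xmm_reg", quads 2-3 as
 * "ymmh_reg" and quads 4-7 as "zmmh_reg", so streams from QEMUs that
 * predate AVX/AVX-512 support still load.
 */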
static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_XMM_REGS(_field, _state, _start)                         \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_xmm_reg, ZMMReg)

/* YMMH format is the same as XMM, but for bits 128-255 */
static const VMStateDescription vmstate_ymmh_reg = {
    .name = "ymmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v,    \
                             vmstate_ymmh_reg, ZMMReg)

static const VMStateDescription vmstate_zmmh_reg = {
    .name = "zmmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start)                   \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_zmmh_reg, ZMMReg)

#ifdef TARGET_X86_64
static const VMStateDescription vmstate_hi16_zmm_reg = {
    .name = "hi16_zmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_hi16_zmm_reg, ZMMReg)
#endif

static const VMStateDescription vmstate_bnd_regs = {
    .name = "bnd_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(lb, BNDReg),
        VMSTATE_UINT64(ub, BNDReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_BND_REGS(_field, _state, _n)          \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)

static const VMStateDescription vmstate_mtrr_var = {
    .name = "mtrr_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, MTRRVar),
        VMSTATE_UINT64(mask, MTRRVar),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_MTRR_VARS(_field, _state, _n, _v)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)

typedef struct x86_FPReg_tmp {
    FPReg *parent;
    uint64_t tmp_mant;
    uint16_t tmp_exp;
} x86_FPReg_tmp;

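/*
 * An 80-bit x87 register is migrated as a 64-bit mantissa plus a
 * 16-bit sign/exponent word; CPU_LDoubleU overlays both views of the
 * floatx80 value.
 */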
static void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

static floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}

static int fpreg_pre_save(void *opaque)
{
    x86_FPReg_tmp *tmp = opaque;

    /* We save the real CPU data (in case of MMX usage, only 'mant'
       contains the MMX register). */
    cpu_get_fp80(&tmp->tmp_mant, &tmp->tmp_exp, tmp->parent->d);

    return 0;
}

static int fpreg_post_load(void *opaque, int version)
{
    x86_FPReg_tmp *tmp = opaque;

    tmp->parent->d = cpu_set_fp80(tmp->tmp_mant, tmp->tmp_exp);
    return 0;
}

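/*
 * VMSTATE_WITH_TMP wraps each FPReg in a temporary x86_FPReg_tmp
 * during save/load, so the pre_save/post_load hooks above can convert
 * between the floatx80 value and the two integer fields that actually
 * go on the wire.
 */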
184     .name = "fpreg_tmp",
185     .post_load = fpreg_post_load,
186     .pre_save  = fpreg_pre_save,
187     .fields = (VMStateField[]) {
188         VMSTATE_UINT64(tmp_mant, x86_FPReg_tmp),
189         VMSTATE_UINT16(tmp_exp, x86_FPReg_tmp),
190         VMSTATE_END_OF_LIST()
191     }
192 };
193 
194 static const VMStateDescription vmstate_fpreg = {
195     .name = "fpreg",
196     .fields = (VMStateField[]) {
197         VMSTATE_WITH_TMP(FPReg, x86_FPReg_tmp, vmstate_fpreg_tmp),
198         VMSTATE_END_OF_LIST()
199     }
200 };
201 
202 static int cpu_pre_save(void *opaque)
203 {
204     X86CPU *cpu = opaque;
205     CPUX86State *env = &cpu->env;
206     int i;
207 
208     /* FPU */
209     env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
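    /* env->fptags uses 1 = empty; the vmstate tag word uses the FXSAVE
     * convention of 1 = valid, hence the inversion below and the
     * matching ^= 0xff in cpu_post_load(). */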
    env->fptag_vmstate = 0;
    for (i = 0; i < 8; i++) {
        env->fptag_vmstate |= ((!env->fptags[i]) << i);
    }

    env->fpregs_format_vmstate = 0;

    /*
     * Real mode guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it will allow live migration to a host with unrestricted
     * guest support (otherwise the migration will fail with an invalid
     * guest state error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

#ifdef CONFIG_KVM
    /*
     * In case the vCPU may have enabled VMX, we need to make sure the
     * kernel has the required capabilities in order to perform migration
     * correctly:
     *
     * 1) We must be able to extract vCPU nested-state from KVM.
     *
     * 2) In case the vCPU is running in guest-mode and it has a pending
     * exception, we must be able to determine if it's in a pending or
     * injected state.
     * Note that in case KVM doesn't have the required capability to do so,
     * a pending/injected exception will always appear as an
     * injected exception.
     */
    if (kvm_enabled() && cpu_vmx_maybe_enabled(env) &&
        (!env->nested_state ||
         (!kvm_has_exception_payload() && (env->hflags & HF_GUEST_MASK) &&
          env->exception_injected))) {
        error_report("Guest may have enabled nested virtualization but "
                "kernel does not support required capabilities to save "
                "vCPU nested state");
        return -EINVAL;
    }
#endif

    /*
     * When the vCPU is running L2 and an exception is still pending,
     * it can potentially be intercepted by the L1 hypervisor, in
     * contrast to an injected exception, which cannot be intercepted
     * anymore.
     *
     * Furthermore, when an L2 exception is intercepted by the L1
     * hypervisor, its exception payload (CR2/DR6 on #PF/#DB)
     * should not be set yet in the respective vCPU register.
     * Thus, in case an exception is pending, it is
     * important to save the exception payload separately.
     *
     * Therefore, if an exception is not in a pending state
     * or the vCPU is not in guest-mode, it is not important to
     * distinguish between a pending and injected exception
     * and we don't need to store the exception payload separately.
     *
     * In order to preserve better backwards-compatible migration,
     * convert a pending exception to an injected exception in
     * case it is not important to distinguish between them
     * as described above.
     */
    if (env->exception_pending && !(env->hflags & HF_GUEST_MASK)) {
        env->exception_pending = 0;
        env->exception_injected = 1;

        if (env->exception_has_payload) {
            if (env->exception_nr == EXCP01_DB) {
                env->dr[6] = env->exception_payload;
            } else if (env->exception_nr == EXCP0E_PAGE) {
                env->cr[2] = env->exception_payload;
            }
        }
    }

    return 0;
}

static int cpu_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    if (env->tsc_khz && env->user_tsc_khz &&
        env->tsc_khz != env->user_tsc_khz) {
        error_report("Mismatch between user-specified TSC frequency and "
                     "migrated TSC frequency");
        return -EINVAL;
    }

    if (env->fpregs_format_vmstate) {
        error_report("Unsupported old non-softfloat CPU state");
        return -EINVAL;
    }
    /*
     * Real mode guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it will allow live migration from hosts that don't have
     * unrestricted guest support to a host with unrestricted guest
     * support (otherwise the migration will fail with an invalid guest
     * state error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
     * running under KVM.  This is wrong for conforming code segments.
     * Luckily, in our implementation the CPL field of hflags is redundant
     * and we can get the right value from the SS descriptor privilege level.
     */
    env->hflags &= ~HF_CPL_MASK;
    env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;

#ifdef CONFIG_KVM
    if ((env->hflags & HF_GUEST_MASK) &&
        (!env->nested_state ||
        !(env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE))) {
        error_report("vCPU set in guest-mode inconsistent with "
                     "migrated kernel nested state");
        return -EINVAL;
    }
#endif

    /*
     * There are cases where we can get a valid exception_nr with both
     * exception_pending and exception_injected being cleared.
     * This can happen in one of the following scenarios:
     * 1) Source is older QEMU without KVM_CAP_EXCEPTION_PAYLOAD support.
     * 2) Source is running on kernel without KVM_CAP_EXCEPTION_PAYLOAD support.
     * 3) "cpu/exception_info" subsection not sent because there is no exception
     *    pending or guest wasn't running L2 (See comment in cpu_pre_save()).
     *
     * In those cases, we can just deduce that a valid exception_nr means
     * we can treat the exception as already injected.
     */
    if ((env->exception_nr != -1) &&
        !env->exception_pending && !env->exception_injected) {
        env->exception_injected = 1;
    }

    env->fpstt = (env->fpus_vmstate >> 11) & 7;
    env->fpus = env->fpus_vmstate & ~0x3800;
    env->fptag_vmstate ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
    }
    if (tcg_enabled()) {
        target_ulong dr7;
        update_fp_status(env);
        update_mxcsr_status(env);

        cpu_breakpoint_remove_all(cs, BP_CPU);
        cpu_watchpoint_remove_all(cs, BP_CPU);

        /* Indicate all breakpoints disabled, as they are, then
           let the helper re-enable them.  */
        dr7 = env->dr[7];
        env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
        cpu_x86_update_dr7(env, dr7);
    }
    tlb_flush(cs);
    return 0;
}

static bool async_pf_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.async_pf_en_msr != 0;
}

static bool pv_eoi_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.pv_eoi_en_msr != 0;
}

static bool steal_time_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.steal_time_msr != 0;
}

static bool exception_info_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    /*
     * It is important to save exception-info only in case we need to
     * distinguish between a pending and injected exception, which is
     * only required in case there is a pending exception and the vCPU
     * is running L2.
     * For more info, refer to comment in cpu_pre_save().
     */
    return env->exception_pending && (env->hflags & HF_GUEST_MASK);
}

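/*
 * Each subsection below is only put on the wire when its .needed
 * callback returns true, which keeps the migration stream compatible
 * with older QEMUs that do not know about the optional state.
 */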
static const VMStateDescription vmstate_exception_info = {
    .name = "cpu/exception_info",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = exception_info_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(env.exception_pending, X86CPU),
        VMSTATE_UINT8(env.exception_injected, X86CPU),
        VMSTATE_UINT8(env.exception_has_payload, X86CPU),
        VMSTATE_UINT64(env.exception_payload, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_steal_time_msr = {
    .name = "cpu/steal_time_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = steal_time_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.steal_time_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_async_pf_msr = {
    .name = "cpu/async_pf_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = async_pf_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pv_eoi_msr = {
    .name = "cpu/async_pv_eoi_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pv_eoi_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool fpop_ip_dp_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
}

static const VMStateDescription vmstate_fpop_ip_dp = {
    .name = "cpu/fpop_ip_dp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpop_ip_dp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(env.fpop, X86CPU),
        VMSTATE_UINT64(env.fpip, X86CPU),
        VMSTATE_UINT64(env.fpdp, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tsc_adjust_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_adjust != 0;
}

static const VMStateDescription vmstate_msr_tsc_adjust = {
    .name = "cpu/msr_tsc_adjust",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_adjust_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_adjust, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool msr_smi_count_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return cpu->migrate_smi_count && env->msr_smi_count != 0;
}

static const VMStateDescription vmstate_msr_smi_count = {
    .name = "cpu/msr_smi_count",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = msr_smi_count_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_smi_count, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tscdeadline_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_deadline != 0;
}

static const VMStateDescription vmstate_msr_tscdeadline = {
    .name = "cpu/msr_tscdeadline",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tscdeadline_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_deadline, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool misc_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
}

static bool feature_control_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_feature_control != 0;
}

static const VMStateDescription vmstate_msr_ia32_misc_enable = {
    .name = "cpu/msr_ia32_misc_enable",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = misc_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_msr_ia32_feature_control = {
    .name = "cpu/msr_ia32_feature_control",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = feature_control_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmu_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
        env->msr_global_status || env->msr_global_ovf_ctrl) {
        return true;
    }
    for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
        if (env->msr_fixed_counters[i]) {
            return true;
        }
    }
    for (i = 0; i < MAX_GP_COUNTERS; i++) {
        if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_architectural_pmu = {
    .name = "cpu/msr_architectural_pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_status, X86CPU),
        VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_END_OF_LIST()
    }
};

static bool mpx_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < 4; i++) {
        if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
            return true;
        }
    }

    if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
        return true;
    }

    return !!env->msr_bndcfgs;
}

static const VMStateDescription vmstate_mpx = {
    .name = "cpu/mpx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mpx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
        VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
        VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
        VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_hypercall_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
}

static const VMStateDescription vmstate_msr_hypercall_hypercall = {
    .name = "cpu/msr_hyperv_hypercall",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_hypercall_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
        VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_vapic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_vapic != 0;
}

static const VMStateDescription vmstate_msr_hyperv_vapic = {
    .name = "cpu/msr_hyperv_vapic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_vapic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_time_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_tsc != 0;
}

static const VMStateDescription vmstate_msr_hyperv_time = {
    .name = "cpu/msr_hyperv_time",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_time_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_crash_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < HV_CRASH_PARAMS; i++) {
        if (env->msr_hv_crash_params[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_crash = {
    .name = "cpu/msr_hyperv_crash",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_crash_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params, X86CPU, HV_CRASH_PARAMS),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_runtime_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_RUNTIME)) {
        return false;
    }

    return env->msr_hv_runtime != 0;
}

static const VMStateDescription vmstate_msr_hyperv_runtime = {
    .name = "cpu/msr_hyperv_runtime",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_runtime_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_synic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_hv_synic_control != 0 ||
        env->msr_hv_synic_evt_page != 0 ||
        env->msr_hv_synic_msg_page != 0) {
        return true;
    }

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
        if (env->msr_hv_synic_sint[i] != 0) {
            return true;
        }
    }

    return false;
}

static int hyperv_synic_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    hyperv_x86_synic_update(cpu);
    return 0;
}

static const VMStateDescription vmstate_msr_hyperv_synic = {
    .name = "cpu/msr_hyperv_synic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_synic_enable_needed,
    .post_load = hyperv_synic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU, HV_SINT_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_stimer_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
        if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_stimer = {
    .name = "cpu/msr_hyperv_stimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_stimer_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config, X86CPU,
                             HV_STIMER_COUNT),
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count, X86CPU, HV_STIMER_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_reenlightenment_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_reenlightenment_control != 0 ||
        env->msr_hv_tsc_emulation_control != 0 ||
        env->msr_hv_tsc_emulation_status != 0;
}

static const VMStateDescription vmstate_msr_hyperv_reenlightenment = {
    .name = "cpu/msr_hyperv_reenlightenment",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_reenlightenment_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_reenlightenment_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_tsc_emulation_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_tsc_emulation_status, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool avx512_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < NB_OPMASK_REGS; i++) {
        if (env->opmask_regs[i]) {
            return true;
        }
    }

    for (i = 0; i < CPU_NB_REGS; i++) {
#define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
        if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
            ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
            return true;
        }
#ifdef TARGET_X86_64
        if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
            ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
            ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
            ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
            return true;
        }
#endif
    }

    return false;
}

static const VMStateDescription vmstate_avx512 = {
    .name = "cpu/avx512",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = avx512_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
        VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
#ifdef TARGET_X86_64
        VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
#endif
        VMSTATE_END_OF_LIST()
    }
};

static bool xss_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->xss != 0;
}

static const VMStateDescription vmstate_xss = {
    .name = "cpu/xss",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xss_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.xss, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef TARGET_X86_64
static bool pkru_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->pkru != 0;
}

static const VMStateDescription vmstate_pkru = {
    .name = "cpu/pkru",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkru_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.pkru, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif

static bool tsc_khz_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
    PCMachineClass *pcmc = PC_MACHINE_CLASS(mc);
    return env->tsc_khz && pcmc->save_tsc_khz;
}

static const VMStateDescription vmstate_tsc_khz = {
    .name = "cpu/tsc_khz",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_khz_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(env.tsc_khz, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef CONFIG_KVM

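/*
 * The opaque for the VMX subsections below is the struct
 * kvm_nested_state itself rather than the X86CPU: vmcs12 data is only
 * sent when nested_state->size shows the kernel actually filled it in.
 */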
static bool vmx_vmcs12_needed(void *opaque)
{
    struct kvm_nested_state *nested_state = opaque;
    return (nested_state->size >
            offsetof(struct kvm_nested_state, data.vmx[0].vmcs12));
}

static const VMStateDescription vmstate_vmx_vmcs12 = {
    .name = "cpu/kvm_nested_state/vmx/vmcs12",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_vmcs12_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(data.vmx[0].vmcs12,
                            struct kvm_nested_state,
                            KVM_STATE_NESTED_VMX_VMCS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};

static bool vmx_shadow_vmcs12_needed(void *opaque)
{
    struct kvm_nested_state *nested_state = opaque;
    return (nested_state->size >
            offsetof(struct kvm_nested_state, data.vmx[0].shadow_vmcs12));
}

static const VMStateDescription vmstate_vmx_shadow_vmcs12 = {
    .name = "cpu/kvm_nested_state/vmx/shadow_vmcs12",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_shadow_vmcs12_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(data.vmx[0].shadow_vmcs12,
                            struct kvm_nested_state,
                            KVM_STATE_NESTED_VMX_VMCS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};

static bool vmx_nested_state_needed(void *opaque)
{
    struct kvm_nested_state *nested_state = opaque;

    return (nested_state->format == KVM_STATE_NESTED_FORMAT_VMX &&
            nested_state->hdr.vmx.vmxon_pa != -1ull);
}

static const VMStateDescription vmstate_vmx_nested_state = {
    .name = "cpu/kvm_nested_state/vmx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_nested_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_U64(hdr.vmx.vmxon_pa, struct kvm_nested_state),
        VMSTATE_U64(hdr.vmx.vmcs12_pa, struct kvm_nested_state),
        VMSTATE_U16(hdr.vmx.smm.flags, struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_vmx_vmcs12,
        &vmstate_vmx_shadow_vmcs12,
        NULL,
    }
};

static bool nested_state_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return (env->nested_state &&
            vmx_nested_state_needed(env->nested_state));
}

static int nested_state_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    struct kvm_nested_state *nested_state = env->nested_state;
    int min_nested_state_len = offsetof(struct kvm_nested_state, data);
    int max_nested_state_len = kvm_max_nested_state_length();

    /*
     * If our kernel doesn't support setting nested state
     * and we have received nested state from the migration stream,
     * we need to fail migration.
     */
    if (max_nested_state_len <= 0) {
        error_report("Received nested state when kernel cannot restore it");
        return -EINVAL;
    }

    /*
     * Verify that the size of the received nested_state struct
     * at least covers the required header and is not larger
     * than the max size that our kernel supports.
     */
    if (nested_state->size < min_nested_state_len) {
        error_report("Received nested state size less than min: "
                     "len=%d, min=%d",
                     nested_state->size, min_nested_state_len);
        return -EINVAL;
    }
    if (nested_state->size > max_nested_state_len) {
        error_report("Received unsupported nested state size: "
                     "nested_state->size=%d, max=%d",
                     nested_state->size, max_nested_state_len);
        return -EINVAL;
    }

    /* Verify format is valid */
    if ((nested_state->format != KVM_STATE_NESTED_FORMAT_VMX) &&
        (nested_state->format != KVM_STATE_NESTED_FORMAT_SVM)) {
        error_report("Received invalid nested state format: %d",
                     nested_state->format);
        return -EINVAL;
    }

    return 0;
}

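/*
 * Only the fixed-size header (flags/format/size) is described here;
 * the variable-size data area travels in the VMX subsection above.
 */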
static const VMStateDescription vmstate_kvm_nested_state = {
    .name = "cpu/kvm_nested_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_U16(flags, struct kvm_nested_state),
        VMSTATE_U16(format, struct kvm_nested_state),
        VMSTATE_U32(size, struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_vmx_nested_state,
        NULL
    }
};

static const VMStateDescription vmstate_nested_state = {
    .name = "cpu/nested_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = nested_state_needed,
    .post_load = nested_state_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_POINTER(env.nested_state, X86CPU,
                vmstate_kvm_nested_state,
                struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    }
};

#endif

static bool mcg_ext_ctl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    return cpu->enable_lmce && env->mcg_ext_ctl;
}

static const VMStateDescription vmstate_mcg_ext_ctl = {
    .name = "cpu/mcg_ext_ctl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mcg_ext_ctl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool spec_ctrl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->spec_ctrl != 0;
}

static const VMStateDescription vmstate_spec_ctrl = {
    .name = "cpu/spec_ctrl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spec_ctrl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.spec_ctrl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool intel_pt_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_rtit_ctrl || env->msr_rtit_status ||
        env->msr_rtit_output_base || env->msr_rtit_output_mask ||
        env->msr_rtit_cr3_match) {
        return true;
    }

    for (i = 0; i < MAX_RTIT_ADDRS; i++) {
        if (env->msr_rtit_addrs[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_intel_pt = {
    .name = "cpu/intel_pt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_pt_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_rtit_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_status, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_base, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_mask, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_cr3_match, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_rtit_addrs, X86CPU, MAX_RTIT_ADDRS),
        VMSTATE_END_OF_LIST()
    }
};

static bool virt_ssbd_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->virt_ssbd != 0;
}

static const VMStateDescription vmstate_msr_virt_ssbd = {
    .name = "cpu/virt_ssbd",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = virt_ssbd_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.virt_ssbd, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool svm_npt_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return !!(env->hflags2 & HF2_NPT_MASK);
}

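/* NB: the "svn_npt" spelling below is kept as-is: the section name is
 * part of the migration stream format, so renaming it would break
 * compatibility with existing streams. */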
static const VMStateDescription vmstate_svm_npt = {
    .name = "cpu/svn_npt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = svm_npt_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.nested_cr3, X86CPU),
        VMSTATE_UINT32(env.nested_pg_mode, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifndef TARGET_X86_64
static bool intel_efer32_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->efer != 0;
}

static const VMStateDescription vmstate_efer32 = {
    .name = "cpu/efer32",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_efer32_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif

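/*
 * Top-level CPU state.  The _V(..., 12) fields at the end of .fields
 * were added in version_id 12; all optional state lives in the
 * .subsections list below.
 */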
VMStateDescription vmstate_x86_cpu = {
    .name = "cpu",
    .version_id = 12,
    .minimum_version_id = 11,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
        VMSTATE_UINTTL(env.eip, X86CPU),
        VMSTATE_UINTTL(env.eflags, X86CPU),
        VMSTATE_UINT32(env.hflags, X86CPU),
        /* FPU */
        VMSTATE_UINT16(env.fpuc, X86CPU),
        VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
        VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
        VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),

        VMSTATE_STRUCT_ARRAY(env.fpregs, X86CPU, 8, 0, vmstate_fpreg, FPReg),

        VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
        VMSTATE_SEGMENT(env.ldt, X86CPU),
        VMSTATE_SEGMENT(env.tr, X86CPU),
        VMSTATE_SEGMENT(env.gdt, X86CPU),
        VMSTATE_SEGMENT(env.idt, X86CPU),

        VMSTATE_UINT32(env.sysenter_cs, X86CPU),
        VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
        VMSTATE_UINTTL(env.sysenter_eip, X86CPU),

        VMSTATE_UINTTL(env.cr[0], X86CPU),
        VMSTATE_UINTTL(env.cr[2], X86CPU),
        VMSTATE_UINTTL(env.cr[3], X86CPU),
        VMSTATE_UINTTL(env.cr[4], X86CPU),
        VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
        /* MMU */
        VMSTATE_INT32(env.a20_mask, X86CPU),
        /* XMM */
        VMSTATE_UINT32(env.mxcsr, X86CPU),
        VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),

#ifdef TARGET_X86_64
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_UINT64(env.star, X86CPU),
        VMSTATE_UINT64(env.lstar, X86CPU),
        VMSTATE_UINT64(env.cstar, X86CPU),
        VMSTATE_UINT64(env.fmask, X86CPU),
        VMSTATE_UINT64(env.kernelgsbase, X86CPU),
#endif
        VMSTATE_UINT32(env.smbase, X86CPU),

        VMSTATE_UINT64(env.pat, X86CPU),
        VMSTATE_UINT32(env.hflags2, X86CPU),

        VMSTATE_UINT64(env.vm_hsave, X86CPU),
        VMSTATE_UINT64(env.vm_vmcb, X86CPU),
        VMSTATE_UINT64(env.tsc_offset, X86CPU),
        VMSTATE_UINT64(env.intercept, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_write, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_write, X86CPU),
        VMSTATE_UINT32(env.intercept_exceptions, X86CPU),
        VMSTATE_UINT8(env.v_tpr, X86CPU),
        /* MTRRs */
        VMSTATE_UINT64_ARRAY(env.mtrr_fixed, X86CPU, 11),
        VMSTATE_UINT64(env.mtrr_deftype, X86CPU),
        VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
        /* KVM-related states */
        VMSTATE_INT32(env.interrupt_injected, X86CPU),
        VMSTATE_UINT32(env.mp_state, X86CPU),
        VMSTATE_UINT64(env.tsc, X86CPU),
        VMSTATE_INT32(env.exception_nr, X86CPU),
        VMSTATE_UINT8(env.soft_interrupt, X86CPU),
        VMSTATE_UINT8(env.nmi_injected, X86CPU),
        VMSTATE_UINT8(env.nmi_pending, X86CPU),
        VMSTATE_UINT8(env.has_error_code, X86CPU),
        VMSTATE_UINT32(env.sipi_vector, X86CPU),
        /* MCE */
        VMSTATE_UINT64(env.mcg_cap, X86CPU),
        VMSTATE_UINT64(env.mcg_status, X86CPU),
        VMSTATE_UINT64(env.mcg_ctl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4),
        /* rdtscp */
        VMSTATE_UINT64(env.tsc_aux, X86CPU),
        /* KVM pvclock msr */
        VMSTATE_UINT64(env.system_time_msr, X86CPU),
        VMSTATE_UINT64(env.wall_clock_msr, X86CPU),
        /* XSAVE related fields */
        VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
        VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
        VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
        VMSTATE_END_OF_LIST()
        /* The above list is not sorted w.r.t. version numbers, watch out! */
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_exception_info,
        &vmstate_async_pf_msr,
        &vmstate_pv_eoi_msr,
        &vmstate_steal_time_msr,
        &vmstate_fpop_ip_dp,
        &vmstate_msr_tsc_adjust,
        &vmstate_msr_tscdeadline,
        &vmstate_msr_ia32_misc_enable,
        &vmstate_msr_ia32_feature_control,
        &vmstate_msr_architectural_pmu,
        &vmstate_mpx,
        &vmstate_msr_hypercall_hypercall,
        &vmstate_msr_hyperv_vapic,
        &vmstate_msr_hyperv_time,
        &vmstate_msr_hyperv_crash,
        &vmstate_msr_hyperv_runtime,
        &vmstate_msr_hyperv_synic,
        &vmstate_msr_hyperv_stimer,
        &vmstate_msr_hyperv_reenlightenment,
        &vmstate_avx512,
        &vmstate_xss,
        &vmstate_tsc_khz,
        &vmstate_msr_smi_count,
#ifdef TARGET_X86_64
        &vmstate_pkru,
#endif
        &vmstate_spec_ctrl,
        &vmstate_mcg_ext_ctl,
        &vmstate_msr_intel_pt,
        &vmstate_msr_virt_ssbd,
        &vmstate_svm_npt,
#ifndef TARGET_X86_64
        &vmstate_efer32,
#endif
#ifdef CONFIG_KVM
        &vmstate_nested_state,
#endif
        NULL
    }
};