1fcf5ef2aSThomas Huth #include "qemu/osdep.h"
2fcf5ef2aSThomas Huth #include "cpu.h"
3fcf5ef2aSThomas Huth #include "exec/exec-all.h"
4fcf5ef2aSThomas Huth #include "hw/isa/isa.h"
5fcf5ef2aSThomas Huth #include "migration/cpu.h"
6a9dc68d9SClaudio Fontana #include "kvm/hyperv.h"
789a289c7SPaolo Bonzini #include "hw/i386/x86.h"
8a9dc68d9SClaudio Fontana #include "kvm/kvm_i386.h"
9c345104cSJoao Martins #include "hw/xen/xen.h"
10fcf5ef2aSThomas Huth
11fcf5ef2aSThomas Huth #include "sysemu/kvm.h"
12c345104cSJoao Martins #include "sysemu/kvm_xen.h"
1314a48c1dSMarkus Armbruster #include "sysemu/tcg.h"
14fcf5ef2aSThomas Huth
15fcf5ef2aSThomas Huth #include "qemu/error-report.h"
16fcf5ef2aSThomas Huth
/*
 * Wire format for one segment-register cache: selector, base, limit
 * and flags.  'base' is VMSTATE_UINTTL, i.e. target_ulong sized.
 * Do not reorder fields: the order is the migration stream format.
 */
static const VMStateDescription vmstate_segment = {
    .name = "segment",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(selector, SegmentCache),
        VMSTATE_UINTTL(base, SegmentCache),
        VMSTATE_UINT32(limit, SegmentCache),
        VMSTATE_UINT32(flags, SegmentCache),
        VMSTATE_END_OF_LIST()
    }
};

/* Describe a single SegmentCache member of _state via vmstate_segment. */
#define VMSTATE_SEGMENT(_field, _state) {                            \
    .name = (stringify(_field)),                                     \
    .size = sizeof(SegmentCache),                                    \
    .vmsd = &vmstate_segment,                                        \
    .flags = VMS_STRUCT,                                             \
    .offset = offsetof(_state, _field)                               \
            + type_check(SegmentCache,typeof_field(_state, _field))  \
}

/* Describe a fixed-size array of _n SegmentCache members of _state. */
#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)
41fcf5ef2aSThomas Huth
/* XMM portion of a ZMM register: the two low 64-bit lanes (ZMM_Q 0-1). */
static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

/* XMM state for registers _start .. _start + CPU_NB_REGS - 1. */
#define VMSTATE_XMM_REGS(_field, _state, _start)                         \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_xmm_reg, ZMMReg)
56fcf5ef2aSThomas Huth
/* YMMH format is the same as XMM, but for bits 128-255 (ZMM_Q 2-3). */
static const VMStateDescription vmstate_ymmh_reg = {
    .name = "ymmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

/* YMMH state for registers _start onward; _v is the minimum version. */
#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v,    \
                             vmstate_ymmh_reg, ZMMReg)
72fcf5ef2aSThomas Huth
/* Upper 256 bits of a ZMM register (ZMM_Q 4-7). */
static const VMStateDescription vmstate_zmmh_reg = {
    .name = "zmmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

/* ZMMH state for registers _start .. _start + CPU_NB_REGS - 1. */
#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start)                   \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_zmmh_reg, ZMMReg)
89fcf5ef2aSThomas Huth
#ifdef TARGET_X86_64
/*
 * Full 512-bit register contents (all eight 64-bit lanes).  The "hi16"
 * name suggests this covers the upper sixteen ZMM registers, which only
 * exist on 64-bit targets -- hence the TARGET_X86_64 guard.
 */
static const VMStateDescription vmstate_hi16_zmm_reg = {
    .name = "hi16_zmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

/* Whole-register state for registers _start .. _start + CPU_NB_REGS - 1. */
#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_hi16_zmm_reg, ZMMReg)
#endif
112fcf5ef2aSThomas Huth
/* MPX bound register: lower and upper bound. */
static const VMStateDescription vmstate_bnd_regs = {
    .name = "bnd_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(lb, BNDReg),
        VMSTATE_UINT64(ub, BNDReg),
        VMSTATE_END_OF_LIST()
    }
};

/* Array of _n MPX bound registers. */
#define VMSTATE_BND_REGS(_field, _state, _n)                             \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)
126fcf5ef2aSThomas Huth
/* One variable-range MTRR: base and mask MSR pair. */
static const VMStateDescription vmstate_mtrr_var = {
    .name = "mtrr_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(base, MTRRVar),
        VMSTATE_UINT64(mask, MTRRVar),
        VMSTATE_END_OF_LIST()
    }
};

/* Array of _n variable-range MTRRs; _v is the minimum version. */
#define VMSTATE_MTRR_VARS(_field, _state, _n, _v)                        \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)
140fcf5ef2aSThomas Huth
/* One last-branch-record entry: from/to addresses plus info word. */
static const VMStateDescription vmstate_lbr_records_var = {
    .name = "lbr_records_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(from, LBREntry),
        VMSTATE_UINT64(to, LBREntry),
        VMSTATE_UINT64(info, LBREntry),
        VMSTATE_END_OF_LIST()
    }
};

/* Array of _n LBR entries; _v is the minimum version. */
#define VMSTATE_LBR_VARS(_field, _state, _n, _v)                         \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_lbr_records_var, \
                         LBREntry)
156f2e7c2fcSYang Weijiang
/*
 * Scratch view of one FPReg used with VMSTATE_WITH_TMP: the 80-bit
 * floatx80 is split into a 64-bit mantissa and a 16-bit exponent for
 * the migration stream.  'parent' points back at the real register.
 */
typedef struct x86_FPReg_tmp {
    FPReg *parent;       /* the FPReg being saved/loaded */
    uint64_t tmp_mant;   /* mantissa (low 64 bits of the 80-bit value) */
    uint16_t tmp_exp;    /* sign+exponent (high 16 bits) */
} x86_FPReg_tmp;
162fcf5ef2aSThomas Huth
/* Split an 80-bit float into its mantissa and sign/exponent halves. */
static void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU u = { .d = f };

    *pmant = u.l.lower;
    *pexp = u.l.upper;
}
171db573d2cSYang Zhong
cpu_set_fp80(uint64_t mant,uint16_t upper)172db573d2cSYang Zhong static floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
173db573d2cSYang Zhong {
174db573d2cSYang Zhong CPU_LDoubleU temp;
175db573d2cSYang Zhong
176db573d2cSYang Zhong temp.l.upper = upper;
177db573d2cSYang Zhong temp.l.lower = mant;
178db573d2cSYang Zhong return temp.d;
179db573d2cSYang Zhong }
180db573d2cSYang Zhong
fpreg_pre_save(void * opaque)18144b1ff31SDr. David Alan Gilbert static int fpreg_pre_save(void *opaque)
182fcf5ef2aSThomas Huth {
183ab808276SDr. David Alan Gilbert x86_FPReg_tmp *tmp = opaque;
184ab808276SDr. David Alan Gilbert
185fcf5ef2aSThomas Huth /* we save the real CPU data (in case of MMX usage only 'mant'
186fcf5ef2aSThomas Huth contains the MMX register */
187ab808276SDr. David Alan Gilbert cpu_get_fp80(&tmp->tmp_mant, &tmp->tmp_exp, tmp->parent->d);
18844b1ff31SDr. David Alan Gilbert
18944b1ff31SDr. David Alan Gilbert return 0;
190ab808276SDr. David Alan Gilbert }
1912c21ee76SJianjun Duan
fpreg_post_load(void * opaque,int version)192ab808276SDr. David Alan Gilbert static int fpreg_post_load(void *opaque, int version)
193ab808276SDr. David Alan Gilbert {
194ab808276SDr. David Alan Gilbert x86_FPReg_tmp *tmp = opaque;
195ab808276SDr. David Alan Gilbert
196ab808276SDr. David Alan Gilbert tmp->parent->d = cpu_set_fp80(tmp->tmp_mant, tmp->tmp_exp);
1972c21ee76SJianjun Duan return 0;
198fcf5ef2aSThomas Huth }
199fcf5ef2aSThomas Huth
/* Wire format of the decomposed FP register (see x86_FPReg_tmp). */
static const VMStateDescription vmstate_fpreg_tmp = {
    .name = "fpreg_tmp",
    .post_load = fpreg_post_load,
    .pre_save  = fpreg_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(tmp_mant, x86_FPReg_tmp),
        VMSTATE_UINT16(tmp_exp, x86_FPReg_tmp),
        VMSTATE_END_OF_LIST()
    }
};
210ab808276SDr. David Alan Gilbert
/* An FPReg migrates through the temporary mant/exp representation. */
static const VMStateDescription vmstate_fpreg = {
    .name = "fpreg",
    .fields = (const VMStateField[]) {
        VMSTATE_WITH_TMP(FPReg, x86_FPReg_tmp, vmstate_fpreg_tmp),
        VMSTATE_END_OF_LIST()
    }
};
218fcf5ef2aSThomas Huth
/*
 * Prepare the X86CPU state for saving: pack the FPU status/tag words
 * into their vmstate shadows, normalise real-mode segment DPLs, reject
 * un-migratable nested-VMX configurations, and fold a pending exception
 * into an injected one when the distinction does not matter.
 * Returns 0 on success, -EINVAL if the state cannot be migrated.
 */
static int cpu_pre_save(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;
    /* Mirror the V_TPR bits of int_ctl into the migrated v_tpr field. */
    env->v_tpr = env->int_ctl & V_TPR_MASK;
    /* FPU: fold the 3-bit top-of-stack into bits 11-13 of the status word. */
    env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    /* Tag word: one bit per register, inverted (1 = valid). */
    env->fptag_vmstate = 0;
    for(i = 0; i < 8; i++) {
        env->fptag_vmstate |= ((!env->fptags[i]) << i);
    }

    /* 0 = softfloat format; the only format cpu_post_load() accepts. */
    env->fpregs_format_vmstate = 0;

    /*
     * Real mode guest segments register DPL should be zero.
     * Older KVM version were setting it wrongly.
     * Fixing it will allow live migration to host with unrestricted guest
     * support (otherwise the migration will fail with invalid guest state
     * error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

#ifdef CONFIG_KVM
    /*
     * In case vCPU may have enabled VMX, we need to make sure kernel have
     * required capabilities in order to perform migration correctly:
     *
     * 1) We must be able to extract vCPU nested-state from KVM.
     *
     * 2) In case vCPU is running in guest-mode and it has a pending exception,
     * we must be able to determine if it's in a pending or injected state.
     * Note that in case KVM don't have required capability to do so,
     * a pending/injected exception will always appear as an
     * injected exception.
     */
    if (kvm_enabled() && cpu_vmx_maybe_enabled(env) &&
        (!env->nested_state ||
         (!kvm_has_exception_payload() && (env->hflags & HF_GUEST_MASK) &&
          env->exception_injected))) {
        error_report("Guest maybe enabled nested virtualization but kernel "
                     "does not support required capabilities to save vCPU "
                     "nested state");
        return -EINVAL;
    }
#endif

    /*
     * When vCPU is running L2 and exception is still pending,
     * it can potentially be intercepted by L1 hypervisor.
     * In contrast to an injected exception which cannot be
     * intercepted anymore.
     *
     * Furthermore, when a L2 exception is intercepted by L1
     * hypervisor, its exception payload (CR2/DR6 on #PF/#DB)
     * should not be set yet in the respective vCPU register.
     * Thus, in case an exception is pending, it is
     * important to save the exception payload separately.
     *
     * Therefore, if an exception is not in a pending state
     * or vCPU is not in guest-mode, it is not important to
     * distinguish between a pending and injected exception
     * and we don't need to store separately the exception payload.
     *
     * In order to preserve better backwards-compatible migration,
     * convert a pending exception to an injected exception in
     * case it is not important to distinguish between them
     * as described above.
     */
    if (env->exception_pending && !(env->hflags & HF_GUEST_MASK)) {
        env->exception_pending = 0;
        env->exception_injected = 1;

        /* Deliver the payload into the architectural register now. */
        if (env->exception_has_payload) {
            if (env->exception_nr == EXCP01_DB) {
                env->dr[6] = env->exception_payload;
            } else if (env->exception_nr == EXCP0E_PAGE) {
                env->cr[2] = env->exception_payload;
            }
        }
    }

    return 0;
}
312fcf5ef2aSThomas Huth
/*
 * Fix up the X86CPU state after loading: validate TSC frequency and FP
 * register format, normalise real-mode segment DPLs, recompute CPL,
 * sanity-check nested state, unpack the FPU status/tag words, and (under
 * TCG) refresh FP/MXCSR status and re-arm breakpoints from DR7.
 * Returns 0 on success, -EINVAL on inconsistent incoming state.
 */
static int cpu_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    /* A user-specified TSC frequency must match what was migrated. */
    if (env->tsc_khz && env->user_tsc_khz &&
        env->tsc_khz != env->user_tsc_khz) {
        error_report("Mismatch between user-specified TSC frequency and "
                     "migrated TSC frequency");
        return -EINVAL;
    }

    /* Only the softfloat format (0) is supported; see cpu_pre_save(). */
    if (env->fpregs_format_vmstate) {
        error_report("Unsupported old non-softfloat CPU state");
        return -EINVAL;
    }
    /*
     * Real mode guest segments register DPL should be zero.
     * Older KVM version were setting it wrongly.
     * Fixing it will allow live migration from such host that don't have
     * restricted guest support to a host with unrestricted guest support
     * (otherwise the migration will fail with invalid guest state
     * error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
     * running under KVM.  This is wrong for conforming code segments.
     * Luckily, in our implementation the CPL field of hflags is redundant
     * and we can get the right value from the SS descriptor privilege level.
     */
    env->hflags &= ~HF_CPL_MASK;
    env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;

#ifdef CONFIG_KVM
    /* Guest-mode hflag must agree with the migrated KVM nested state. */
    if ((env->hflags & HF_GUEST_MASK) &&
        (!env->nested_state ||
        !(env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE))) {
        error_report("vCPU set in guest-mode inconsistent with "
                     "migrated kernel nested state");
        return -EINVAL;
    }
#endif

    /*
     * There are cases that we can get valid exception_nr with both
     * exception_pending and exception_injected being cleared.
     * This can happen in one of the following scenarios:
     * 1) Source is older QEMU without KVM_CAP_EXCEPTION_PAYLOAD support.
     * 2) Source is running on kernel without KVM_CAP_EXCEPTION_PAYLOAD support.
     * 3) "cpu/exception_info" subsection not sent because there is no exception
     *    pending or guest wasn't running L2 (See comment in cpu_pre_save()).
     *
     * In those cases, we can just deduce that a valid exception_nr means
     * we can treat the exception as already injected.
     */
    if ((env->exception_nr != -1) &&
        !env->exception_pending && !env->exception_injected) {
        env->exception_injected = 1;
    }

    /* Unpack the FPU shadows written by cpu_pre_save() on the source. */
    env->fpstt = (env->fpus_vmstate >> 11) & 7;
    env->fpus = env->fpus_vmstate & ~0x3800;
    env->fptag_vmstate ^= 0xff;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
    }
    if (tcg_enabled()) {
        target_ulong dr7;
        update_fp_status(env);
        update_mxcsr_status(env);

        cpu_breakpoint_remove_all(cs, BP_CPU);
        cpu_watchpoint_remove_all(cs, BP_CPU);

        /* Indicate all breakpoints disabled, as they are, then
           let the helper re-enable them.  */
        dr7 = env->dr[7];
        env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
        cpu_x86_update_dr7(env, dr7);
    }
    /* Loaded control registers may change the valid translations. */
    tlb_flush(cs);
    return 0;
}
407fcf5ef2aSThomas Huth
async_pf_msr_needed(void * opaque)408fcf5ef2aSThomas Huth static bool async_pf_msr_needed(void *opaque)
409fcf5ef2aSThomas Huth {
410fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
411fcf5ef2aSThomas Huth
412fcf5ef2aSThomas Huth return cpu->env.async_pf_en_msr != 0;
413fcf5ef2aSThomas Huth }
414fcf5ef2aSThomas Huth
async_pf_int_msr_needed(void * opaque)415db5daafaSVitaly Kuznetsov static bool async_pf_int_msr_needed(void *opaque)
416db5daafaSVitaly Kuznetsov {
417db5daafaSVitaly Kuznetsov X86CPU *cpu = opaque;
418db5daafaSVitaly Kuznetsov
419db5daafaSVitaly Kuznetsov return cpu->env.async_pf_int_msr != 0;
420db5daafaSVitaly Kuznetsov }
421db5daafaSVitaly Kuznetsov
pv_eoi_msr_needed(void * opaque)422fcf5ef2aSThomas Huth static bool pv_eoi_msr_needed(void *opaque)
423fcf5ef2aSThomas Huth {
424fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
425fcf5ef2aSThomas Huth
426fcf5ef2aSThomas Huth return cpu->env.pv_eoi_en_msr != 0;
427fcf5ef2aSThomas Huth }
428fcf5ef2aSThomas Huth
steal_time_msr_needed(void * opaque)429fcf5ef2aSThomas Huth static bool steal_time_msr_needed(void *opaque)
430fcf5ef2aSThomas Huth {
431fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
432fcf5ef2aSThomas Huth
433fcf5ef2aSThomas Huth return cpu->env.steal_time_msr != 0;
434fcf5ef2aSThomas Huth }
435fcf5ef2aSThomas Huth
exception_info_needed(void * opaque)436fd13f23bSLiran Alon static bool exception_info_needed(void *opaque)
437fd13f23bSLiran Alon {
438fd13f23bSLiran Alon X86CPU *cpu = opaque;
439fd13f23bSLiran Alon CPUX86State *env = &cpu->env;
440fd13f23bSLiran Alon
441fd13f23bSLiran Alon /*
442fd13f23bSLiran Alon * It is important to save exception-info only in case
4437332a4a4SCameron Esfahani * we need to distinguish between a pending and injected
444fd13f23bSLiran Alon * exception. Which is only required in case there is a
445fd13f23bSLiran Alon * pending exception and vCPU is running L2.
446fd13f23bSLiran Alon * For more info, refer to comment in cpu_pre_save().
447fd13f23bSLiran Alon */
448fd13f23bSLiran Alon return env->exception_pending && (env->hflags & HF_GUEST_MASK);
449fd13f23bSLiran Alon }
450fd13f23bSLiran Alon
/* Pending/injected exception state plus its payload (see cpu_pre_save()). */
static const VMStateDescription vmstate_exception_info = {
    .name = "cpu/exception_info",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = exception_info_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(env.exception_pending, X86CPU),
        VMSTATE_UINT8(env.exception_injected, X86CPU),
        VMSTATE_UINT8(env.exception_has_payload, X86CPU),
        VMSTATE_UINT64(env.exception_payload, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
464fd13f23bSLiran Alon
465d645e132SMarcelo Tosatti /* Poll control MSR enabled by default */
poll_control_msr_needed(void * opaque)466d645e132SMarcelo Tosatti static bool poll_control_msr_needed(void *opaque)
467d645e132SMarcelo Tosatti {
468d645e132SMarcelo Tosatti X86CPU *cpu = opaque;
469d645e132SMarcelo Tosatti
470d645e132SMarcelo Tosatti return cpu->env.poll_control_msr != 1;
471d645e132SMarcelo Tosatti }
472d645e132SMarcelo Tosatti
/* Optional subsection: KVM steal-time MSR. */
static const VMStateDescription vmstate_steal_time_msr = {
    .name = "cpu/steal_time_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = steal_time_msr_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.steal_time_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
483fcf5ef2aSThomas Huth
/* Optional subsection: async page fault enable MSR. */
static const VMStateDescription vmstate_async_pf_msr = {
    .name = "cpu/async_pf_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = async_pf_msr_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
494fcf5ef2aSThomas Huth
/* Optional subsection: async page fault interrupt MSR. */
static const VMStateDescription vmstate_async_pf_int_msr = {
    .name = "cpu/async_pf_int_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = async_pf_int_msr_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.async_pf_int_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
505db5daafaSVitaly Kuznetsov
/*
 * Optional subsection: PV EOI enable MSR.  The stream name keeps the
 * historical "async_pv_eoi_msr" spelling for compatibility.
 */
static const VMStateDescription vmstate_pv_eoi_msr = {
    .name = "cpu/async_pv_eoi_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pv_eoi_msr_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
516fcf5ef2aSThomas Huth
/* Optional subsection: poll control MSR (sent when != default of 1). */
static const VMStateDescription vmstate_poll_control_msr = {
    .name = "cpu/poll_control_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = poll_control_msr_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.poll_control_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
527d645e132SMarcelo Tosatti
fpop_ip_dp_needed(void * opaque)528fcf5ef2aSThomas Huth static bool fpop_ip_dp_needed(void *opaque)
529fcf5ef2aSThomas Huth {
530fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
531fcf5ef2aSThomas Huth CPUX86State *env = &cpu->env;
532fcf5ef2aSThomas Huth
533fcf5ef2aSThomas Huth return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
534fcf5ef2aSThomas Huth }
535fcf5ef2aSThomas Huth
/* Optional subsection: last FP opcode and instruction/data pointers. */
static const VMStateDescription vmstate_fpop_ip_dp = {
    .name = "cpu/fpop_ip_dp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpop_ip_dp_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT16(env.fpop, X86CPU),
        VMSTATE_UINT64(env.fpip, X86CPU),
        VMSTATE_UINT64(env.fpdp, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
548fcf5ef2aSThomas Huth
tsc_adjust_needed(void * opaque)549fcf5ef2aSThomas Huth static bool tsc_adjust_needed(void *opaque)
550fcf5ef2aSThomas Huth {
551fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
552fcf5ef2aSThomas Huth CPUX86State *env = &cpu->env;
553fcf5ef2aSThomas Huth
554fcf5ef2aSThomas Huth return env->tsc_adjust != 0;
555fcf5ef2aSThomas Huth }
556fcf5ef2aSThomas Huth
/* Optional subsection: IA32_TSC_ADJUST MSR. */
static const VMStateDescription vmstate_msr_tsc_adjust = {
    .name = "cpu/msr_tsc_adjust",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_adjust_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.tsc_adjust, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
567fcf5ef2aSThomas Huth
msr_smi_count_needed(void * opaque)568e13713dbSLiran Alon static bool msr_smi_count_needed(void *opaque)
569e13713dbSLiran Alon {
570e13713dbSLiran Alon X86CPU *cpu = opaque;
571e13713dbSLiran Alon CPUX86State *env = &cpu->env;
572e13713dbSLiran Alon
573990e0be2SPaolo Bonzini return cpu->migrate_smi_count && env->msr_smi_count != 0;
574e13713dbSLiran Alon }
575e13713dbSLiran Alon
/* Optional subsection: SMI count MSR. */
static const VMStateDescription vmstate_msr_smi_count = {
    .name = "cpu/msr_smi_count",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = msr_smi_count_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.msr_smi_count, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
586e13713dbSLiran Alon
tscdeadline_needed(void * opaque)587fcf5ef2aSThomas Huth static bool tscdeadline_needed(void *opaque)
588fcf5ef2aSThomas Huth {
589fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
590fcf5ef2aSThomas Huth CPUX86State *env = &cpu->env;
591fcf5ef2aSThomas Huth
592fcf5ef2aSThomas Huth return env->tsc_deadline != 0;
593fcf5ef2aSThomas Huth }
594fcf5ef2aSThomas Huth
/* Optional subsection: TSC deadline MSR. */
static const VMStateDescription vmstate_msr_tscdeadline = {
    .name = "cpu/msr_tscdeadline",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tscdeadline_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.tsc_deadline, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
605fcf5ef2aSThomas Huth
misc_enable_needed(void * opaque)606fcf5ef2aSThomas Huth static bool misc_enable_needed(void *opaque)
607fcf5ef2aSThomas Huth {
608fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
609fcf5ef2aSThomas Huth CPUX86State *env = &cpu->env;
610fcf5ef2aSThomas Huth
611fcf5ef2aSThomas Huth return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
612fcf5ef2aSThomas Huth }
613fcf5ef2aSThomas Huth
feature_control_needed(void * opaque)614fcf5ef2aSThomas Huth static bool feature_control_needed(void *opaque)
615fcf5ef2aSThomas Huth {
616fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
617fcf5ef2aSThomas Huth CPUX86State *env = &cpu->env;
618fcf5ef2aSThomas Huth
619fcf5ef2aSThomas Huth return env->msr_ia32_feature_control != 0;
620fcf5ef2aSThomas Huth }
621fcf5ef2aSThomas Huth
/* Optional subsection for IA32_MISC_ENABLE; guarded by misc_enable_needed(). */
static const VMStateDescription vmstate_msr_ia32_misc_enable = {
    .name = "cpu/msr_ia32_misc_enable",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = misc_enable_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
632fcf5ef2aSThomas Huth
/* Optional subsection for IA32_FEATURE_CONTROL; guarded by feature_control_needed(). */
static const VMStateDescription vmstate_msr_ia32_feature_control = {
    .name = "cpu/msr_ia32_feature_control",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = feature_control_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
643fcf5ef2aSThomas Huth
pmu_enable_needed(void * opaque)644fcf5ef2aSThomas Huth static bool pmu_enable_needed(void *opaque)
645fcf5ef2aSThomas Huth {
646fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
647fcf5ef2aSThomas Huth CPUX86State *env = &cpu->env;
648fcf5ef2aSThomas Huth int i;
649fcf5ef2aSThomas Huth
650fcf5ef2aSThomas Huth if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
651fcf5ef2aSThomas Huth env->msr_global_status || env->msr_global_ovf_ctrl) {
652fcf5ef2aSThomas Huth return true;
653fcf5ef2aSThomas Huth }
654fcf5ef2aSThomas Huth for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
655fcf5ef2aSThomas Huth if (env->msr_fixed_counters[i]) {
656fcf5ef2aSThomas Huth return true;
657fcf5ef2aSThomas Huth }
658fcf5ef2aSThomas Huth }
659fcf5ef2aSThomas Huth for (i = 0; i < MAX_GP_COUNTERS; i++) {
660fcf5ef2aSThomas Huth if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
661fcf5ef2aSThomas Huth return true;
662fcf5ef2aSThomas Huth }
663fcf5ef2aSThomas Huth }
664fcf5ef2aSThomas Huth
665fcf5ef2aSThomas Huth return false;
666fcf5ef2aSThomas Huth }
667fcf5ef2aSThomas Huth
/* Optional subsection for the architectural PMU MSRs; guarded by pmu_enable_needed(). */
static const VMStateDescription vmstate_msr_architectural_pmu = {
    .name = "cpu/msr_architectural_pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_enable_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_status, X86CPU),
        VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_END_OF_LIST()
    }
};
684fcf5ef2aSThomas Huth
mpx_needed(void * opaque)685fcf5ef2aSThomas Huth static bool mpx_needed(void *opaque)
686fcf5ef2aSThomas Huth {
687fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
688fcf5ef2aSThomas Huth CPUX86State *env = &cpu->env;
689fcf5ef2aSThomas Huth unsigned int i;
690fcf5ef2aSThomas Huth
691fcf5ef2aSThomas Huth for (i = 0; i < 4; i++) {
692fcf5ef2aSThomas Huth if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
693fcf5ef2aSThomas Huth return true;
694fcf5ef2aSThomas Huth }
695fcf5ef2aSThomas Huth }
696fcf5ef2aSThomas Huth
697fcf5ef2aSThomas Huth if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
698fcf5ef2aSThomas Huth return true;
699fcf5ef2aSThomas Huth }
700fcf5ef2aSThomas Huth
701fcf5ef2aSThomas Huth return !!env->msr_bndcfgs;
702fcf5ef2aSThomas Huth }
703fcf5ef2aSThomas Huth
/* Optional subsection for MPX bound/config registers; guarded by mpx_needed(). */
static const VMStateDescription vmstate_mpx = {
    .name = "cpu/mpx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mpx_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
        VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
        VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
        VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
717fcf5ef2aSThomas Huth
hyperv_hypercall_enable_needed(void * opaque)718fcf5ef2aSThomas Huth static bool hyperv_hypercall_enable_needed(void *opaque)
719fcf5ef2aSThomas Huth {
720fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
721fcf5ef2aSThomas Huth CPUX86State *env = &cpu->env;
722fcf5ef2aSThomas Huth
723fcf5ef2aSThomas Huth return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
724fcf5ef2aSThomas Huth }
725fcf5ef2aSThomas Huth
/* Optional subsection for the Hyper-V hypercall MSR pair; guarded by hyperv_hypercall_enable_needed(). */
static const VMStateDescription vmstate_msr_hyperv_hypercall = {
    .name = "cpu/msr_hyperv_hypercall",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_hypercall_enable_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
        VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
737fcf5ef2aSThomas Huth
hyperv_vapic_enable_needed(void * opaque)738fcf5ef2aSThomas Huth static bool hyperv_vapic_enable_needed(void *opaque)
739fcf5ef2aSThomas Huth {
740fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
741fcf5ef2aSThomas Huth CPUX86State *env = &cpu->env;
742fcf5ef2aSThomas Huth
743fcf5ef2aSThomas Huth return env->msr_hv_vapic != 0;
744fcf5ef2aSThomas Huth }
745fcf5ef2aSThomas Huth
/* Optional subsection for the Hyper-V vAPIC page MSR; guarded by hyperv_vapic_enable_needed(). */
static const VMStateDescription vmstate_msr_hyperv_vapic = {
    .name = "cpu/msr_hyperv_vapic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_vapic_enable_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
756fcf5ef2aSThomas Huth
hyperv_time_enable_needed(void * opaque)757fcf5ef2aSThomas Huth static bool hyperv_time_enable_needed(void *opaque)
758fcf5ef2aSThomas Huth {
759fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
760fcf5ef2aSThomas Huth CPUX86State *env = &cpu->env;
761fcf5ef2aSThomas Huth
762fcf5ef2aSThomas Huth return env->msr_hv_tsc != 0;
763fcf5ef2aSThomas Huth }
764fcf5ef2aSThomas Huth
/* Optional subsection for the Hyper-V reference TSC MSR; guarded by hyperv_time_enable_needed(). */
static const VMStateDescription vmstate_msr_hyperv_time = {
    .name = "cpu/msr_hyperv_time",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_time_enable_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
775fcf5ef2aSThomas Huth
hyperv_crash_enable_needed(void * opaque)776fcf5ef2aSThomas Huth static bool hyperv_crash_enable_needed(void *opaque)
777fcf5ef2aSThomas Huth {
778fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
779fcf5ef2aSThomas Huth CPUX86State *env = &cpu->env;
780fcf5ef2aSThomas Huth int i;
781fcf5ef2aSThomas Huth
7825e953812SRoman Kagan for (i = 0; i < HV_CRASH_PARAMS; i++) {
783fcf5ef2aSThomas Huth if (env->msr_hv_crash_params[i]) {
784fcf5ef2aSThomas Huth return true;
785fcf5ef2aSThomas Huth }
786fcf5ef2aSThomas Huth }
787fcf5ef2aSThomas Huth return false;
788fcf5ef2aSThomas Huth }
789fcf5ef2aSThomas Huth
/* Optional subsection for the Hyper-V crash parameter MSRs; guarded by hyperv_crash_enable_needed(). */
static const VMStateDescription vmstate_msr_hyperv_crash = {
    .name = "cpu/msr_hyperv_crash",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_crash_enable_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params, X86CPU, HV_CRASH_PARAMS),
        VMSTATE_END_OF_LIST()
    }
};
800fcf5ef2aSThomas Huth
hyperv_runtime_enable_needed(void * opaque)801fcf5ef2aSThomas Huth static bool hyperv_runtime_enable_needed(void *opaque)
802fcf5ef2aSThomas Huth {
803fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
804fcf5ef2aSThomas Huth CPUX86State *env = &cpu->env;
805fcf5ef2aSThomas Huth
8062d384d7cSVitaly Kuznetsov if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_RUNTIME)) {
807fcf5ef2aSThomas Huth return false;
808fcf5ef2aSThomas Huth }
809fcf5ef2aSThomas Huth
810fcf5ef2aSThomas Huth return env->msr_hv_runtime != 0;
811fcf5ef2aSThomas Huth }
812fcf5ef2aSThomas Huth
/* Optional subsection for the Hyper-V VP runtime MSR; guarded by hyperv_runtime_enable_needed(). */
static const VMStateDescription vmstate_msr_hyperv_runtime = {
    .name = "cpu/msr_hyperv_runtime",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_runtime_enable_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
823fcf5ef2aSThomas Huth
hyperv_synic_enable_needed(void * opaque)824fcf5ef2aSThomas Huth static bool hyperv_synic_enable_needed(void *opaque)
825fcf5ef2aSThomas Huth {
826fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
827fcf5ef2aSThomas Huth CPUX86State *env = &cpu->env;
828fcf5ef2aSThomas Huth int i;
829fcf5ef2aSThomas Huth
830fcf5ef2aSThomas Huth if (env->msr_hv_synic_control != 0 ||
831fcf5ef2aSThomas Huth env->msr_hv_synic_evt_page != 0 ||
832fcf5ef2aSThomas Huth env->msr_hv_synic_msg_page != 0) {
833fcf5ef2aSThomas Huth return true;
834fcf5ef2aSThomas Huth }
835fcf5ef2aSThomas Huth
836fcf5ef2aSThomas Huth for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
837fcf5ef2aSThomas Huth if (env->msr_hv_synic_sint[i] != 0) {
838fcf5ef2aSThomas Huth return true;
839fcf5ef2aSThomas Huth }
840fcf5ef2aSThomas Huth }
841fcf5ef2aSThomas Huth
842fcf5ef2aSThomas Huth return false;
843fcf5ef2aSThomas Huth }
844fcf5ef2aSThomas Huth
/* After the SynIC MSRs land, re-derive the SynIC state on the destination. */
static int hyperv_synic_post_load(void *opaque, int version_id)
{
    hyperv_x86_synic_update(opaque);
    return 0;
}
851606c34bfSRoman Kagan
/*
 * Optional subsection for the SynIC MSRs; guarded by
 * hyperv_synic_enable_needed(), with a post_load hook that refreshes
 * the derived SynIC state.
 */
static const VMStateDescription vmstate_msr_hyperv_synic = {
    .name = "cpu/msr_hyperv_synic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_synic_enable_needed,
    .post_load = hyperv_synic_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU, HV_SINT_COUNT),
        VMSTATE_END_OF_LIST()
    }
};
866fcf5ef2aSThomas Huth
hyperv_stimer_enable_needed(void * opaque)867fcf5ef2aSThomas Huth static bool hyperv_stimer_enable_needed(void *opaque)
868fcf5ef2aSThomas Huth {
869fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
870fcf5ef2aSThomas Huth CPUX86State *env = &cpu->env;
871fcf5ef2aSThomas Huth int i;
872fcf5ef2aSThomas Huth
873fcf5ef2aSThomas Huth for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
874fcf5ef2aSThomas Huth if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
875fcf5ef2aSThomas Huth return true;
876fcf5ef2aSThomas Huth }
877fcf5ef2aSThomas Huth }
878fcf5ef2aSThomas Huth return false;
879fcf5ef2aSThomas Huth }
880fcf5ef2aSThomas Huth
/* Optional subsection for the synthetic timer MSRs; guarded by hyperv_stimer_enable_needed(). */
static const VMStateDescription vmstate_msr_hyperv_stimer = {
    .name = "cpu/msr_hyperv_stimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_stimer_enable_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config, X86CPU,
                             HV_STIMER_COUNT),
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count, X86CPU, HV_STIMER_COUNT),
        VMSTATE_END_OF_LIST()
    }
};
893fcf5ef2aSThomas Huth
hyperv_reenlightenment_enable_needed(void * opaque)894ba6a4fd9SVitaly Kuznetsov static bool hyperv_reenlightenment_enable_needed(void *opaque)
895ba6a4fd9SVitaly Kuznetsov {
896ba6a4fd9SVitaly Kuznetsov X86CPU *cpu = opaque;
897ba6a4fd9SVitaly Kuznetsov CPUX86State *env = &cpu->env;
898ba6a4fd9SVitaly Kuznetsov
899ba6a4fd9SVitaly Kuznetsov return env->msr_hv_reenlightenment_control != 0 ||
900ba6a4fd9SVitaly Kuznetsov env->msr_hv_tsc_emulation_control != 0 ||
901ba6a4fd9SVitaly Kuznetsov env->msr_hv_tsc_emulation_status != 0;
902ba6a4fd9SVitaly Kuznetsov }
903ba6a4fd9SVitaly Kuznetsov
/*
 * Post-load hook for the reenlightenment subsection: fail the migration
 * when the guest armed reenlightenment notifications but no fixed TSC
 * frequency was configured on the destination.
 */
static int hyperv_reenlightenment_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    /*
     * KVM doesn't fully support re-enlightenment notifications so we need to
     * make sure TSC frequency doesn't change upon migration.
     */
    if ((env->msr_hv_reenlightenment_control & HV_REENLIGHTENMENT_ENABLE_BIT) &&
        !env->user_tsc_khz) {
        error_report("Guest enabled re-enlightenment notifications, "
                     "'tsc-frequency=' has to be specified");
        return -EINVAL;
    }

    return 0;
}
922561dbb41SVitaly Kuznetsov
/*
 * Optional subsection for the reenlightenment MSRs; guarded by
 * hyperv_reenlightenment_enable_needed(), validated on load by
 * hyperv_reenlightenment_post_load().
 */
static const VMStateDescription vmstate_msr_hyperv_reenlightenment = {
    .name = "cpu/msr_hyperv_reenlightenment",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_reenlightenment_enable_needed,
    .post_load = hyperv_reenlightenment_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_reenlightenment_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_tsc_emulation_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_tsc_emulation_status, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
936ba6a4fd9SVitaly Kuznetsov
avx512_needed(void * opaque)937fcf5ef2aSThomas Huth static bool avx512_needed(void *opaque)
938fcf5ef2aSThomas Huth {
939fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
940fcf5ef2aSThomas Huth CPUX86State *env = &cpu->env;
941fcf5ef2aSThomas Huth unsigned int i;
942fcf5ef2aSThomas Huth
943fcf5ef2aSThomas Huth for (i = 0; i < NB_OPMASK_REGS; i++) {
944fcf5ef2aSThomas Huth if (env->opmask_regs[i]) {
945fcf5ef2aSThomas Huth return true;
946fcf5ef2aSThomas Huth }
947fcf5ef2aSThomas Huth }
948fcf5ef2aSThomas Huth
949fcf5ef2aSThomas Huth for (i = 0; i < CPU_NB_REGS; i++) {
950fcf5ef2aSThomas Huth #define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
951fcf5ef2aSThomas Huth if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
952fcf5ef2aSThomas Huth ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
953fcf5ef2aSThomas Huth return true;
954fcf5ef2aSThomas Huth }
955fcf5ef2aSThomas Huth #ifdef TARGET_X86_64
956fcf5ef2aSThomas Huth if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
957fcf5ef2aSThomas Huth ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
958fcf5ef2aSThomas Huth ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
959fcf5ef2aSThomas Huth ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
960fcf5ef2aSThomas Huth return true;
961fcf5ef2aSThomas Huth }
962fcf5ef2aSThomas Huth #endif
963fcf5ef2aSThomas Huth }
964fcf5ef2aSThomas Huth
965fcf5ef2aSThomas Huth return false;
966fcf5ef2aSThomas Huth }
967fcf5ef2aSThomas Huth
/* Optional subsection for AVX-512 opmask and ZMM state; guarded by avx512_needed(). */
static const VMStateDescription vmstate_avx512 = {
    .name = "cpu/avx512",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = avx512_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
        VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
#ifdef TARGET_X86_64
        VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
#endif
        VMSTATE_END_OF_LIST()
    }
};
982fcf5ef2aSThomas Huth
xss_needed(void * opaque)983fcf5ef2aSThomas Huth static bool xss_needed(void *opaque)
984fcf5ef2aSThomas Huth {
985fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
986fcf5ef2aSThomas Huth CPUX86State *env = &cpu->env;
987fcf5ef2aSThomas Huth
988fcf5ef2aSThomas Huth return env->xss != 0;
989fcf5ef2aSThomas Huth }
990fcf5ef2aSThomas Huth
/* Optional subsection for the IA32_XSS MSR; guarded by xss_needed(). */
static const VMStateDescription vmstate_xss = {
    .name = "cpu/xss",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xss_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.xss, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1001fcf5ef2aSThomas Huth
umwait_needed(void * opaque)100265087997STao Xu static bool umwait_needed(void *opaque)
100365087997STao Xu {
100465087997STao Xu X86CPU *cpu = opaque;
100565087997STao Xu CPUX86State *env = &cpu->env;
100665087997STao Xu
100765087997STao Xu return env->umwait != 0;
100865087997STao Xu }
100965087997STao Xu
/* Optional subsection for IA32_UMWAIT_CONTROL; guarded by umwait_needed(). */
static const VMStateDescription vmstate_umwait = {
    .name = "cpu/umwait",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = umwait_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.umwait, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
102065087997STao Xu
pkru_needed(void * opaque)1021fcf5ef2aSThomas Huth static bool pkru_needed(void *opaque)
1022fcf5ef2aSThomas Huth {
1023fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
1024fcf5ef2aSThomas Huth CPUX86State *env = &cpu->env;
1025fcf5ef2aSThomas Huth
1026fcf5ef2aSThomas Huth return env->pkru != 0;
1027fcf5ef2aSThomas Huth }
1028fcf5ef2aSThomas Huth
/* Optional subsection for the PKRU register; guarded by pkru_needed(). */
static const VMStateDescription vmstate_pkru = {
    .name = "cpu/pkru",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkru_needed,
    .fields = (const VMStateField[]){
        VMSTATE_UINT32(env.pkru, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1039e7e7bdabSPaolo Bonzini
pkrs_needed(void * opaque)1040e7e7bdabSPaolo Bonzini static bool pkrs_needed(void *opaque)
1041e7e7bdabSPaolo Bonzini {
1042e7e7bdabSPaolo Bonzini X86CPU *cpu = opaque;
1043e7e7bdabSPaolo Bonzini CPUX86State *env = &cpu->env;
1044e7e7bdabSPaolo Bonzini
1045e7e7bdabSPaolo Bonzini return env->pkrs != 0;
1046e7e7bdabSPaolo Bonzini }
1047e7e7bdabSPaolo Bonzini
/* Optional subsection for the IA32_PKRS MSR; guarded by pkrs_needed(). */
static const VMStateDescription vmstate_pkrs = {
    .name = "cpu/pkrs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkrs_needed,
    .fields = (const VMStateField[]){
        VMSTATE_UINT32(env.pkrs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1058fcf5ef2aSThomas Huth
tsc_khz_needed(void * opaque)1059fcf5ef2aSThomas Huth static bool tsc_khz_needed(void *opaque)
1060fcf5ef2aSThomas Huth {
1061fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
1062fcf5ef2aSThomas Huth CPUX86State *env = &cpu->env;
1063fcf5ef2aSThomas Huth MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
10642f34ebf2SLiam Merwick X86MachineClass *x86mc = X86_MACHINE_CLASS(mc);
10652f34ebf2SLiam Merwick return env->tsc_khz && x86mc->save_tsc_khz;
1066fcf5ef2aSThomas Huth }
1067fcf5ef2aSThomas Huth
/* Optional subsection for the guest TSC frequency; guarded by tsc_khz_needed(). */
static const VMStateDescription vmstate_tsc_khz = {
    .name = "cpu/tsc_khz",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_khz_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_INT64(env.tsc_khz, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1078fcf5ef2aSThomas Huth
1079ebbfef2fSLiran Alon #ifdef CONFIG_KVM
1080ebbfef2fSLiran Alon
vmx_vmcs12_needed(void * opaque)1081ebbfef2fSLiran Alon static bool vmx_vmcs12_needed(void *opaque)
1082ebbfef2fSLiran Alon {
1083ebbfef2fSLiran Alon struct kvm_nested_state *nested_state = opaque;
1084ebbfef2fSLiran Alon return (nested_state->size >
1085ebbfef2fSLiran Alon offsetof(struct kvm_nested_state, data.vmx[0].vmcs12));
1086ebbfef2fSLiran Alon }
1087ebbfef2fSLiran Alon
/* Optional sub-subsection carrying the raw vmcs12 blob; guarded by vmx_vmcs12_needed(). */
static const VMStateDescription vmstate_vmx_vmcs12 = {
    .name = "cpu/kvm_nested_state/vmx/vmcs12",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_vmcs12_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_ARRAY(data.vmx[0].vmcs12,
                            struct kvm_nested_state,
                            KVM_STATE_NESTED_VMX_VMCS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};
1100ebbfef2fSLiran Alon
vmx_shadow_vmcs12_needed(void * opaque)1101ebbfef2fSLiran Alon static bool vmx_shadow_vmcs12_needed(void *opaque)
1102ebbfef2fSLiran Alon {
1103ebbfef2fSLiran Alon struct kvm_nested_state *nested_state = opaque;
1104ebbfef2fSLiran Alon return (nested_state->size >
1105ebbfef2fSLiran Alon offsetof(struct kvm_nested_state, data.vmx[0].shadow_vmcs12));
1106ebbfef2fSLiran Alon }
1107ebbfef2fSLiran Alon
/* Optional sub-subsection carrying the shadow vmcs12 blob; guarded by vmx_shadow_vmcs12_needed(). */
static const VMStateDescription vmstate_vmx_shadow_vmcs12 = {
    .name = "cpu/kvm_nested_state/vmx/shadow_vmcs12",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_shadow_vmcs12_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_ARRAY(data.vmx[0].shadow_vmcs12,
                            struct kvm_nested_state,
                            KVM_STATE_NESTED_VMX_VMCS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};
1120ebbfef2fSLiran Alon
vmx_nested_state_needed(void * opaque)1121ebbfef2fSLiran Alon static bool vmx_nested_state_needed(void *opaque)
1122ebbfef2fSLiran Alon {
1123ebbfef2fSLiran Alon struct kvm_nested_state *nested_state = opaque;
1124ebbfef2fSLiran Alon
1125ec7b1bbdSLiran Alon return (nested_state->format == KVM_STATE_NESTED_FORMAT_VMX &&
1126ec7b1bbdSLiran Alon nested_state->hdr.vmx.vmxon_pa != -1ull);
1127ebbfef2fSLiran Alon }
1128ebbfef2fSLiran Alon
/*
 * VMX flavour of the nested-state subsection: the header fields plus
 * optional vmcs12/shadow-vmcs12 blobs; guarded by vmx_nested_state_needed().
 */
static const VMStateDescription vmstate_vmx_nested_state = {
    .name = "cpu/kvm_nested_state/vmx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_nested_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_U64(hdr.vmx.vmxon_pa, struct kvm_nested_state),
        VMSTATE_U64(hdr.vmx.vmcs12_pa, struct kvm_nested_state),
        VMSTATE_U16(hdr.vmx.smm.flags, struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_vmx_vmcs12,
        &vmstate_vmx_shadow_vmcs12,
        NULL,
    }
};
1146ebbfef2fSLiran Alon
svm_nested_state_needed(void * opaque)1147b16c0e20SPaolo Bonzini static bool svm_nested_state_needed(void *opaque)
1148b16c0e20SPaolo Bonzini {
1149b16c0e20SPaolo Bonzini struct kvm_nested_state *nested_state = opaque;
1150b16c0e20SPaolo Bonzini
1151b16c0e20SPaolo Bonzini /*
1152b16c0e20SPaolo Bonzini * HF_GUEST_MASK and HF2_GIF_MASK are already serialized
1153b16c0e20SPaolo Bonzini * via hflags and hflags2, all that's left is the opaque
1154b16c0e20SPaolo Bonzini * nested state blob.
1155b16c0e20SPaolo Bonzini */
1156b16c0e20SPaolo Bonzini return (nested_state->format == KVM_STATE_NESTED_FORMAT_SVM &&
1157b16c0e20SPaolo Bonzini nested_state->size > offsetof(struct kvm_nested_state, data));
1158b16c0e20SPaolo Bonzini }
1159b16c0e20SPaolo Bonzini
/*
 * SVM flavour of the nested-state subsection: the VMCB address plus the
 * raw vmcb12 blob; guarded by svm_nested_state_needed().
 */
static const VMStateDescription vmstate_svm_nested_state = {
    .name = "cpu/kvm_nested_state/svm",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = svm_nested_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_U64(hdr.svm.vmcb_pa, struct kvm_nested_state),
        VMSTATE_UINT8_ARRAY(data.svm[0].vmcb12,
                            struct kvm_nested_state,
                            KVM_STATE_NESTED_SVM_VMCB_SIZE),
        VMSTATE_END_OF_LIST()
    }
};
1173b16c0e20SPaolo Bonzini
nested_state_needed(void * opaque)1174ebbfef2fSLiran Alon static bool nested_state_needed(void *opaque)
1175ebbfef2fSLiran Alon {
1176ebbfef2fSLiran Alon X86CPU *cpu = opaque;
1177ebbfef2fSLiran Alon CPUX86State *env = &cpu->env;
1178ebbfef2fSLiran Alon
1179ebbfef2fSLiran Alon return (env->nested_state &&
1180b16c0e20SPaolo Bonzini (vmx_nested_state_needed(env->nested_state) ||
1181b16c0e20SPaolo Bonzini svm_nested_state_needed(env->nested_state)));
1182ebbfef2fSLiran Alon }
1183ebbfef2fSLiran Alon
/*
 * Validate incoming nested state before handing it to KVM: the local
 * kernel must support restoring it, the declared size must cover the
 * header without exceeding the kernel's maximum, and the format must be
 * one of the known VMX/SVM formats.
 */
static int nested_state_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    struct kvm_nested_state *nested_state = env->nested_state;
    int min_nested_state_len = offsetof(struct kvm_nested_state, data);
    int max_nested_state_len = kvm_max_nested_state_length();

    /*
     * If our kernel doesn't support setting nested state
     * and we have received nested state from the migration stream,
     * we need to fail migration.
     */
    if (max_nested_state_len <= 0) {
        error_report("Received nested state when kernel cannot restore it");
        return -EINVAL;
    }

    /*
     * Verify that the size of the received nested_state struct
     * at least covers the required header and is not larger
     * than the max size that our kernel supports.
     */
    if (nested_state->size < min_nested_state_len) {
        error_report("Received nested state size less than min: "
                     "len=%d, min=%d",
                     nested_state->size, min_nested_state_len);
        return -EINVAL;
    }
    if (nested_state->size > max_nested_state_len) {
        error_report("Received unsupported nested state size: "
                     "nested_state->size=%d, max=%d",
                     nested_state->size, max_nested_state_len);
        return -EINVAL;
    }

    /* Verify format is valid */
    if ((nested_state->format != KVM_STATE_NESTED_FORMAT_VMX) &&
        (nested_state->format != KVM_STATE_NESTED_FORMAT_SVM)) {
        error_report("Received invalid nested state format: %d",
                     nested_state->format);
        return -EINVAL;
    }

    return 0;
}
1230ebbfef2fSLiran Alon
/*
 * Serializes the common kvm_nested_state header (flags/format/size),
 * with the VMX and SVM payloads as optional subsections.
 */
static const VMStateDescription vmstate_kvm_nested_state = {
    .name = "cpu/kvm_nested_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_U16(flags, struct kvm_nested_state),
        VMSTATE_U16(format, struct kvm_nested_state),
        VMSTATE_U32(size, struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_vmx_nested_state,
        &vmstate_svm_nested_state,
        NULL
    }
};
1247ebbfef2fSLiran Alon
/*
 * Subsection carrying env.nested_state (KVM nested-virtualization state).
 * Sent only when nested_state_needed() says so; nested_state_post_load()
 * validates the received header before it is handed back to KVM.
 */
static const VMStateDescription vmstate_nested_state = {
    .name = "cpu/nested_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = nested_state_needed,
    .post_load = nested_state_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT_POINTER(env.nested_state, X86CPU,
                               vmstate_kvm_nested_state,
                               struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    }
};
1261ebbfef2fSLiran Alon
xen_vcpu_needed(void * opaque)1262c345104cSJoao Martins static bool xen_vcpu_needed(void *opaque)
1263c345104cSJoao Martins {
1264c345104cSJoao Martins return (xen_mode == XEN_EMULATE);
1265c345104cSJoao Martins }
1266c345104cSJoao Martins
/* Per-vCPU Xen emulation state: guest-physical addresses of the shared
 * info structures, event-channel/timer configuration, and virq mappings. */
static const VMStateDescription vmstate_xen_vcpu = {
    .name = "cpu/xen_vcpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xen_vcpu_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.xen_vcpu_info_gpa, X86CPU),
        VMSTATE_UINT64(env.xen_vcpu_info_default_gpa, X86CPU),
        VMSTATE_UINT64(env.xen_vcpu_time_info_gpa, X86CPU),
        VMSTATE_UINT64(env.xen_vcpu_runstate_gpa, X86CPU),
        VMSTATE_UINT8(env.xen_vcpu_callback_vector, X86CPU),
        VMSTATE_UINT16_ARRAY(env.xen_virq, X86CPU, XEN_NR_VIRQS),
        VMSTATE_UINT64(env.xen_singleshot_timer_ns, X86CPU),
        VMSTATE_UINT64(env.xen_periodic_timer_period, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1284ebbfef2fSLiran Alon #endif
1285ebbfef2fSLiran Alon
mcg_ext_ctl_needed(void * opaque)1286fcf5ef2aSThomas Huth static bool mcg_ext_ctl_needed(void *opaque)
1287fcf5ef2aSThomas Huth {
1288fcf5ef2aSThomas Huth X86CPU *cpu = opaque;
1289fcf5ef2aSThomas Huth CPUX86State *env = &cpu->env;
1290fcf5ef2aSThomas Huth return cpu->enable_lmce && env->mcg_ext_ctl;
1291fcf5ef2aSThomas Huth }
1292fcf5ef2aSThomas Huth
/* Machine-check MCG_EXT_CTL MSR (LMCE), sent only when in use. */
static const VMStateDescription vmstate_mcg_ext_ctl = {
    .name = "cpu/mcg_ext_ctl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mcg_ext_ctl_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1303fcf5ef2aSThomas Huth
spec_ctrl_needed(void * opaque)1304a33a2cfeSPaolo Bonzini static bool spec_ctrl_needed(void *opaque)
1305a33a2cfeSPaolo Bonzini {
1306a33a2cfeSPaolo Bonzini X86CPU *cpu = opaque;
1307a33a2cfeSPaolo Bonzini CPUX86State *env = &cpu->env;
1308a33a2cfeSPaolo Bonzini
1309a33a2cfeSPaolo Bonzini return env->spec_ctrl != 0;
1310a33a2cfeSPaolo Bonzini }
1311a33a2cfeSPaolo Bonzini
/* IA32_SPEC_CTRL MSR (speculation control), sent only when nonzero. */
static const VMStateDescription vmstate_spec_ctrl = {
    .name = "cpu/spec_ctrl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spec_ctrl_needed,
    .fields = (const VMStateField[]){
        VMSTATE_UINT64(env.spec_ctrl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1322a33a2cfeSPaolo Bonzini
1323cabf9862SMaxim Levitsky
amd_tsc_scale_msr_needed(void * opaque)1324cabf9862SMaxim Levitsky static bool amd_tsc_scale_msr_needed(void *opaque)
1325cabf9862SMaxim Levitsky {
1326cabf9862SMaxim Levitsky X86CPU *cpu = opaque;
1327cabf9862SMaxim Levitsky CPUX86State *env = &cpu->env;
1328cabf9862SMaxim Levitsky
1329cabf9862SMaxim Levitsky return (env->features[FEAT_SVM] & CPUID_SVM_TSCSCALE);
1330cabf9862SMaxim Levitsky }
1331cabf9862SMaxim Levitsky
/* AMD TSC ratio MSR, guarded by the CPUID_SVM_TSCSCALE feature bit. */
static const VMStateDescription amd_tsc_scale_msr_ctrl = {
    .name = "cpu/amd_tsc_scale_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = amd_tsc_scale_msr_needed,
    .fields = (const VMStateField[]){
        VMSTATE_UINT64(env.amd_tsc_scale_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1342cabf9862SMaxim Levitsky
1343cabf9862SMaxim Levitsky
intel_pt_enable_needed(void * opaque)1344b77146e9SChao Peng static bool intel_pt_enable_needed(void *opaque)
1345b77146e9SChao Peng {
1346b77146e9SChao Peng X86CPU *cpu = opaque;
1347b77146e9SChao Peng CPUX86State *env = &cpu->env;
1348b77146e9SChao Peng int i;
1349b77146e9SChao Peng
1350b77146e9SChao Peng if (env->msr_rtit_ctrl || env->msr_rtit_status ||
1351b77146e9SChao Peng env->msr_rtit_output_base || env->msr_rtit_output_mask ||
1352b77146e9SChao Peng env->msr_rtit_cr3_match) {
1353b77146e9SChao Peng return true;
1354b77146e9SChao Peng }
1355b77146e9SChao Peng
1356b77146e9SChao Peng for (i = 0; i < MAX_RTIT_ADDRS; i++) {
1357b77146e9SChao Peng if (env->msr_rtit_addrs[i]) {
1358b77146e9SChao Peng return true;
1359b77146e9SChao Peng }
1360b77146e9SChao Peng }
1361b77146e9SChao Peng
1362b77146e9SChao Peng return false;
1363b77146e9SChao Peng }
1364b77146e9SChao Peng
/* Intel Processor Trace (RTIT) MSRs, sent only when any of them is live. */
static const VMStateDescription vmstate_msr_intel_pt = {
    .name = "cpu/intel_pt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_pt_enable_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.msr_rtit_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_status, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_base, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_mask, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_cr3_match, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_rtit_addrs, X86CPU, MAX_RTIT_ADDRS),
        VMSTATE_END_OF_LIST()
    }
};
1380b77146e9SChao Peng
virt_ssbd_needed(void * opaque)1381cfeea0c0SKonrad Rzeszutek Wilk static bool virt_ssbd_needed(void *opaque)
1382cfeea0c0SKonrad Rzeszutek Wilk {
1383cfeea0c0SKonrad Rzeszutek Wilk X86CPU *cpu = opaque;
1384cfeea0c0SKonrad Rzeszutek Wilk CPUX86State *env = &cpu->env;
1385cfeea0c0SKonrad Rzeszutek Wilk
1386cfeea0c0SKonrad Rzeszutek Wilk return env->virt_ssbd != 0;
1387cfeea0c0SKonrad Rzeszutek Wilk }
1388cfeea0c0SKonrad Rzeszutek Wilk
/* Paravirtual VIRT_SSBD MSR (speculative store bypass disable). */
static const VMStateDescription vmstate_msr_virt_ssbd = {
    .name = "cpu/virt_ssbd",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = virt_ssbd_needed,
    .fields = (const VMStateField[]){
        VMSTATE_UINT64(env.virt_ssbd, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1399cfeea0c0SKonrad Rzeszutek Wilk
svm_npt_needed(void * opaque)1400fe441054SJan Kiszka static bool svm_npt_needed(void *opaque)
1401fe441054SJan Kiszka {
1402fe441054SJan Kiszka X86CPU *cpu = opaque;
1403fe441054SJan Kiszka CPUX86State *env = &cpu->env;
1404fe441054SJan Kiszka
1405fe441054SJan Kiszka return !!(env->hflags2 & HF2_NPT_MASK);
1406fe441054SJan Kiszka }
1407fe441054SJan Kiszka
/*
 * SVM nested-paging (NPT) state.  NOTE: the section name "svn_npt" is a
 * historical typo for "svm_npt"; it is part of the migration wire format
 * and must never be corrected, or migration from older QEMU would break.
 */
static const VMStateDescription vmstate_svm_npt = {
    .name = "cpu/svn_npt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = svm_npt_needed,
    .fields = (const VMStateField[]){
        VMSTATE_UINT64(env.nested_cr3, X86CPU),
        VMSTATE_UINT32(env.nested_pg_mode, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1419fe441054SJan Kiszka
svm_guest_needed(void * opaque)1420e3126a5cSLara Lazier static bool svm_guest_needed(void *opaque)
1421e3126a5cSLara Lazier {
1422e3126a5cSLara Lazier X86CPU *cpu = opaque;
1423e3126a5cSLara Lazier CPUX86State *env = &cpu->env;
1424e3126a5cSLara Lazier
1425e3126a5cSLara Lazier return tcg_enabled() && env->int_ctl;
1426e3126a5cSLara Lazier }
1427e3126a5cSLara Lazier
/* TCG SVM guest state: the VMCB V_INTR control word. */
static const VMStateDescription vmstate_svm_guest = {
    .name = "cpu/svm_guest",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = svm_guest_needed,
    .fields = (const VMStateField[]){
        VMSTATE_UINT32(env.int_ctl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1438e3126a5cSLara Lazier
#ifndef TARGET_X86_64
/*
 * On 32-bit builds EFER is absent from the main "cpu" section (it is only
 * included there under TARGET_X86_64), so carry it in a subsection when it
 * holds a nonzero value.
 */
static bool intel_efer32_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->efer != 0;
}

static const VMStateDescription vmstate_efer32 = {
    .name = "cpu/efer32",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_efer32_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif
145989a44a10SPavel Dovgalyuk
msr_tsx_ctrl_needed(void * opaque)14602a9758c5SPaolo Bonzini static bool msr_tsx_ctrl_needed(void *opaque)
14612a9758c5SPaolo Bonzini {
14622a9758c5SPaolo Bonzini X86CPU *cpu = opaque;
14632a9758c5SPaolo Bonzini CPUX86State *env = &cpu->env;
14642a9758c5SPaolo Bonzini
14652a9758c5SPaolo Bonzini return env->features[FEAT_ARCH_CAPABILITIES] & ARCH_CAP_TSX_CTRL_MSR;
14662a9758c5SPaolo Bonzini }
14672a9758c5SPaolo Bonzini
/* IA32_TSX_CTRL MSR, guarded by the ARCH_CAP_TSX_CTRL_MSR capability bit. */
static const VMStateDescription vmstate_msr_tsx_ctrl = {
    .name = "cpu/msr_tsx_ctrl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = msr_tsx_ctrl_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.tsx_ctrl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
14782a9758c5SPaolo Bonzini
intel_sgx_msrs_needed(void * opaque)1479db888065SSean Christopherson static bool intel_sgx_msrs_needed(void *opaque)
1480db888065SSean Christopherson {
1481db888065SSean Christopherson X86CPU *cpu = opaque;
1482db888065SSean Christopherson CPUX86State *env = &cpu->env;
1483db888065SSean Christopherson
1484db888065SSean Christopherson return !!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC);
1485db888065SSean Christopherson }
1486db888065SSean Christopherson
/* SGX launch-enclave public-key-hash MSRs (4 x 64 bit). */
static const VMStateDescription vmstate_msr_intel_sgx = {
    .name = "cpu/intel_sgx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_sgx_msrs_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_ia32_sgxlepubkeyhash, X86CPU, 4),
        VMSTATE_END_OF_LIST()
    }
};
1497db888065SSean Christopherson
pdptrs_needed(void * opaque)14988f515d38SMaxim Levitsky static bool pdptrs_needed(void *opaque)
14998f515d38SMaxim Levitsky {
15008f515d38SMaxim Levitsky X86CPU *cpu = opaque;
15018f515d38SMaxim Levitsky CPUX86State *env = &cpu->env;
15028f515d38SMaxim Levitsky return env->pdptrs_valid;
15038f515d38SMaxim Levitsky }
15048f515d38SMaxim Levitsky
pdptrs_post_load(void * opaque,int version_id)15058f515d38SMaxim Levitsky static int pdptrs_post_load(void *opaque, int version_id)
15068f515d38SMaxim Levitsky {
15078f515d38SMaxim Levitsky X86CPU *cpu = opaque;
15088f515d38SMaxim Levitsky CPUX86State *env = &cpu->env;
15098f515d38SMaxim Levitsky env->pdptrs_valid = true;
15108f515d38SMaxim Levitsky return 0;
15118f515d38SMaxim Levitsky }
15128f515d38SMaxim Levitsky
15138f515d38SMaxim Levitsky
/* Cached page-directory-pointer-table entries (PAE), 4 x 64 bit. */
static const VMStateDescription vmstate_pdptrs = {
    .name = "cpu/pdptrs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pdptrs_needed,
    .post_load = pdptrs_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.pdptrs, X86CPU, 4),
        VMSTATE_END_OF_LIST()
    }
};
15258f515d38SMaxim Levitsky
xfd_msrs_needed(void * opaque)1526cdec2b75SZeng Guang static bool xfd_msrs_needed(void *opaque)
1527cdec2b75SZeng Guang {
1528cdec2b75SZeng Guang X86CPU *cpu = opaque;
1529cdec2b75SZeng Guang CPUX86State *env = &cpu->env;
1530cdec2b75SZeng Guang
1531cdec2b75SZeng Guang return !!(env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD);
1532cdec2b75SZeng Guang }
1533cdec2b75SZeng Guang
/* IA32_XFD and IA32_XFD_ERR MSRs (extended feature disable). */
static const VMStateDescription vmstate_msr_xfd = {
    .name = "cpu/msr_xfd",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xfd_msrs_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.msr_xfd, X86CPU),
        VMSTATE_UINT64(env.msr_xfd_err, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1545cdec2b75SZeng Guang
msr_hwcr_needed(void * opaque)1546*b5151aceSGao Shiyuan static bool msr_hwcr_needed(void *opaque)
1547*b5151aceSGao Shiyuan {
1548*b5151aceSGao Shiyuan X86CPU *cpu = opaque;
1549*b5151aceSGao Shiyuan CPUX86State *env = &cpu->env;
1550*b5151aceSGao Shiyuan
1551*b5151aceSGao Shiyuan return env->msr_hwcr != 0;
1552*b5151aceSGao Shiyuan }
1553*b5151aceSGao Shiyuan
1554*b5151aceSGao Shiyuan static const VMStateDescription vmstate_msr_hwcr = {
1555*b5151aceSGao Shiyuan .name = "cpu/msr_hwcr",
1556*b5151aceSGao Shiyuan .version_id = 1,
1557*b5151aceSGao Shiyuan .minimum_version_id = 1,
1558*b5151aceSGao Shiyuan .needed = msr_hwcr_needed,
1559*b5151aceSGao Shiyuan .fields = (VMStateField[]) {
1560*b5151aceSGao Shiyuan VMSTATE_UINT64(env.msr_hwcr, X86CPU),
1561*b5151aceSGao Shiyuan VMSTATE_END_OF_LIST()
1562*b5151aceSGao Shiyuan }
1563*b5151aceSGao Shiyuan };
1564*b5151aceSGao Shiyuan
1565cdec2b75SZeng Guang #ifdef TARGET_X86_64
intel_fred_msrs_needed(void * opaque)15664ebd98ebSXin Li static bool intel_fred_msrs_needed(void *opaque)
15674ebd98ebSXin Li {
15684ebd98ebSXin Li X86CPU *cpu = opaque;
15694ebd98ebSXin Li CPUX86State *env = &cpu->env;
15704ebd98ebSXin Li
15714ebd98ebSXin Li return !!(env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED);
15724ebd98ebSXin Li }
15734ebd98ebSXin Li
15744ebd98ebSXin Li static const VMStateDescription vmstate_msr_fred = {
15754ebd98ebSXin Li .name = "cpu/fred",
15764ebd98ebSXin Li .version_id = 1,
15774ebd98ebSXin Li .minimum_version_id = 1,
15784ebd98ebSXin Li .needed = intel_fred_msrs_needed,
15794ebd98ebSXin Li .fields = (VMStateField[]) {
15804ebd98ebSXin Li VMSTATE_UINT64(env.fred_rsp0, X86CPU),
15814ebd98ebSXin Li VMSTATE_UINT64(env.fred_rsp1, X86CPU),
15824ebd98ebSXin Li VMSTATE_UINT64(env.fred_rsp2, X86CPU),
15834ebd98ebSXin Li VMSTATE_UINT64(env.fred_rsp3, X86CPU),
15844ebd98ebSXin Li VMSTATE_UINT64(env.fred_stklvls, X86CPU),
15854ebd98ebSXin Li VMSTATE_UINT64(env.fred_ssp1, X86CPU),
15864ebd98ebSXin Li VMSTATE_UINT64(env.fred_ssp2, X86CPU),
15874ebd98ebSXin Li VMSTATE_UINT64(env.fred_ssp3, X86CPU),
15884ebd98ebSXin Li VMSTATE_UINT64(env.fred_config, X86CPU),
15894ebd98ebSXin Li VMSTATE_END_OF_LIST()
15904ebd98ebSXin Li }
15914ebd98ebSXin Li };
15924ebd98ebSXin Li
amx_xtile_needed(void * opaque)1593cdec2b75SZeng Guang static bool amx_xtile_needed(void *opaque)
1594cdec2b75SZeng Guang {
1595cdec2b75SZeng Guang X86CPU *cpu = opaque;
1596cdec2b75SZeng Guang CPUX86State *env = &cpu->env;
1597cdec2b75SZeng Guang
1598cdec2b75SZeng Guang return !!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_AMX_TILE);
1599cdec2b75SZeng Guang }
1600cdec2b75SZeng Guang
/* AMX state: 64-byte TILECFG plus the 8 KiB TILEDATA register file. */
static const VMStateDescription vmstate_amx_xtile = {
    .name = "cpu/intel_amx_xtile",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = amx_xtile_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_ARRAY(env.xtilecfg, X86CPU, 64),
        VMSTATE_UINT8_ARRAY(env.xtiledata, X86CPU, 8192),
        VMSTATE_END_OF_LIST()
    }
};
1612cdec2b75SZeng Guang #endif
1613cdec2b75SZeng Guang
arch_lbr_needed(void * opaque)1614f2e7c2fcSYang Weijiang static bool arch_lbr_needed(void *opaque)
1615f2e7c2fcSYang Weijiang {
1616f2e7c2fcSYang Weijiang X86CPU *cpu = opaque;
1617f2e7c2fcSYang Weijiang CPUX86State *env = &cpu->env;
1618f2e7c2fcSYang Weijiang
1619f2e7c2fcSYang Weijiang return !!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR);
1620f2e7c2fcSYang Weijiang }
1621f2e7c2fcSYang Weijiang
/* Architectural LBR: control/depth MSRs plus the LBR record array. */
static const VMStateDescription vmstate_arch_lbr = {
    .name = "cpu/arch_lbr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = arch_lbr_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.msr_lbr_ctl, X86CPU),
        VMSTATE_UINT64(env.msr_lbr_depth, X86CPU),
        VMSTATE_LBR_VARS(env.lbr_records, X86CPU, ARCH_LBR_NR_ENTRIES, 1),
        VMSTATE_END_OF_LIST()
    }
};
1634f2e7c2fcSYang Weijiang
triple_fault_needed(void * opaque)163512f89a39SChenyi Qiang static bool triple_fault_needed(void *opaque)
163612f89a39SChenyi Qiang {
163712f89a39SChenyi Qiang X86CPU *cpu = opaque;
163812f89a39SChenyi Qiang CPUX86State *env = &cpu->env;
163912f89a39SChenyi Qiang
164012f89a39SChenyi Qiang return env->triple_fault_pending;
164112f89a39SChenyi Qiang }
164212f89a39SChenyi Qiang
/* Pending triple-fault (shutdown) event flag. */
static const VMStateDescription vmstate_triple_fault = {
    .name = "cpu/triple_fault",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = triple_fault_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(env.triple_fault_pending, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
165312f89a39SChenyi Qiang
/*
 * Main x86 CPU migration section.  The field order below defines the
 * migration wire format and must never be reordered; optional and
 * feature-gated state travels in the subsections at the end.
 */
const VMStateDescription vmstate_x86_cpu = {
    .name = "cpu",
    .version_id = 12,
    .minimum_version_id = 11,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
        VMSTATE_UINTTL(env.eip, X86CPU),
        VMSTATE_UINTTL(env.eflags, X86CPU),
        VMSTATE_UINT32(env.hflags, X86CPU),
        /* FPU */
        VMSTATE_UINT16(env.fpuc, X86CPU),
        VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
        VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
        VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),

        VMSTATE_STRUCT_ARRAY(env.fpregs, X86CPU, 8, 0, vmstate_fpreg, FPReg),

        VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
        VMSTATE_SEGMENT(env.ldt, X86CPU),
        VMSTATE_SEGMENT(env.tr, X86CPU),
        VMSTATE_SEGMENT(env.gdt, X86CPU),
        VMSTATE_SEGMENT(env.idt, X86CPU),

        VMSTATE_UINT32(env.sysenter_cs, X86CPU),
        VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
        VMSTATE_UINTTL(env.sysenter_eip, X86CPU),

        VMSTATE_UINTTL(env.cr[0], X86CPU),
        VMSTATE_UINTTL(env.cr[2], X86CPU),
        VMSTATE_UINTTL(env.cr[3], X86CPU),
        VMSTATE_UINTTL(env.cr[4], X86CPU),
        VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
        /* MMU */
        VMSTATE_INT32(env.a20_mask, X86CPU),
        /* XMM */
        VMSTATE_UINT32(env.mxcsr, X86CPU),
        VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),

#ifdef TARGET_X86_64
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_UINT64(env.star, X86CPU),
        VMSTATE_UINT64(env.lstar, X86CPU),
        VMSTATE_UINT64(env.cstar, X86CPU),
        VMSTATE_UINT64(env.fmask, X86CPU),
        VMSTATE_UINT64(env.kernelgsbase, X86CPU),
#endif
        VMSTATE_UINT32(env.smbase, X86CPU),

        VMSTATE_UINT64(env.pat, X86CPU),
        VMSTATE_UINT32(env.hflags2, X86CPU),

        /* SVM host-state area and intercept configuration */
        VMSTATE_UINT64(env.vm_hsave, X86CPU),
        VMSTATE_UINT64(env.vm_vmcb, X86CPU),
        VMSTATE_UINT64(env.tsc_offset, X86CPU),
        VMSTATE_UINT64(env.intercept, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_write, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_write, X86CPU),
        VMSTATE_UINT32(env.intercept_exceptions, X86CPU),
        VMSTATE_UINT8(env.v_tpr, X86CPU),
        /* MTRRs */
        VMSTATE_UINT64_ARRAY(env.mtrr_fixed, X86CPU, 11),
        VMSTATE_UINT64(env.mtrr_deftype, X86CPU),
        VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
        /* KVM-related states */
        VMSTATE_INT32(env.interrupt_injected, X86CPU),
        VMSTATE_UINT32(env.mp_state, X86CPU),
        VMSTATE_UINT64(env.tsc, X86CPU),
        VMSTATE_INT32(env.exception_nr, X86CPU),
        VMSTATE_UINT8(env.soft_interrupt, X86CPU),
        VMSTATE_UINT8(env.nmi_injected, X86CPU),
        VMSTATE_UINT8(env.nmi_pending, X86CPU),
        VMSTATE_UINT8(env.has_error_code, X86CPU),
        VMSTATE_UINT32(env.sipi_vector, X86CPU),
        /* MCE */
        VMSTATE_UINT64(env.mcg_cap, X86CPU),
        VMSTATE_UINT64(env.mcg_status, X86CPU),
        VMSTATE_UINT64(env.mcg_ctl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4),
        /* rdtscp */
        VMSTATE_UINT64(env.tsc_aux, X86CPU),
        /* KVM pvclock msr */
        VMSTATE_UINT64(env.system_time_msr, X86CPU),
        VMSTATE_UINT64(env.wall_clock_msr, X86CPU),
        /* XSAVE related fields */
        VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
        VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
        VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
        VMSTATE_END_OF_LIST()
        /* The above list is not sorted /wrt version numbers, watch out! */
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_exception_info,
        &vmstate_async_pf_msr,
        &vmstate_async_pf_int_msr,
        &vmstate_pv_eoi_msr,
        &vmstate_steal_time_msr,
        &vmstate_poll_control_msr,
        &vmstate_fpop_ip_dp,
        &vmstate_msr_tsc_adjust,
        &vmstate_msr_tscdeadline,
        &vmstate_msr_ia32_misc_enable,
        &vmstate_msr_ia32_feature_control,
        &vmstate_msr_architectural_pmu,
        &vmstate_mpx,
        &vmstate_msr_hyperv_hypercall,
        &vmstate_msr_hyperv_vapic,
        &vmstate_msr_hyperv_time,
        &vmstate_msr_hyperv_crash,
        &vmstate_msr_hyperv_runtime,
        &vmstate_msr_hyperv_synic,
        &vmstate_msr_hyperv_stimer,
        &vmstate_msr_hyperv_reenlightenment,
        &vmstate_avx512,
        &vmstate_xss,
        &vmstate_umwait,
        &vmstate_tsc_khz,
        &vmstate_msr_smi_count,
        &vmstate_pkru,
        &vmstate_pkrs,
        &vmstate_spec_ctrl,
        &amd_tsc_scale_msr_ctrl,
        &vmstate_mcg_ext_ctl,
        &vmstate_msr_intel_pt,
        &vmstate_msr_virt_ssbd,
        &vmstate_svm_npt,
        &vmstate_svm_guest,
#ifndef TARGET_X86_64
        &vmstate_efer32,
#endif
#ifdef CONFIG_KVM
        &vmstate_nested_state,
        &vmstate_xen_vcpu,
#endif
        &vmstate_msr_tsx_ctrl,
        &vmstate_msr_intel_sgx,
        &vmstate_pdptrs,
        &vmstate_msr_xfd,
        &vmstate_msr_hwcr,
#ifdef TARGET_X86_64
        &vmstate_msr_fred,
        &vmstate_amx_xtile,
#endif
        &vmstate_arch_lbr,
        &vmstate_triple_fault,
        NULL
    }
};
1805