1 #include "qemu/osdep.h"
2 #include "cpu.h"
3 #include "qemu/error-report.h"
4 #include "system/kvm.h"
5 #include "system/tcg.h"
6 #include "kvm_arm.h"
7 #include "internals.h"
8 #include "cpu-features.h"
9 #include "migration/qemu-file-types.h"
10 #include "migration/vmstate.h"
11 #include "target/arm/gtimer.h"
12
vfp_needed(void * opaque)13 static bool vfp_needed(void *opaque)
14 {
15 ARMCPU *cpu = opaque;
16
17 return (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
18 ? cpu_isar_feature(aa64_fp_simd, cpu)
19 : cpu_isar_feature(aa32_vfp_simd, cpu));
20 }
21
vfp_fpcr_fpsr_needed(void * opaque)22 static bool vfp_fpcr_fpsr_needed(void *opaque)
23 {
24 /*
25 * If either the FPCR or the FPSR include set bits that are not
26 * visible in the AArch32 FPSCR view of floating point control/status
27 * then we must send the FPCR and FPSR as two separate fields in the
28 * cpu/vfp/fpcr_fpsr subsection, and we will send a 0 for the old
29 * FPSCR field in cpu/vfp.
30 *
31 * If all the set bits are representable in an AArch32 FPSCR then we
32 * send that value as the cpu/vfp FPSCR field, and don't send the
33 * cpu/vfp/fpcr_fpsr subsection.
34 *
35 * On incoming migration, if the cpu/vfp FPSCR field is non-zero we
36 * use it, and if the fpcr_fpsr subsection is present we use that.
37 * (The subsection will never be present with a non-zero FPSCR field,
38 * and if FPSCR is zero and the subsection is not present that means
39 * that FPSCR/FPSR/FPCR are zero.)
40 *
41 * This preserves migration compatibility with older QEMU versions,
42 * in both directions.
43 */
44 ARMCPU *cpu = opaque;
45 CPUARMState *env = &cpu->env;
46
47 return (vfp_get_fpcr(env) & ~FPSCR_FPCR_MASK) ||
48 (vfp_get_fpsr(env) & ~FPSCR_FPSR_MASK);
49 }
50
/* Load the legacy combined FPSCR field from the cpu/vfp wire format. */
static int get_fpscr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    uint32_t fpscr = qemu_get_be32(f);

    /* 0 means we might have the data in the fpcr_fpsr subsection */
    if (fpscr != 0) {
        vfp_set_fpscr(&cpu->env, fpscr);
    }
    return 0;
}
64
/* Store the legacy combined FPSCR field in the cpu/vfp wire format. */
static int put_fpscr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t fpscr;

    /* Send 0 when the real data goes in the fpcr_fpsr subsection. */
    if (vfp_fpcr_fpsr_needed(opaque)) {
        fpscr = 0;
    } else {
        fpscr = vfp_get_fpscr(env);
    }
    qemu_put_be32(f, fpscr);
    return 0;
}
75
/* Wire handlers for the legacy combined FPSCR field in cpu/vfp. */
static const VMStateInfo vmstate_fpscr = {
    .name = "fpscr",
    .get = get_fpscr,
    .put = put_fpscr,
};
81
/* Load the 64-bit FPCR from the cpu/vfp/fpcr_fpsr subsection. */
static int get_fpcr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;

    vfp_set_fpcr(&cpu->env, qemu_get_be64(f));
    return 0;
}
92
/* Store the 64-bit FPCR in the cpu/vfp/fpcr_fpsr subsection. */
static int put_fpcr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;

    qemu_put_be64(f, vfp_get_fpcr(&cpu->env));
    return 0;
}
102
/* Wire handlers for the split-out FPCR in cpu/vfp/fpcr_fpsr. */
static const VMStateInfo vmstate_fpcr = {
    .name = "fpcr",
    .get = get_fpcr,
    .put = put_fpcr,
};
108
/* Load the 64-bit FPSR from the cpu/vfp/fpcr_fpsr subsection. */
static int get_fpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;

    vfp_set_fpsr(&cpu->env, qemu_get_be64(f));
    return 0;
}
119
/* Store the 64-bit FPSR in the cpu/vfp/fpcr_fpsr subsection. */
static int put_fpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;

    qemu_put_be64(f, vfp_get_fpsr(&cpu->env));
    return 0;
}
129
/* Wire handlers for the split-out FPSR in cpu/vfp/fpcr_fpsr. */
static const VMStateInfo vmstate_fpsr = {
    .name = "fpsr",
    .get = get_fpsr,
    .put = put_fpsr,
};
135
/*
 * Subsection carrying FPCR and FPSR as separate 64-bit fields, sent
 * only when their set bits are not representable in an AArch32 FPSCR
 * (see vfp_fpcr_fpsr_needed() above for the compatibility rules).
 */
static const VMStateDescription vmstate_vfp_fpcr_fpsr = {
    .name = "cpu/vfp/fpcr_fpsr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vfp_fpcr_fpsr_needed,
    .fields = (const VMStateField[]) {
        {
            .name = "fpcr",
            .version_id = 0,
            .size = sizeof(uint64_t),
            .info = &vmstate_fpcr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        {
            .name = "fpsr",
            .version_id = 0,
            .size = sizeof(uint64_t),
            .info = &vmstate_fpsr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
};
161
/* FP/SIMD register state: the low 128 bits of each Zreg plus FP control. */
static const VMStateDescription vmstate_vfp = {
    .name = "cpu/vfp",
    .version_id = 3,
    .minimum_version_id = 3,
    .needed = vfp_needed,
    .fields = (const VMStateField[]) {
        /* For compatibility, store Qn out of Zn here. */
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[0].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[1].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[2].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[3].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[4].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[5].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[6].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[7].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[8].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[9].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[10].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[11].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[12].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[13].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[14].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[15].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[16].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[17].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[18].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[19].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[20].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[21].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[22].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[23].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[24].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[25].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[26].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[27].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[28].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[29].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[30].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[31].d, ARMCPU, 0, 2),

        /* The xregs array is a little awkward because element 1 (FPSCR)
         * requires a specific accessor, so we have to split it up in
         * the vmstate:
         */
        VMSTATE_UINT32(env.vfp.xregs[0], ARMCPU),
        VMSTATE_UINT32_SUB_ARRAY(env.vfp.xregs, ARMCPU, 2, 14),
        {
            .name = "fpscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_fpscr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_vfp_fpcr_fpsr,
        NULL
    }
};
223
iwmmxt_needed(void * opaque)224 static bool iwmmxt_needed(void *opaque)
225 {
226 ARMCPU *cpu = opaque;
227 CPUARMState *env = &cpu->env;
228
229 return arm_feature(env, ARM_FEATURE_IWMMXT);
230 }
231
/* iwMMXt data and control registers. */
static const VMStateDescription vmstate_iwmmxt = {
    .name = "cpu/iwmmxt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = iwmmxt_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16),
        VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16),
        VMSTATE_END_OF_LIST()
    }
};
243
244 /* The expression ARM_MAX_VQ - 2 is 0 for pure AArch32 build,
245 * and ARMPredicateReg is actively empty. This triggers errors
246 * in the expansion of the VMSTATE macros.
247 */
248
sve_needed(void * opaque)249 static bool sve_needed(void *opaque)
250 {
251 ARMCPU *cpu = opaque;
252
253 return cpu_isar_feature(aa64_sve, cpu);
254 }
255
256 /* The first two words of each Zreg is stored in VFP state. */
/* The first two words of each Zreg is stored in VFP state. */
/* Per-Zreg descriptor for the remaining (high) words of a Z register. */
static const VMStateDescription vmstate_zreg_hi_reg = {
    .name = "cpu/sve/zreg_hi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_SUB_ARRAY(d, ARMVectorReg, 2, ARM_MAX_VQ - 2),
        VMSTATE_END_OF_LIST()
    }
};
266
/* Per-Preg descriptor for one SVE predicate register. */
static const VMStateDescription vmstate_preg_reg = {
    .name = "cpu/sve/preg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(p, ARMPredicateReg, 2 * ARM_MAX_VQ / 8),
        VMSTATE_END_OF_LIST()
    }
};
276
/* SVE state: high parts of the 32 Zregs plus the 17 predicate registers. */
static const VMStateDescription vmstate_sve = {
    .name = "cpu/sve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sve_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.vfp.zregs, ARMCPU, 32, 0,
                             vmstate_zreg_hi_reg, ARMVectorReg),
        VMSTATE_STRUCT_ARRAY(env.vfp.pregs, ARMCPU, 17, 0,
                             vmstate_preg_reg, ARMPredicateReg),
        VMSTATE_END_OF_LIST()
    }
};
290
/* One full ARMVectorReg, used for the rows of SME ZA storage below. */
static const VMStateDescription vmstate_vreg = {
    .name = "vreg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(d, ARMVectorReg, ARM_MAX_VQ * 2),
        VMSTATE_END_OF_LIST()
    }
};
300
za_needed(void * opaque)301 static bool za_needed(void *opaque)
302 {
303 ARMCPU *cpu = opaque;
304
305 /*
306 * When ZA storage is disabled, its contents are discarded.
307 * It will be zeroed when ZA storage is re-enabled.
308 */
309 return FIELD_EX64(cpu->env.svcr, SVCR, ZA);
310 }
311
/* SME ZA storage, migrated only while SVCR.ZA is set (see za_needed). */
static const VMStateDescription vmstate_za = {
    .name = "cpu/sme",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = za_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.za_state.za, ARMCPU, ARM_MAX_VQ * 16, 0,
                             vmstate_vreg, ARMVectorReg),
        VMSTATE_END_OF_LIST()
    }
};
323
zt0_needed(void * opaque)324 static bool zt0_needed(void *opaque)
325 {
326 ARMCPU *cpu = opaque;
327
328 return za_needed(cpu) && cpu_isar_feature(aa64_sme2, cpu);
329 }
330
331 static const VMStateDescription vmstate_zt0 = {
332 .name = "cpu/zt0",
333 .version_id = 1,
334 .minimum_version_id = 1,
335 .needed = zt0_needed,
336 .fields = (VMStateField[]) {
337 VMSTATE_UINT64_ARRAY(env.za_state.zt0, ARMCPU,
338 ARRAY_SIZE(((CPUARMState *)0)->za_state.zt0)),
339 VMSTATE_END_OF_LIST()
340 }
341 };
342
serror_needed(void * opaque)343 static bool serror_needed(void *opaque)
344 {
345 ARMCPU *cpu = opaque;
346 CPUARMState *env = &cpu->env;
347
348 return env->serror.pending != 0;
349 }
350
/* Pending SError exception state. */
static const VMStateDescription vmstate_serror = {
    .name = "cpu/serror",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = serror_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(env.serror.pending, ARMCPU),
        VMSTATE_UINT8(env.serror.has_esr, ARMCPU),
        VMSTATE_UINT64(env.serror.esr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
363
/*
 * Always send the irq-line-state subsection; cpu_pre_load() uses a
 * sentinel value to detect migration streams from older QEMU that
 * lack it.
 */
static bool irq_line_state_needed(void *opaque)
{
    return true;
}
368
/* Current state of the CPU's inbound interrupt lines. */
static const VMStateDescription vmstate_irq_line_state = {
    .name = "cpu/irq-line-state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = irq_line_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.irq_line_state, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
379
wfxt_timer_needed(void * opaque)380 static bool wfxt_timer_needed(void *opaque)
381 {
382 ARMCPU *cpu = opaque;
383
384 /* We'll only have the timer object if FEAT_WFxT is implemented */
385 return cpu->wfxt_timer;
386 }
387
/* FEAT_WFxT wait-timeout timer. */
static const VMStateDescription vmstate_wfxt_timer = {
    .name = "cpu/wfxt-timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = wfxt_timer_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_TIMER_PTR(wfxt_timer, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
398
m_needed(void * opaque)399 static bool m_needed(void *opaque)
400 {
401 ARMCPU *cpu = opaque;
402 CPUARMState *env = &cpu->env;
403
404 return arm_feature(env, ARM_FEATURE_M);
405 }
406
/* M-profile non-secure FAULTMASK/PRIMASK (see also get_cpsr() legacy path). */
static const VMStateDescription vmstate_m_faultmask_primask = {
    .name = "cpu/m/faultmask-primask",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
418
419 /* CSSELR is in a subsection because we didn't implement it previously.
420 * Migration from an old implementation will leave it at zero, which
421 * is OK since the only CPUs in the old implementation make the
422 * register RAZ/WI.
423 * Since there was no version of QEMU which implemented the CSSELR for
424 * just non-secure, we transfer both banks here rather than putting
425 * the secure banked version in the m-security subsection.
426 */
csselr_vmstate_validate(void * opaque,int version_id)427 static bool csselr_vmstate_validate(void *opaque, int version_id)
428 {
429 ARMCPU *cpu = opaque;
430
431 return cpu->env.v7m.csselr[M_REG_NS] <= R_V7M_CSSELR_INDEX_MASK
432 && cpu->env.v7m.csselr[M_REG_S] <= R_V7M_CSSELR_INDEX_MASK;
433 }
434
m_csselr_needed(void * opaque)435 static bool m_csselr_needed(void *opaque)
436 {
437 ARMCPU *cpu = opaque;
438
439 return !arm_v7m_csselr_razwi(cpu);
440 }
441
/* Both banks of CSSELR (see the comment above csselr_vmstate_validate). */
static const VMStateDescription vmstate_m_csselr = {
    .name = "cpu/m/csselr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_csselr_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.csselr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_VALIDATE("CSSELR is valid", csselr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};
453
/* Non-secure SCR; the secure bank is migrated in cpu/m-security. */
static const VMStateDescription vmstate_m_scr = {
    .name = "cpu/m/scr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.scr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
464
/* The inactive stack pointer (the one not currently in r13). */
static const VMStateDescription vmstate_m_other_sp = {
    .name = "cpu/m/other-sp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.other_sp, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
475
m_v8m_needed(void * opaque)476 static bool m_v8m_needed(void *opaque)
477 {
478 ARMCPU *cpu = opaque;
479 CPUARMState *env = &cpu->env;
480
481 return arm_feature(env, ARM_FEATURE_M) && arm_feature(env, ARM_FEATURE_V8);
482 }
483
/* v8M stack limit registers, both security banks. */
static const VMStateDescription vmstate_m_v8m = {
    .name = "cpu/m/v8m",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_v8m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.msplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.psplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_END_OF_LIST()
    }
};
495
/* M-profile floating-point extension control registers. */
static const VMStateDescription vmstate_m_fp = {
    .name = "cpu/m/fp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vfp_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.fpcar, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpccr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpdscr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.cpacr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32(env.v7m.nsacr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
510
mve_needed(void * opaque)511 static bool mve_needed(void *opaque)
512 {
513 ARMCPU *cpu = opaque;
514
515 return cpu_isar_feature(aa32_mve, cpu);
516 }
517
/* MVE predication and loop state. */
static const VMStateDescription vmstate_m_mve = {
    .name = "cpu/m/mve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mve_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vpr, ARMCPU),
        VMSTATE_UINT32(env.v7m.ltpsize, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
};
529
/* Core M-profile system state (non-secure bank; secure is in cpu/m-security). */
static const VMStateDescription vmstate_m = {
    .name = "cpu/m",
    .version_id = 4,
    .minimum_version_id = 4,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.hfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.dfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.bfar, ARMCPU),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_NS], ARMCPU),
        VMSTATE_INT32(env.v7m.exception, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_m_faultmask_primask,
        &vmstate_m_csselr,
        &vmstate_m_scr,
        &vmstate_m_other_sp,
        &vmstate_m_v8m,
        &vmstate_m_fp,
        &vmstate_m_mve,
        NULL
    }
};
560
thumb2ee_needed(void * opaque)561 static bool thumb2ee_needed(void *opaque)
562 {
563 ARMCPU *cpu = opaque;
564 CPUARMState *env = &cpu->env;
565
566 return arm_feature(env, ARM_FEATURE_THUMB2EE);
567 }
568
/* ThumbEE control and handler base registers. */
static const VMStateDescription vmstate_thumb2ee = {
    .name = "cpu/thumb2ee",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = thumb2ee_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.teecr, ARMCPU),
        VMSTATE_UINT32(env.teehbr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
580
pmsav7_needed(void * opaque)581 static bool pmsav7_needed(void *opaque)
582 {
583 ARMCPU *cpu = opaque;
584 CPUARMState *env = &cpu->env;
585
586 return arm_feature(env, ARM_FEATURE_PMSA) &&
587 arm_feature(env, ARM_FEATURE_V7) &&
588 !arm_feature(env, ARM_FEATURE_V8);
589 }
590
pmsav7_rgnr_vmstate_validate(void * opaque,int version_id)591 static bool pmsav7_rgnr_vmstate_validate(void *opaque, int version_id)
592 {
593 ARMCPU *cpu = opaque;
594
595 return cpu->env.pmsav7.rnr[M_REG_NS] < cpu->pmsav7_dregion;
596 }
597
/* v7 PMSA MPU region registers (variable count, sized by pmsav7_dregion). */
static const VMStateDescription vmstate_pmsav7 = {
    .name = "cpu/pmsav7",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav7.drbar, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.drsr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.dracr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VALIDATE("rgnr is valid", pmsav7_rgnr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};
614
pmsav7_rnr_needed(void * opaque)615 static bool pmsav7_rnr_needed(void *opaque)
616 {
617 ARMCPU *cpu = opaque;
618 CPUARMState *env = &cpu->env;
619
620 /* For R profile cores pmsav7.rnr is migrated via the cpreg
621 * "RGNR" definition in helper.h. For M profile we have to
622 * migrate it separately.
623 */
624 return arm_feature(env, ARM_FEATURE_M);
625 }
626
/* M-profile MPU region number; must precede cpu/pmsav7 in the stream. */
static const VMStateDescription vmstate_pmsav7_rnr = {
    .name = "cpu/pmsav7-rnr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_rnr_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
637
pmsav8_needed(void * opaque)638 static bool pmsav8_needed(void *opaque)
639 {
640 ARMCPU *cpu = opaque;
641 CPUARMState *env = &cpu->env;
642
643 return arm_feature(env, ARM_FEATURE_PMSA) &&
644 arm_feature(env, ARM_FEATURE_V8);
645 }
646
pmsav8r_needed(void * opaque)647 static bool pmsav8r_needed(void *opaque)
648 {
649 ARMCPU *cpu = opaque;
650 CPUARMState *env = &cpu->env;
651
652 return arm_feature(env, ARM_FEATURE_PMSA) &&
653 arm_feature(env, ARM_FEATURE_V8) &&
654 !arm_feature(env, ARM_FEATURE_M);
655 }
656
/* v8R hypervisor MPU region registers (sized by pmsav8r_hdregion). */
static const VMStateDescription vmstate_pmsav8r = {
    .name = "cpu/pmsav8/pmsav8r",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav8r_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav8.hprbar, ARMCPU,
                        pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.hprlar, ARMCPU,
                        pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t),
        VMSTATE_END_OF_LIST()
    },
};
670
/* v8 PMSA MPU region registers, non-secure bank. */
static const VMStateDescription vmstate_pmsav8 = {
    .name = "cpu/pmsav8",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav8_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_pmsav8r,
        NULL
    }
};
690
s_rnr_vmstate_validate(void * opaque,int version_id)691 static bool s_rnr_vmstate_validate(void *opaque, int version_id)
692 {
693 ARMCPU *cpu = opaque;
694
695 return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion;
696 }
697
sau_rnr_vmstate_validate(void * opaque,int version_id)698 static bool sau_rnr_vmstate_validate(void *opaque, int version_id)
699 {
700 ARMCPU *cpu = opaque;
701
702 return cpu->env.sau.rnr < cpu->sau_sregion;
703 }
704
m_security_needed(void * opaque)705 static bool m_security_needed(void *opaque)
706 {
707 ARMCPU *cpu = opaque;
708 CPUARMState *env = &cpu->env;
709
710 return arm_feature(env, ARM_FEATURE_M_SECURITY);
711 }
712
/* Secure-bank M-profile state, plus the SAU and AIRCR (see note below). */
static const VMStateDescription vmstate_m_security = {
    .name = "cpu/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_security_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.secure, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_msp, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_psp, ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_S], ARMCPU),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        /* rnr must precede the VMSTATE_VALIDATE that checks it. */
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_S], ARMCPU),
        VMSTATE_VALIDATE("secure MPU_RNR is valid", s_rnr_vmstate_validate),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.sfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.sfar, ARMCPU),
        VMSTATE_VARRAY_UINT32(env.sau.rbar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.sau.rlar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.sau.rnr, ARMCPU),
        VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate),
        VMSTATE_UINT32(env.sau.ctrl, ARMCPU),
        VMSTATE_UINT32(env.v7m.scr[M_REG_S], ARMCPU),
        /* AIRCR is not secure-only, but our implementation is R/O if the
         * security extension is unimplemented, so we migrate it here.
         */
        VMSTATE_UINT32(env.v7m.aircr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
756
/*
 * Load the "cpsr" field, which is really CPSR (A-profile AArch32),
 * PSTATE (AArch64) or XPSR (M-profile) depending on the CPU, and
 * includes compatibility handling for CPSR-format values sent by
 * older QEMU for M-profile CPUs.
 */
static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    if (arm_feature(env, ARM_FEATURE_M)) {
        if (val & XPSR_EXCP) {
            /* This is a CPSR format value from an older QEMU. (We can tell
             * because values transferred in XPSR format always have zero
             * for the EXCP field, and CPSR format will always have bit 4
             * set in CPSR_M.) Rearrange it into XPSR format. The significant
             * differences are that the T bit is not in the same place, the
             * primask/faultmask info may be in the CPSR I and F bits, and
             * we do not want the mode bits.
             * We know that this cleanup happened before v8M, so there
             * is no complication with banked primask/faultmask.
             */
            uint32_t newval = val;

            assert(!arm_feature(env, ARM_FEATURE_M_SECURITY));

            newval &= (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE);
            if (val & CPSR_T) {
                newval |= XPSR_T;
            }
            /* If the I or F bits are set then this is a migration from
             * an old QEMU which still stored the M profile FAULTMASK
             * and PRIMASK in env->daif. For a new QEMU, the data is
             * transferred using the vmstate_m_faultmask_primask subsection.
             */
            if (val & CPSR_F) {
                env->v7m.faultmask[M_REG_NS] = 1;
            }
            if (val & CPSR_I) {
                env->v7m.primask[M_REG_NS] = 1;
            }
            val = newval;
        }
        /* Ignore the low bits, they are handled by vmstate_m. */
        xpsr_write(env, val, ~XPSR_EXCP);
        return 0;
    }

    /* A-profile: PSTATE.nRW distinguishes AArch64 from AArch32 state. */
    env->aarch64 = ((val & PSTATE_nRW) == 0);

    if (is_a64(env)) {
        pstate_write(env, val);
        return 0;
    }

    cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    return 0;
}
812
/*
 * Store the "cpsr" field: XPSR for M-profile (minus the exception
 * number, which vmstate_m handles), PSTATE for AArch64, CPSR for
 * AArch32 A-profile.
 */
static int put_cpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t psr;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* The low 9 bits are v7m.exception, which is handled by vmstate_m. */
        psr = xpsr_read(env) & ~XPSR_EXCP;
    } else if (is_a64(env)) {
        psr = pstate_read(env);
    } else {
        psr = cpsr_read(env);
    }

    qemu_put_be32(f, psr);
    return 0;
}
832
/* Wire handlers for the polymorphic CPSR/PSTATE/XPSR field. */
static const VMStateInfo vmstate_cpsr = {
    .name = "cpsr",
    .get = get_cpsr,
    .put = put_cpsr,
};
838
/* Load the powered-off flag and map it onto the PSCI power state. */
static int get_power(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field)
{
    ARMCPU *cpu = opaque;

    if (qemu_get_byte(f)) {
        cpu->power_state = PSCI_OFF;
    } else {
        cpu->power_state = PSCI_ON;
    }
    return 0;
}
847
/*
 * Store the power state as a single powered-off byte. Returns non-zero
 * (failing the migration) if the CPU is in a transitional PSCI state,
 * since migration should never happen while we transition power states.
 */
static int put_power(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;

    if (cpu->power_state != PSCI_ON &&
        cpu->power_state != PSCI_OFF) {
        return 1;
    }

    qemu_put_byte(f, cpu->power_state == PSCI_OFF);
    return 0;
}
864
/* Wire handlers for the boolean power_state field. */
static const VMStateInfo vmstate_powered_off = {
    .name = "powered_off",
    .get = get_power,
    .put = put_power,
};
870
/*
 * pre_save hook: sync live CPU state into the cpreg list, then snapshot
 * that list into the cpreg_vmstate_* arrays which are what actually go
 * on the wire.
 */
static int cpu_pre_save(void *opaque)
{
    ARMCPU *cpu = opaque;

    /* Under TCG, stop the PMU counters so their state is consistent. */
    if (!kvm_enabled()) {
        pmu_op_start(&cpu->env);
    }

    if (kvm_enabled()) {
        /* Pull the current register values out of the kernel. */
        if (!write_kvmstate_to_list(cpu)) {
            /* This should never fail */
            g_assert_not_reached();
        }

        /*
         * kvm_arm_cpu_pre_save() must be called after
         * write_kvmstate_to_list()
         */
        kvm_arm_cpu_pre_save(cpu);
    } else {
        if (!write_cpustate_to_list(cpu, false)) {
            /* This should never fail. */
            g_assert_not_reached();
        }
    }

    /* Copy the synced cpreg list into the arrays that are migrated. */
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    memcpy(cpu->cpreg_vmstate_indexes, cpu->cpreg_indexes,
           cpu->cpreg_array_len * sizeof(uint64_t));
    memcpy(cpu->cpreg_vmstate_values, cpu->cpreg_values,
           cpu->cpreg_array_len * sizeof(uint64_t));

    return 0;
}
905
cpu_post_save(void * opaque)906 static int cpu_post_save(void *opaque)
907 {
908 ARMCPU *cpu = opaque;
909
910 if (!kvm_enabled()) {
911 pmu_op_finish(&cpu->env);
912 }
913
914 return 0;
915 }
916
/*
 * pre_load hook: establish defaults/sentinels so cpu_post_load() and the
 * field accessors can tell which optional data the stream provided.
 */
static int cpu_pre_load(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    /*
     * In an inbound migration where on the source FPSCR/FPSR/FPCR are 0,
     * there will be no fpcr_fpsr subsection so we won't call vfp_set_fpcr()
     * and vfp_set_fpsr() from get_fpcr() and get_fpsr(); also the get_fpscr()
     * function will not call vfp_set_fpscr() because it will see a 0 in the
     * inbound data. Ensure that in this case we have a correctly set up
     * zero FPSCR/FPCR/FPSR.
     *
     * This is not strictly needed because FPSCR is zero out of reset, but
     * it avoids the possibility of future confusing migration bugs if some
     * future architecture change makes the reset value non-zero.
     */
    vfp_set_fpscr(env, 0);

    /*
     * Pre-initialize irq_line_state to a value that's never valid as
     * real data, so cpu_post_load() can tell whether we've seen the
     * irq-line-state subsection in the incoming migration state.
     */
    env->irq_line_state = UINT32_MAX;

    /* Under TCG, stop the PMU counters while we load their state. */
    if (!kvm_enabled()) {
        pmu_op_start(env);
    }

    return 0;
}
949
/*
 * post_load hook: reconcile the migrated cpreg list with ours, push the
 * values into the CPU state (or the kernel for KVM), and sanity-check
 * the incoming data. Returns 0 on success, -1 to fail the migration.
 */
static int cpu_post_load(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    int i, v;

    /*
     * Handle migration compatibility from old QEMU which didn't
     * send the irq-line-state subsection. A QEMU without it did not
     * implement the HCR_EL2.{VI,VF} bits as generating interrupts,
     * so for TCG the line state matches the bits set in cs->interrupt_request.
     * For KVM the line state is not stored in cs->interrupt_request
     * and so this will leave irq_line_state as 0, but this is OK because
     * we only need to care about it for TCG.
     */
    if (env->irq_line_state == UINT32_MAX) {
        CPUState *cs = CPU(cpu);

        env->irq_line_state = cs->interrupt_request &
            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ |
             CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VFIQ);
    }

    /* Update the values list from the incoming migration data.
     * Anything in the incoming data which we don't know about is
     * a migration failure; anything we know about but the incoming
     * data doesn't specify retains its current (reset) value.
     * The indexes list remains untouched -- we only inspect the
     * incoming migration index list so we can match the values array
     * entries with the right slots in our own values array.
     */

    /* Both index lists are sorted, so this is a linear merge. */
    for (i = 0, v = 0; i < cpu->cpreg_array_len
             && v < cpu->cpreg_vmstate_array_len; i++) {
        if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) {
            /* register in our list but not incoming : skip it */
            continue;
        }
        if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) {
            /* register in their list but not ours: fail migration */
            return -1;
        }
        /* matching register, copy the value over */
        cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v];
        v++;
    }

    if (kvm_enabled()) {
        if (!kvm_arm_cpu_post_load(cpu)) {
            return -1;
        }
    } else {
        if (!write_list_to_cpustate(cpu)) {
            return -1;
        }
    }

    /*
     * Misaligned thumb pc is architecturally impossible. Fail the
     * incoming migration. For TCG it would trigger the assert in
     * thumb_tr_translate_insn().
     */
    if (!is_a64(env) && env->thumb && (env->regs[15] & 1)) {
        return -1;
    }

    if (tcg_enabled()) {
        hw_breakpoint_update_all(cpu);
        hw_watchpoint_update_all(cpu);
    }

    /*
     * TCG gen_update_fp_context() relies on the invariant that
     * FPDSCR.LTPSIZE is constant 4 for M-profile with the LOB extension;
     * forbid bogus incoming data with some other value.
     */
    if (arm_feature(env, ARM_FEATURE_M) && cpu_isar_feature(aa32_lob, cpu)) {
        if (extract32(env->v7m.fpdscr[M_REG_NS],
                      FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4 ||
            extract32(env->v7m.fpdscr[M_REG_S],
                      FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4) {
            return -1;
        }
    }

    /* Finish the PMU transaction started in cpu_pre_load(). */
    if (!kvm_enabled()) {
        pmu_op_finish(env);
    }

    if (tcg_enabled()) {
        arm_rebuild_hflags(env);
    }

    return 0;
}
1045
/* Top-level vmstate for an Arm CPU; everything optional goes in subsections. */
const VMStateDescription vmstate_arm_cpu = {
    .name = "cpu",
    .version_id = 22,
    .minimum_version_id = 22,
    .pre_save = cpu_pre_save,
    .post_save = cpu_post_save,
    .pre_load = cpu_pre_load,
    .post_load = cpu_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
        VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
        VMSTATE_UINT64(env.pc, ARMCPU),
        {
            .name = "cpsr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_cpsr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_UINT32(env.spsr, ARMCPU),
        VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
        VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
        VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
        /* The length-check must come before the arrays to avoid
         * incoming data possibly overflowing the array.
         */
        VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len, ARMCPU),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
        VMSTATE_UINT64(env.exclusive_val, ARMCPU),
        VMSTATE_UINT64(env.exclusive_high, ARMCPU),
        /* Placeholder keeping the wire format of a removed field. */
        VMSTATE_UNUSED(sizeof(uint64_t)),
        VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
        VMSTATE_UINT32(env.exception.fsr, ARMCPU),
        VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
        {
            .name = "power_state",
            .version_id = 0,
            .size = sizeof(bool),
            .info = &vmstate_powered_off,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_vfp,
        &vmstate_iwmmxt,
        &vmstate_m,
        &vmstate_thumb2ee,
        /* pmsav7_rnr must come before pmsav7 so that we have the
         * region number before we test it in the VMSTATE_VALIDATE
         * in vmstate_pmsav7.
         */
        &vmstate_pmsav7_rnr,
        &vmstate_pmsav7,
        &vmstate_pmsav8,
        &vmstate_m_security,
        &vmstate_sve,
        &vmstate_za,
        &vmstate_zt0,
        &vmstate_serror,
        &vmstate_irq_line_state,
        &vmstate_wfxt_timer,
        NULL
    }
};
1125