xref: /openbmc/qemu/target/arm/machine.c (revision fa3673e4)
1 #include "qemu/osdep.h"
2 #include "cpu.h"
3 #include "qemu/error-report.h"
4 #include "sysemu/kvm.h"
5 #include "sysemu/tcg.h"
6 #include "kvm_arm.h"
7 #include "internals.h"
8 #include "cpu-features.h"
9 #include "migration/cpu.h"
10 
11 static bool vfp_needed(void *opaque)
12 {
13     ARMCPU *cpu = opaque;
14 
15     return (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
16             ? cpu_isar_feature(aa64_fp_simd, cpu)
17             : cpu_isar_feature(aa32_vfp_simd, cpu));
18 }
19 
20 static int get_fpscr(QEMUFile *f, void *opaque, size_t size,
21                      const VMStateField *field)
22 {
23     ARMCPU *cpu = opaque;
24     CPUARMState *env = &cpu->env;
25     uint32_t val = qemu_get_be32(f);
26 
27     vfp_set_fpscr(env, val);
28     return 0;
29 }
30 
31 static int put_fpscr(QEMUFile *f, void *opaque, size_t size,
32                      const VMStateField *field, JSONWriter *vmdesc)
33 {
34     ARMCPU *cpu = opaque;
35     CPUARMState *env = &cpu->env;
36 
37     qemu_put_be32(f, vfp_get_fpscr(env));
38     return 0;
39 }
40 
/* Custom field info so FPSCR goes through its accessor functions. */
static const VMStateInfo vmstate_fpscr = {
    .name = "fpscr",
    .get = get_fpscr,
    .put = put_fpscr,
};
46 
/*
 * VFP/Advanced SIMD register state (subsection "cpu/vfp", version 3).
 * Selected by vfp_needed(). Note this defines the migration wire
 * format: field order and sizes must not change.
 */
static const VMStateDescription vmstate_vfp = {
    .name = "cpu/vfp",
    .version_id = 3,
    .minimum_version_id = 3,
    .needed = vfp_needed,
    .fields = (VMStateField[]) {
        /* For compatibility, store Qn out of Zn here.  */
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[0].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[1].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[2].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[3].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[4].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[5].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[6].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[7].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[8].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[9].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[10].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[11].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[12].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[13].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[14].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[15].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[16].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[17].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[18].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[19].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[20].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[21].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[22].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[23].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[24].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[25].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[26].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[27].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[28].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[29].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[30].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[31].d, ARMCPU, 0, 2),

        /* The xregs array is a little awkward because element 1 (FPSCR)
         * requires a specific accessor, so we have to split it up in
         * the vmstate:
         */
        VMSTATE_UINT32(env.vfp.xregs[0], ARMCPU),
        VMSTATE_UINT32_SUB_ARRAY(env.vfp.xregs, ARMCPU, 2, 14),
        {
            .name = "fpscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_fpscr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
104 
105 static bool iwmmxt_needed(void *opaque)
106 {
107     ARMCPU *cpu = opaque;
108     CPUARMState *env = &cpu->env;
109 
110     return arm_feature(env, ARM_FEATURE_IWMMXT);
111 }
112 
/* iwMMXt data and control registers (subsection "cpu/iwmmxt"). */
static const VMStateDescription vmstate_iwmmxt = {
    .name = "cpu/iwmmxt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = iwmmxt_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16),
        VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16),
        VMSTATE_END_OF_LIST()
    }
};
124 
125 #ifdef TARGET_AARCH64
126 /* The expression ARM_MAX_VQ - 2 is 0 for pure AArch32 build,
127  * and ARMPredicateReg is actively empty.  This triggers errors
128  * in the expansion of the VMSTATE macros.
129  */
130 
131 static bool sve_needed(void *opaque)
132 {
133     ARMCPU *cpu = opaque;
134 
135     return cpu_isar_feature(aa64_sve, cpu);
136 }
137 
138 /* The first two words of each Zreg is stored in VFP state.  */
/* Per-Zreg: only the part above the Qreg (d[2..ARM_MAX_VQ*2)) is sent,
 * since d[0] and d[1] already travel in vmstate_vfp.
 */
static const VMStateDescription vmstate_zreg_hi_reg = {
    .name = "cpu/sve/zreg_hi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_SUB_ARRAY(d, ARMVectorReg, 2, ARM_MAX_VQ - 2),
        VMSTATE_END_OF_LIST()
    }
};
148 
/* One SVE predicate register, sent in full. */
static const VMStateDescription vmstate_preg_reg = {
    .name = "cpu/sve/preg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(p, ARMPredicateReg, 2 * ARM_MAX_VQ / 8),
        VMSTATE_END_OF_LIST()
    }
};
158 
/* SVE state: the high parts of the 32 Zregs plus 16 Pregs and FFR
 * (pregs[16]); selected by sve_needed().
 */
static const VMStateDescription vmstate_sve = {
    .name = "cpu/sve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sve_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.vfp.zregs, ARMCPU, 32, 0,
                             vmstate_zreg_hi_reg, ARMVectorReg),
        VMSTATE_STRUCT_ARRAY(env.vfp.pregs, ARMCPU, 17, 0,
                             vmstate_preg_reg, ARMPredicateReg),
        VMSTATE_END_OF_LIST()
    }
};
172 
/* A full vector register, used for the rows of SME ZA storage. */
static const VMStateDescription vmstate_vreg = {
    .name = "vreg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(d, ARMVectorReg, ARM_MAX_VQ * 2),
        VMSTATE_END_OF_LIST()
    }
};
182 
183 static bool za_needed(void *opaque)
184 {
185     ARMCPU *cpu = opaque;
186 
187     /*
188      * When ZA storage is disabled, its contents are discarded.
189      * It will be zeroed when ZA storage is re-enabled.
190      */
191     return FIELD_EX64(cpu->env.svcr, SVCR, ZA);
192 }
193 
/* SME ZA storage, one vmstate_vreg per row; selected by za_needed(). */
static const VMStateDescription vmstate_za = {
    .name = "cpu/sme",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = za_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.zarray, ARMCPU, ARM_MAX_VQ * 16, 0,
                             vmstate_vreg, ARMVectorReg),
        VMSTATE_END_OF_LIST()
    }
};
205 #endif /* AARCH64 */
206 
207 static bool serror_needed(void *opaque)
208 {
209     ARMCPU *cpu = opaque;
210     CPUARMState *env = &cpu->env;
211 
212     return env->serror.pending != 0;
213 }
214 
/* Pending SError exception state (subsection "cpu/serror"). */
static const VMStateDescription vmstate_serror = {
    .name = "cpu/serror",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = serror_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(env.serror.pending, ARMCPU),
        VMSTATE_UINT8(env.serror.has_esr, ARMCPU),
        VMSTATE_UINT64(env.serror.esr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
227 
228 static bool irq_line_state_needed(void *opaque)
229 {
230     return true;
231 }
232 
/* Cached external interrupt line levels (subsection "cpu/irq-line-state"). */
static const VMStateDescription vmstate_irq_line_state = {
    .name = "cpu/irq-line-state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = irq_line_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.irq_line_state, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
243 
244 static bool m_needed(void *opaque)
245 {
246     ARMCPU *cpu = opaque;
247     CPUARMState *env = &cpu->env;
248 
249     return arm_feature(env, ARM_FEATURE_M);
250 }
251 
/* Non-secure FAULTMASK/PRIMASK, split out of vmstate_m for
 * compatibility with older streams (see get_cpsr()).
 */
static const VMStateDescription vmstate_m_faultmask_primask = {
    .name = "cpu/m/faultmask-primask",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
263 
264 /* CSSELR is in a subsection because we didn't implement it previously.
265  * Migration from an old implementation will leave it at zero, which
266  * is OK since the only CPUs in the old implementation make the
267  * register RAZ/WI.
268  * Since there was no version of QEMU which implemented the CSSELR for
269  * just non-secure, we transfer both banks here rather than putting
270  * the secure banked version in the m-security subsection.
271  */
272 static bool csselr_vmstate_validate(void *opaque, int version_id)
273 {
274     ARMCPU *cpu = opaque;
275 
276     return cpu->env.v7m.csselr[M_REG_NS] <= R_V7M_CSSELR_INDEX_MASK
277         && cpu->env.v7m.csselr[M_REG_S] <= R_V7M_CSSELR_INDEX_MASK;
278 }
279 
/* Skip the CSSELR subsection when the register is RAZ/WI on this core. */
static bool m_csselr_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return !arm_v7m_csselr_razwi(cpu);
}
286 
/* Both CSSELR banks, followed by a validity check on the loaded values. */
static const VMStateDescription vmstate_m_csselr = {
    .name = "cpu/m/csselr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_csselr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.csselr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_VALIDATE("CSSELR is valid", csselr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};
298 
/* Non-secure SCR (the secure bank travels in vmstate_m_security). */
static const VMStateDescription vmstate_m_scr = {
    .name = "cpu/m/scr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.scr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
309 
/* The inactive stack pointer (MSP or PSP, whichever is not current). */
static const VMStateDescription vmstate_m_other_sp = {
    .name = "cpu/m/other-sp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.other_sp, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
320 
321 static bool m_v8m_needed(void *opaque)
322 {
323     ARMCPU *cpu = opaque;
324     CPUARMState *env = &cpu->env;
325 
326     return arm_feature(env, ARM_FEATURE_M) && arm_feature(env, ARM_FEATURE_V8);
327 }
328 
/* v8M stack-limit registers, both security banks. */
static const VMStateDescription vmstate_m_v8m = {
    .name = "cpu/m/v8m",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_v8m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.msplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.psplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_END_OF_LIST()
    }
};
340 
/* M-profile floating-point control registers; reuses vfp_needed(). */
static const VMStateDescription vmstate_m_fp = {
    .name = "cpu/m/fp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vfp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.fpcar, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpccr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpdscr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.cpacr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32(env.v7m.nsacr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
355 
356 static bool mve_needed(void *opaque)
357 {
358     ARMCPU *cpu = opaque;
359 
360     return cpu_isar_feature(aa32_mve, cpu);
361 }
362 
/* MVE predication state: VPR and the LTPSIZE field. */
static const VMStateDescription vmstate_m_mve = {
    .name = "cpu/m/mve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mve_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vpr, ARMCPU),
        VMSTATE_UINT32(env.v7m.ltpsize, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
};
374 
/*
 * Core M-profile system state (subsection "cpu/m", version 4).
 * Only the non-secure banks live here; secure banks are in
 * vmstate_m_security, and later additions in their own subsections.
 */
static const VMStateDescription vmstate_m = {
    .name = "cpu/m",
    .version_id = 4,
    .minimum_version_id = 4,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.hfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.dfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.bfar, ARMCPU),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_NS], ARMCPU),
        VMSTATE_INT32(env.v7m.exception, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_m_faultmask_primask,
        &vmstate_m_csselr,
        &vmstate_m_scr,
        &vmstate_m_other_sp,
        &vmstate_m_v8m,
        &vmstate_m_fp,
        &vmstate_m_mve,
        NULL
    }
};
405 
406 static bool thumb2ee_needed(void *opaque)
407 {
408     ARMCPU *cpu = opaque;
409     CPUARMState *env = &cpu->env;
410 
411     return arm_feature(env, ARM_FEATURE_THUMB2EE);
412 }
413 
/* ThumbEE control and handler-base registers. */
static const VMStateDescription vmstate_thumb2ee = {
    .name = "cpu/thumb2ee",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = thumb2ee_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.teecr, ARMCPU),
        VMSTATE_UINT32(env.teehbr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
425 
426 static bool pmsav7_needed(void *opaque)
427 {
428     ARMCPU *cpu = opaque;
429     CPUARMState *env = &cpu->env;
430 
431     return arm_feature(env, ARM_FEATURE_PMSA) &&
432            arm_feature(env, ARM_FEATURE_V7) &&
433            !arm_feature(env, ARM_FEATURE_V8);
434 }
435 
436 static bool pmsav7_rgnr_vmstate_validate(void *opaque, int version_id)
437 {
438     ARMCPU *cpu = opaque;
439 
440     return cpu->env.pmsav7.rnr[M_REG_NS] < cpu->pmsav7_dregion;
441 }
442 
/* v7 PMSA region registers, pmsav7_dregion entries each; the region
 * number itself arrives first via vmstate_pmsav7_rnr (see the ordering
 * comment in vmstate_arm_cpu's subsection list).
 */
static const VMStateDescription vmstate_pmsav7 = {
    .name = "cpu/pmsav7",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav7.drbar, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.drsr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.dracr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VALIDATE("rgnr is valid", pmsav7_rgnr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};
459 
460 static bool pmsav7_rnr_needed(void *opaque)
461 {
462     ARMCPU *cpu = opaque;
463     CPUARMState *env = &cpu->env;
464 
465     /* For R profile cores pmsav7.rnr is migrated via the cpreg
466      * "RGNR" definition in helper.h. For M profile we have to
467      * migrate it separately.
468      */
469     return arm_feature(env, ARM_FEATURE_M);
470 }
471 
/* Non-secure MPU region number; must precede vmstate_pmsav7 so the
 * VMSTATE_VALIDATE there can check it against pmsav7_dregion.
 */
static const VMStateDescription vmstate_pmsav7_rnr = {
    .name = "cpu/pmsav7-rnr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_rnr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
482 
483 static bool pmsav8_needed(void *opaque)
484 {
485     ARMCPU *cpu = opaque;
486     CPUARMState *env = &cpu->env;
487 
488     return arm_feature(env, ARM_FEATURE_PMSA) &&
489         arm_feature(env, ARM_FEATURE_V8);
490 }
491 
492 static bool pmsav8r_needed(void *opaque)
493 {
494     ARMCPU *cpu = opaque;
495     CPUARMState *env = &cpu->env;
496 
497     return arm_feature(env, ARM_FEATURE_PMSA) &&
498         arm_feature(env, ARM_FEATURE_V8) &&
499         !arm_feature(env, ARM_FEATURE_M);
500 }
501 
/* v8R hypervisor MPU region registers (pmsav8r_hdregion entries each). */
static const VMStateDescription vmstate_pmsav8r = {
    .name = "cpu/pmsav8/pmsav8r",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav8r_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav8.hprbar, ARMCPU,
                        pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.hprlar, ARMCPU,
                        pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t),
        VMSTATE_END_OF_LIST()
    },
};
515 
/* v8 PMSA non-secure MPU state; the v8R-only registers are a
 * nested subsection.
 */
static const VMStateDescription vmstate_pmsav8 = {
    .name = "cpu/pmsav8",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav8_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_pmsav8r,
        NULL
    }
};
535 
536 static bool s_rnr_vmstate_validate(void *opaque, int version_id)
537 {
538     ARMCPU *cpu = opaque;
539 
540     return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion;
541 }
542 
543 static bool sau_rnr_vmstate_validate(void *opaque, int version_id)
544 {
545     ARMCPU *cpu = opaque;
546 
547     return cpu->env.sau.rnr < cpu->sau_sregion;
548 }
549 
550 static bool m_security_needed(void *opaque)
551 {
552     ARMCPU *cpu = opaque;
553     CPUARMState *env = &cpu->env;
554 
555     return arm_feature(env, ARM_FEATURE_M_SECURITY);
556 }
557 
/*
 * Secure-bank M-profile state (subsection "cpu/m-security"): the
 * secure copies of the banked registers, the secure MPU, and the SAU.
 * The VMSTATE_VALIDATE entries must follow the fields they check.
 */
static const VMStateDescription vmstate_m_security = {
    .name = "cpu/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_security_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.secure, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_msp, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_psp, ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_S], ARMCPU),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_S], ARMCPU),
        VMSTATE_VALIDATE("secure MPU_RNR is valid", s_rnr_vmstate_validate),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.sfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.sfar, ARMCPU),
        VMSTATE_VARRAY_UINT32(env.sau.rbar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.sau.rlar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.sau.rnr, ARMCPU),
        VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate),
        VMSTATE_UINT32(env.sau.ctrl, ARMCPU),
        VMSTATE_UINT32(env.v7m.scr[M_REG_S], ARMCPU),
        /* AIRCR is not secure-only, but our implementation is R/O if the
         * security extension is unimplemented, so we migrate it here.
         */
        VMSTATE_UINT32(env.v7m.aircr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
601 
/*
 * VMState .get handler for the "cpsr" slot. The single 32-bit value
 * carries XPSR (M profile), PSTATE (AArch64) or CPSR (AArch32),
 * including compatibility conversion of CPSR-format values sent by
 * pre-v8M QEMU for M-profile CPUs. Always returns 0.
 */
static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    if (arm_feature(env, ARM_FEATURE_M)) {
        if (val & XPSR_EXCP) {
            /* This is a CPSR format value from an older QEMU. (We can tell
             * because values transferred in XPSR format always have zero
             * for the EXCP field, and CPSR format will always have bit 4
             * set in CPSR_M.) Rearrange it into XPSR format. The significant
             * differences are that the T bit is not in the same place, the
             * primask/faultmask info may be in the CPSR I and F bits, and
             * we do not want the mode bits.
             * We know that this cleanup happened before v8M, so there
             * is no complication with banked primask/faultmask.
             */
            uint32_t newval = val;

            assert(!arm_feature(env, ARM_FEATURE_M_SECURITY));

            newval &= (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE);
            if (val & CPSR_T) {
                newval |= XPSR_T;
            }
            /* If the I or F bits are set then this is a migration from
             * an old QEMU which still stored the M profile FAULTMASK
             * and PRIMASK in env->daif. For a new QEMU, the data is
             * transferred using the vmstate_m_faultmask_primask subsection.
             */
            if (val & CPSR_F) {
                env->v7m.faultmask[M_REG_NS] = 1;
            }
            if (val & CPSR_I) {
                env->v7m.primask[M_REG_NS] = 1;
            }
            val = newval;
        }
        /* Ignore the low bits, they are handled by vmstate_m. */
        xpsr_write(env, val, ~XPSR_EXCP);
        return 0;
    }

    /* PSTATE.nRW clear means the value is AArch64 PSTATE format. */
    env->aarch64 = ((val & PSTATE_nRW) == 0);

    if (is_a64(env)) {
        pstate_write(env, val);
        return 0;
    }

    cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    return 0;
}
657 
658 static int put_cpsr(QEMUFile *f, void *opaque, size_t size,
659                     const VMStateField *field, JSONWriter *vmdesc)
660 {
661     ARMCPU *cpu = opaque;
662     CPUARMState *env = &cpu->env;
663     uint32_t val;
664 
665     if (arm_feature(env, ARM_FEATURE_M)) {
666         /* The low 9 bits are v7m.exception, which is handled by vmstate_m. */
667         val = xpsr_read(env) & ~XPSR_EXCP;
668     } else if (is_a64(env)) {
669         val = pstate_read(env);
670     } else {
671         val = cpsr_read(env);
672     }
673 
674     qemu_put_be32(f, val);
675     return 0;
676 }
677 
/* Custom field info so CPSR/PSTATE/XPSR share one 32-bit wire slot. */
static const VMStateInfo vmstate_cpsr = {
    .name = "cpsr",
    .get = get_cpsr,
    .put = put_cpsr,
};
683 
684 static int get_power(QEMUFile *f, void *opaque, size_t size,
685                     const VMStateField *field)
686 {
687     ARMCPU *cpu = opaque;
688     bool powered_off = qemu_get_byte(f);
689     cpu->power_state = powered_off ? PSCI_OFF : PSCI_ON;
690     return 0;
691 }
692 
693 static int put_power(QEMUFile *f, void *opaque, size_t size,
694                     const VMStateField *field, JSONWriter *vmdesc)
695 {
696     ARMCPU *cpu = opaque;
697 
698     /* Migration should never happen while we transition power states */
699 
700     if (cpu->power_state == PSCI_ON ||
701         cpu->power_state == PSCI_OFF) {
702         bool powered_off = (cpu->power_state == PSCI_OFF) ? true : false;
703         qemu_put_byte(f, powered_off);
704         return 0;
705     } else {
706         return 1;
707     }
708 }
709 
/* Custom field info mapping power_state to a one-byte wire format. */
static const VMStateInfo vmstate_powered_off = {
    .name = "powered_off",
    .get = get_power,
    .put = put_power,
};
715 
716 static int cpu_pre_save(void *opaque)
717 {
718     ARMCPU *cpu = opaque;
719 
720     if (!kvm_enabled()) {
721         pmu_op_start(&cpu->env);
722     }
723 
724     if (kvm_enabled()) {
725         if (!write_kvmstate_to_list(cpu)) {
726             /* This should never fail */
727             g_assert_not_reached();
728         }
729 
730         /*
731          * kvm_arm_cpu_pre_save() must be called after
732          * write_kvmstate_to_list()
733          */
734         kvm_arm_cpu_pre_save(cpu);
735     } else {
736         if (!write_cpustate_to_list(cpu, false)) {
737             /* This should never fail. */
738             g_assert_not_reached();
739         }
740     }
741 
742     cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
743     memcpy(cpu->cpreg_vmstate_indexes, cpu->cpreg_indexes,
744            cpu->cpreg_array_len * sizeof(uint64_t));
745     memcpy(cpu->cpreg_vmstate_values, cpu->cpreg_values,
746            cpu->cpreg_array_len * sizeof(uint64_t));
747 
748     return 0;
749 }
750 
751 static int cpu_post_save(void *opaque)
752 {
753     ARMCPU *cpu = opaque;
754 
755     if (!kvm_enabled()) {
756         pmu_op_finish(&cpu->env);
757     }
758 
759     return 0;
760 }
761 
762 static int cpu_pre_load(void *opaque)
763 {
764     ARMCPU *cpu = opaque;
765     CPUARMState *env = &cpu->env;
766 
767     /*
768      * Pre-initialize irq_line_state to a value that's never valid as
769      * real data, so cpu_post_load() can tell whether we've seen the
770      * irq-line-state subsection in the incoming migration state.
771      */
772     env->irq_line_state = UINT32_MAX;
773 
774     if (!kvm_enabled()) {
775         pmu_op_start(&cpu->env);
776     }
777 
778     return 0;
779 }
780 
/*
 * Finish loading incoming migration state: reconcile the cpreg list,
 * push it into KVM or the TCG CPU state, and sanity-check values the
 * rest of QEMU relies on. Returns 0 on success, -1 to fail the
 * migration.
 */
static int cpu_post_load(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    int i, v;

    /*
     * Handle migration compatibility from old QEMU which didn't
     * send the irq-line-state subsection. A QEMU without it did not
     * implement the HCR_EL2.{VI,VF} bits as generating interrupts,
     * so for TCG the line state matches the bits set in cs->interrupt_request.
     * For KVM the line state is not stored in cs->interrupt_request
     * and so this will leave irq_line_state as 0, but this is OK because
     * we only need to care about it for TCG.
     */
    if (env->irq_line_state == UINT32_MAX) {
        CPUState *cs = CPU(cpu);

        env->irq_line_state = cs->interrupt_request &
            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ |
             CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VFIQ);
    }

    /* Update the values list from the incoming migration data.
     * Anything in the incoming data which we don't know about is
     * a migration failure; anything we know about but the incoming
     * data doesn't specify retains its current (reset) value.
     * The indexes list remains untouched -- we only inspect the
     * incoming migration index list so we can match the values array
     * entries with the right slots in our own values array.
     */

    /* Both index lists are sorted, so this is a linear merge. */
    for (i = 0, v = 0; i < cpu->cpreg_array_len
             && v < cpu->cpreg_vmstate_array_len; i++) {
        if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) {
            /* register in our list but not incoming : skip it */
            continue;
        }
        if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) {
            /* register in their list but not ours: fail migration */
            return -1;
        }
        /* matching register, copy the value over */
        cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v];
        v++;
    }

    if (kvm_enabled()) {
        if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
            return -1;
        }
        /* Note that it's OK for the TCG side not to know about
         * every register in the list; KVM is authoritative if
         * we're using it.
         */
        write_list_to_cpustate(cpu);
        kvm_arm_cpu_post_load(cpu);
    } else {
        if (!write_list_to_cpustate(cpu)) {
            return -1;
        }
    }

    /*
     * Misaligned thumb pc is architecturally impossible. Fail the
     * incoming migration. For TCG it would trigger the assert in
     * thumb_tr_translate_insn().
     */
    if (!is_a64(env) && env->thumb && (env->regs[15] & 1)) {
        return -1;
    }

    if (tcg_enabled()) {
        hw_breakpoint_update_all(cpu);
        hw_watchpoint_update_all(cpu);
    }

    /*
     * TCG gen_update_fp_context() relies on the invariant that
     * FPDSCR.LTPSIZE is constant 4 for M-profile with the LOB extension;
     * forbid bogus incoming data with some other value.
     */
    if (arm_feature(env, ARM_FEATURE_M) && cpu_isar_feature(aa32_lob, cpu)) {
        if (extract32(env->v7m.fpdscr[M_REG_NS],
                      FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4 ||
            extract32(env->v7m.fpdscr[M_REG_S],
                      FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4) {
            return -1;
        }
    }

    /* Matches the pmu_op_start() in cpu_pre_load() (TCG only). */
    if (!kvm_enabled()) {
        pmu_op_finish(&cpu->env);
    }

    if (tcg_enabled()) {
        arm_rebuild_hflags(&cpu->env);
    }

    return 0;
}
882 
/*
 * Top-level ARM CPU migration state (version 22). The mandatory
 * fields carry the core registers and coprocessor register list;
 * everything optional or feature-dependent lives in the subsections.
 * Field order is the wire format and must not change.
 */
const VMStateDescription vmstate_arm_cpu = {
    .name = "cpu",
    .version_id = 22,
    .minimum_version_id = 22,
    .pre_save = cpu_pre_save,
    .post_save = cpu_post_save,
    .pre_load = cpu_pre_load,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
        VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
        VMSTATE_UINT64(env.pc, ARMCPU),
        /* CPSR/PSTATE/XPSR via the custom accessor pair. */
        {
            .name = "cpsr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_cpsr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_UINT32(env.spsr, ARMCPU),
        VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
        VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
        VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
        /* The length-check must come before the arrays to avoid
         * incoming data possibly overflowing the array.
         */
        VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len, ARMCPU),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
        VMSTATE_UINT64(env.exclusive_val, ARMCPU),
        VMSTATE_UINT64(env.exclusive_high, ARMCPU),
        /* Placeholder keeping the wire format of a removed field. */
        VMSTATE_UNUSED(sizeof(uint64_t)),
        VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
        VMSTATE_UINT32(env.exception.fsr, ARMCPU),
        VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
        /* PSCI power state via the custom accessor pair. */
        {
            .name = "power_state",
            .version_id = 0,
            .size = sizeof(bool),
            .info = &vmstate_powered_off,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_vfp,
        &vmstate_iwmmxt,
        &vmstate_m,
        &vmstate_thumb2ee,
        /* pmsav7_rnr must come before pmsav7 so that we have the
         * region number before we test it in the VMSTATE_VALIDATE
         * in vmstate_pmsav7.
         */
        &vmstate_pmsav7_rnr,
        &vmstate_pmsav7,
        &vmstate_pmsav8,
        &vmstate_m_security,
#ifdef TARGET_AARCH64
        &vmstate_sve,
        &vmstate_za,
#endif
        &vmstate_serror,
        &vmstate_irq_line_state,
        NULL
    }
};
962