/* xref: /openbmc/qemu/target/arm/machine.c (revision e6203636) */
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm_arm.h"
#include "internals.h"
#include "cpu-features.h"
#include "migration/cpu.h"
#include "target/arm/gtimer.h"

static bool vfp_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
            ? cpu_isar_feature(aa64_fp_simd, cpu)
            : cpu_isar_feature(aa32_vfp_simd, cpu));
}

static int get_fpscr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    vfp_set_fpscr(env, val);
    return 0;
}

static int put_fpscr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    qemu_put_be32(f, vfp_get_fpscr(env));
    return 0;
}

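/* FPSCR needs a custom VMStateInfo because its value is assembled from
 * several pieces of CPU state by vfp_get_fpscr() and scattered back out
 * by vfp_set_fpscr(); there is no single struct field to copy.
 */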
static const VMStateInfo vmstate_fpscr = {
    .name = "fpscr",
    .get = get_fpscr,
    .put = put_fpscr,
};

static const VMStateDescription vmstate_vfp = {
    .name = "cpu/vfp",
    .version_id = 3,
    .minimum_version_id = 3,
    .needed = vfp_needed,
    .fields = (const VMStateField[]) {
        /* For compatibility, store Qn out of Zn here.  */
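        /* Only d[0] and d[1] of each Zn (the 128-bit Qn/Vn view) are stored
         * in this subsection; the SVE high bits are migrated by "cpu/sve".
         */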
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[0].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[1].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[2].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[3].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[4].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[5].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[6].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[7].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[8].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[9].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[10].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[11].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[12].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[13].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[14].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[15].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[16].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[17].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[18].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[19].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[20].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[21].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[22].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[23].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[24].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[25].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[26].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[27].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[28].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[29].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[30].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[31].d, ARMCPU, 0, 2),

        /* The xregs array is a little awkward because element 1 (FPSCR)
         * requires a specific accessor, so we have to split it up in
         * the vmstate:
         */
        VMSTATE_UINT32(env.vfp.xregs[0], ARMCPU),
        VMSTATE_UINT32_SUB_ARRAY(env.vfp.xregs, ARMCPU, 2, 14),
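        /* With .offset = 0 the custom accessors are handed the ARMCPU
         * itself rather than a field inside it; get_fpscr()/put_fpscr()
         * then go through the architectural FPSCR accessors.
         */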
        {
            .name = "fpscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_fpscr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

static bool iwmmxt_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_IWMMXT);
}

static const VMStateDescription vmstate_iwmmxt = {
    .name = "cpu/iwmmxt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = iwmmxt_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16),
        VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef TARGET_AARCH64
/* The expression ARM_MAX_VQ - 2 is 0 for a pure AArch32 build, and
 * ARMPredicateReg is an empty struct there.  This triggers errors
 * in the expansion of the VMSTATE macros.
 */

static bool sve_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return cpu_isar_feature(aa64_sve, cpu);
}

/* The first two 64-bit words of each Zreg are stored in VFP state.  */
static const VMStateDescription vmstate_zreg_hi_reg = {
    .name = "cpu/sve/zreg_hi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_SUB_ARRAY(d, ARMVectorReg, 2, ARM_MAX_VQ - 2),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_preg_reg = {
    .name = "cpu/sve/preg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(p, ARMPredicateReg, 2 * ARM_MAX_VQ / 8),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_sve = {
    .name = "cpu/sve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sve_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.vfp.zregs, ARMCPU, 32, 0,
                             vmstate_zreg_hi_reg, ARMVectorReg),
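        /* 17 = the 16 architectural predicate registers plus the FFR,
         * which is kept as the final element of env.vfp.pregs.
         */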
        VMSTATE_STRUCT_ARRAY(env.vfp.pregs, ARMCPU, 17, 0,
                             vmstate_preg_reg, ARMPredicateReg),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_vreg = {
    .name = "vreg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(d, ARMVectorReg, ARM_MAX_VQ * 2),
        VMSTATE_END_OF_LIST()
    }
};

static bool za_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * When ZA storage is disabled, its contents are discarded.
     * It will be zeroed when ZA storage is re-enabled.
     */
    return FIELD_EX64(cpu->env.svcr, SVCR, ZA);
}

static const VMStateDescription vmstate_za = {
    .name = "cpu/sme",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = za_needed,
    .fields = (const VMStateField[]) {
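        /* env.zarray always has storage for the maximum SVL (ARM_MAX_VQ * 16
         * rows of one vector register each), so the whole array is migrated
         * regardless of the currently implemented vector length.
         */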
        VMSTATE_STRUCT_ARRAY(env.zarray, ARMCPU, ARM_MAX_VQ * 16, 0,
                             vmstate_vreg, ARMVectorReg),
        VMSTATE_END_OF_LIST()
    }
};
#endif /* AARCH64 */

static bool serror_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return env->serror.pending != 0;
}

static const VMStateDescription vmstate_serror = {
    .name = "cpu/serror",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = serror_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(env.serror.pending, ARMCPU),
        VMSTATE_UINT8(env.serror.has_esr, ARMCPU),
        VMSTATE_UINT64(env.serror.esr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

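/* Always send this subsection; cpu_pre_load() and cpu_post_load() use its
 * absence to detect an incoming stream from an older QEMU that predates it.
 */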
static bool irq_line_state_needed(void *opaque)
{
    return true;
}

static const VMStateDescription vmstate_irq_line_state = {
    .name = "cpu/irq-line-state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = irq_line_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.irq_line_state, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool m_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_m_faultmask_primask = {
    .name = "cpu/m/faultmask-primask",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

/* CSSELR is in a subsection because we didn't implement it previously.
 * Migration from an old implementation will leave it at zero, which
 * is OK since the only CPUs in the old implementation make the
 * register RAZ/WI.
 * Since there was no version of QEMU which implemented the CSSELR for
 * just non-secure, we transfer both banks here rather than putting
 * the secure banked version in the m-security subsection.
 */
static bool csselr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.v7m.csselr[M_REG_NS] <= R_V7M_CSSELR_INDEX_MASK
        && cpu->env.v7m.csselr[M_REG_S] <= R_V7M_CSSELR_INDEX_MASK;
}

static bool m_csselr_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return !arm_v7m_csselr_razwi(cpu);
}

static const VMStateDescription vmstate_m_csselr = {
    .name = "cpu/m/csselr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_csselr_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.csselr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_VALIDATE("CSSELR is valid", csselr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_scr = {
    .name = "cpu/m/scr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.scr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_other_sp = {
    .name = "cpu/m/other-sp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.other_sp, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool m_v8m_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M) && arm_feature(env, ARM_FEATURE_V8);
}

static const VMStateDescription vmstate_m_v8m = {
    .name = "cpu/m/v8m",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_v8m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.msplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.psplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_fp = {
    .name = "cpu/m/fp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vfp_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.fpcar, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpccr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpdscr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.cpacr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32(env.v7m.nsacr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool mve_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return cpu_isar_feature(aa32_mve, cpu);
}

static const VMStateDescription vmstate_m_mve = {
    .name = "cpu/m/mve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mve_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vpr, ARMCPU),
        VMSTATE_UINT32(env.v7m.ltpsize, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_m = {
    .name = "cpu/m",
    .version_id = 4,
    .minimum_version_id = 4,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.hfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.dfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.bfar, ARMCPU),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_NS], ARMCPU),
        VMSTATE_INT32(env.v7m.exception, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_m_faultmask_primask,
        &vmstate_m_csselr,
        &vmstate_m_scr,
        &vmstate_m_other_sp,
        &vmstate_m_v8m,
        &vmstate_m_fp,
        &vmstate_m_mve,
        NULL
    }
};

static bool thumb2ee_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_THUMB2EE);
}

static const VMStateDescription vmstate_thumb2ee = {
    .name = "cpu/thumb2ee",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = thumb2ee_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.teecr, ARMCPU),
        VMSTATE_UINT32(env.teehbr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav7_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
           arm_feature(env, ARM_FEATURE_V7) &&
           !arm_feature(env, ARM_FEATURE_V8);
}

static bool pmsav7_rgnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.pmsav7.rnr[M_REG_NS] < cpu->pmsav7_dregion;
}

static const VMStateDescription vmstate_pmsav7 = {
    .name = "cpu/pmsav7",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav7.drbar, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.drsr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.dracr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VALIDATE("rgnr is valid", pmsav7_rgnr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav7_rnr_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    /* For R profile cores pmsav7.rnr is migrated via the cpreg
     * "RGNR" definition in helper.c. For M profile we have to
     * migrate it separately.
     */
    return arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_pmsav7_rnr = {
    .name = "cpu/pmsav7-rnr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_rnr_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav8_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8);
}

static bool pmsav8r_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8) &&
        !arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_pmsav8r = {
    .name = "cpu/pmsav8/pmsav8r",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav8r_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav8.hprbar, ARMCPU,
                        pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.hprlar, ARMCPU,
                        pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_pmsav8 = {
    .name = "cpu/pmsav8",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav8_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_pmsav8r,
        NULL
    }
};

static bool s_rnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion;
}

static bool sau_rnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.sau.rnr < cpu->sau_sregion;
}

static bool m_security_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M_SECURITY);
}

static const VMStateDescription vmstate_m_security = {
    .name = "cpu/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_security_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.secure, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_msp, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_psp, ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_S], ARMCPU),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_S], ARMCPU),
        VMSTATE_VALIDATE("secure MPU_RNR is valid", s_rnr_vmstate_validate),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.sfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.sfar, ARMCPU),
        VMSTATE_VARRAY_UINT32(env.sau.rbar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.sau.rlar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.sau.rnr, ARMCPU),
        VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate),
        VMSTATE_UINT32(env.sau.ctrl, ARMCPU),
        VMSTATE_UINT32(env.v7m.scr[M_REG_S], ARMCPU),
        /* AIRCR is not secure-only, but our implementation is R/O if the
         * security extension is unimplemented, so we migrate it here.
         */
        VMSTATE_UINT32(env.v7m.aircr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    if (arm_feature(env, ARM_FEATURE_M)) {
        if (val & XPSR_EXCP) {
            /* This is a CPSR format value from an older QEMU. (We can tell
             * because values transferred in XPSR format always have zero
             * for the EXCP field, and CPSR format will always have bit 4
             * set in CPSR_M.) Rearrange it into XPSR format. The significant
             * differences are that the T bit is not in the same place, the
             * primask/faultmask info may be in the CPSR I and F bits, and
             * we do not want the mode bits.
             * We know that this cleanup happened before v8M, so there
             * is no complication with banked primask/faultmask.
             */
            uint32_t newval = val;

            assert(!arm_feature(env, ARM_FEATURE_M_SECURITY));

            newval &= (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE);
            if (val & CPSR_T) {
                newval |= XPSR_T;
            }
            /* If the I or F bits are set then this is a migration from
             * an old QEMU which still stored the M profile FAULTMASK
             * and PRIMASK in env->daif. For a new QEMU, the data is
             * transferred using the vmstate_m_faultmask_primask subsection.
             */
            if (val & CPSR_F) {
                env->v7m.faultmask[M_REG_NS] = 1;
            }
            if (val & CPSR_I) {
                env->v7m.primask[M_REG_NS] = 1;
            }
            val = newval;
        }
        /* Ignore the low bits, they are handled by vmstate_m. */
        xpsr_write(env, val, ~XPSR_EXCP);
        return 0;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);

    if (is_a64(env)) {
        pstate_write(env, val);
        return 0;
    }

    cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    return 0;
}

static int put_cpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* The low 9 bits are v7m.exception, which is handled by vmstate_m. */
        val = xpsr_read(env) & ~XPSR_EXCP;
    } else if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }

    qemu_put_be32(f, val);
    return 0;
}

static const VMStateInfo vmstate_cpsr = {
    .name = "cpsr",
    .get = get_cpsr,
    .put = put_cpsr,
};

static int get_power(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    bool powered_off = qemu_get_byte(f);
    cpu->power_state = powered_off ? PSCI_OFF : PSCI_ON;
    return 0;
}

static int put_power(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;

    /* Migration should never happen while we transition power states */

    if (cpu->power_state == PSCI_ON ||
        cpu->power_state == PSCI_OFF) {
        bool powered_off = (cpu->power_state == PSCI_OFF);
        qemu_put_byte(f, powered_off);
        return 0;
    } else {
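        /* A nonzero return from a VMStateInfo .put hook fails the save,
         * which is what we want while mid power-state transition.
         */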
        return 1;
    }
}

static const VMStateInfo vmstate_powered_off = {
    .name = "powered_off",
    .get = get_power,
    .put = put_power,
};

static int cpu_pre_save(void *opaque)
{
    ARMCPU *cpu = opaque;

    if (!kvm_enabled()) {
        pmu_op_start(&cpu->env);
    }

    if (kvm_enabled()) {
        if (!write_kvmstate_to_list(cpu)) {
            /* This should never fail */
            g_assert_not_reached();
        }

        /*
         * kvm_arm_cpu_pre_save() must be called after
         * write_kvmstate_to_list()
         */
        kvm_arm_cpu_pre_save(cpu);
    } else {
        if (!write_cpustate_to_list(cpu, false)) {
            /* This should never fail. */
            g_assert_not_reached();
        }
    }

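    /* Snapshot the coprocessor register list into the cpreg_vmstate_*
     * arrays; those copies are what vmstate_arm_cpu actually sends.
     */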
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    memcpy(cpu->cpreg_vmstate_indexes, cpu->cpreg_indexes,
           cpu->cpreg_array_len * sizeof(uint64_t));
    memcpy(cpu->cpreg_vmstate_values, cpu->cpreg_values,
           cpu->cpreg_array_len * sizeof(uint64_t));

    return 0;
}

static int cpu_post_save(void *opaque)
{
    ARMCPU *cpu = opaque;

    if (!kvm_enabled()) {
        pmu_op_finish(&cpu->env);
    }

    return 0;
}

static int cpu_pre_load(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    /*
     * Pre-initialize irq_line_state to a value that's never valid as
     * real data, so cpu_post_load() can tell whether we've seen the
     * irq-line-state subsection in the incoming migration state.
     */
    env->irq_line_state = UINT32_MAX;

    if (!kvm_enabled()) {
        pmu_op_start(env);
    }

    return 0;
}

static int cpu_post_load(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    int i, v;

    /*
     * Handle migration compatibility from old QEMU which didn't
     * send the irq-line-state subsection. A QEMU without it did not
     * implement the HCR_EL2.{VI,VF} bits as generating interrupts,
     * so for TCG the line state matches the bits set in cs->interrupt_request.
     * For KVM the line state is not stored in cs->interrupt_request
     * and so this will leave irq_line_state as 0, but this is OK because
     * we only need to care about it for TCG.
     */
    if (env->irq_line_state == UINT32_MAX) {
        CPUState *cs = CPU(cpu);

        env->irq_line_state = cs->interrupt_request &
            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ |
             CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VFIQ);
    }

    /* Update the values list from the incoming migration data.
     * Anything in the incoming data which we don't know about is
     * a migration failure; anything we know about but the incoming
     * data doesn't specify retains its current (reset) value.
     * The indexes list remains untouched -- we only inspect the
     * incoming migration index list so we can match the values array
     * entries with the right slots in our own values array.
     */

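    /* Both index lists are sorted in ascending order, so one linear pass
     * is enough to line up incoming values with our own register slots.
     */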
    for (i = 0, v = 0; i < cpu->cpreg_array_len
             && v < cpu->cpreg_vmstate_array_len; i++) {
        if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) {
            /* register in our list but not incoming: skip it */
            continue;
        }
        if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) {
            /* register in their list but not ours: fail migration */
            return -1;
        }
        /* matching register, copy the value over */
        cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v];
        v++;
    }

    if (kvm_enabled()) {
        if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
            return -1;
        }
        /* Note that it's OK for the TCG side not to know about
         * every register in the list; KVM is authoritative if
         * we're using it.
         */
        write_list_to_cpustate(cpu);
        kvm_arm_cpu_post_load(cpu);
    } else {
        if (!write_list_to_cpustate(cpu)) {
            return -1;
        }
    }

    /*
     * Misaligned thumb pc is architecturally impossible. Fail the
     * incoming migration. For TCG it would trigger the assert in
     * thumb_tr_translate_insn().
     */
    if (!is_a64(env) && env->thumb && (env->regs[15] & 1)) {
        return -1;
    }

    if (tcg_enabled()) {
        hw_breakpoint_update_all(cpu);
        hw_watchpoint_update_all(cpu);
    }

    /*
     * TCG gen_update_fp_context() relies on the invariant that
     * FPDSCR.LTPSIZE is constant 4 for M-profile with the LOB extension;
     * forbid bogus incoming data with some other value.
     */
    if (arm_feature(env, ARM_FEATURE_M) && cpu_isar_feature(aa32_lob, cpu)) {
        if (extract32(env->v7m.fpdscr[M_REG_NS],
                      FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4 ||
            extract32(env->v7m.fpdscr[M_REG_S],
                      FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4) {
            return -1;
        }
    }

    if (!kvm_enabled()) {
        pmu_op_finish(env);
    }

    if (tcg_enabled()) {
        arm_rebuild_hflags(env);
    }

    return 0;
}

const VMStateDescription vmstate_arm_cpu = {
    .name = "cpu",
    .version_id = 22,
    .minimum_version_id = 22,
    .pre_save = cpu_pre_save,
    .post_save = cpu_post_save,
    .pre_load = cpu_pre_load,
    .post_load = cpu_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
        VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
        VMSTATE_UINT64(env.pc, ARMCPU),
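        /* Stored as XPSR for M-profile, PSTATE for AArch64 and CPSR
         * otherwise; see get_cpsr()/put_cpsr() for the format handling.
         */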
        {
            .name = "cpsr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_cpsr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_UINT32(env.spsr, ARMCPU),
        VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
        VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
        VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
        /* The length-check must come before the arrays to avoid
         * incoming data possibly overflowing the array.
         */
        VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len, ARMCPU),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
        VMSTATE_UINT64(env.exclusive_val, ARMCPU),
        VMSTATE_UINT64(env.exclusive_high, ARMCPU),
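        /* Skips eight bytes that a field used to occupy, so the stream
         * layout stays compatible with versions that sent it.
         */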
        VMSTATE_UNUSED(sizeof(uint64_t)),
        VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
        VMSTATE_UINT32(env.exception.fsr, ARMCPU),
        VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
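        /* Migrated as a single "powered off" byte for compatibility;
         * get_power()/put_power() translate to and from cpu->power_state.
         */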
        {
            .name = "power_state",
            .version_id = 0,
            .size = sizeof(bool),
            .info = &vmstate_powered_off,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_vfp,
        &vmstate_iwmmxt,
        &vmstate_m,
        &vmstate_thumb2ee,
        /* pmsav7_rnr must come before pmsav7 so that we have the
         * region number before we test it in the VMSTATE_VALIDATE
         * in vmstate_pmsav7.
         */
        &vmstate_pmsav7_rnr,
        &vmstate_pmsav7,
        &vmstate_pmsav8,
        &vmstate_m_security,
#ifdef TARGET_AARCH64
        &vmstate_sve,
        &vmstate_za,
#endif
        &vmstate_serror,
        &vmstate_irq_line_state,
        NULL
    }
};
963