/* target/arm/machine.c */
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm_arm.h"
#include "internals.h"
#include "cpu-features.h"
#include "migration/cpu.h"
#include "target/arm/gtimer.h"

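/* Only migrate the cpu/vfp subsection if the CPU has VFP or SIMD registers. */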
static bool vfp_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
            ? cpu_isar_feature(aa64_fp_simd, cpu)
            : cpu_isar_feature(aa32_vfp_simd, cpu));
}

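/*
 * FPSCR is not kept as a single field in CPUARMState, so it needs
 * custom get/put accessors that go through vfp_get_fpscr() and
 * vfp_set_fpscr() rather than a plain VMSTATE_UINT32.
 */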
static int get_fpscr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    vfp_set_fpscr(env, val);
    return 0;
}

static int put_fpscr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    qemu_put_be32(f, vfp_get_fpscr(env));
    return 0;
}

static const VMStateInfo vmstate_fpscr = {
    .name = "fpscr",
    .get = get_fpscr,
    .put = put_fpscr,
};

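/*
 * VFP/Neon register state.  The low 128 bits of each Zn register are the
 * Qn register, so only the first two 64-bit elements of each zreg are
 * stored here; the upper SVE parts, if any, travel in the cpu/sve
 * subsection below.
 */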
static const VMStateDescription vmstate_vfp = {
    .name = "cpu/vfp",
    .version_id = 3,
    .minimum_version_id = 3,
    .needed = vfp_needed,
    .fields = (const VMStateField[]) {
        /* For compatibility, store Qn out of Zn here.  */
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[0].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[1].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[2].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[3].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[4].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[5].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[6].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[7].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[8].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[9].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[10].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[11].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[12].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[13].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[14].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[15].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[16].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[17].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[18].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[19].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[20].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[21].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[22].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[23].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[24].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[25].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[26].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[27].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[28].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[29].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[30].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[31].d, ARMCPU, 0, 2),

        /* The xregs array is a little awkward because element 1 (FPSCR)
         * requires a specific accessor, so we have to split it up in
         * the vmstate:
         */
        VMSTATE_UINT32(env.vfp.xregs[0], ARMCPU),
        VMSTATE_UINT32_SUB_ARRAY(env.vfp.xregs, ARMCPU, 2, 14),
        {
            .name = "fpscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_fpscr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

static bool iwmmxt_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_IWMMXT);
}

static const VMStateDescription vmstate_iwmmxt = {
    .name = "cpu/iwmmxt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = iwmmxt_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16),
        VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef TARGET_AARCH64
/* The expression ARM_MAX_VQ - 2 is 0 for pure AArch32 build,
 * and ARMPredicateReg is actively empty.  This triggers errors
 * in the expansion of the VMSTATE macros.
 */

static bool sve_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return cpu_isar_feature(aa64_sve, cpu);
}

/* The first two 64-bit elements of each Zreg are stored in the VFP state.  */
static const VMStateDescription vmstate_zreg_hi_reg = {
    .name = "cpu/sve/zreg_hi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_SUB_ARRAY(d, ARMVectorReg, 2, ARM_MAX_VQ - 2),
        VMSTATE_END_OF_LIST()
    }
};

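/*
 * An SVE predicate register holds one bit per byte of vector length,
 * i.e. 2 * ARM_MAX_VQ bytes at the maximum vector length.
 */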
static const VMStateDescription vmstate_preg_reg = {
    .name = "cpu/sve/preg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(p, ARMPredicateReg, 2 * ARM_MAX_VQ / 8),
        VMSTATE_END_OF_LIST()
    }
};

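/* 32 Z registers, plus the 16 P registers and the FFR (stored as pregs[16]). */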
static const VMStateDescription vmstate_sve = {
    .name = "cpu/sve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sve_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.vfp.zregs, ARMCPU, 32, 0,
                             vmstate_zreg_hi_reg, ARMVectorReg),
        VMSTATE_STRUCT_ARRAY(env.vfp.pregs, ARMCPU, 17, 0,
                             vmstate_preg_reg, ARMPredicateReg),
        VMSTATE_END_OF_LIST()
    }
};

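/* A full vector register; used below for each row of the SME ZA array. */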
static const VMStateDescription vmstate_vreg = {
    .name = "vreg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(d, ARMVectorReg, ARM_MAX_VQ * 2),
        VMSTATE_END_OF_LIST()
    }
};

static bool za_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * When ZA storage is disabled, its contents are discarded.
     * It will be zeroed when ZA storage is re-enabled.
     */
    return FIELD_EX64(cpu->env.svcr, SVCR, ZA);
}

static const VMStateDescription vmstate_za = {
    .name = "cpu/sme",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = za_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.zarray, ARMCPU, ARM_MAX_VQ * 16, 0,
                             vmstate_vreg, ARMVectorReg),
        VMSTATE_END_OF_LIST()
    }
};
#endif /* AARCH64 */

static bool serror_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return env->serror.pending != 0;
}

static const VMStateDescription vmstate_serror = {
    .name = "cpu/serror",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = serror_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(env.serror.pending, ARMCPU),
        VMSTATE_UINT8(env.serror.has_esr, ARMCPU),
        VMSTATE_UINT64(env.serror.esr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool irq_line_state_needed(void *opaque)
{
    return true;
}

static const VMStateDescription vmstate_irq_line_state = {
    .name = "cpu/irq-line-state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = irq_line_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.irq_line_state, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool wfxt_timer_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    /* We'll only have the timer object if FEAT_WFxT is implemented */
    return cpu->wfxt_timer;
}

static const VMStateDescription vmstate_wfxt_timer = {
    .name = "cpu/wfxt-timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = wfxt_timer_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_TIMER_PTR(wfxt_timer, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool m_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_m_faultmask_primask = {
    .name = "cpu/m/faultmask-primask",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

/* CSSELR is in a subsection because we didn't implement it previously.
 * Migration from an old implementation will leave it at zero, which
 * is OK since the only CPUs in the old implementation make the
 * register RAZ/WI.
 * Since there was no version of QEMU which implemented the CSSELR for
 * just non-secure, we transfer both banks here rather than putting
 * the secure banked version in the m-security subsection.
 */
static bool csselr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.v7m.csselr[M_REG_NS] <= R_V7M_CSSELR_INDEX_MASK
        && cpu->env.v7m.csselr[M_REG_S] <= R_V7M_CSSELR_INDEX_MASK;
}

static bool m_csselr_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return !arm_v7m_csselr_razwi(cpu);
}

static const VMStateDescription vmstate_m_csselr = {
    .name = "cpu/m/csselr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_csselr_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.csselr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_VALIDATE("CSSELR is valid", csselr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_scr = {
    .name = "cpu/m/scr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.scr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_other_sp = {
    .name = "cpu/m/other-sp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.other_sp, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool m_v8m_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M) && arm_feature(env, ARM_FEATURE_V8);
}

static const VMStateDescription vmstate_m_v8m = {
    .name = "cpu/m/v8m",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_v8m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.msplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.psplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_fp = {
    .name = "cpu/m/fp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vfp_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.fpcar, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpccr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpdscr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.cpacr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32(env.v7m.nsacr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool mve_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return cpu_isar_feature(aa32_mve, cpu);
}

static const VMStateDescription vmstate_m_mve = {
    .name = "cpu/m/mve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mve_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vpr, ARMCPU),
        VMSTATE_UINT32(env.v7m.ltpsize, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_m = {
    .name = "cpu/m",
    .version_id = 4,
    .minimum_version_id = 4,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.hfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.dfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.bfar, ARMCPU),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_NS], ARMCPU),
        VMSTATE_INT32(env.v7m.exception, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_m_faultmask_primask,
        &vmstate_m_csselr,
        &vmstate_m_scr,
        &vmstate_m_other_sp,
        &vmstate_m_v8m,
        &vmstate_m_fp,
        &vmstate_m_mve,
        NULL
    }
};

static bool thumb2ee_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_THUMB2EE);
}

static const VMStateDescription vmstate_thumb2ee = {
    .name = "cpu/thumb2ee",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = thumb2ee_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.teecr, ARMCPU),
        VMSTATE_UINT32(env.teehbr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

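/*
 * PMSAv7 MPU state, for cores that have the v7 MPU but not the v8 one;
 * the v8 PMSA register state is handled by vmstate_pmsav8 below.
 */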
static bool pmsav7_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
           arm_feature(env, ARM_FEATURE_V7) &&
           !arm_feature(env, ARM_FEATURE_V8);
}

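/* Fail migration if the incoming region number is out of range for the
 * number of implemented MPU regions.
 */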
static bool pmsav7_rgnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.pmsav7.rnr[M_REG_NS] < cpu->pmsav7_dregion;
}

static const VMStateDescription vmstate_pmsav7 = {
    .name = "cpu/pmsav7",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav7.drbar, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.drsr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.dracr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VALIDATE("rgnr is valid", pmsav7_rgnr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav7_rnr_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    /* For R profile cores pmsav7.rnr is migrated via the cpreg
     * "RGNR" definition in helper.c. For M profile we have to
     * migrate it separately.
     */
    return arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_pmsav7_rnr = {
    .name = "cpu/pmsav7-rnr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_rnr_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav8_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8);
}

static bool pmsav8r_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8) &&
        !arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_pmsav8r = {
    .name = "cpu/pmsav8/pmsav8r",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav8r_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav8.hprbar, ARMCPU,
                        pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.hprlar, ARMCPU,
                        pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_pmsav8 = {
    .name = "cpu/pmsav8",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav8_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_pmsav8r,
        NULL
    }
};

static bool s_rnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion;
}

static bool sau_rnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.sau.rnr < cpu->sau_sregion;
}

static bool m_security_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M_SECURITY);
}

static const VMStateDescription vmstate_m_security = {
    .name = "cpu/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_security_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.secure, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_msp, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_psp, ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_S], ARMCPU),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_S], ARMCPU),
        VMSTATE_VALIDATE("secure MPU_RNR is valid", s_rnr_vmstate_validate),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.sfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.sfar, ARMCPU),
        VMSTATE_VARRAY_UINT32(env.sau.rbar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.sau.rlar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.sau.rnr, ARMCPU),
        VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate),
        VMSTATE_UINT32(env.sau.ctrl, ARMCPU),
        VMSTATE_UINT32(env.v7m.scr[M_REG_S], ARMCPU),
        /* AIRCR is not secure-only, but our implementation is R/O if the
         * security extension is unimplemented, so we migrate it here.
         */
        VMSTATE_UINT32(env.v7m.aircr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

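/*
 * The "cpsr" field needs custom accessors: the migrated value is the
 * CPSR, PSTATE or XPSR depending on whether the CPU is in AArch32,
 * AArch64 or M-profile state, and old-format M-profile values have to
 * be converted on the incoming side.
 */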
static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    if (arm_feature(env, ARM_FEATURE_M)) {
        if (val & XPSR_EXCP) {
            /* This is a CPSR format value from an older QEMU. (We can tell
             * because values transferred in XPSR format always have zero
             * for the EXCP field, and CPSR format will always have bit 4
             * set in CPSR_M.) Rearrange it into XPSR format. The significant
             * differences are that the T bit is not in the same place, the
             * primask/faultmask info may be in the CPSR I and F bits, and
             * we do not want the mode bits.
             * We know that this cleanup happened before v8M, so there
             * is no complication with banked primask/faultmask.
             */
            uint32_t newval = val;

            assert(!arm_feature(env, ARM_FEATURE_M_SECURITY));

            newval &= (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE);
            if (val & CPSR_T) {
                newval |= XPSR_T;
            }
            /* If the I or F bits are set then this is a migration from
             * an old QEMU which still stored the M profile FAULTMASK
             * and PRIMASK in env->daif. For a new QEMU, the data is
             * transferred using the vmstate_m_faultmask_primask subsection.
             */
            if (val & CPSR_F) {
                env->v7m.faultmask[M_REG_NS] = 1;
            }
            if (val & CPSR_I) {
                env->v7m.primask[M_REG_NS] = 1;
            }
            val = newval;
        }
        /* Ignore the low bits, they are handled by vmstate_m. */
        xpsr_write(env, val, ~XPSR_EXCP);
        return 0;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);

    if (is_a64(env)) {
        pstate_write(env, val);
        return 0;
    }

    cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    return 0;
}

static int put_cpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* The low 9 bits are v7m.exception, which is handled by vmstate_m. */
        val = xpsr_read(env) & ~XPSR_EXCP;
    } else if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }

    qemu_put_be32(f, val);
    return 0;
}

static const VMStateInfo vmstate_cpsr = {
    .name = "cpsr",
    .get = get_cpsr,
    .put = put_cpsr,
};

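/*
 * power_state is an enum internally but is migrated as a single
 * "powered off" byte; an outgoing migration is refused while a power
 * state transition is in progress.
 */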
static int get_power(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    bool powered_off = qemu_get_byte(f);
    cpu->power_state = powered_off ? PSCI_OFF : PSCI_ON;
    return 0;
}

static int put_power(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;

    /* Migration should never happen while we transition power states */

    if (cpu->power_state == PSCI_ON ||
        cpu->power_state == PSCI_OFF) {
        bool powered_off = (cpu->power_state == PSCI_OFF);
        qemu_put_byte(f, powered_off);
        return 0;
    } else {
        return 1;
    }
}

static const VMStateInfo vmstate_powered_off = {
    .name = "powered_off",
    .get = get_power,
    .put = put_power,
};

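/*
 * Sync the coprocessor/system register state into the cpreg_vmstate_*
 * arrays (from KVM or from the TCG CPU state) before the device state
 * is written to the migration stream.
 */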
static int cpu_pre_save(void *opaque)
{
    ARMCPU *cpu = opaque;

    if (!kvm_enabled()) {
        pmu_op_start(&cpu->env);
    }

    if (kvm_enabled()) {
        if (!write_kvmstate_to_list(cpu)) {
            /* This should never fail */
            g_assert_not_reached();
        }

        /*
         * kvm_arm_cpu_pre_save() must be called after
         * write_kvmstate_to_list()
         */
        kvm_arm_cpu_pre_save(cpu);
    } else {
        if (!write_cpustate_to_list(cpu, false)) {
            /* This should never fail. */
            g_assert_not_reached();
        }
    }

    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    memcpy(cpu->cpreg_vmstate_indexes, cpu->cpreg_indexes,
           cpu->cpreg_array_len * sizeof(uint64_t));
    memcpy(cpu->cpreg_vmstate_values, cpu->cpreg_values,
           cpu->cpreg_array_len * sizeof(uint64_t));

    return 0;
}

static int cpu_post_save(void *opaque)
{
    ARMCPU *cpu = opaque;

    if (!kvm_enabled()) {
        pmu_op_finish(&cpu->env);
    }

    return 0;
}

static int cpu_pre_load(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    /*
     * Pre-initialize irq_line_state to a value that's never valid as
     * real data, so cpu_post_load() can tell whether we've seen the
     * irq-line-state subsection in the incoming migration state.
     */
    env->irq_line_state = UINT32_MAX;

    if (!kvm_enabled()) {
        pmu_op_start(env);
    }

    return 0;
}

static int cpu_post_load(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    int i, v;

    /*
     * Handle migration compatibility from old QEMU which didn't
     * send the irq-line-state subsection. A QEMU without it did not
     * implement the HCR_EL2.{VI,VF} bits as generating interrupts,
     * so for TCG the line state matches the bits set in cs->interrupt_request.
     * For KVM the line state is not stored in cs->interrupt_request
     * and so this will leave irq_line_state as 0, but this is OK because
     * we only need to care about it for TCG.
     */
    if (env->irq_line_state == UINT32_MAX) {
        CPUState *cs = CPU(cpu);

        env->irq_line_state = cs->interrupt_request &
            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ |
             CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VFIQ);
    }

    /* Update the values list from the incoming migration data.
     * Anything in the incoming data which we don't know about is
     * a migration failure; anything we know about but the incoming
     * data doesn't specify retains its current (reset) value.
     * The indexes list remains untouched -- we only inspect the
     * incoming migration index list so we can match the values array
     * entries with the right slots in our own values array.
     */

    for (i = 0, v = 0; i < cpu->cpreg_array_len
             && v < cpu->cpreg_vmstate_array_len; i++) {
        if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) {
            /* register in our list but not incoming : skip it */
            continue;
        }
        if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) {
            /* register in their list but not ours: fail migration */
            return -1;
        }
        /* matching register, copy the value over */
        cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v];
        v++;
    }

    if (kvm_enabled()) {
        if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
            return -1;
        }
        /* Note that it's OK for the TCG side not to know about
         * every register in the list; KVM is authoritative if
         * we're using it.
         */
        write_list_to_cpustate(cpu);
        kvm_arm_cpu_post_load(cpu);
    } else {
        if (!write_list_to_cpustate(cpu)) {
            return -1;
        }
    }

    /*
     * Misaligned thumb pc is architecturally impossible. Fail the
     * incoming migration. For TCG it would trigger the assert in
     * thumb_tr_translate_insn().
     */
    if (!is_a64(env) && env->thumb && (env->regs[15] & 1)) {
        return -1;
    }

    if (tcg_enabled()) {
        hw_breakpoint_update_all(cpu);
        hw_watchpoint_update_all(cpu);
    }

    /*
     * TCG gen_update_fp_context() relies on the invariant that
     * FPDSCR.LTPSIZE is constant 4 for M-profile with the LOB extension;
     * forbid bogus incoming data with some other value.
     */
    if (arm_feature(env, ARM_FEATURE_M) && cpu_isar_feature(aa32_lob, cpu)) {
        if (extract32(env->v7m.fpdscr[M_REG_NS],
                      FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4 ||
            extract32(env->v7m.fpdscr[M_REG_S],
                      FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4) {
            return -1;
        }
    }

    if (!kvm_enabled()) {
        pmu_op_finish(env);
    }

    if (tcg_enabled()) {
        arm_rebuild_hflags(env);
    }

    return 0;
}

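/*
 * Top-level ARM CPU migration state.  Optional architectural state
 * (VFP, M-profile, PMSA, SVE, SME, ...) travels in the subsections
 * listed at the end; each subsection is included only when its
 * "needed" hook returns true.
 */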
const VMStateDescription vmstate_arm_cpu = {
    .name = "cpu",
    .version_id = 22,
    .minimum_version_id = 22,
    .pre_save = cpu_pre_save,
    .post_save = cpu_post_save,
    .pre_load = cpu_pre_load,
    .post_load = cpu_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
        VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
        VMSTATE_UINT64(env.pc, ARMCPU),
        {
            .name = "cpsr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_cpsr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_UINT32(env.spsr, ARMCPU),
        VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
        VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
        VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
        /* The length-check must come before the arrays to avoid
         * incoming data possibly overflowing the array.
         */
        VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len, ARMCPU),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
        VMSTATE_UINT64(env.exclusive_val, ARMCPU),
        VMSTATE_UINT64(env.exclusive_high, ARMCPU),
        VMSTATE_UNUSED(sizeof(uint64_t)),
        VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
        VMSTATE_UINT32(env.exception.fsr, ARMCPU),
        VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
        {
            .name = "power_state",
            .version_id = 0,
            .size = sizeof(bool),
            .info = &vmstate_powered_off,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_vfp,
        &vmstate_iwmmxt,
        &vmstate_m,
        &vmstate_thumb2ee,
        /* pmsav7_rnr must come before pmsav7 so that we have the
         * region number before we test it in the VMSTATE_VALIDATE
         * in vmstate_pmsav7.
         */
        &vmstate_pmsav7_rnr,
        &vmstate_pmsav7,
        &vmstate_pmsav8,
        &vmstate_m_security,
#ifdef TARGET_AARCH64
        &vmstate_sve,
        &vmstate_za,
#endif
        &vmstate_serror,
        &vmstate_irq_line_state,
        &vmstate_wfxt_timer,
        NULL
    }
};