/* xref: /openbmc/qemu/target/arm/machine.c (revision c4b8ffcb) */
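/*
 * Migration (VMState) support for ARM CPUs.  vmstate_arm_cpu at the end
 * of this file describes how CPUARMState is serialised; optional pieces
 * (VFP, iwMMXt, SVE, M-profile, PMSA, SError and IRQ line state) live in
 * subsections that are only transferred when their "needed" predicate
 * returns true for the CPU being migrated.
 */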
1 #include "qemu/osdep.h"
2 #include "cpu.h"
3 #include "qemu/error-report.h"
4 #include "sysemu/kvm.h"
5 #include "kvm_arm.h"
6 #include "internals.h"
7 #include "migration/cpu.h"
8 
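/*
 * Predicate for the cpu/vfp subsection: FP/SIMD state is only migrated
 * when the CPU actually implements it, which is determined from the ID
 * registers (aa64_fp_simd or aa32_vfp_simd) rather than from a feature
 * bit.
 */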
static bool vfp_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
            ? cpu_isar_feature(aa64_fp_simd, cpu)
            : cpu_isar_feature(aa32_vfp_simd, cpu));
}

static int get_fpscr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    vfp_set_fpscr(env, val);
    return 0;
}

static int put_fpscr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    qemu_put_be32(f, vfp_get_fpscr(env));
    return 0;
}

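/*
 * FPSCR is not kept as a single field of CPUARMState; it is assembled
 * from and scattered to several pieces of VFP state, so it is marshalled
 * through the vfp_get_fpscr()/vfp_set_fpscr() accessors via this custom
 * VMStateInfo rather than a plain VMSTATE_UINT32 entry.
 */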
static const VMStateInfo vmstate_fpscr = {
    .name = "fpscr",
    .get = get_fpscr,
    .put = put_fpscr,
};

static const VMStateDescription vmstate_vfp = {
    .name = "cpu/vfp",
    .version_id = 3,
    .minimum_version_id = 3,
    .needed = vfp_needed,
    .fields = (VMStateField[]) {
        /* For compatibility, store Qn out of Zn here.  */
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[0].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[1].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[2].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[3].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[4].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[5].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[6].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[7].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[8].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[9].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[10].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[11].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[12].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[13].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[14].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[15].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[16].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[17].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[18].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[19].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[20].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[21].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[22].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[23].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[24].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[25].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[26].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[27].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[28].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[29].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[30].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[31].d, ARMCPU, 0, 2),

        /* The xregs array is a little awkward because element 1 (FPSCR)
         * requires a specific accessor, so we have to split it up in
         * the vmstate:
         */
        VMSTATE_UINT32(env.vfp.xregs[0], ARMCPU),
        VMSTATE_UINT32_SUB_ARRAY(env.vfp.xregs, ARMCPU, 2, 14),
        {
            .name = "fpscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_fpscr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

static bool iwmmxt_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_IWMMXT);
}

static const VMStateDescription vmstate_iwmmxt = {
    .name = "cpu/iwmmxt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = iwmmxt_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16),
        VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef TARGET_AARCH64
/* The expression ARM_MAX_VQ - 2 is 0 for pure AArch32 build,
 * and ARMPredicateReg is actively empty.  This triggers errors
 * in the expansion of the VMSTATE macros.
 */

static bool sve_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return cpu_isar_feature(aa64_sve, cpu);
}

/* The first two words of each Zreg are stored in VFP state.  */
static const VMStateDescription vmstate_zreg_hi_reg = {
    .name = "cpu/sve/zreg_hi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_SUB_ARRAY(d, ARMVectorReg, 2, ARM_MAX_VQ - 2),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_preg_reg = {
    .name = "cpu/sve/preg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(p, ARMPredicateReg, 2 * ARM_MAX_VQ / 8),
        VMSTATE_END_OF_LIST()
    }
};

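/*
 * Only the SVE state that is not already covered by the cpu/vfp
 * subsection is sent here: the high part of each Z register (the low
 * 128 bits travel as the Q registers above) and the predicate
 * registers, where the 17th element of pregs[] is the FFR.
 */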
static const VMStateDescription vmstate_sve = {
    .name = "cpu/sve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sve_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.vfp.zregs, ARMCPU, 32, 0,
                             vmstate_zreg_hi_reg, ARMVectorReg),
        VMSTATE_STRUCT_ARRAY(env.vfp.pregs, ARMCPU, 17, 0,
                             vmstate_preg_reg, ARMPredicateReg),
        VMSTATE_END_OF_LIST()
    }
};
#endif /* AARCH64 */

static bool serror_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return env->serror.pending != 0;
}

static const VMStateDescription vmstate_serror = {
    .name = "cpu/serror",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = serror_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(env.serror.pending, ARMCPU),
        VMSTATE_UINT8(env.serror.has_esr, ARMCPU),
        VMSTATE_UINT64(env.serror.esr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool irq_line_state_needed(void *opaque)
{
    return true;
}

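/*
 * irq_line_state_needed() always returns true, so any QEMU that knows
 * about this subsection always sends it.  cpu_pre_load() seeds
 * env->irq_line_state with the impossible value UINT32_MAX so that
 * cpu_post_load() can detect a stream from an older QEMU which lacks
 * the subsection and reconstruct the line state instead.
 */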
static const VMStateDescription vmstate_irq_line_state = {
    .name = "cpu/irq-line-state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = irq_line_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.irq_line_state, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool m_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_m_faultmask_primask = {
    .name = "cpu/m/faultmask-primask",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

/* CSSELR is in a subsection because we didn't implement it previously.
 * Migration from an old implementation will leave it at zero, which
 * is OK since the only CPUs in the old implementation make the
 * register RAZ/WI.
 * Since there was no version of QEMU which implemented the CSSELR for
 * just non-secure, we transfer both banks here rather than putting
 * the secure banked version in the m-security subsection.
 */
static bool csselr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.v7m.csselr[M_REG_NS] <= R_V7M_CSSELR_INDEX_MASK
        && cpu->env.v7m.csselr[M_REG_S] <= R_V7M_CSSELR_INDEX_MASK;
}

static bool m_csselr_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return !arm_v7m_csselr_razwi(cpu);
}

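/*
 * Fields (including VMSTATE_VALIDATE entries) are processed in order on
 * load, so the csselr[] array has already been filled in by the time the
 * bounds check runs; a false result from the check fails the migration.
 */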
static const VMStateDescription vmstate_m_csselr = {
    .name = "cpu/m/csselr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_csselr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.csselr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_VALIDATE("CSSELR is valid", csselr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_scr = {
    .name = "cpu/m/scr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.scr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_other_sp = {
    .name = "cpu/m/other-sp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.other_sp, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool m_v8m_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M) && arm_feature(env, ARM_FEATURE_V8);
}

static const VMStateDescription vmstate_m_v8m = {
    .name = "cpu/m/v8m",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_v8m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.msplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.psplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_fp = {
    .name = "cpu/m/fp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vfp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.fpcar, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpccr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpdscr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.cpacr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32(env.v7m.nsacr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool mve_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return cpu_isar_feature(aa32_mve, cpu);
}

static const VMStateDescription vmstate_m_mve = {
    .name = "cpu/m/mve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mve_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vpr, ARMCPU),
        VMSTATE_UINT32(env.v7m.ltpsize, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
};

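/*
 * Top-level M-profile state.  version_id and minimum_version_id are both
 * 4, so streams using an older cpu/m layout are rejected rather than
 * translated; newer optional pieces live in the subsections listed at
 * the end of this description.
 */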
static const VMStateDescription vmstate_m = {
    .name = "cpu/m",
    .version_id = 4,
    .minimum_version_id = 4,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.hfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.dfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.bfar, ARMCPU),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_NS], ARMCPU),
        VMSTATE_INT32(env.v7m.exception, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_m_faultmask_primask,
        &vmstate_m_csselr,
        &vmstate_m_scr,
        &vmstate_m_other_sp,
        &vmstate_m_v8m,
        &vmstate_m_fp,
        &vmstate_m_mve,
        NULL
    }
};

static bool thumb2ee_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_THUMB2EE);
}

static const VMStateDescription vmstate_thumb2ee = {
    .name = "cpu/thumb2ee",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = thumb2ee_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.teecr, ARMCPU),
        VMSTATE_UINT32(env.teehbr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav7_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
           arm_feature(env, ARM_FEATURE_V7) &&
           !arm_feature(env, ARM_FEATURE_V8);
}

static bool pmsav7_rgnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.pmsav7.rnr[M_REG_NS] < cpu->pmsav7_dregion;
}

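/*
 * The PMSAv7 region registers are arrays sized at runtime by
 * pmsav7_dregion, so they are described with VMSTATE_VARRAY_UINT32,
 * which takes the element count from that ARMCPU field rather than
 * using a fixed-size array entry.
 */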
static const VMStateDescription vmstate_pmsav7 = {
    .name = "cpu/pmsav7",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav7.drbar, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.drsr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.dracr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VALIDATE("rgnr is valid", pmsav7_rgnr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav7_rnr_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    /* For R profile cores pmsav7.rnr is migrated via the cpreg
     * "RGNR" definition in helper.h. For M profile we have to
     * migrate it separately.
     */
    return arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_pmsav7_rnr = {
    .name = "cpu/pmsav7-rnr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_rnr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav8_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8);
}

static const VMStateDescription vmstate_pmsav8 = {
    .name = "cpu/pmsav8",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav8_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool s_rnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion;
}

static bool sau_rnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.sau.rnr < cpu->sau_sregion;
}

static bool m_security_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M_SECURITY);
}

static const VMStateDescription vmstate_m_security = {
    .name = "cpu/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_security_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.secure, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_msp, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_psp, ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_S], ARMCPU),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_S], ARMCPU),
        VMSTATE_VALIDATE("secure MPU_RNR is valid", s_rnr_vmstate_validate),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.sfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.sfar, ARMCPU),
        VMSTATE_VARRAY_UINT32(env.sau.rbar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.sau.rlar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.sau.rnr, ARMCPU),
        VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate),
        VMSTATE_UINT32(env.sau.ctrl, ARMCPU),
        VMSTATE_UINT32(env.v7m.scr[M_REG_S], ARMCPU),
        /* AIRCR is not secure-only, but our implementation is R/O if the
         * security extension is unimplemented, so we migrate it here.
         */
        VMSTATE_UINT32(env.v7m.aircr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

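/*
 * The "cpsr" slot in the stream is a single 32-bit value that carries
 * XPSR for M-profile CPUs, PSTATE for AArch64 or CPSR for AArch32.
 * get_cpsr() works out which format it was given (including CPSR-format
 * values written by older QEMU versions for M-profile CPUs) and
 * put_cpsr() chooses the format to send based on the current CPU state.
 */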
static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    if (arm_feature(env, ARM_FEATURE_M)) {
        if (val & XPSR_EXCP) {
            /* This is a CPSR format value from an older QEMU. (We can tell
             * because values transferred in XPSR format always have zero
             * for the EXCP field, and CPSR format will always have bit 4
             * set in CPSR_M.) Rearrange it into XPSR format. The significant
             * differences are that the T bit is not in the same place, the
             * primask/faultmask info may be in the CPSR I and F bits, and
             * we do not want the mode bits.
             * We know that this cleanup happened before v8M, so there
             * is no complication with banked primask/faultmask.
             */
            uint32_t newval = val;

            assert(!arm_feature(env, ARM_FEATURE_M_SECURITY));

            newval &= (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE);
            if (val & CPSR_T) {
                newval |= XPSR_T;
            }
            /* If the I or F bits are set then this is a migration from
             * an old QEMU which still stored the M profile FAULTMASK
             * and PRIMASK in env->daif. For a new QEMU, the data is
             * transferred using the vmstate_m_faultmask_primask subsection.
             */
            if (val & CPSR_F) {
                env->v7m.faultmask[M_REG_NS] = 1;
            }
            if (val & CPSR_I) {
                env->v7m.primask[M_REG_NS] = 1;
            }
            val = newval;
        }
        /* Ignore the low bits, they are handled by vmstate_m. */
        xpsr_write(env, val, ~XPSR_EXCP);
        return 0;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);

    if (is_a64(env)) {
        pstate_write(env, val);
        return 0;
    }

    cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    return 0;
}

static int put_cpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* The low 9 bits are v7m.exception, which is handled by vmstate_m. */
        val = xpsr_read(env) & ~XPSR_EXCP;
    } else if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }

    qemu_put_be32(f, val);
    return 0;
}

static const VMStateInfo vmstate_cpsr = {
    .name = "cpsr",
    .get = get_cpsr,
    .put = put_cpsr,
};

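/*
 * cpu->power_state is a tri-state PSCI value, but on the wire it is still
 * a single "powered_off" bool -- presumably to stay compatible with the
 * stream format from when ARMCPU only tracked a powered_off flag.
 * put_power() returns non-zero (failing the migration) if the CPU is in
 * a transitional state such as PSCI_ON_PENDING.
 */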
static int get_power(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    bool powered_off = qemu_get_byte(f);
    cpu->power_state = powered_off ? PSCI_OFF : PSCI_ON;
    return 0;
}

static int put_power(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;

    /* Migration should never happen while we transition power states */

    if (cpu->power_state == PSCI_ON ||
        cpu->power_state == PSCI_OFF) {
        bool powered_off = (cpu->power_state == PSCI_OFF);
        qemu_put_byte(f, powered_off);
        return 0;
    } else {
        return 1;
    }
}

static const VMStateInfo vmstate_powered_off = {
    .name = "powered_off",
    .get = get_power,
    .put = put_power,
};

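/*
 * The coprocessor/system registers are migrated as the two parallel
 * arrays cpreg_vmstate_indexes[]/cpreg_vmstate_values[].  cpu_pre_save()
 * refreshes the live cpreg list (from KVM when it is in use, otherwise
 * from the TCG CPU state) and then snapshots it into those staging
 * arrays.
 */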
static int cpu_pre_save(void *opaque)
{
    ARMCPU *cpu = opaque;

    if (!kvm_enabled()) {
        pmu_op_start(&cpu->env);
    }

    if (kvm_enabled()) {
        if (!write_kvmstate_to_list(cpu)) {
            /* This should never fail */
            g_assert_not_reached();
        }

        /*
         * kvm_arm_cpu_pre_save() must be called after
         * write_kvmstate_to_list()
         */
        kvm_arm_cpu_pre_save(cpu);
    } else {
        if (!write_cpustate_to_list(cpu, false)) {
            /* This should never fail. */
            g_assert_not_reached();
        }
    }

    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    memcpy(cpu->cpreg_vmstate_indexes, cpu->cpreg_indexes,
           cpu->cpreg_array_len * sizeof(uint64_t));
    memcpy(cpu->cpreg_vmstate_values, cpu->cpreg_values,
           cpu->cpreg_array_len * sizeof(uint64_t));

    return 0;
}

static int cpu_post_save(void *opaque)
{
    ARMCPU *cpu = opaque;

    if (!kvm_enabled()) {
        pmu_op_finish(&cpu->env);
    }

    return 0;
}

static int cpu_pre_load(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    /*
     * Pre-initialize irq_line_state to a value that's never valid as
     * real data, so cpu_post_load() can tell whether we've seen the
     * irq-line-state subsection in the incoming migration state.
     */
    env->irq_line_state = UINT32_MAX;

    if (!kvm_enabled()) {
        pmu_op_start(&cpu->env);
    }

    return 0;
}

static int cpu_post_load(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    int i, v;

    /*
     * Handle migration compatibility from old QEMU which didn't
     * send the irq-line-state subsection. A QEMU without it did not
     * implement the HCR_EL2.{VI,VF} bits as generating interrupts,
     * so for TCG the line state matches the bits set in cs->interrupt_request.
     * For KVM the line state is not stored in cs->interrupt_request
     * and so this will leave irq_line_state as 0, but this is OK because
     * we only need to care about it for TCG.
     */
    if (env->irq_line_state == UINT32_MAX) {
        CPUState *cs = CPU(cpu);

        env->irq_line_state = cs->interrupt_request &
            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ |
             CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VFIQ);
    }

    /* Update the values list from the incoming migration data.
     * Anything in the incoming data which we don't know about is
     * a migration failure; anything we know about but the incoming
     * data doesn't specify retains its current (reset) value.
     * The indexes list remains untouched -- we only inspect the
     * incoming migration index list so we can match the values array
     * entries with the right slots in our own values array.
     */

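    /*
     * Both index lists are sorted in ascending order, so this is a linear
     * two-pointer merge: i walks our own register list and v walks the
     * list received from the source.
     */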
    for (i = 0, v = 0; i < cpu->cpreg_array_len
             && v < cpu->cpreg_vmstate_array_len; i++) {
        if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) {
            /* register in our list but not incoming: skip it */
            continue;
        }
        if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) {
            /* register in their list but not ours: fail migration */
            return -1;
        }
        /* matching register, copy the value over */
        cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v];
        v++;
    }

    if (kvm_enabled()) {
        if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
            return -1;
        }
        /* Note that it's OK for the TCG side not to know about
         * every register in the list; KVM is authoritative if
         * we're using it.
         */
        write_list_to_cpustate(cpu);
        kvm_arm_cpu_post_load(cpu);
    } else {
        if (!write_list_to_cpustate(cpu)) {
            return -1;
        }
    }

    hw_breakpoint_update_all(cpu);
    hw_watchpoint_update_all(cpu);

    /*
     * TCG gen_update_fp_context() relies on the invariant that
     * FPDSCR.LTPSIZE is constant 4 for M-profile with the LOB extension;
     * forbid bogus incoming data with some other value.
     */
    if (arm_feature(env, ARM_FEATURE_M) && cpu_isar_feature(aa32_lob, cpu)) {
        if (extract32(env->v7m.fpdscr[M_REG_NS],
                      FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4 ||
            extract32(env->v7m.fpdscr[M_REG_S],
                      FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4) {
            return -1;
        }
    }

    /*
     * Misaligned thumb pc is architecturally impossible.
     * We have an assert in thumb_tr_translate_insn to verify this.
     * Fail an incoming migrate to avoid this assert.
     */
    if (!is_a64(env) && env->thumb && (env->regs[15] & 1)) {
        return -1;
    }

    if (!kvm_enabled()) {
        pmu_op_finish(&cpu->env);
    }
    arm_rebuild_hflags(&cpu->env);

    return 0;
}

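/*
 * Top-level CPU description.  The fields listed here are always present
 * in the stream; the "cpsr" and "power_state" entries are hand-written
 * VMStateField structures because they need the custom VMStateInfo
 * get/put hooks defined above.  Everything optional hangs off the
 * subsection list at the end.
 */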
const VMStateDescription vmstate_arm_cpu = {
    .name = "cpu",
    .version_id = 22,
    .minimum_version_id = 22,
    .pre_save = cpu_pre_save,
    .post_save = cpu_post_save,
    .pre_load = cpu_pre_load,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
        VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
        VMSTATE_UINT64(env.pc, ARMCPU),
        {
            .name = "cpsr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_cpsr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_UINT32(env.spsr, ARMCPU),
        VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
        VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
        VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
        /* The length-check must come before the arrays to avoid
         * incoming data possibly overflowing the array.
         */
        VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len, ARMCPU),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
        VMSTATE_UINT64(env.exclusive_val, ARMCPU),
        VMSTATE_UINT64(env.exclusive_high, ARMCPU),
        VMSTATE_UNUSED(sizeof(uint64_t)),
        VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
        VMSTATE_UINT32(env.exception.fsr, ARMCPU),
        VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
        {
            .name = "power_state",
            .version_id = 0,
            .size = sizeof(bool),
            .info = &vmstate_powered_off,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_vfp,
        &vmstate_iwmmxt,
        &vmstate_m,
        &vmstate_thumb2ee,
        /* pmsav7_rnr must come before pmsav7 so that we have the
         * region number before we test it in the VMSTATE_VALIDATE
         * in vmstate_pmsav7.
         */
        &vmstate_pmsav7_rnr,
        &vmstate_pmsav7,
        &vmstate_pmsav8,
        &vmstate_m_security,
#ifdef TARGET_AARCH64
        &vmstate_sve,
#endif
        &vmstate_serror,
        &vmstate_irq_line_state,
        NULL
    }
};