/* openbmc/qemu: target/arm/machine.c (revision 7f6c3d1a) */
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "internals.h"
#include "migration/cpu.h"

static bool vfp_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
            ? cpu_isar_feature(aa64_fp_simd, cpu)
            : cpu_isar_feature(aa32_vfp_simd, cpu));
}

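/* FPSCR is not held as a single field in CPUARMState: vfp_get_fpscr()
 * and vfp_set_fpscr() assemble and scatter its bits across the internal
 * floating point state, so migrating it needs the custom get/put
 * accessors below rather than a plain VMSTATE_UINT32.
 */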
static int get_fpscr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    vfp_set_fpscr(env, val);
    return 0;
}

static int put_fpscr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field, QJSON *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    qemu_put_be32(f, vfp_get_fpscr(env));
    return 0;
}

static const VMStateInfo vmstate_fpscr = {
    .name = "fpscr",
    .get = get_fpscr,
    .put = put_fpscr,
};

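/* VFP/Neon register state. For migration compatibility only the low
 * 128 bits (two uint64_t) of each zregs[] element are sent here, which
 * keeps the wire format identical to the old Q-register layout; any SVE
 * high bits are sent in the "cpu/sve" subsection instead.
 */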
static const VMStateDescription vmstate_vfp = {
    .name = "cpu/vfp",
    .version_id = 3,
    .minimum_version_id = 3,
    .needed = vfp_needed,
    .fields = (VMStateField[]) {
        /* For compatibility, store Qn out of Zn here.  */
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[0].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[1].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[2].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[3].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[4].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[5].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[6].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[7].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[8].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[9].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[10].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[11].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[12].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[13].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[14].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[15].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[16].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[17].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[18].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[19].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[20].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[21].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[22].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[23].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[24].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[25].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[26].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[27].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[28].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[29].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[30].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[31].d, ARMCPU, 0, 2),

        /* The xregs array is a little awkward because element 1 (FPSCR)
         * requires a specific accessor, so we have to split it up in
         * the vmstate:
         */
        VMSTATE_UINT32(env.vfp.xregs[0], ARMCPU),
        VMSTATE_UINT32_SUB_ARRAY(env.vfp.xregs, ARMCPU, 2, 14),
        {
            .name = "fpscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_fpscr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

static bool iwmmxt_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_IWMMXT);
}

static const VMStateDescription vmstate_iwmmxt = {
    .name = "cpu/iwmmxt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = iwmmxt_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16),
        VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef TARGET_AARCH64
/* The expression ARM_MAX_VQ - 2 is 0 for pure AArch32 build,
 * and ARMPredicateReg is actively empty.  This triggers errors
 * in the expansion of the VMSTATE macros.
 */

static bool sve_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return cpu_isar_feature(aa64_sve, cpu);
}

/* The first two words of each Zreg are stored in VFP state.  */
static const VMStateDescription vmstate_zreg_hi_reg = {
    .name = "cpu/sve/zreg_hi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_SUB_ARRAY(d, ARMVectorReg, 2, ARM_MAX_VQ - 2),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_preg_reg = {
    .name = "cpu/sve/preg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(p, ARMPredicateReg, 2 * ARM_MAX_VQ / 8),
        VMSTATE_END_OF_LIST()
    }
};

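/* cpu/sve carries only what cpu/vfp does not: words d[2] and above of
 * each of the 32 Z registers, plus all 17 predicate registers (the last
 * pregs[] element holds the FFR in the CPUARMState layout).
 */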
static const VMStateDescription vmstate_sve = {
    .name = "cpu/sve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sve_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.vfp.zregs, ARMCPU, 32, 0,
                             vmstate_zreg_hi_reg, ARMVectorReg),
        VMSTATE_STRUCT_ARRAY(env.vfp.pregs, ARMCPU, 17, 0,
                             vmstate_preg_reg, ARMPredicateReg),
        VMSTATE_END_OF_LIST()
    }
};
#endif /* AARCH64 */

static bool serror_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return env->serror.pending != 0;
}

static const VMStateDescription vmstate_serror = {
    .name = "cpu/serror",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = serror_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(env.serror.pending, ARMCPU),
        VMSTATE_UINT8(env.serror.has_esr, ARMCPU),
        VMSTATE_UINT64(env.serror.esr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool irq_line_state_needed(void *opaque)
{
    return true;
}

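/* This subsection is always sent. cpu_pre_load() primes irq_line_state
 * with the UINT32_MAX sentinel, so cpu_post_load() can tell whether the
 * incoming stream (possibly from an older QEMU) actually contained it.
 */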
static const VMStateDescription vmstate_irq_line_state = {
    .name = "cpu/irq-line-state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = irq_line_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.irq_line_state, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

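/* M-profile system register state. Only the Non-secure bank of each
 * banked register is sent in vmstate_m and its subsections; the Secure
 * bank is handled by vmstate_m_security further down.
 */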
static bool m_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_m_faultmask_primask = {
    .name = "cpu/m/faultmask-primask",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

/* CSSELR is in a subsection because we didn't implement it previously.
 * Migration from an old implementation will leave it at zero, which
 * is OK since the only CPUs in the old implementation make the
 * register RAZ/WI.
 * Since there was no version of QEMU which implemented the CSSELR for
 * just non-secure, we transfer both banks here rather than putting
 * the secure banked version in the m-security subsection.
 */
static bool csselr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.v7m.csselr[M_REG_NS] <= R_V7M_CSSELR_INDEX_MASK
        && cpu->env.v7m.csselr[M_REG_S] <= R_V7M_CSSELR_INDEX_MASK;
}

static bool m_csselr_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return !arm_v7m_csselr_razwi(cpu);
}

static const VMStateDescription vmstate_m_csselr = {
    .name = "cpu/m/csselr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_csselr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.csselr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_VALIDATE("CSSELR is valid", csselr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_scr = {
    .name = "cpu/m/scr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.scr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_other_sp = {
    .name = "cpu/m/other-sp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.other_sp, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool m_v8m_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M) && arm_feature(env, ARM_FEATURE_V8);
}

static const VMStateDescription vmstate_m_v8m = {
    .name = "cpu/m/v8m",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_v8m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.msplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.psplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_fp = {
    .name = "cpu/m/fp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vfp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.fpcar, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpccr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpdscr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.cpacr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32(env.v7m.nsacr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m = {
    .name = "cpu/m",
    .version_id = 4,
    .minimum_version_id = 4,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.hfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.dfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.bfar, ARMCPU),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_NS], ARMCPU),
        VMSTATE_INT32(env.v7m.exception, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_m_faultmask_primask,
        &vmstate_m_csselr,
        &vmstate_m_scr,
        &vmstate_m_other_sp,
        &vmstate_m_v8m,
        &vmstate_m_fp,
        NULL
    }
};

static bool thumb2ee_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_THUMB2EE);
}

static const VMStateDescription vmstate_thumb2ee = {
    .name = "cpu/thumb2ee",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = thumb2ee_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.teecr, ARMCPU),
        VMSTATE_UINT32(env.teehbr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav7_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
           arm_feature(env, ARM_FEATURE_V7) &&
           !arm_feature(env, ARM_FEATURE_V8);
}

static bool pmsav7_rgnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.pmsav7.rnr[M_REG_NS] < cpu->pmsav7_dregion;
}

static const VMStateDescription vmstate_pmsav7 = {
    .name = "cpu/pmsav7",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav7.drbar, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.drsr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.dracr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VALIDATE("rgnr is valid", pmsav7_rgnr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav7_rnr_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    /* For R profile cores pmsav7.rnr is migrated via the cpreg
     * "RGNR" definition in helper.h. For M profile we have to
     * migrate it separately.
     */
    return arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_pmsav7_rnr = {
    .name = "cpu/pmsav7-rnr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_rnr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav8_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8);
}

static const VMStateDescription vmstate_pmsav8 = {
    .name = "cpu/pmsav8",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav8_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool s_rnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion;
}

static bool sau_rnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.sau.rnr < cpu->sau_sregion;
}

static bool m_security_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M_SECURITY);
}

static const VMStateDescription vmstate_m_security = {
    .name = "cpu/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_security_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.secure, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_msp, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_psp, ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_S], ARMCPU),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_S], ARMCPU),
        VMSTATE_VALIDATE("secure MPU_RNR is valid", s_rnr_vmstate_validate),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.sfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.sfar, ARMCPU),
        VMSTATE_VARRAY_UINT32(env.sau.rbar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.sau.rlar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.sau.rnr, ARMCPU),
        VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate),
        VMSTATE_UINT32(env.sau.ctrl, ARMCPU),
        VMSTATE_UINT32(env.v7m.scr[M_REG_S], ARMCPU),
        /* AIRCR is not secure-only, but our implementation is R/O if the
         * security extension is unimplemented, so we migrate it here.
         */
        VMSTATE_UINT32(env.v7m.aircr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

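/* A single 32-bit "cpsr" field carries whichever program-status format
 * the CPU is using: xPSR for M-profile, PSTATE for AArch64, CPSR
 * otherwise. On load, get_cpsr() also converts CPSR-format values that
 * older QEMU versions used to send for M-profile guests.
 */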
static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    if (arm_feature(env, ARM_FEATURE_M)) {
        if (val & XPSR_EXCP) {
            /* This is a CPSR format value from an older QEMU. (We can tell
             * because values transferred in XPSR format always have zero
             * for the EXCP field, and CPSR format will always have bit 4
             * set in CPSR_M.) Rearrange it into XPSR format. The significant
             * differences are that the T bit is not in the same place, the
             * primask/faultmask info may be in the CPSR I and F bits, and
             * we do not want the mode bits.
             * We know that this cleanup happened before v8M, so there
             * is no complication with banked primask/faultmask.
             */
            uint32_t newval = val;

            assert(!arm_feature(env, ARM_FEATURE_M_SECURITY));

            newval &= (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE);
            if (val & CPSR_T) {
                newval |= XPSR_T;
            }
            /* If the I or F bits are set then this is a migration from
             * an old QEMU which still stored the M profile FAULTMASK
             * and PRIMASK in env->daif. For a new QEMU, the data is
             * transferred using the vmstate_m_faultmask_primask subsection.
             */
            if (val & CPSR_F) {
                env->v7m.faultmask[M_REG_NS] = 1;
            }
            if (val & CPSR_I) {
                env->v7m.primask[M_REG_NS] = 1;
            }
            val = newval;
        }
        /* Ignore the low bits, they are handled by vmstate_m. */
        xpsr_write(env, val, ~XPSR_EXCP);
        return 0;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);

    if (is_a64(env)) {
        pstate_write(env, val);
        return 0;
    }

    cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    return 0;
}

static int put_cpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, QJSON *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* The low 9 bits are v7m.exception, which is handled by vmstate_m. */
        val = xpsr_read(env) & ~XPSR_EXCP;
    } else if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }

    qemu_put_be32(f, val);
    return 0;
}

static const VMStateInfo vmstate_cpsr = {
    .name = "cpsr",
    .get = get_cpsr,
    .put = put_cpsr,
};

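/* cpu->power_state is migrated as a single "powered_off" byte, which
 * keeps the wire format of the earlier boolean field. put_power()
 * deliberately fails (returns non-zero) if the CPU is caught in a
 * transitional PSCI power state.
 */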
static int get_power(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    bool powered_off = qemu_get_byte(f);
    cpu->power_state = powered_off ? PSCI_OFF : PSCI_ON;
    return 0;
}

static int put_power(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, QJSON *vmdesc)
{
    ARMCPU *cpu = opaque;

    /* Migration should never happen while we transition power states */

    if (cpu->power_state == PSCI_ON ||
        cpu->power_state == PSCI_OFF) {
        bool powered_off = (cpu->power_state == PSCI_OFF);
        qemu_put_byte(f, powered_off);
        return 0;
    } else {
        return 1;
    }
}

static const VMStateInfo vmstate_powered_off = {
    .name = "powered_off",
    .get = get_power,
    .put = put_power,
};

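/* Coprocessor/system register migration uses the cpreg_vmstate_* arrays
 * as a staging area: cpu_pre_save() snapshots the current values into
 * them (from KVM or from the TCG-side state), and cpu_post_load()
 * pushes the incoming values back out again.
 */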
static int cpu_pre_save(void *opaque)
{
    ARMCPU *cpu = opaque;

    if (!kvm_enabled()) {
        pmu_op_start(&cpu->env);
    }

    if (kvm_enabled()) {
        if (!write_kvmstate_to_list(cpu)) {
            /* This should never fail */
            abort();
        }

        /*
         * kvm_arm_cpu_pre_save() must be called after
         * write_kvmstate_to_list()
         */
        kvm_arm_cpu_pre_save(cpu);
    } else {
        if (!write_cpustate_to_list(cpu, false)) {
            /* This should never fail. */
            abort();
        }
    }

    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    memcpy(cpu->cpreg_vmstate_indexes, cpu->cpreg_indexes,
           cpu->cpreg_array_len * sizeof(uint64_t));
    memcpy(cpu->cpreg_vmstate_values, cpu->cpreg_values,
           cpu->cpreg_array_len * sizeof(uint64_t));

    return 0;
}

static int cpu_post_save(void *opaque)
{
    ARMCPU *cpu = opaque;

    if (!kvm_enabled()) {
        pmu_op_finish(&cpu->env);
    }

    return 0;
}

static int cpu_pre_load(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    /*
     * Pre-initialize irq_line_state to a value that's never valid as
     * real data, so cpu_post_load() can tell whether we've seen the
     * irq-line-state subsection in the incoming migration state.
     */
    env->irq_line_state = UINT32_MAX;

    if (!kvm_enabled()) {
        pmu_op_start(&cpu->env);
    }

    return 0;
}

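/* Both cpreg index lists are kept sorted by 64-bit register index, which
 * is what lets cpu_post_load() match the incoming values against our own
 * list in a single linear pass.
 */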
static int cpu_post_load(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    int i, v;

    /*
     * Handle migration compatibility from old QEMU which didn't
     * send the irq-line-state subsection. A QEMU without it did not
     * implement the HCR_EL2.{VI,VF} bits as generating interrupts,
     * so for TCG the line state matches the bits set in cs->interrupt_request.
     * For KVM the line state is not stored in cs->interrupt_request
     * and so this will leave irq_line_state as 0, but this is OK because
     * we only need to care about it for TCG.
     */
    if (env->irq_line_state == UINT32_MAX) {
        CPUState *cs = CPU(cpu);

        env->irq_line_state = cs->interrupt_request &
            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ |
             CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VFIQ);
    }

    /* Update the values list from the incoming migration data.
     * Anything in the incoming data which we don't know about is
     * a migration failure; anything we know about but the incoming
     * data doesn't specify retains its current (reset) value.
     * The indexes list remains untouched -- we only inspect the
     * incoming migration index list so we can match the values array
     * entries with the right slots in our own values array.
     */

    for (i = 0, v = 0; i < cpu->cpreg_array_len
             && v < cpu->cpreg_vmstate_array_len; i++) {
        if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) {
            /* register in our list but not incoming: skip it */
            continue;
        }
        if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) {
            /* register in their list but not ours: fail migration */
            return -1;
        }
        /* matching register, copy the value over */
        cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v];
        v++;
    }

    if (kvm_enabled()) {
        if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
            return -1;
        }
        /* Note that it's OK for the TCG side not to know about
         * every register in the list; KVM is authoritative if
         * we're using it.
         */
        write_list_to_cpustate(cpu);
        kvm_arm_cpu_post_load(cpu);
    } else {
        if (!write_list_to_cpustate(cpu)) {
            return -1;
        }
    }

    hw_breakpoint_update_all(cpu);
    hw_watchpoint_update_all(cpu);

    if (!kvm_enabled()) {
        pmu_op_finish(&cpu->env);
    }
    arm_rebuild_hflags(&cpu->env);

    return 0;
}

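/* Top-level migration description for the ARM CPU. The baseline fields
 * are always sent; everything optional lives in the subsections below,
 * each guarded by a .needed callback so that a stream from a
 * configuration without that state still loads cleanly.
 */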
const VMStateDescription vmstate_arm_cpu = {
    .name = "cpu",
    .version_id = 22,
    .minimum_version_id = 22,
    .pre_save = cpu_pre_save,
    .post_save = cpu_post_save,
    .pre_load = cpu_pre_load,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
        VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
        VMSTATE_UINT64(env.pc, ARMCPU),
        {
            .name = "cpsr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_cpsr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_UINT32(env.spsr, ARMCPU),
        VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
        VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
        VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
        /* The length-check must come before the arrays to avoid
         * incoming data possibly overflowing the array.
         */
        VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len, ARMCPU),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
        VMSTATE_UINT64(env.exclusive_val, ARMCPU),
        VMSTATE_UINT64(env.exclusive_high, ARMCPU),
        VMSTATE_UINT64(env.features, ARMCPU),
        VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
        VMSTATE_UINT32(env.exception.fsr, ARMCPU),
        VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
        {
            .name = "power_state",
            .version_id = 0,
            .size = sizeof(bool),
            .info = &vmstate_powered_off,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_vfp,
        &vmstate_iwmmxt,
        &vmstate_m,
        &vmstate_thumb2ee,
        /* pmsav7_rnr must come before pmsav7 so that we have the
         * region number before we test it in the VMSTATE_VALIDATE
         * in vmstate_pmsav7.
         */
        &vmstate_pmsav7_rnr,
        &vmstate_pmsav7,
        &vmstate_pmsav8,
        &vmstate_m_security,
#ifdef TARGET_AARCH64
        &vmstate_sve,
#endif
        &vmstate_serror,
        &vmstate_irq_line_state,
        NULL
    }
};