xref: /openbmc/qemu/target/arm/machine.c (revision 23901b2b721c0576007ab7580da8aa855d6042a9)
1  #include "qemu/osdep.h"
2  #include "cpu.h"
3  #include "qemu/error-report.h"
4  #include "sysemu/kvm.h"
5  #include "sysemu/tcg.h"
6  #include "kvm_arm.h"
7  #include "internals.h"
8  #include "cpu-features.h"
9  #include "migration/cpu.h"
10  #include "target/arm/gtimer.h"
11  
12  static bool vfp_needed(void *opaque)
13  {
14      ARMCPU *cpu = opaque;
15  
16      return (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
17              ? cpu_isar_feature(aa64_fp_simd, cpu)
18              : cpu_isar_feature(aa32_vfp_simd, cpu));
19  }
20  
21  static bool vfp_fpcr_fpsr_needed(void *opaque)
22  {
23      /*
24       * If either the FPCR or the FPSR include set bits that are not
25       * visible in the AArch32 FPSCR view of floating point control/status
26       * then we must send the FPCR and FPSR as two separate fields in the
27       * cpu/vfp/fpcr_fpsr subsection, and we will send a 0 for the old
28       * FPSCR field in cpu/vfp.
29       *
30       * If all the set bits are representable in an AArch32 FPSCR then we
31       * send that value as the cpu/vfp FPSCR field, and don't send the
32       * cpu/vfp/fpcr_fpsr subsection.
33       *
34       * On incoming migration, if the cpu/vfp FPSCR field is non-zero we
35       * use it, and if the fpcr_fpsr subsection is present we use that.
36       * (The subsection will never be present with a non-zero FPSCR field,
37       * and if FPSCR is zero and the subsection is not present that means
38       * that FPSCR/FPSR/FPCR are zero.)
39       *
40       * This preserves migration compatibility with older QEMU versions,
41       * in both directions.
42       */
43      ARMCPU *cpu = opaque;
44      CPUARMState *env = &cpu->env;
45  
46      return (vfp_get_fpcr(env) & ~FPSCR_FPCR_MASK) ||
47          (vfp_get_fpsr(env) & ~FPSCR_FPSR_MASK);
48  }
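/*
 * A worked example of the split, using a hypothetical guest state: on a
 * CPU implementing FEAT_AFP, setting FPCR.AH leaves a bit set that has
 * no home in the AArch32 FPSCR view, so (vfp_get_fpcr(env) &
 * ~FPSCR_FPCR_MASK) is non-zero and this function returns true. The
 * outbound stream then carries the full 64-bit FPCR and FPSR in the
 * cpu/vfp/fpcr_fpsr subsection while put_fpscr() below writes a literal
 * 0 into the legacy cpu/vfp "fpscr" field. If instead only bits such as
 * FPCR.DN and the FPSR NZCV flags were set, everything is representable
 * in the AArch32 view, the function returns false, and only the legacy
 * FPSCR field is sent.
 */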
49  
50  static int get_fpscr(QEMUFile *f, void *opaque, size_t size,
51                       const VMStateField *field)
52  {
53      ARMCPU *cpu = opaque;
54      CPUARMState *env = &cpu->env;
55      uint32_t val = qemu_get_be32(f);
56  
57      if (val) {
58          /* 0 means we might have the data in the fpcr_fpsr subsection */
59          vfp_set_fpscr(env, val);
60      }
61      return 0;
62  }
63  
64  static int put_fpscr(QEMUFile *f, void *opaque, size_t size,
65                       const VMStateField *field, JSONWriter *vmdesc)
66  {
67      ARMCPU *cpu = opaque;
68      CPUARMState *env = &cpu->env;
69      uint32_t fpscr = vfp_fpcr_fpsr_needed(opaque) ? 0 : vfp_get_fpscr(env);
70  
71      qemu_put_be32(f, fpscr);
72      return 0;
73  }
74  
75  static const VMStateInfo vmstate_fpscr = {
76      .name = "fpscr",
77      .get = get_fpscr,
78      .put = put_fpscr,
79  };
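/*
 * Note on the hand-rolled VMStateField entries that use this kind of
 * custom VMStateInfo (see the "fpscr" entry in vmstate_vfp below): they
 * use VMS_SINGLE with .offset = 0, so the address the vmstate core
 * hands to get_fpscr()/put_fpscr() is the start of the ARMCPU structure
 * itself, which is why the callbacks cast opaque straight back to
 * ARMCPU * rather than to a pointer to some uint32_t field.
 */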
80  
81  static int get_fpcr(QEMUFile *f, void *opaque, size_t size,
82                       const VMStateField *field)
83  {
84      ARMCPU *cpu = opaque;
85      CPUARMState *env = &cpu->env;
86      uint64_t val = qemu_get_be64(f);
87  
88      vfp_set_fpcr(env, val);
89      return 0;
90  }
91  
92  static int put_fpcr(QEMUFile *f, void *opaque, size_t size,
93                       const VMStateField *field, JSONWriter *vmdesc)
94  {
95      ARMCPU *cpu = opaque;
96      CPUARMState *env = &cpu->env;
97  
98      qemu_put_be64(f, vfp_get_fpcr(env));
99      return 0;
100  }
101  
102  static const VMStateInfo vmstate_fpcr = {
103      .name = "fpcr",
104      .get = get_fpcr,
105      .put = put_fpcr,
106  };
107  
108  static int get_fpsr(QEMUFile *f, void *opaque, size_t size,
109                       const VMStateField *field)
110  {
111      ARMCPU *cpu = opaque;
112      CPUARMState *env = &cpu->env;
113      uint64_t val = qemu_get_be64(f);
114  
115      vfp_set_fpsr(env, val);
116      return 0;
117  }
118  
119  static int put_fpsr(QEMUFile *f, void *opaque, size_t size,
120                       const VMStateField *field, JSONWriter *vmdesc)
121  {
122      ARMCPU *cpu = opaque;
123      CPUARMState *env = &cpu->env;
124  
125      qemu_put_be64(f, vfp_get_fpsr(env));
126      return 0;
127  }
128  
129  static const VMStateInfo vmstate_fpsr = {
130      .name = "fpsr",
131      .get = get_fpsr,
132      .put = put_fpsr,
133  };
134  
135  static const VMStateDescription vmstate_vfp_fpcr_fpsr = {
136      .name = "cpu/vfp/fpcr_fpsr",
137      .version_id = 1,
138      .minimum_version_id = 1,
139      .needed = vfp_fpcr_fpsr_needed,
140      .fields = (const VMStateField[]) {
141          {
142              .name = "fpcr",
143              .version_id = 0,
144              .size = sizeof(uint64_t),
145              .info = &vmstate_fpcr,
146              .flags = VMS_SINGLE,
147              .offset = 0,
148          },
149          {
150              .name = "fpsr",
151              .version_id = 0,
152              .size = sizeof(uint64_t),
153              .info = &vmstate_fpsr,
154              .flags = VMS_SINGLE,
155              .offset = 0,
156          },
157          VMSTATE_END_OF_LIST()
158      },
159  };
160  
161  static const VMStateDescription vmstate_vfp = {
162      .name = "cpu/vfp",
163      .version_id = 3,
164      .minimum_version_id = 3,
165      .needed = vfp_needed,
166      .fields = (const VMStateField[]) {
167          /* For compatibility, store Qn out of Zn here.  */
168          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[0].d, ARMCPU, 0, 2),
169          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[1].d, ARMCPU, 0, 2),
170          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[2].d, ARMCPU, 0, 2),
171          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[3].d, ARMCPU, 0, 2),
172          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[4].d, ARMCPU, 0, 2),
173          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[5].d, ARMCPU, 0, 2),
174          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[6].d, ARMCPU, 0, 2),
175          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[7].d, ARMCPU, 0, 2),
176          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[8].d, ARMCPU, 0, 2),
177          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[9].d, ARMCPU, 0, 2),
178          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[10].d, ARMCPU, 0, 2),
179          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[11].d, ARMCPU, 0, 2),
180          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[12].d, ARMCPU, 0, 2),
181          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[13].d, ARMCPU, 0, 2),
182          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[14].d, ARMCPU, 0, 2),
183          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[15].d, ARMCPU, 0, 2),
184          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[16].d, ARMCPU, 0, 2),
185          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[17].d, ARMCPU, 0, 2),
186          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[18].d, ARMCPU, 0, 2),
187          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[19].d, ARMCPU, 0, 2),
188          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[20].d, ARMCPU, 0, 2),
189          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[21].d, ARMCPU, 0, 2),
190          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[22].d, ARMCPU, 0, 2),
191          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[23].d, ARMCPU, 0, 2),
192          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[24].d, ARMCPU, 0, 2),
193          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[25].d, ARMCPU, 0, 2),
194          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[26].d, ARMCPU, 0, 2),
195          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[27].d, ARMCPU, 0, 2),
196          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[28].d, ARMCPU, 0, 2),
197          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[29].d, ARMCPU, 0, 2),
198          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[30].d, ARMCPU, 0, 2),
199          VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[31].d, ARMCPU, 0, 2),
200  
201          /* The xregs array is a little awkward because element 1 (FPSCR)
202           * requires a specific accessor, so we have to split it up in
203           * the vmstate:
204           */
205          VMSTATE_UINT32(env.vfp.xregs[0], ARMCPU),
206          VMSTATE_UINT32_SUB_ARRAY(env.vfp.xregs, ARMCPU, 2, 14),
207          {
208              .name = "fpscr",
209              .version_id = 0,
210              .size = sizeof(uint32_t),
211              .info = &vmstate_fpscr,
212              .flags = VMS_SINGLE,
213              .offset = 0,
214          },
215          VMSTATE_END_OF_LIST()
216      },
217      .subsections = (const VMStateDescription * const []) {
218          &vmstate_vfp_fpcr_fpsr,
219          NULL
220      }
221  };
222  
223  static bool iwmmxt_needed(void *opaque)
224  {
225      ARMCPU *cpu = opaque;
226      CPUARMState *env = &cpu->env;
227  
228      return arm_feature(env, ARM_FEATURE_IWMMXT);
229  }
230  
231  static const VMStateDescription vmstate_iwmmxt = {
232      .name = "cpu/iwmmxt",
233      .version_id = 1,
234      .minimum_version_id = 1,
235      .needed = iwmmxt_needed,
236      .fields = (const VMStateField[]) {
237          VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16),
238          VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16),
239          VMSTATE_END_OF_LIST()
240      }
241  };
242  
243  #ifdef TARGET_AARCH64
244  /* The expression ARM_MAX_VQ - 2 is 0 for a pure AArch32 build,
245   * and ARMPredicateReg is effectively empty.  This triggers errors
246   * in the expansion of the VMSTATE macros.
247   */
248  
249  static bool sve_needed(void *opaque)
250  {
251      ARMCPU *cpu = opaque;
252  
253      return cpu_isar_feature(aa64_sve, cpu);
254  }
255  
256  /* The first two words of each Zreg are stored in VFP state.  */
257  static const VMStateDescription vmstate_zreg_hi_reg = {
258      .name = "cpu/sve/zreg_hi",
259      .version_id = 1,
260      .minimum_version_id = 1,
261      .fields = (const VMStateField[]) {
262          VMSTATE_UINT64_SUB_ARRAY(d, ARMVectorReg, 2, ARM_MAX_VQ - 2),
263          VMSTATE_END_OF_LIST()
264      }
265  };
266  
267  static const VMStateDescription vmstate_preg_reg = {
268      .name = "cpu/sve/preg",
269      .version_id = 1,
270      .minimum_version_id = 1,
271      .fields = (const VMStateField[]) {
272          VMSTATE_UINT64_ARRAY(p, ARMPredicateReg, 2 * ARM_MAX_VQ / 8),
273          VMSTATE_END_OF_LIST()
274      }
275  };
276  
277  static const VMStateDescription vmstate_sve = {
278      .name = "cpu/sve",
279      .version_id = 1,
280      .minimum_version_id = 1,
281      .needed = sve_needed,
282      .fields = (const VMStateField[]) {
283          VMSTATE_STRUCT_ARRAY(env.vfp.zregs, ARMCPU, 32, 0,
284                               vmstate_zreg_hi_reg, ARMVectorReg),
285          VMSTATE_STRUCT_ARRAY(env.vfp.pregs, ARMCPU, 17, 0,
286                               vmstate_preg_reg, ARMPredicateReg),
287          VMSTATE_END_OF_LIST()
288      }
289  };
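/*
 * Layout note: for backwards compatibility the low 128 bits of each
 * Zreg (the Neon/FP Q-register view, d[0] and d[1]) still travel in the
 * cpu/vfp section above, so cpu/sve only carries the remaining high
 * part of each vector via vmstate_zreg_hi_reg. The 17 predicate
 * registers are P0-P15 plus the FFR, which QEMU keeps as pregs[16].
 */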
290  
291  static const VMStateDescription vmstate_vreg = {
292      .name = "vreg",
293      .version_id = 1,
294      .minimum_version_id = 1,
295      .fields = (const VMStateField[]) {
296          VMSTATE_UINT64_ARRAY(d, ARMVectorReg, ARM_MAX_VQ * 2),
297          VMSTATE_END_OF_LIST()
298      }
299  };
300  
301  static bool za_needed(void *opaque)
302  {
303      ARMCPU *cpu = opaque;
304  
305      /*
306       * When ZA storage is disabled, its contents are discarded.
307       * It will be zeroed when ZA storage is re-enabled.
308       */
309      return FIELD_EX64(cpu->env.svcr, SVCR, ZA);
310  }
311  
312  static const VMStateDescription vmstate_za = {
313      .name = "cpu/sme",
314      .version_id = 1,
315      .minimum_version_id = 1,
316      .needed = za_needed,
317      .fields = (const VMStateField[]) {
318          VMSTATE_STRUCT_ARRAY(env.zarray, ARMCPU, ARM_MAX_VQ * 16, 0,
319                               vmstate_vreg, ARMVectorReg),
320          VMSTATE_END_OF_LIST()
321      }
322  };
323  #endif /* TARGET_AARCH64 */
324  
325  static bool serror_needed(void *opaque)
326  {
327      ARMCPU *cpu = opaque;
328      CPUARMState *env = &cpu->env;
329  
330      return env->serror.pending != 0;
331  }
332  
333  static const VMStateDescription vmstate_serror = {
334      .name = "cpu/serror",
335      .version_id = 1,
336      .minimum_version_id = 1,
337      .needed = serror_needed,
338      .fields = (const VMStateField[]) {
339          VMSTATE_UINT8(env.serror.pending, ARMCPU),
340          VMSTATE_UINT8(env.serror.has_esr, ARMCPU),
341          VMSTATE_UINT64(env.serror.esr, ARMCPU),
342          VMSTATE_END_OF_LIST()
343      }
344  };
345  
346  static bool irq_line_state_needed(void *opaque)
347  {
348      return true;
349  }
350  
351  static const VMStateDescription vmstate_irq_line_state = {
352      .name = "cpu/irq-line-state",
353      .version_id = 1,
354      .minimum_version_id = 1,
355      .needed = irq_line_state_needed,
356      .fields = (const VMStateField[]) {
357          VMSTATE_UINT32(env.irq_line_state, ARMCPU),
358          VMSTATE_END_OF_LIST()
359      }
360  };
361  
362  static bool wfxt_timer_needed(void *opaque)
363  {
364      ARMCPU *cpu = opaque;
365  
366      /* We'll only have the timer object if FEAT_WFxT is implemented */
367      return cpu->wfxt_timer;
368  }
369  
370  static const VMStateDescription vmstate_wfxt_timer = {
371      .name = "cpu/wfxt-timer",
372      .version_id = 1,
373      .minimum_version_id = 1,
374      .needed = wfxt_timer_needed,
375      .fields = (const VMStateField[]) {
376          VMSTATE_TIMER_PTR(wfxt_timer, ARMCPU),
377          VMSTATE_END_OF_LIST()
378      }
379  };
380  
381  static bool m_needed(void *opaque)
382  {
383      ARMCPU *cpu = opaque;
384      CPUARMState *env = &cpu->env;
385  
386      return arm_feature(env, ARM_FEATURE_M);
387  }
388  
389  static const VMStateDescription vmstate_m_faultmask_primask = {
390      .name = "cpu/m/faultmask-primask",
391      .version_id = 1,
392      .minimum_version_id = 1,
393      .needed = m_needed,
394      .fields = (const VMStateField[]) {
395          VMSTATE_UINT32(env.v7m.faultmask[M_REG_NS], ARMCPU),
396          VMSTATE_UINT32(env.v7m.primask[M_REG_NS], ARMCPU),
397          VMSTATE_END_OF_LIST()
398      }
399  };
400  
401  /* CSSELR is in a subsection because we didn't implement it previously.
402   * Migration from an old implementation will leave it at zero, which
403   * is OK since the only CPUs in the old implementation make the
404   * register RAZ/WI.
405   * Since there was no version of QEMU which implemented the CSSELR for
406   * just non-secure, we transfer both banks here rather than putting
407   * the secure banked version in the m-security subsection.
408   */
409  static bool csselr_vmstate_validate(void *opaque, int version_id)
410  {
411      ARMCPU *cpu = opaque;
412  
413      return cpu->env.v7m.csselr[M_REG_NS] <= R_V7M_CSSELR_INDEX_MASK
414          && cpu->env.v7m.csselr[M_REG_S] <= R_V7M_CSSELR_INDEX_MASK;
415  }
416  
417  static bool m_csselr_needed(void *opaque)
418  {
419      ARMCPU *cpu = opaque;
420  
421      return !arm_v7m_csselr_razwi(cpu);
422  }
423  
424  static const VMStateDescription vmstate_m_csselr = {
425      .name = "cpu/m/csselr",
426      .version_id = 1,
427      .minimum_version_id = 1,
428      .needed = m_csselr_needed,
429      .fields = (const VMStateField[]) {
430          VMSTATE_UINT32_ARRAY(env.v7m.csselr, ARMCPU, M_REG_NUM_BANKS),
431          VMSTATE_VALIDATE("CSSELR is valid", csselr_vmstate_validate),
432          VMSTATE_END_OF_LIST()
433      }
434  };
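/*
 * VMSTATE_VALIDATE entries such as the one above carry no data of their
 * own: on an inbound migration the named predicate runs once the
 * preceding fields in the list have been loaded, and a false return
 * fails the migration. Here that rejects a stream in which either
 * banked CSSELR value exceeds R_V7M_CSSELR_INDEX_MASK; the same pattern
 * guards MPU_RNR and SAU_RNR further down in this file.
 */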
435  
436  static const VMStateDescription vmstate_m_scr = {
437      .name = "cpu/m/scr",
438      .version_id = 1,
439      .minimum_version_id = 1,
440      .needed = m_needed,
441      .fields = (const VMStateField[]) {
442          VMSTATE_UINT32(env.v7m.scr[M_REG_NS], ARMCPU),
443          VMSTATE_END_OF_LIST()
444      }
445  };
446  
447  static const VMStateDescription vmstate_m_other_sp = {
448      .name = "cpu/m/other-sp",
449      .version_id = 1,
450      .minimum_version_id = 1,
451      .needed = m_needed,
452      .fields = (const VMStateField[]) {
453          VMSTATE_UINT32(env.v7m.other_sp, ARMCPU),
454          VMSTATE_END_OF_LIST()
455      }
456  };
457  
458  static bool m_v8m_needed(void *opaque)
459  {
460      ARMCPU *cpu = opaque;
461      CPUARMState *env = &cpu->env;
462  
463      return arm_feature(env, ARM_FEATURE_M) && arm_feature(env, ARM_FEATURE_V8);
464  }
465  
466  static const VMStateDescription vmstate_m_v8m = {
467      .name = "cpu/m/v8m",
468      .version_id = 1,
469      .minimum_version_id = 1,
470      .needed = m_v8m_needed,
471      .fields = (const VMStateField[]) {
472          VMSTATE_UINT32_ARRAY(env.v7m.msplim, ARMCPU, M_REG_NUM_BANKS),
473          VMSTATE_UINT32_ARRAY(env.v7m.psplim, ARMCPU, M_REG_NUM_BANKS),
474          VMSTATE_END_OF_LIST()
475      }
476  };
477  
478  static const VMStateDescription vmstate_m_fp = {
479      .name = "cpu/m/fp",
480      .version_id = 1,
481      .minimum_version_id = 1,
482      .needed = vfp_needed,
483      .fields = (const VMStateField[]) {
484          VMSTATE_UINT32_ARRAY(env.v7m.fpcar, ARMCPU, M_REG_NUM_BANKS),
485          VMSTATE_UINT32_ARRAY(env.v7m.fpccr, ARMCPU, M_REG_NUM_BANKS),
486          VMSTATE_UINT32_ARRAY(env.v7m.fpdscr, ARMCPU, M_REG_NUM_BANKS),
487          VMSTATE_UINT32_ARRAY(env.v7m.cpacr, ARMCPU, M_REG_NUM_BANKS),
488          VMSTATE_UINT32(env.v7m.nsacr, ARMCPU),
489          VMSTATE_END_OF_LIST()
490      }
491  };
492  
493  static bool mve_needed(void *opaque)
494  {
495      ARMCPU *cpu = opaque;
496  
497      return cpu_isar_feature(aa32_mve, cpu);
498  }
499  
500  static const VMStateDescription vmstate_m_mve = {
501      .name = "cpu/m/mve",
502      .version_id = 1,
503      .minimum_version_id = 1,
504      .needed = mve_needed,
505      .fields = (const VMStateField[]) {
506          VMSTATE_UINT32(env.v7m.vpr, ARMCPU),
507          VMSTATE_UINT32(env.v7m.ltpsize, ARMCPU),
508          VMSTATE_END_OF_LIST()
509      },
510  };
511  
512  static const VMStateDescription vmstate_m = {
513      .name = "cpu/m",
514      .version_id = 4,
515      .minimum_version_id = 4,
516      .needed = m_needed,
517      .fields = (const VMStateField[]) {
518          VMSTATE_UINT32(env.v7m.vecbase[M_REG_NS], ARMCPU),
519          VMSTATE_UINT32(env.v7m.basepri[M_REG_NS], ARMCPU),
520          VMSTATE_UINT32(env.v7m.control[M_REG_NS], ARMCPU),
521          VMSTATE_UINT32(env.v7m.ccr[M_REG_NS], ARMCPU),
522          VMSTATE_UINT32(env.v7m.cfsr[M_REG_NS], ARMCPU),
523          VMSTATE_UINT32(env.v7m.hfsr, ARMCPU),
524          VMSTATE_UINT32(env.v7m.dfsr, ARMCPU),
525          VMSTATE_UINT32(env.v7m.mmfar[M_REG_NS], ARMCPU),
526          VMSTATE_UINT32(env.v7m.bfar, ARMCPU),
527          VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_NS], ARMCPU),
528          VMSTATE_INT32(env.v7m.exception, ARMCPU),
529          VMSTATE_END_OF_LIST()
530      },
531      .subsections = (const VMStateDescription * const []) {
532          &vmstate_m_faultmask_primask,
533          &vmstate_m_csselr,
534          &vmstate_m_scr,
535          &vmstate_m_other_sp,
536          &vmstate_m_v8m,
537          &vmstate_m_fp,
538          &vmstate_m_mve,
539          NULL
540      }
541  };
542  
543  static bool thumb2ee_needed(void *opaque)
544  {
545      ARMCPU *cpu = opaque;
546      CPUARMState *env = &cpu->env;
547  
548      return arm_feature(env, ARM_FEATURE_THUMB2EE);
549  }
550  
551  static const VMStateDescription vmstate_thumb2ee = {
552      .name = "cpu/thumb2ee",
553      .version_id = 1,
554      .minimum_version_id = 1,
555      .needed = thumb2ee_needed,
556      .fields = (const VMStateField[]) {
557          VMSTATE_UINT32(env.teecr, ARMCPU),
558          VMSTATE_UINT32(env.teehbr, ARMCPU),
559          VMSTATE_END_OF_LIST()
560      }
561  };
562  
563  static bool pmsav7_needed(void *opaque)
564  {
565      ARMCPU *cpu = opaque;
566      CPUARMState *env = &cpu->env;
567  
568      return arm_feature(env, ARM_FEATURE_PMSA) &&
569             arm_feature(env, ARM_FEATURE_V7) &&
570             !arm_feature(env, ARM_FEATURE_V8);
571  }
572  
573  static bool pmsav7_rgnr_vmstate_validate(void *opaque, int version_id)
574  {
575      ARMCPU *cpu = opaque;
576  
577      return cpu->env.pmsav7.rnr[M_REG_NS] < cpu->pmsav7_dregion;
578  }
579  
580  static const VMStateDescription vmstate_pmsav7 = {
581      .name = "cpu/pmsav7",
582      .version_id = 1,
583      .minimum_version_id = 1,
584      .needed = pmsav7_needed,
585      .fields = (const VMStateField[]) {
586          VMSTATE_VARRAY_UINT32(env.pmsav7.drbar, ARMCPU, pmsav7_dregion, 0,
587                                vmstate_info_uint32, uint32_t),
588          VMSTATE_VARRAY_UINT32(env.pmsav7.drsr, ARMCPU, pmsav7_dregion, 0,
589                                vmstate_info_uint32, uint32_t),
590          VMSTATE_VARRAY_UINT32(env.pmsav7.dracr, ARMCPU, pmsav7_dregion, 0,
591                                vmstate_info_uint32, uint32_t),
592          VMSTATE_VALIDATE("rgnr is valid", pmsav7_rgnr_vmstate_validate),
593          VMSTATE_END_OF_LIST()
594      }
595  };
596  
597  static bool pmsav7_rnr_needed(void *opaque)
598  {
599      ARMCPU *cpu = opaque;
600      CPUARMState *env = &cpu->env;
601  
602      /* For R profile cores pmsav7.rnr is migrated via the cpreg
603       * "RGNR" definition in helper.h. For M profile we have to
604       * migrate it separately.
605       */
606      return arm_feature(env, ARM_FEATURE_M);
607  }
608  
609  static const VMStateDescription vmstate_pmsav7_rnr = {
610      .name = "cpu/pmsav7-rnr",
611      .version_id = 1,
612      .minimum_version_id = 1,
613      .needed = pmsav7_rnr_needed,
614      .fields = (const VMStateField[]) {
615          VMSTATE_UINT32(env.pmsav7.rnr[M_REG_NS], ARMCPU),
616          VMSTATE_END_OF_LIST()
617      }
618  };
619  
620  static bool pmsav8_needed(void *opaque)
621  {
622      ARMCPU *cpu = opaque;
623      CPUARMState *env = &cpu->env;
624  
625      return arm_feature(env, ARM_FEATURE_PMSA) &&
626          arm_feature(env, ARM_FEATURE_V8);
627  }
628  
629  static bool pmsav8r_needed(void *opaque)
630  {
631      ARMCPU *cpu = opaque;
632      CPUARMState *env = &cpu->env;
633  
634      return arm_feature(env, ARM_FEATURE_PMSA) &&
635          arm_feature(env, ARM_FEATURE_V8) &&
636          !arm_feature(env, ARM_FEATURE_M);
637  }
638  
639  static const VMStateDescription vmstate_pmsav8r = {
640      .name = "cpu/pmsav8/pmsav8r",
641      .version_id = 1,
642      .minimum_version_id = 1,
643      .needed = pmsav8r_needed,
644      .fields = (const VMStateField[]) {
645          VMSTATE_VARRAY_UINT32(env.pmsav8.hprbar, ARMCPU,
646                          pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t),
647          VMSTATE_VARRAY_UINT32(env.pmsav8.hprlar, ARMCPU,
648                          pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t),
649          VMSTATE_END_OF_LIST()
650      },
651  };
652  
653  static const VMStateDescription vmstate_pmsav8 = {
654      .name = "cpu/pmsav8",
655      .version_id = 1,
656      .minimum_version_id = 1,
657      .needed = pmsav8_needed,
658      .fields = (const VMStateField[]) {
659          VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_NS], ARMCPU, pmsav7_dregion,
660                                0, vmstate_info_uint32, uint32_t),
661          VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_NS], ARMCPU, pmsav7_dregion,
662                                0, vmstate_info_uint32, uint32_t),
663          VMSTATE_UINT32(env.pmsav8.mair0[M_REG_NS], ARMCPU),
664          VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU),
665          VMSTATE_END_OF_LIST()
666      },
667      .subsections = (const VMStateDescription * const []) {
668          &vmstate_pmsav8r,
669          NULL
670      }
671  };
672  
673  static bool s_rnr_vmstate_validate(void *opaque, int version_id)
674  {
675      ARMCPU *cpu = opaque;
676  
677      return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion;
678  }
679  
680  static bool sau_rnr_vmstate_validate(void *opaque, int version_id)
681  {
682      ARMCPU *cpu = opaque;
683  
684      return cpu->env.sau.rnr < cpu->sau_sregion;
685  }
686  
687  static bool m_security_needed(void *opaque)
688  {
689      ARMCPU *cpu = opaque;
690      CPUARMState *env = &cpu->env;
691  
692      return arm_feature(env, ARM_FEATURE_M_SECURITY);
693  }
694  
695  static const VMStateDescription vmstate_m_security = {
696      .name = "cpu/m-security",
697      .version_id = 1,
698      .minimum_version_id = 1,
699      .needed = m_security_needed,
700      .fields = (const VMStateField[]) {
701          VMSTATE_UINT32(env.v7m.secure, ARMCPU),
702          VMSTATE_UINT32(env.v7m.other_ss_msp, ARMCPU),
703          VMSTATE_UINT32(env.v7m.other_ss_psp, ARMCPU),
704          VMSTATE_UINT32(env.v7m.basepri[M_REG_S], ARMCPU),
705          VMSTATE_UINT32(env.v7m.primask[M_REG_S], ARMCPU),
706          VMSTATE_UINT32(env.v7m.faultmask[M_REG_S], ARMCPU),
707          VMSTATE_UINT32(env.v7m.control[M_REG_S], ARMCPU),
708          VMSTATE_UINT32(env.v7m.vecbase[M_REG_S], ARMCPU),
709          VMSTATE_UINT32(env.pmsav8.mair0[M_REG_S], ARMCPU),
710          VMSTATE_UINT32(env.pmsav8.mair1[M_REG_S], ARMCPU),
711          VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_S], ARMCPU, pmsav7_dregion,
712                                0, vmstate_info_uint32, uint32_t),
713          VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_S], ARMCPU, pmsav7_dregion,
714                                0, vmstate_info_uint32, uint32_t),
715          VMSTATE_UINT32(env.pmsav7.rnr[M_REG_S], ARMCPU),
716          VMSTATE_VALIDATE("secure MPU_RNR is valid", s_rnr_vmstate_validate),
717          VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_S], ARMCPU),
718          VMSTATE_UINT32(env.v7m.ccr[M_REG_S], ARMCPU),
719          VMSTATE_UINT32(env.v7m.mmfar[M_REG_S], ARMCPU),
720          VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU),
721          VMSTATE_UINT32(env.v7m.sfsr, ARMCPU),
722          VMSTATE_UINT32(env.v7m.sfar, ARMCPU),
723          VMSTATE_VARRAY_UINT32(env.sau.rbar, ARMCPU, sau_sregion, 0,
724                                vmstate_info_uint32, uint32_t),
725          VMSTATE_VARRAY_UINT32(env.sau.rlar, ARMCPU, sau_sregion, 0,
726                                vmstate_info_uint32, uint32_t),
727          VMSTATE_UINT32(env.sau.rnr, ARMCPU),
728          VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate),
729          VMSTATE_UINT32(env.sau.ctrl, ARMCPU),
730          VMSTATE_UINT32(env.v7m.scr[M_REG_S], ARMCPU),
731          /* AIRCR is not secure-only, but our implementation is R/O if the
732           * security extension is unimplemented, so we migrate it here.
733           */
734          VMSTATE_UINT32(env.v7m.aircr, ARMCPU),
735          VMSTATE_END_OF_LIST()
736      }
737  };
738  
739  static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
740                      const VMStateField *field)
741  {
742      ARMCPU *cpu = opaque;
743      CPUARMState *env = &cpu->env;
744      uint32_t val = qemu_get_be32(f);
745  
746      if (arm_feature(env, ARM_FEATURE_M)) {
747          if (val & XPSR_EXCP) {
748              /* This is a CPSR format value from an older QEMU. (We can tell
749               * because values transferred in XPSR format always have zero
750               * for the EXCP field, and CPSR format will always have bit 4
751               * set in CPSR_M.) Rearrange it into XPSR format. The significant
752               * differences are that the T bit is not in the same place, the
753               * primask/faultmask info may be in the CPSR I and F bits, and
754               * we do not want the mode bits.
755               * We know that this cleanup happened before v8M, so there
756               * is no complication with banked primask/faultmask.
757               */
758              uint32_t newval = val;
759  
760              assert(!arm_feature(env, ARM_FEATURE_M_SECURITY));
761  
762              newval &= (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE);
763              if (val & CPSR_T) {
764                  newval |= XPSR_T;
765              }
766              /* If the I or F bits are set then this is a migration from
767               * an old QEMU which still stored the M profile FAULTMASK
768               * and PRIMASK in env->daif. For a new QEMU, the data is
769               * transferred using the vmstate_m_faultmask_primask subsection.
770               */
771              if (val & CPSR_F) {
772                  env->v7m.faultmask[M_REG_NS] = 1;
773              }
774              if (val & CPSR_I) {
775                  env->v7m.primask[M_REG_NS] = 1;
776              }
777              val = newval;
778          }
779          /* Ignore the low bits, they are handled by vmstate_m. */
780          xpsr_write(env, val, ~XPSR_EXCP);
781          return 0;
782      }
783  
784      env->aarch64 = ((val & PSTATE_nRW) == 0);
785  
786      if (is_a64(env)) {
787          pstate_write(env, val);
788          return 0;
789      }
790  
791      cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
792      return 0;
793  }
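/*
 * A worked example of the M-profile legacy conversion above, using a
 * hypothetical inbound value from an old QEMU:
 *
 *     val = 0x800000F3          N flag, T, F, I and mode bits 0b10011 set
 *     val & XPSR_EXCP != 0      so this is CPSR format, not XPSR format
 *     newval = 0x80000000       only the NZCV/Q/IT/GE bits are kept
 *     CPSR_T set                newval |= XPSR_T  ->  0x81000000
 *     CPSR_F set                faultmask[M_REG_NS] = 1
 *     CPSR_I set                primask[M_REG_NS] = 1
 *
 * and 0x81000000 is then written with xpsr_write(), leaving the EXCP
 * field for vmstate_m to fill in.
 */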
794  
795  static int put_cpsr(QEMUFile *f, void *opaque, size_t size,
796                      const VMStateField *field, JSONWriter *vmdesc)
797  {
798      ARMCPU *cpu = opaque;
799      CPUARMState *env = &cpu->env;
800      uint32_t val;
801  
802      if (arm_feature(env, ARM_FEATURE_M)) {
803          /* The low 9 bits are v7m.exception, which is handled by vmstate_m. */
804          val = xpsr_read(env) & ~XPSR_EXCP;
805      } else if (is_a64(env)) {
806          val = pstate_read(env);
807      } else {
808          val = cpsr_read(env);
809      }
810  
811      qemu_put_be32(f, val);
812      return 0;
813  }
814  
815  static const VMStateInfo vmstate_cpsr = {
816      .name = "cpsr",
817      .get = get_cpsr,
818      .put = put_cpsr,
819  };
820  
821  static int get_power(QEMUFile *f, void *opaque, size_t size,
822                      const VMStateField *field)
823  {
824      ARMCPU *cpu = opaque;
825      bool powered_off = qemu_get_byte(f);
826      cpu->power_state = powered_off ? PSCI_OFF : PSCI_ON;
827      return 0;
828  }
829  
830  static int put_power(QEMUFile *f, void *opaque, size_t size,
831                      const VMStateField *field, JSONWriter *vmdesc)
832  {
833      ARMCPU *cpu = opaque;
834  
835      /* Migration should never happen while we transition power states */
836  
837      if (cpu->power_state == PSCI_ON ||
838          cpu->power_state == PSCI_OFF) {
839          bool powered_off = (cpu->power_state == PSCI_OFF) ? true : false;
840          qemu_put_byte(f, powered_off);
841          return 0;
842      } else {
843          return 1;
844      }
845  }
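/*
 * Returning a non-zero value from a VMStateInfo .put handler makes the
 * outbound save fail, so a CPU caught in a transitional power state
 * (for example PSCI_ON_PENDING) aborts the migration rather than
 * migrating a half-transitioned power state.
 */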
846  
847  static const VMStateInfo vmstate_powered_off = {
848      .name = "powered_off",
849      .get = get_power,
850      .put = put_power,
851  };
852  
853  static int cpu_pre_save(void *opaque)
854  {
855      ARMCPU *cpu = opaque;
856  
857      if (!kvm_enabled()) {
858          pmu_op_start(&cpu->env);
859      }
860  
861      if (kvm_enabled()) {
862          if (!write_kvmstate_to_list(cpu)) {
863              /* This should never fail */
864              g_assert_not_reached();
865          }
866  
867          /*
868           * kvm_arm_cpu_pre_save() must be called after
869           * write_kvmstate_to_list()
870           */
871          kvm_arm_cpu_pre_save(cpu);
872      } else {
873          if (!write_cpustate_to_list(cpu, false)) {
874              /* This should never fail. */
875              g_assert_not_reached();
876          }
877      }
878  
879      cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
880      memcpy(cpu->cpreg_vmstate_indexes, cpu->cpreg_indexes,
881             cpu->cpreg_array_len * sizeof(uint64_t));
882      memcpy(cpu->cpreg_vmstate_values, cpu->cpreg_values,
883             cpu->cpreg_array_len * sizeof(uint64_t));
884  
885      return 0;
886  }
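/*
 * The cpreg_vmstate_* copies made above are what the outbound stream
 * actually carries: they are the arrays named by the
 * VMSTATE_VARRAY_INT32 entries in vmstate_arm_cpu at the bottom of this
 * file, so the migrated system/coprocessor register list is a snapshot
 * taken at pre_save time rather than the live cpreg_values array.
 */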
887  
888  static int cpu_post_save(void *opaque)
889  {
890      ARMCPU *cpu = opaque;
891  
892      if (!kvm_enabled()) {
893          pmu_op_finish(&cpu->env);
894      }
895  
896      return 0;
897  }
898  
899  static int cpu_pre_load(void *opaque)
900  {
901      ARMCPU *cpu = opaque;
902      CPUARMState *env = &cpu->env;
903  
904      /*
905       * In an inbound migration where FPSCR/FPSR/FPCR were all 0 on the source,
906       * there will be no fpcr_fpsr subsection so we won't call vfp_set_fpcr()
907       * and vfp_set_fpsr() from get_fpcr() and get_fpsr(); also the get_fpscr()
908       * function will not call vfp_set_fpscr() because it will see a 0 in the
909       * inbound data. Ensure that in this case we have a correctly set up
910       * zero FPSCR/FPCR/FPSR.
911       *
912       * This is not strictly needed because FPSCR is zero out of reset, but
913       * it avoids the possibility of confusing migration bugs if some
914       * future architecture change makes the reset value non-zero.
915       */
916      vfp_set_fpscr(env, 0);
917  
918      /*
919       * Pre-initialize irq_line_state to a value that's never valid as
920       * real data, so cpu_post_load() can tell whether we've seen the
921       * irq-line-state subsection in the incoming migration state.
922       */
923      env->irq_line_state = UINT32_MAX;
924  
925      if (!kvm_enabled()) {
926          pmu_op_start(env);
927      }
928  
929      return 0;
930  }
931  
932  static int cpu_post_load(void *opaque, int version_id)
933  {
934      ARMCPU *cpu = opaque;
935      CPUARMState *env = &cpu->env;
936      int i, v;
937  
938      /*
939       * Handle migration compatibility from old QEMU which didn't
940       * send the irq-line-state subsection. A QEMU without it did not
941       * implement the HCR_EL2.{VI,VF} bits as generating interrupts,
942       * so for TCG the line state matches the bits set in cs->interrupt_request.
943       * For KVM the line state is not stored in cs->interrupt_request
944       * and so this will leave irq_line_state as 0, but this is OK because
945       * we only need to care about it for TCG.
946       */
947      if (env->irq_line_state == UINT32_MAX) {
948          CPUState *cs = CPU(cpu);
949  
950          env->irq_line_state = cs->interrupt_request &
951              (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ |
952               CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VFIQ);
953      }
954  
955      /* Update the values list from the incoming migration data.
956       * Anything in the incoming data which we don't know about is
957       * a migration failure; anything we know about but the incoming
958       * data doesn't specify retains its current (reset) value.
959       * The indexes list remains untouched -- we only inspect the
960       * incoming migration index list so we can match the values array
961       * entries with the right slots in our own values array.
962       */
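    /*
     * A small worked example with hypothetical register indexes: if our
     * sorted list is {0x10, 0x20, 0x30} and the incoming sorted list is
     * {0x10, 0x30}, then 0x10 and 0x30 have their values copied across
     * while 0x20 keeps its reset value. If the incoming list instead
     * contained an index such as 0x25 that we do not know about, the
     * "in their list but not ours" branch below returns -1 and the
     * migration fails.
     */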
963  
964      for (i = 0, v = 0; i < cpu->cpreg_array_len
965               && v < cpu->cpreg_vmstate_array_len; i++) {
966          if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) {
967              /* register in our list but not incoming : skip it */
968              continue;
969          }
970          if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) {
971              /* register in their list but not ours: fail migration */
972              return -1;
973          }
974          /* matching register, copy the value over */
975          cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v];
976          v++;
977      }
978  
979      if (kvm_enabled()) {
980          if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
981              return -1;
982          }
983          /* Note that it's OK for the TCG side not to know about
984           * every register in the list; KVM is authoritative if
985           * we're using it.
986           */
987          write_list_to_cpustate(cpu);
988          kvm_arm_cpu_post_load(cpu);
989      } else {
990          if (!write_list_to_cpustate(cpu)) {
991              return -1;
992          }
993      }
994  
995      /*
996       * Misaligned thumb pc is architecturally impossible. Fail the
997       * incoming migration. For TCG it would trigger the assert in
998       * thumb_tr_translate_insn().
999       */
1000      if (!is_a64(env) && env->thumb && (env->regs[15] & 1)) {
1001          return -1;
1002      }
1003  
1004      if (tcg_enabled()) {
1005          hw_breakpoint_update_all(cpu);
1006          hw_watchpoint_update_all(cpu);
1007      }
1008  
1009      /*
1010       * TCG gen_update_fp_context() relies on the invariant that
1011       * FPDSCR.LTPSIZE is constant 4 for M-profile with the LOB extension;
1012       * forbid bogus incoming data with some other value.
1013       */
1014      if (arm_feature(env, ARM_FEATURE_M) && cpu_isar_feature(aa32_lob, cpu)) {
1015          if (extract32(env->v7m.fpdscr[M_REG_NS],
1016                        FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4 ||
1017              extract32(env->v7m.fpdscr[M_REG_S],
1018                        FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4) {
1019              return -1;
1020          }
1021      }
1022  
1023      if (!kvm_enabled()) {
1024          pmu_op_finish(env);
1025      }
1026  
1027      if (tcg_enabled()) {
1028          arm_rebuild_hflags(env);
1029      }
1030  
1031      return 0;
1032  }
1033  
1034  const VMStateDescription vmstate_arm_cpu = {
1035      .name = "cpu",
1036      .version_id = 22,
1037      .minimum_version_id = 22,
1038      .pre_save = cpu_pre_save,
1039      .post_save = cpu_post_save,
1040      .pre_load = cpu_pre_load,
1041      .post_load = cpu_post_load,
1042      .fields = (const VMStateField[]) {
1043          VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
1044          VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
1045          VMSTATE_UINT64(env.pc, ARMCPU),
1046          {
1047              .name = "cpsr",
1048              .version_id = 0,
1049              .size = sizeof(uint32_t),
1050              .info = &vmstate_cpsr,
1051              .flags = VMS_SINGLE,
1052              .offset = 0,
1053          },
1054          VMSTATE_UINT32(env.spsr, ARMCPU),
1055          VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
1056          VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 8),
1057          VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 8),
1058          VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
1059          VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
1060          VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
1061          VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
1062          /* The length-check must come before the arrays to avoid
1063           * incoming data possibly overflowing the array.
1064           */
1065          VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len, ARMCPU),
1066          VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes, ARMCPU,
1067                               cpreg_vmstate_array_len,
1068                               0, vmstate_info_uint64, uint64_t),
1069          VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
1070                               cpreg_vmstate_array_len,
1071                               0, vmstate_info_uint64, uint64_t),
1072          VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
1073          VMSTATE_UINT64(env.exclusive_val, ARMCPU),
1074          VMSTATE_UINT64(env.exclusive_high, ARMCPU),
1075          VMSTATE_UNUSED(sizeof(uint64_t)),
1076          VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
1077          VMSTATE_UINT32(env.exception.fsr, ARMCPU),
1078          VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
1079          VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
1080          VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
1081          {
1082              .name = "power_state",
1083              .version_id = 0,
1084              .size = sizeof(bool),
1085              .info = &vmstate_powered_off,
1086              .flags = VMS_SINGLE,
1087              .offset = 0,
1088          },
1089          VMSTATE_END_OF_LIST()
1090      },
1091      .subsections = (const VMStateDescription * const []) {
1092          &vmstate_vfp,
1093          &vmstate_iwmmxt,
1094          &vmstate_m,
1095          &vmstate_thumb2ee,
1096          /* pmsav7_rnr must come before pmsav7 so that we have the
1097           * region number before we test it in the VMSTATE_VALIDATE
1098           * in vmstate_pmsav7.
1099           */
1100          &vmstate_pmsav7_rnr,
1101          &vmstate_pmsav7,
1102          &vmstate_pmsav8,
1103          &vmstate_m_security,
1104  #ifdef TARGET_AARCH64
1105          &vmstate_sve,
1106          &vmstate_za,
1107  #endif
1108          &vmstate_serror,
1109          &vmstate_irq_line_state,
1110          &vmstate_wfxt_timer,
1111          NULL
1112      }
1113  };
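/*
 * Each subsection above is written only when its .needed callback
 * returns true at save time; on the inbound side a subsection that is
 * absent from the stream is simply not loaded and the corresponding
 * state keeps its reset value. That is the mechanism the fpcr_fpsr,
 * irq-line-state and other compatibility notes earlier in this file
 * rely on to stay migration-compatible with older QEMU versions.
 */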
1114