/* target/ppc/machine.c (revision 3d9569b8) */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "hw/hw.h"
#include "hw/boards.h"
#include "sysemu/kvm.h"
#include "helper_regs.h"
#include "mmu-hash64.h"
#include "migration/cpu.h"
#include "qapi/error.h"
#include "kvm_ppc.h"
#include "exec/helper-proto.h"

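/*
 * Legacy load handler for version 4 migration streams, from before the
 * CPU state was converted to VMState: every field is read back by hand,
 * in exactly the order the old save code wrote it (see .load_state_old
 * and .minimum_version_id_old in vmstate_ppc_cpu below).
 */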
static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    unsigned int i, j;
    target_ulong sdr1;
    uint32_t fpscr, vscr;
#if defined(TARGET_PPC64)
    int32_t slb_nr;
#endif
    target_ulong xer;

    for (i = 0; i < 32; i++) {
        qemu_get_betls(f, &env->gpr[i]);
    }
#if !defined(TARGET_PPC64)
    for (i = 0; i < 32; i++) {
        qemu_get_betls(f, &env->gprh[i]);
    }
#endif
    qemu_get_betls(f, &env->lr);
    qemu_get_betls(f, &env->ctr);
    for (i = 0; i < 8; i++) {
        qemu_get_be32s(f, &env->crf[i]);
    }
    qemu_get_betls(f, &xer);
    cpu_write_xer(env, xer);
    qemu_get_betls(f, &env->reserve_addr);
    qemu_get_betls(f, &env->msr);
    for (i = 0; i < 4; i++) {
        qemu_get_betls(f, &env->tgpr[i]);
    }
    for (i = 0; i < 32; i++) {
        union {
            float64 d;
            uint64_t l;
        } u;
        u.l = qemu_get_be64(f);
        *cpu_fpr_ptr(env, i) = u.d;
    }
    qemu_get_be32s(f, &fpscr);
    env->fpscr = fpscr;
    qemu_get_sbe32s(f, &env->access_type);
#if defined(TARGET_PPC64)
    qemu_get_betls(f, &env->spr[SPR_ASR]);
    qemu_get_sbe32s(f, &slb_nr);
#endif
    qemu_get_betls(f, &sdr1);
    for (i = 0; i < 32; i++) {
        qemu_get_betls(f, &env->sr[i]);
    }
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 8; j++) {
            qemu_get_betls(f, &env->DBAT[i][j]);
        }
    }
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 8; j++) {
            qemu_get_betls(f, &env->IBAT[i][j]);
        }
    }
    qemu_get_sbe32s(f, &env->nb_tlb);
    qemu_get_sbe32s(f, &env->tlb_per_way);
    qemu_get_sbe32s(f, &env->nb_ways);
    qemu_get_sbe32s(f, &env->last_way);
    qemu_get_sbe32s(f, &env->id_tlbs);
    qemu_get_sbe32s(f, &env->nb_pids);
    if (env->tlb.tlb6) {
        /* XXX assumes 6xx */
        for (i = 0; i < env->nb_tlb; i++) {
            qemu_get_betls(f, &env->tlb.tlb6[i].pte0);
            qemu_get_betls(f, &env->tlb.tlb6[i].pte1);
            qemu_get_betls(f, &env->tlb.tlb6[i].EPN);
        }
    }
    for (i = 0; i < 4; i++) {
        qemu_get_betls(f, &env->pb[i]);
    }
    for (i = 0; i < 1024; i++) {
        qemu_get_betls(f, &env->spr[i]);
    }
    if (!cpu->vhyp) {
        ppc_store_sdr1(env, sdr1);
    }
    qemu_get_be32s(f, &vscr);
    helper_mtvscr(env, vscr);
    qemu_get_be64s(f, &env->spe_acc);
    qemu_get_be32s(f, &env->spe_fscr);
    qemu_get_betls(f, &env->msr_mask);
    qemu_get_be32s(f, &env->flags);
    qemu_get_sbe32s(f, &env->error_code);
    qemu_get_be32s(f, &env->pending_interrupts);
    qemu_get_be32s(f, &env->irq_input_state);
    for (i = 0; i < POWERPC_EXCP_NB; i++) {
        qemu_get_betls(f, &env->excp_vectors[i]);
    }
    qemu_get_betls(f, &env->excp_prefix);
    qemu_get_betls(f, &env->ivor_mask);
    qemu_get_betls(f, &env->ivpr_mask);
    qemu_get_betls(f, &env->hreset_vector);
    qemu_get_betls(f, &env->nip);
    qemu_get_betls(f, &env->hflags);
    qemu_get_betls(f, &env->hflags_nmsr);
    qemu_get_sbe32(f); /* Discard unused mmu_idx */
    qemu_get_sbe32(f); /* Discard unused power_mode */

    /* Recompute mmu indices */
    hreg_compute_mem_idx(env);

    return 0;
}

static int get_avr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_avr_t *v = pv;

    v->u64[0] = qemu_get_be64(f);
    v->u64[1] = qemu_get_be64(f);

    return 0;
}

static int put_avr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, QJSON *vmdesc)
{
    ppc_avr_t *v = pv;

    qemu_put_be64(f, v->u64[0]);
    qemu_put_be64(f, v->u64[1]);
    return 0;
}

static const VMStateInfo vmstate_info_avr = {
    .name = "avr",
    .get  = get_avr,
    .put  = put_avr,
};

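/*
 * The Altivec registers share storage with the VSX registers: in this
 * layout the 32 AVRs live in env->vsr[32..63], hence the sub-array
 * offset of 32 below.
 */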
#define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_SUB_ARRAY(_f, _s, 32, _n, _v, vmstate_info_avr, ppc_avr_t)

#define VMSTATE_AVR_ARRAY(_f, _s, _n)                             \
    VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0)

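/*
 * The FPRs occupy the high doubleword (VsrD(0)) of the first 32 VSRs,
 * so only 64 bits per register are migrated here; the low doublewords
 * travel in the "cpu/vsx" subsection via get_vsr()/put_vsr() below.
 */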
static int get_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;

    v->VsrD(0) = qemu_get_be64(f);

    return 0;
}

static int put_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, QJSON *vmdesc)
{
    ppc_vsr_t *v = pv;

    qemu_put_be64(f, v->VsrD(0));
    return 0;
}

static const VMStateInfo vmstate_info_fpr = {
    .name = "fpr",
    .get  = get_fpr,
    .put  = put_fpr,
};

#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_fpr, ppc_vsr_t)

#define VMSTATE_FPR_ARRAY(_f, _s, _n)                             \
    VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0)

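/*
 * Counterpart to get_fpr()/put_fpr(): only the low doubleword
 * (VsrD(1)) of each VSR is migrated here, since the high half is
 * covered by the fpu subsection.
 */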
static int get_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;

    v->VsrD(1) = qemu_get_be64(f);

    return 0;
}

static int put_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, QJSON *vmdesc)
{
    ppc_vsr_t *v = pv;

    qemu_put_be64(f, v->VsrD(1));
    return 0;
}

static const VMStateInfo vmstate_info_vsr = {
    .name = "vsr",
    .get  = get_vsr,
    .put  = put_vsr,
};

#define VMSTATE_VSR_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_vsr, ppc_vsr_t)

#define VMSTATE_VSR_ARRAY(_f, _s, _n)                             \
    VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0)

static bool cpu_pre_2_8_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;

    return cpu->pre_2_8_migration;
}

#if defined(TARGET_PPC64)
static bool cpu_pre_3_0_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;

    return cpu->pre_3_0_migration;
}
#endif

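/*
 * Mirror the architected state that lives outside env->spr[] (LR, CTR,
 * XER, CFAR, SPEFSCR, the BATs) into the SPR array, so that migrating
 * the array captures it; cpu_post_load() performs the inverse copy.
 */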
static int cpu_pre_save(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t insns_compat_mask =
        PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB
        | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES
        | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES
        | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT
        | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ
        | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC
        | PPC_64B | PPC_64BX | PPC_ALTIVEC
        | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD;
    uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX
        | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206
        | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206
        | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207
        | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207
        | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM;

    env->spr[SPR_LR] = env->lr;
    env->spr[SPR_CTR] = env->ctr;
    env->spr[SPR_XER] = cpu_read_xer(env);
#if defined(TARGET_PPC64)
    env->spr[SPR_CFAR] = env->cfar;
#endif
    env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr;

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->spr[SPR_DBAT0U + 2 * i] = env->DBAT[0][i];
        env->spr[SPR_DBAT0U + 2 * i + 1] = env->DBAT[1][i];
        env->spr[SPR_IBAT0U + 2 * i] = env->IBAT[0][i];
        env->spr[SPR_IBAT0U + 2 * i + 1] = env->IBAT[1][i];
    }
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->spr[SPR_DBAT4U + 2 * i] = env->DBAT[0][i + 4];
        env->spr[SPR_DBAT4U + 2 * i + 1] = env->DBAT[1][i + 4];
        env->spr[SPR_IBAT4U + 2 * i] = env->IBAT[0][i + 4];
        env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4];
    }

    /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */
    if (cpu->pre_2_8_migration) {
        /*
         * Mask out bits that got added to msr_mask since the versions
         * which stupidly included it in the migration stream.
         */
        target_ulong metamask = 0
#if defined(TARGET_PPC64)
            | (1ULL << MSR_TS0)
            | (1ULL << MSR_TS1)
#endif
            ;
        cpu->mig_msr_mask = env->msr_mask & ~metamask;
        cpu->mig_insns_flags = env->insns_flags & insns_compat_mask;
        /*
         * CPU models supported by old machines all have
         * PPC_MEM_TLBIE, so we set it unconditionally to allow
         * backward migration from a POWER9 host to a POWER8 host.
         */
        cpu->mig_insns_flags |= PPC_MEM_TLBIE;
        cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2;
        cpu->mig_nb_BATs = env->nb_BATs;
    }
    if (cpu->pre_3_0_migration) {
        if (cpu->hash64_opts) {
            cpu->mig_slb_nr = cpu->hash64_opts->slb_size;
        }
    }

    return 0;
}

/*
 * Determine if a given PVR is a "close enough" match to the CPU
 * object.  For TCG and KVM PR it would probably be sufficient to
 * require an exact PVR match.  However for KVM HV the user is
 * restricted to a PVR exactly matching the host CPU.  The correct way
 * to handle this is to put the guest into an architected
 * compatibility mode.  However, to allow a more forgiving transition
 * and migration from before this was widely done, we allow migration
 * between sufficiently similar PVRs, as determined by the CPU class's
 * pvr_match() hook.
 */
static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (pvr == pcc->pvr) {
        return true;
    }
    return pcc->pvr_match(pcc, pvr);
}

static int cpu_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong msr;

    /*
     * If we're operating in compat mode, we should be ok as long as
     * the destination supports the same compatibility mode.
     *
     * Otherwise, however, we require that the destination has exactly
     * the same CPU model as the source.
     */

#if defined(TARGET_PPC64)
    if (cpu->compat_pvr) {
        uint32_t compat_pvr = cpu->compat_pvr;
        Error *local_err = NULL;

        cpu->compat_pvr = 0;
        ppc_set_compat(cpu, compat_pvr, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    } else
#endif
    {
        if (!pvr_match(cpu, env->spr[SPR_PVR])) {
            return -1;
        }
    }

    /*
     * If we're running with KVM HV, there is a chance that the guest
     * kernel cannot deal with a PVR other than the exact host PVR in
     * KVM_SET_SREGS. If that happens, the guest freezes after
     * migration.
     *
     * kvmppc_pvr_workaround_required() checks for this: if the kernel
     * has the capability, no workaround is needed. Otherwise, it
     * checks whether we're running in KVM PR. If the kernel lacks the
     * cap and we're not running KVM-PR (so, it is running KVM-HV), we
     * need to ensure that KVM_SET_SREGS will receive the PVR it
     * expects as a workaround.
     */
#if defined(CONFIG_KVM)
    if (kvmppc_pvr_workaround_required(cpu)) {
        env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
    }
#endif

    env->lr = env->spr[SPR_LR];
    env->ctr = env->spr[SPR_CTR];
    cpu_write_xer(env, env->spr[SPR_XER]);
#if defined(TARGET_PPC64)
    env->cfar = env->spr[SPR_CFAR];
#endif
    env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2 * i];
        env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2 * i + 1];
        env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2 * i];
        env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2 * i + 1];
    }
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->DBAT[0][i + 4] = env->spr[SPR_DBAT4U + 2 * i];
        env->DBAT[1][i + 4] = env->spr[SPR_DBAT4U + 2 * i + 1];
        env->IBAT[0][i + 4] = env->spr[SPR_IBAT4U + 2 * i];
        env->IBAT[1][i + 4] = env->spr[SPR_IBAT4U + 2 * i + 1];
    }

    if (!cpu->vhyp) {
        ppc_store_sdr1(env, env->spr[SPR_SDR1]);
    }

411 
412     /*
413      * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB
414      * before restoring
415      */
416     msr = env->msr;
417     env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
418     ppc_store_msr(env, msr);
419 
420     hreg_compute_mem_idx(env);
421 
422     return 0;
423 }
424 
static bool fpu_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags & PPC_FLOAT;
}

static const VMStateDescription vmstate_fpu = {
    .name = "cpu/fpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpu_needed,
    .fields = (VMStateField[]) {
        VMSTATE_FPR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_UINTTL(env.fpscr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};

static bool altivec_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags & PPC_ALTIVEC;
}

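/*
 * VSCR is accessed through the mfvscr/mtvscr helpers because env does
 * not hold the architected value directly (see the comment on the
 * "vscr" field in vmstate_altivec below).
 */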
static int get_vscr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    PowerPCCPU *cpu = opaque;
    helper_mtvscr(&cpu->env, qemu_get_be32(f));
    return 0;
}

static int put_vscr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, QJSON *vmdesc)
{
    PowerPCCPU *cpu = opaque;
    qemu_put_be32(f, helper_mfvscr(&cpu->env));
    return 0;
}

static const VMStateInfo vmstate_vscr = {
    .name = "cpu/altivec/vscr",
    .get = get_vscr,
    .put = put_vscr,
};

static const VMStateDescription vmstate_altivec = {
    .name = "cpu/altivec",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = altivec_needed,
    .fields = (VMStateField[]) {
        VMSTATE_AVR_ARRAY(env.vsr, PowerPCCPU, 32),
        /*
         * Save the architected value of the vscr, not the internally
         * expanded version.  Since this architected value does not
         * exist in memory to be stored, this requires a bit of hoop
         * jumping.  We want OFFSET=0 so that we effectively pass CPU
         * to the helper functions.
         */
        {
            .name = "vscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_vscr,
            .flags = VMS_SINGLE,
            .offset = 0
        },
        VMSTATE_END_OF_LIST()
    },
};

static bool vsx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags2 & PPC2_VSX;
}

static const VMStateDescription vmstate_vsx = {
    .name = "cpu/vsx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vsx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VSR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

#ifdef TARGET_PPC64
/* Transactional memory state */
static bool tm_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    /* Only migrate TM state while a transaction is active or suspended */
    return msr_ts;
}

static const VMStateDescription vmstate_tm = {
    .name = "cpu/tm",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .needed = tm_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
        VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64),
        VMSTATE_UINT64(env.tm_cr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_lr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ctr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_amr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ppr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU),
        VMSTATE_UINT32(env.tm_vscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_dscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_tar, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
#endif

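/*
 * Segment registers exist only on CPUs without the 64-bit hash MMU;
 * 64-bit MMU CPUs migrate the SLB instead (see vmstate_slb below).
 */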
static bool sr_needed(void *opaque)
{
#ifdef TARGET_PPC64
    PowerPCCPU *cpu = opaque;

    return !(cpu->env.mmu_model & POWERPC_MMU_64);
#else
    return true;
#endif
}

static const VMStateDescription vmstate_sr = {
    .name = "cpu/sr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

#ifdef TARGET_PPC64
static int get_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field)
{
    ppc_slb_t *v = pv;

    v->esid = qemu_get_be64(f);
    v->vsid = qemu_get_be64(f);

    return 0;
}

static int put_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field, QJSON *vmdesc)
{
    ppc_slb_t *v = pv;

    qemu_put_be64(f, v->esid);
    qemu_put_be64(f, v->vsid);
    return 0;
}

static const VMStateInfo vmstate_info_slbe = {
    .name = "slbe",
    .get  = get_slbe,
    .put  = put_slbe,
};

#define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t)

#define VMSTATE_SLB_ARRAY(_f, _s, _n)                             \
    VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0)

static bool slb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    /* We don't support any of the old segment table based 64-bit CPUs */
    return cpu->env.mmu_model & POWERPC_MMU_64;
}

static int slb_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;

    /*
     * We've pulled in the raw esid and vsid values from the migration
     * stream, but we need to recompute the page size pointers
     */
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
            /* Migration source had bad values in its SLB */
            return -1;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_slb = {
    .name = "cpu/slb",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = slb_needed,
    .post_load = slb_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_TEST(mig_slb_nr, PowerPCCPU, cpu_pre_3_0_migration),
        VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
        VMSTATE_END_OF_LIST()
    }
};
#endif /* TARGET_PPC64 */

static const VMStateDescription vmstate_tlb6xx_entry = {
    .name = "cpu/tlb6xx_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(pte0, ppc6xx_tlb_t),
        VMSTATE_UINTTL(pte1, ppc6xx_tlb_t),
        VMSTATE_UINTTL(EPN, ppc6xx_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlb6xx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_6XX);
}

static const VMStateDescription vmstate_tlb6xx = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlb6xx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlb6xx_entry,
                                            ppc6xx_tlb_t),
        VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_tlbemb_entry = {
    .name = "cpu/tlbemb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(RPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(EPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(PID, ppcemb_tlb_t),
        VMSTATE_UINTTL(size, ppcemb_tlb_t),
        VMSTATE_UINT32(prot, ppcemb_tlb_t),
        VMSTATE_UINT32(attr, ppcemb_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbemb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_EMB);
}

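/*
 * The pb[] protection bound registers only exist on the 403 family,
 * identified by the 0x0020xxxx PVR range.
 */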
static bool pbr403_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    uint32_t pvr = cpu->env.spr[SPR_PVR];

    return (pvr & 0xffff0000) == 0x00200000;
}

static const VMStateDescription vmstate_pbr403 = {
    .name = "cpu/pbr403",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pbr403_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.pb, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    },
};

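/*
 * Note: the section name below is "cpu/tlb6xx" rather than the
 * expected "cpu/tlbemb".  This is presumably a historical copy-paste,
 * but renaming it now would break the migration stream format.
 */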
static const VMStateDescription vmstate_tlbemb = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbemb_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbemb_entry,
                                            ppcemb_tlb_t),
        /* 403 protection registers */
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_pbr403,
        NULL
    }
};

static const VMStateDescription vmstate_tlbmas_entry = {
    .name = "cpu/tlbmas_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mas8, ppcmas_tlb_t),
        VMSTATE_UINT32(mas1, ppcmas_tlb_t),
        VMSTATE_UINT64(mas2, ppcmas_tlb_t),
        VMSTATE_UINT64(mas7_3, ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbmas_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_MAS);
}

static const VMStateDescription vmstate_tlbmas = {
    .name = "cpu/tlbmas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbmas_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbmas_entry,
                                            ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    }
};

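/*
 * compat_pvr is only meaningful under a virtual hypervisor (the assert
 * below enforces this), and pre-2.10 machine types did not know about
 * this subsection.
 */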
static bool compat_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    assert(!(cpu->compat_pvr && !cpu->vhyp));
    return !cpu->pre_2_10_migration && cpu->compat_pvr != 0;
}

static const VMStateDescription vmstate_compat = {
    .name = "cpu/compat",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = compat_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(compat_pvr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_ppc_cpu = {
    .name = "cpu",
    .version_id = 5,
    .minimum_version_id = 5,
    .minimum_version_id_old = 4,
    .load_state_old = cpu_load_old,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */

        /* User mode architected state */
        VMSTATE_UINTTL_ARRAY(env.gpr, PowerPCCPU, 32),
#if !defined(TARGET_PPC64)
        VMSTATE_UINTTL_ARRAY(env.gprh, PowerPCCPU, 32),
#endif
        VMSTATE_UINT32_ARRAY(env.crf, PowerPCCPU, 8),
        VMSTATE_UINTTL(env.nip, PowerPCCPU),

        /* SPRs */
        VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024),
        VMSTATE_UINT64(env.spe_acc, PowerPCCPU),

        /* Reservation */
        VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),

        /* Supervisor mode architected state */
        VMSTATE_UINTTL(env.msr, PowerPCCPU),

        /* Internal state */
        VMSTATE_UINTTL(env.hflags_nmsr, PowerPCCPU),
        /* FIXME: access_type? */

        /* Sanity checking */
        VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU,
                            cpu_pre_2_8_migration),
        VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_fpu,
        &vmstate_altivec,
        &vmstate_vsx,
        &vmstate_sr,
#ifdef TARGET_PPC64
        &vmstate_tm,
        &vmstate_slb,
#endif /* TARGET_PPC64 */
        &vmstate_tlb6xx,
        &vmstate_tlbemb,
        &vmstate_tlbmas,
        &vmstate_compat,
        NULL
    }
};
858