1 #include "qemu/osdep.h" 2 #include "qemu-common.h" 3 #include "cpu.h" 4 #include "exec/exec-all.h" 5 #include "hw/hw.h" 6 #include "hw/boards.h" 7 #include "sysemu/kvm.h" 8 #include "helper_regs.h" 9 #include "mmu-hash64.h" 10 #include "migration/cpu.h" 11 #include "qapi/error.h" 12 #include "kvm_ppc.h" 13 14 static int cpu_load_old(QEMUFile *f, void *opaque, int version_id) 15 { 16 PowerPCCPU *cpu = opaque; 17 CPUPPCState *env = &cpu->env; 18 unsigned int i, j; 19 target_ulong sdr1; 20 uint32_t fpscr; 21 #if defined(TARGET_PPC64) 22 int32_t slb_nr; 23 #endif 24 target_ulong xer; 25 26 for (i = 0; i < 32; i++) 27 qemu_get_betls(f, &env->gpr[i]); 28 #if !defined(TARGET_PPC64) 29 for (i = 0; i < 32; i++) 30 qemu_get_betls(f, &env->gprh[i]); 31 #endif 32 qemu_get_betls(f, &env->lr); 33 qemu_get_betls(f, &env->ctr); 34 for (i = 0; i < 8; i++) 35 qemu_get_be32s(f, &env->crf[i]); 36 qemu_get_betls(f, &xer); 37 cpu_write_xer(env, xer); 38 qemu_get_betls(f, &env->reserve_addr); 39 qemu_get_betls(f, &env->msr); 40 for (i = 0; i < 4; i++) 41 qemu_get_betls(f, &env->tgpr[i]); 42 for (i = 0; i < 32; i++) { 43 union { 44 float64 d; 45 uint64_t l; 46 } u; 47 u.l = qemu_get_be64(f); 48 *cpu_fpr_ptr(env, i) = u.d; 49 } 50 qemu_get_be32s(f, &fpscr); 51 env->fpscr = fpscr; 52 qemu_get_sbe32s(f, &env->access_type); 53 #if defined(TARGET_PPC64) 54 qemu_get_betls(f, &env->spr[SPR_ASR]); 55 qemu_get_sbe32s(f, &slb_nr); 56 #endif 57 qemu_get_betls(f, &sdr1); 58 for (i = 0; i < 32; i++) 59 qemu_get_betls(f, &env->sr[i]); 60 for (i = 0; i < 2; i++) 61 for (j = 0; j < 8; j++) 62 qemu_get_betls(f, &env->DBAT[i][j]); 63 for (i = 0; i < 2; i++) 64 for (j = 0; j < 8; j++) 65 qemu_get_betls(f, &env->IBAT[i][j]); 66 qemu_get_sbe32s(f, &env->nb_tlb); 67 qemu_get_sbe32s(f, &env->tlb_per_way); 68 qemu_get_sbe32s(f, &env->nb_ways); 69 qemu_get_sbe32s(f, &env->last_way); 70 qemu_get_sbe32s(f, &env->id_tlbs); 71 qemu_get_sbe32s(f, &env->nb_pids); 72 if (env->tlb.tlb6) { 73 // XXX assumes 6xx 74 for (i = 0; i < env->nb_tlb; i++) { 75 qemu_get_betls(f, &env->tlb.tlb6[i].pte0); 76 qemu_get_betls(f, &env->tlb.tlb6[i].pte1); 77 qemu_get_betls(f, &env->tlb.tlb6[i].EPN); 78 } 79 } 80 for (i = 0; i < 4; i++) 81 qemu_get_betls(f, &env->pb[i]); 82 for (i = 0; i < 1024; i++) 83 qemu_get_betls(f, &env->spr[i]); 84 if (!cpu->vhyp) { 85 ppc_store_sdr1(env, sdr1); 86 } 87 qemu_get_be32s(f, &env->vscr); 88 qemu_get_be64s(f, &env->spe_acc); 89 qemu_get_be32s(f, &env->spe_fscr); 90 qemu_get_betls(f, &env->msr_mask); 91 qemu_get_be32s(f, &env->flags); 92 qemu_get_sbe32s(f, &env->error_code); 93 qemu_get_be32s(f, &env->pending_interrupts); 94 qemu_get_be32s(f, &env->irq_input_state); 95 for (i = 0; i < POWERPC_EXCP_NB; i++) 96 qemu_get_betls(f, &env->excp_vectors[i]); 97 qemu_get_betls(f, &env->excp_prefix); 98 qemu_get_betls(f, &env->ivor_mask); 99 qemu_get_betls(f, &env->ivpr_mask); 100 qemu_get_betls(f, &env->hreset_vector); 101 qemu_get_betls(f, &env->nip); 102 qemu_get_betls(f, &env->hflags); 103 qemu_get_betls(f, &env->hflags_nmsr); 104 qemu_get_sbe32(f); /* Discard unused mmu_idx */ 105 qemu_get_sbe32(f); /* Discard unused power_mode */ 106 107 /* Recompute mmu indices */ 108 hreg_compute_mem_idx(env); 109 110 return 0; 111 } 112 113 static int get_avr(QEMUFile *f, void *pv, size_t size, 114 const VMStateField *field) 115 { 116 ppc_avr_t *v = pv; 117 118 v->u64[0] = qemu_get_be64(f); 119 v->u64[1] = qemu_get_be64(f); 120 121 return 0; 122 } 123 124 static int put_avr(QEMUFile *f, void *pv, size_t size, 125 const VMStateField *field, 
                   const VMStateField *field, QJSON *vmdesc)
{
    ppc_avr_t *v = pv;

    qemu_put_be64(f, v->u64[0]);
    qemu_put_be64(f, v->u64[1]);
    return 0;
}

static const VMStateInfo vmstate_info_avr = {
    .name = "avr",
    .get = get_avr,
    .put = put_avr,
};

#define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_SUB_ARRAY(_f, _s, 32, _n, _v, vmstate_info_avr, ppc_avr_t)

#define VMSTATE_AVR_ARRAY(_f, _s, _n)                                   \
    VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0)

static int get_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;

    v->u64[0] = qemu_get_be64(f);

    return 0;
}

static int put_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, QJSON *vmdesc)
{
    ppc_vsr_t *v = pv;

    qemu_put_be64(f, v->u64[0]);
    return 0;
}

static const VMStateInfo vmstate_info_fpr = {
    .name = "fpr",
    .get = get_fpr,
    .put = put_fpr,
};

#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_fpr, ppc_vsr_t)

#define VMSTATE_FPR_ARRAY(_f, _s, _n)                                   \
    VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0)

static int get_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;

    v->u64[1] = qemu_get_be64(f);

    return 0;
}

static int put_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, QJSON *vmdesc)
{
    ppc_vsr_t *v = pv;

    qemu_put_be64(f, v->u64[1]);
    return 0;
}

static const VMStateInfo vmstate_info_vsr = {
    .name = "vsr",
    .get = get_vsr,
    .put = put_vsr,
};

#define VMSTATE_VSR_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_vsr, ppc_vsr_t)

#define VMSTATE_VSR_ARRAY(_f, _s, _n)                                   \
    VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0)

static bool cpu_pre_2_8_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;

    return cpu->pre_2_8_migration;
}

#if defined(TARGET_PPC64)
static bool cpu_pre_3_0_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;

    return cpu->pre_3_0_migration;
}
#endif

static int cpu_pre_save(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t insns_compat_mask =
        PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB
        | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES
        | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES
        | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT
        | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ
        | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC
        | PPC_64B | PPC_64BX | PPC_ALTIVEC
        | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD;
    uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX
        | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206
        | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206
        | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207
        | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207
        | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM;

    env->spr[SPR_LR] = env->lr;
    env->spr[SPR_CTR] = env->ctr;
    env->spr[SPR_XER] = cpu_read_xer(env);
#if defined(TARGET_PPC64)
    env->spr[SPR_CFAR] = env->cfar;
#endif
    env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr;

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->spr[SPR_DBAT0U + 2 * i] = env->DBAT[0][i];
        env->spr[SPR_DBAT0U + 2 * i + 1] = env->DBAT[1][i];
        env->spr[SPR_IBAT0U + 2 * i] = env->IBAT[0][i];
        env->spr[SPR_IBAT0U + 2 * i + 1] = env->IBAT[1][i];
    }
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->spr[SPR_DBAT4U + 2 * i] = env->DBAT[0][i + 4];
        env->spr[SPR_DBAT4U + 2 * i + 1] = env->DBAT[1][i + 4];
        env->spr[SPR_IBAT4U + 2 * i] = env->IBAT[0][i + 4];
        env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4];
    }

    /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */
    if (cpu->pre_2_8_migration) {
        /*
         * Mask out bits that got added to msr_mask since the versions
         * which stupidly included it in the migration stream.
         */
        target_ulong metamask = 0
#if defined(TARGET_PPC64)
            | (1ULL << MSR_TS0)
            | (1ULL << MSR_TS1)
#endif
            ;
        cpu->mig_msr_mask = env->msr_mask & ~metamask;
        cpu->mig_insns_flags = env->insns_flags & insns_compat_mask;
        /*
         * CPU models supported by old machines all have PPC_MEM_TLBIE,
         * so we set it unconditionally to allow backward migration from
         * a POWER9 host to a POWER8 host.
         */
        cpu->mig_insns_flags |= PPC_MEM_TLBIE;
        cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2;
        cpu->mig_nb_BATs = env->nb_BATs;
    }
    if (cpu->pre_3_0_migration) {
        if (cpu->hash64_opts) {
            cpu->mig_slb_nr = cpu->hash64_opts->slb_size;
        }
    }

    return 0;
}

/*
 * Determine if a given PVR is a "close enough" match to the CPU
 * object. For TCG and KVM PR it would probably be sufficient to
 * require an exact PVR match. However for KVM HV the user is
 * restricted to a PVR exactly matching the host CPU. The correct way
 * to handle this is to put the guest into an architected
 * compatibility mode. However, to allow a more forgiving transition
 * and migration from before this was widely done, we allow migration
 * between sufficiently similar PVRs, as determined by the CPU class's
 * pvr_match() hook.
 */
static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (pvr == pcc->pvr) {
        return true;
    }
    return pcc->pvr_match(pcc, pvr);
}

static int cpu_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong msr;

    /*
     * If we're operating in compat mode, we should be ok as long as
     * the destination supports the same compatibility mode.
     *
     * Otherwise, however, we require that the destination has exactly
     * the same CPU model as the source.
     */

#if defined(TARGET_PPC64)
    if (cpu->compat_pvr) {
        uint32_t compat_pvr = cpu->compat_pvr;
        Error *local_err = NULL;

        cpu->compat_pvr = 0;
        ppc_set_compat(cpu, compat_pvr, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    } else
#endif
    {
        if (!pvr_match(cpu, env->spr[SPR_PVR])) {
            return -1;
        }
    }

    /*
     * If we're running with KVM HV, there is a chance that the host
     * kernel does not have the capability of dealing with a PVR other
     * than this exact host PVR in KVM_SET_SREGS. If that happens, the
     * guest freezes after migration.
     *
     * The function kvmppc_pvr_workaround_required does this verification
     * by first checking if the kernel has the cap, returning false
     * immediately (no workaround needed) if that is the case. Otherwise,
     * it checks if we're running in KVM PR.
     * If the host kernel does not have the cap and we're not running KVM-PR
     * (so, it is running KVM-HV), we need to ensure that KVM_SET_SREGS will
     * receive the PVR it expects as a workaround.
     */
#if defined(CONFIG_KVM)
    if (kvmppc_pvr_workaround_required(cpu)) {
        env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
    }
#endif

    env->lr = env->spr[SPR_LR];
    env->ctr = env->spr[SPR_CTR];
    cpu_write_xer(env, env->spr[SPR_XER]);
#if defined(TARGET_PPC64)
    env->cfar = env->spr[SPR_CFAR];
#endif
    env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2 * i];
        env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2 * i + 1];
        env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2 * i];
        env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2 * i + 1];
    }
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->DBAT[0][i + 4] = env->spr[SPR_DBAT4U + 2 * i];
        env->DBAT[1][i + 4] = env->spr[SPR_DBAT4U + 2 * i + 1];
        env->IBAT[0][i + 4] = env->spr[SPR_IBAT4U + 2 * i];
        env->IBAT[1][i + 4] = env->spr[SPR_IBAT4U + 2 * i + 1];
    }

    if (!cpu->vhyp) {
        ppc_store_sdr1(env, env->spr[SPR_SDR1]);
    }

    /*
     * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB
     * before restoring.
     */
    msr = env->msr;
    env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
    ppc_store_msr(env, msr);

    hreg_compute_mem_idx(env);

    return 0;
}

static bool fpu_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return (cpu->env.insns_flags & PPC_FLOAT);
}

static const VMStateDescription vmstate_fpu = {
    .name = "cpu/fpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpu_needed,
    .fields = (VMStateField[]) {
        VMSTATE_FPR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_UINTTL(env.fpscr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};

static bool altivec_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return (cpu->env.insns_flags & PPC_ALTIVEC);
}

static const VMStateDescription vmstate_altivec = {
    .name = "cpu/altivec",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = altivec_needed,
    .fields = (VMStateField[]) {
        VMSTATE_AVR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_UINT32(env.vscr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};

static bool vsx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return (cpu->env.insns_flags2 & PPC2_VSX);
}

static const VMStateDescription vmstate_vsx = {
    .name = "cpu/vsx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vsx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VSR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

#ifdef TARGET_PPC64
/* Transactional memory state */
static bool tm_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    return msr_ts;
}

static const VMStateDescription vmstate_tm = {
    .name = "cpu/tm",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .needed = tm_needed,
    .fields = (VMStateField []) {
        VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
        VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64),
        VMSTATE_UINT64(env.tm_cr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_lr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ctr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_amr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ppr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU),
        VMSTATE_UINT32(env.tm_vscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_dscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_tar, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
#endif

static bool sr_needed(void *opaque)
{
#ifdef TARGET_PPC64
    PowerPCCPU *cpu = opaque;

    return !(cpu->env.mmu_model & POWERPC_MMU_64);
#else
    return true;
#endif
}

static const VMStateDescription vmstate_sr = {
    .name = "cpu/sr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

#ifdef TARGET_PPC64
static int get_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field)
{
    ppc_slb_t *v = pv;

    v->esid = qemu_get_be64(f);
    v->vsid = qemu_get_be64(f);

    return 0;
}

static int put_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field, QJSON *vmdesc)
{
    ppc_slb_t *v = pv;

    qemu_put_be64(f, v->esid);
    qemu_put_be64(f, v->vsid);
    return 0;
}

static const VMStateInfo vmstate_info_slbe = {
    .name = "slbe",
    .get = get_slbe,
    .put = put_slbe,
};

#define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t)

#define VMSTATE_SLB_ARRAY(_f, _s, _n)                                   \
    VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0)

static bool slb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    /* We don't support any of the old segment table based 64-bit CPUs */
    return (cpu->env.mmu_model & POWERPC_MMU_64);
}

static int slb_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;

    /*
     * We've pulled in the raw esid and vsid values from the migration
     * stream, but we need to recompute the page size pointers
     */
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
            /* Migration source had bad values in its SLB */
            return -1;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_slb = {
    .name = "cpu/slb",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = slb_needed,
    .post_load = slb_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_TEST(mig_slb_nr, PowerPCCPU, cpu_pre_3_0_migration),
        VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
        VMSTATE_END_OF_LIST()
    }
};
#endif /* TARGET_PPC64 */

static const VMStateDescription vmstate_tlb6xx_entry = {
    .name = "cpu/tlb6xx_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(pte0, ppc6xx_tlb_t),
        VMSTATE_UINTTL(pte1, ppc6xx_tlb_t),
        VMSTATE_UINTTL(EPN, ppc6xx_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlb6xx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_6XX);
}

static const VMStateDescription vmstate_tlb6xx = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlb6xx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlb6xx_entry,
                                            ppc6xx_tlb_t),
        VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_tlbemb_entry = {
    .name = "cpu/tlbemb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(RPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(EPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(PID, ppcemb_tlb_t),
        VMSTATE_UINTTL(size, ppcemb_tlb_t),
        VMSTATE_UINT32(prot, ppcemb_tlb_t),
        VMSTATE_UINT32(attr, ppcemb_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbemb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_EMB);
}

static bool pbr403_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    uint32_t pvr = cpu->env.spr[SPR_PVR];

    return (pvr & 0xffff0000) == 0x00200000;
}

static const VMStateDescription vmstate_pbr403 = {
    .name = "cpu/pbr403",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pbr403_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.pb, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_tlbemb = {
    /*
     * XXX: the section name duplicates vmstate_tlb6xx's "cpu/tlb6xx".
     * It is left as-is because the name is part of the migration
     * stream format; the two subsections are distinguished by their
     * mutually exclusive .needed hooks.
     */
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbemb_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbemb_entry,
                                            ppcemb_tlb_t),
        /* 403 protection registers */
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_pbr403,
        NULL
    }
};

static const VMStateDescription vmstate_tlbmas_entry = {
    .name = "cpu/tlbmas_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mas8, ppcmas_tlb_t),
        VMSTATE_UINT32(mas1, ppcmas_tlb_t),
        VMSTATE_UINT64(mas2, ppcmas_tlb_t),
        VMSTATE_UINT64(mas7_3, ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbmas_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_MAS);
}

static const VMStateDescription vmstate_tlbmas = {
    .name = "cpu/tlbmas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbmas_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbmas_entry,
                                            ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    }
};

static bool compat_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    assert(!(cpu->compat_pvr && !cpu->vhyp));
    return !cpu->pre_2_10_migration && cpu->compat_pvr != 0;
}

static const VMStateDescription vmstate_compat = {
    .name = "cpu/compat",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = compat_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(compat_pvr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_ppc_cpu = {
    .name = "cpu",
    .version_id = 5,
    .minimum_version_id = 5,
    .minimum_version_id_old = 4,
    .load_state_old = cpu_load_old,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */

        /* User mode architected state */
        VMSTATE_UINTTL_ARRAY(env.gpr, PowerPCCPU, 32),
#if !defined(TARGET_PPC64)
        VMSTATE_UINTTL_ARRAY(env.gprh, PowerPCCPU, 32),
#endif
        VMSTATE_UINT32_ARRAY(env.crf, PowerPCCPU, 8),
        VMSTATE_UINTTL(env.nip, PowerPCCPU),

        /* SPRs */
        VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024),
        VMSTATE_UINT64(env.spe_acc, PowerPCCPU),

        /* Reservation */
        VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),

        /* Supervisor mode architected state */
        VMSTATE_UINTTL(env.msr, PowerPCCPU),

        /* Internal state */
        VMSTATE_UINTTL(env.hflags_nmsr, PowerPCCPU),
        /* FIXME: access_type? */

        /* Sanity checking */
        VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU,
                            cpu_pre_2_8_migration),
        VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_fpu,
        &vmstate_altivec,
        &vmstate_vsx,
        &vmstate_sr,
#ifdef TARGET_PPC64
        &vmstate_tm,
        &vmstate_slb,
#endif /* TARGET_PPC64 */
        &vmstate_tlb6xx,
        &vmstate_tlbemb,
        &vmstate_tlbmas,
        &vmstate_compat,
        NULL
    }
};
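
/*
 * Editor's sketch (not part of the original file): vmstate_ppc_cpu is not
 * registered here; it is exported and hooked up through the CPU class so
 * that the common vCPU realize path registers it per CPU. The exact hook
 * location below (a ppc_cpu_class_init in the PPC CPU init code) is an
 * assumption for illustration only.
 *
 *     static void ppc_cpu_class_init(ObjectClass *oc, void *data)
 *     {
 *         CPUClass *cc = CPU_CLASS(oc);
 *         ...
 *     #if !defined(CONFIG_USER_ONLY)
 *         cc->vmsd = &vmstate_ppc_cpu;   // picked up by common CPU realize
 *     #endif
 *     }
 *
 * Once cc->vmsd is set, the generic CPU code registers this description for
 * each vCPU, and the .needed hooks above decide which optional subsections
 * (FPU, Altivec, VSX, SLB, TM, TLB flavours, compat) appear in the stream.
 */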