/*
 * ARM CPU migration state (vmstate) definitions.
 *
 * Defines the VMStateDescription tables and custom get/put accessors
 * used to save and restore ARM CPU state across migration, including
 * the subsection machinery that preserves compatibility with older
 * QEMU versions.  Field order, names, and version ids here define the
 * migration wire format: do not reorder or rename without considering
 * cross-version migration.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm_arm.h"
#include "internals.h"
#include "cpu-features.h"
#include "migration/cpu.h"
#include "target/arm/gtimer.h"

/* Subsection predicate: CPU has FP/SIMD state worth migrating. */
static bool vfp_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
            ? cpu_isar_feature(aa64_fp_simd, cpu)
            : cpu_isar_feature(aa32_vfp_simd, cpu));
}

static bool vfp_fpcr_fpsr_needed(void *opaque)
{
    /*
     * If either the FPCR or the FPSR include set bits that are not
     * visible in the AArch32 FPSCR view of floating point control/status
     * then we must send the FPCR and FPSR as two separate fields in the
     * cpu/vfp/fpcr_fpsr subsection, and we will send a 0 for the old
     * FPSCR field in cpu/vfp.
     *
     * If all the set bits are representable in an AArch32 FPSCR then we
     * send that value as the cpu/vfp FPSCR field, and don't send the
     * cpu/vfp/fpcr_fpsr subsection.
     *
     * On incoming migration, if the cpu/vfp FPSCR field is non-zero we
     * use it, and if the fpcr_fpsr subsection is present we use that.
     * (The subsection will never be present with a non-zero FPSCR field,
     * and if FPSCR is zero and the subsection is not present that means
     * that FPSCR/FPSR/FPCR are zero.)
     *
     * This preserves migration compatibility with older QEMU versions,
     * in both directions.
     */
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return (vfp_get_fpcr(env) & ~FPCR_MASK) || (vfp_get_fpsr(env) & ~FPSR_MASK);
}

/* Incoming legacy FPSCR field: apply it only if non-zero. */
static int get_fpscr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    if (val) {
        /* 0 means we might have the data in the fpcr_fpsr subsection */
        vfp_set_fpscr(env, val);
    }
    return 0;
}

/*
 * Outgoing legacy FPSCR field: send 0 when the full state goes in the
 * fpcr_fpsr subsection instead (see vfp_fpcr_fpsr_needed() above).
 */
static int put_fpscr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t fpscr = vfp_fpcr_fpsr_needed(opaque) ? 0 : vfp_get_fpscr(env);

    qemu_put_be32(f, fpscr);
    return 0;
}

static const VMStateInfo vmstate_fpscr = {
    .name = "fpscr",
    .get = get_fpscr,
    .put = put_fpscr,
};

/* FPCR as a full 64-bit value (fpcr_fpsr subsection). */
static int get_fpcr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint64_t val = qemu_get_be64(f);

    vfp_set_fpcr(env, val);
    return 0;
}

static int put_fpcr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    qemu_put_be64(f, vfp_get_fpcr(env));
    return 0;
}

static const VMStateInfo vmstate_fpcr = {
    .name = "fpcr",
    .get = get_fpcr,
    .put = put_fpcr,
};

/* FPSR as a full 64-bit value (fpcr_fpsr subsection). */
static int get_fpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint64_t val = qemu_get_be64(f);

    vfp_set_fpsr(env, val);
    return 0;
}

static int put_fpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    qemu_put_be64(f, vfp_get_fpsr(env));
    return 0;
}

static const VMStateInfo vmstate_fpsr = {
    .name = "fpsr",
    .get = get_fpsr,
    .put = put_fpsr,
};

/*
 * Subsection sent only when FPCR/FPSR have bits not representable in
 * the AArch32 FPSCR view; see vfp_fpcr_fpsr_needed().
 */
static const VMStateDescription vmstate_vfp_fpcr_fpsr = {
    .name = "cpu/vfp/fpcr_fpsr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vfp_fpcr_fpsr_needed,
    .fields = (const VMStateField[]) {
        {
            .name = "fpcr",
            .version_id = 0,
            .size = sizeof(uint64_t),
            .info = &vmstate_fpcr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        {
            .name = "fpsr",
            .version_id = 0,
            .size = sizeof(uint64_t),
            .info = &vmstate_fpsr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_vfp = {
    .name = "cpu/vfp",
    .version_id = 3,
    .minimum_version_id = 3,
    .needed = vfp_needed,
    .fields = (const VMStateField[]) {
        /* For compatibility, store Qn out of Zn here. */
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[0].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[1].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[2].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[3].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[4].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[5].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[6].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[7].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[8].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[9].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[10].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[11].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[12].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[13].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[14].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[15].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[16].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[17].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[18].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[19].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[20].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[21].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[22].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[23].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[24].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[25].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[26].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[27].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[28].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[29].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[30].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[31].d, ARMCPU, 0, 2),

        /* The xregs array is a little awkward because element 1 (FPSCR)
         * requires a specific accessor, so we have to split it up in
         * the vmstate:
         */
        VMSTATE_UINT32(env.vfp.xregs[0], ARMCPU),
        VMSTATE_UINT32_SUB_ARRAY(env.vfp.xregs, ARMCPU, 2, 14),
        {
            .name = "fpscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_fpscr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_vfp_fpcr_fpsr,
        NULL
    }
};

/* Subsection predicate: CPU implements iwMMXt. */
static bool iwmmxt_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_IWMMXT);
}

static const VMStateDescription vmstate_iwmmxt = {
    .name = "cpu/iwmmxt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = iwmmxt_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16),
        VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef TARGET_AARCH64
/* The expression ARM_MAX_VQ - 2 is 0 for pure AArch32 build,
 * and ARMPredicateReg is actively empty.  This triggers errors
 * in the expansion of the VMSTATE macros.
 */

/* Subsection predicate: CPU implements SVE. */
static bool sve_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return cpu_isar_feature(aa64_sve, cpu);
}

/* The first two words of each Zreg is stored in VFP state. */
static const VMStateDescription vmstate_zreg_hi_reg = {
    .name = "cpu/sve/zreg_hi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_SUB_ARRAY(d, ARMVectorReg, 2, ARM_MAX_VQ - 2),
        VMSTATE_END_OF_LIST()
    }
};

/* One SVE predicate register. */
static const VMStateDescription vmstate_preg_reg = {
    .name = "cpu/sve/preg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(p, ARMPredicateReg, 2 * ARM_MAX_VQ / 8),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_sve = {
    .name = "cpu/sve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sve_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.vfp.zregs, ARMCPU, 32, 0,
                             vmstate_zreg_hi_reg, ARMVectorReg),
        VMSTATE_STRUCT_ARRAY(env.vfp.pregs, ARMCPU, 17, 0,
                             vmstate_preg_reg, ARMPredicateReg),
        VMSTATE_END_OF_LIST()
    }
};

/* One full-width vector register, used for the SME ZA array rows. */
static const VMStateDescription vmstate_vreg = {
    .name = "vreg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(d, ARMVectorReg, ARM_MAX_VQ * 2),
        VMSTATE_END_OF_LIST()
    }
};

static bool za_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * When ZA storage is disabled, its contents are discarded.
     * It will be zeroed when ZA storage is re-enabled.
     */
    return FIELD_EX64(cpu->env.svcr, SVCR, ZA);
}

static const VMStateDescription vmstate_za = {
    .name = "cpu/sme",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = za_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.zarray, ARMCPU, ARM_MAX_VQ * 16, 0,
                             vmstate_vreg, ARMVectorReg),
        VMSTATE_END_OF_LIST()
    }
};
#endif /* AARCH64 */

/* Subsection predicate: an SError exception is pending. */
static bool serror_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return env->serror.pending != 0;
}

static const VMStateDescription vmstate_serror = {
    .name = "cpu/serror",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = serror_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(env.serror.pending, ARMCPU),
        VMSTATE_UINT8(env.serror.has_esr, ARMCPU),
        VMSTATE_UINT64(env.serror.esr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * Always send the irq-line-state subsection; cpu_pre_load()/cpu_post_load()
 * detect its absence (migration from an older QEMU) via a sentinel.
 */
static bool irq_line_state_needed(void *opaque)
{
    return true;
}

static const VMStateDescription vmstate_irq_line_state = {
    .name = "cpu/irq-line-state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = irq_line_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.irq_line_state, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool wfxt_timer_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    /* We'll only have the timer object if FEAT_WFxT is implemented */
    return cpu->wfxt_timer;
}

static const VMStateDescription vmstate_wfxt_timer = {
    .name = "cpu/wfxt-timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = wfxt_timer_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_TIMER_PTR(wfxt_timer, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

/* Subsection predicate: M-profile CPU. */
static bool m_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_m_faultmask_primask = {
    .name = "cpu/m/faultmask-primask",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

/* CSSELR is in a subsection because we didn't implement it previously.
 * Migration from an old implementation will leave it at zero, which
 * is OK since the only CPUs in the old implementation make the
 * register RAZ/WI.
 * Since there was no version of QEMU which implemented the CSSELR for
 * just non-secure, we transfer both banks here rather than putting
 * the secure banked version in the m-security subsection.
 */
static bool csselr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.v7m.csselr[M_REG_NS] <= R_V7M_CSSELR_INDEX_MASK
        && cpu->env.v7m.csselr[M_REG_S] <= R_V7M_CSSELR_INDEX_MASK;
}

static bool m_csselr_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return !arm_v7m_csselr_razwi(cpu);
}

static const VMStateDescription vmstate_m_csselr = {
    .name = "cpu/m/csselr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_csselr_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.csselr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_VALIDATE("CSSELR is valid", csselr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_scr = {
    .name = "cpu/m/scr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.scr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_other_sp = {
    .name = "cpu/m/other-sp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.other_sp, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

/* Subsection predicate: v8 M-profile CPU. */
static bool m_v8m_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M) && arm_feature(env, ARM_FEATURE_V8);
}

static const VMStateDescription vmstate_m_v8m = {
    .name = "cpu/m/v8m",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_v8m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.msplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.psplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_fp = {
    .name = "cpu/m/fp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vfp_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.fpcar, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpccr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpdscr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.cpacr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32(env.v7m.nsacr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

/* Subsection predicate: CPU implements MVE (Helium). */
static bool mve_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return cpu_isar_feature(aa32_mve, cpu);
}

static const VMStateDescription vmstate_m_mve = {
    .name = "cpu/m/mve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mve_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vpr, ARMCPU),
        VMSTATE_UINT32(env.v7m.ltpsize, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_m = {
    .name = "cpu/m",
    .version_id = 4,
    .minimum_version_id = 4,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.hfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.dfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.bfar, ARMCPU),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_NS], ARMCPU),
        VMSTATE_INT32(env.v7m.exception, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_m_faultmask_primask,
        &vmstate_m_csselr,
        &vmstate_m_scr,
        &vmstate_m_other_sp,
        &vmstate_m_v8m,
        &vmstate_m_fp,
        &vmstate_m_mve,
        NULL
    }
};

/* Subsection predicate: CPU implements ThumbEE. */
static bool thumb2ee_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_THUMB2EE);
}

static const VMStateDescription vmstate_thumb2ee = {
    .name = "cpu/thumb2ee",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = thumb2ee_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.teecr, ARMCPU),
        VMSTATE_UINT32(env.teehbr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

/* Subsection predicate: v7 PMSA (MPU) but not v8. */
static bool pmsav7_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
           arm_feature(env, ARM_FEATURE_V7) &&
           !arm_feature(env, ARM_FEATURE_V8);
}

/* Reject incoming region numbers beyond the number of implemented regions. */
static bool pmsav7_rgnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.pmsav7.rnr[M_REG_NS] < cpu->pmsav7_dregion;
}

static const VMStateDescription vmstate_pmsav7 = {
    .name = "cpu/pmsav7",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav7.drbar, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.drsr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.dracr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VALIDATE("rgnr is valid", pmsav7_rgnr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav7_rnr_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    /* For R profile cores pmsav7.rnr is migrated via the cpreg
     * "RGNR" definition in helper.h. For M profile we have to
     * migrate it separately.
     */
    return arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_pmsav7_rnr = {
    .name = "cpu/pmsav7-rnr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_rnr_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

/* Subsection predicate: v8 PMSA (MPU). */
static bool pmsav8_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
           arm_feature(env, ARM_FEATURE_V8);
}

/* Subsection predicate: R-profile v8 PMSA (PMSAv8r), i.e. not M-profile. */
static bool pmsav8r_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
           arm_feature(env, ARM_FEATURE_V8) &&
           !arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_pmsav8r = {
    .name = "cpu/pmsav8/pmsav8r",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav8r_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav8.hprbar, ARMCPU,
                        pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.hprlar, ARMCPU,
                        pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_pmsav8 = {
    .name = "cpu/pmsav8",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav8_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_pmsav8r,
        NULL
    }
};

/* Reject an out-of-range incoming secure MPU region number. */
static bool s_rnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion;
}

/* Reject an out-of-range incoming SAU region number. */
static bool sau_rnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.sau.rnr < cpu->sau_sregion;
}

/* Subsection predicate: M-profile Security extension implemented. */
static bool m_security_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M_SECURITY);
}

static const VMStateDescription vmstate_m_security = {
    .name = "cpu/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_security_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.secure, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_msp, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_psp, ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_S], ARMCPU),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_S], ARMCPU),
        VMSTATE_VALIDATE("secure MPU_RNR is valid", s_rnr_vmstate_validate),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.sfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.sfar, ARMCPU),
        VMSTATE_VARRAY_UINT32(env.sau.rbar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.sau.rlar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.sau.rnr, ARMCPU),
        VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate),
        VMSTATE_UINT32(env.sau.ctrl, ARMCPU),
        VMSTATE_UINT32(env.v7m.scr[M_REG_S], ARMCPU),
        /* AIRCR is not secure-only, but our implementation is R/O if the
         * security extension is unimplemented, so we migrate it here.
         */
        VMSTATE_UINT32(env.v7m.aircr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * Restore the PSR: the single "cpsr" wire field carries CPSR (AArch32),
 * PSTATE (AArch64) or XPSR (M-profile) depending on CPU state, plus
 * legacy-format conversion for migration from old QEMU versions.
 */
static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    if (arm_feature(env, ARM_FEATURE_M)) {
        if (val & XPSR_EXCP) {
            /* This is a CPSR format value from an older QEMU. (We can tell
             * because values transferred in XPSR format always have zero
             * for the EXCP field, and CPSR format will always have bit 4
             * set in CPSR_M.) Rearrange it into XPSR format. The significant
             * differences are that the T bit is not in the same place, the
             * primask/faultmask info may be in the CPSR I and F bits, and
             * we do not want the mode bits.
             * We know that this cleanup happened before v8M, so there
             * is no complication with banked primask/faultmask.
             */
            uint32_t newval = val;

            assert(!arm_feature(env, ARM_FEATURE_M_SECURITY));

            newval &= (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE);
            if (val & CPSR_T) {
                newval |= XPSR_T;
            }
            /* If the I or F bits are set then this is a migration from
             * an old QEMU which still stored the M profile FAULTMASK
             * and PRIMASK in env->daif. For a new QEMU, the data is
             * transferred using the vmstate_m_faultmask_primask subsection.
             */
            if (val & CPSR_F) {
                env->v7m.faultmask[M_REG_NS] = 1;
            }
            if (val & CPSR_I) {
                env->v7m.primask[M_REG_NS] = 1;
            }
            val = newval;
        }
        /* Ignore the low bits, they are handled by vmstate_m. */
        xpsr_write(env, val, ~XPSR_EXCP);
        return 0;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);

    if (is_a64(env)) {
        pstate_write(env, val);
        return 0;
    }

    cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    return 0;
}

/* Save the PSR in whichever format matches the CPU's current state. */
static int put_cpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* The low 9 bits are v7m.exception, which is handled by vmstate_m. */
        val = xpsr_read(env) & ~XPSR_EXCP;
    } else if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }

    qemu_put_be32(f, val);
    return 0;
}

static const VMStateInfo vmstate_cpsr = {
    .name = "cpsr",
    .get = get_cpsr,
    .put = put_cpsr,
};

/* Restore PSCI power state from the single "powered off" byte. */
static int get_power(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    bool powered_off = qemu_get_byte(f);
    cpu->power_state = powered_off ? PSCI_OFF : PSCI_ON;
    return 0;
}

static int put_power(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;

    /* Migration should never happen while we transition power states */

    if (cpu->power_state == PSCI_ON ||
        cpu->power_state == PSCI_OFF) {
        bool powered_off = (cpu->power_state == PSCI_OFF) ? true : false;
        qemu_put_byte(f, powered_off);
        return 0;
    } else {
        return 1;
    }
}

static const VMStateInfo vmstate_powered_off = {
    .name = "powered_off",
    .get = get_power,
    .put = put_power,
};

/*
 * Before saving: sync coprocessor register state into the cpreg_values
 * list (from KVM or from TCG cpu state) and snapshot it into the
 * cpreg_vmstate_* arrays that are what actually gets migrated.
 */
static int cpu_pre_save(void *opaque)
{
    ARMCPU *cpu = opaque;

    if (!kvm_enabled()) {
        pmu_op_start(&cpu->env);
    }

    if (kvm_enabled()) {
        if (!write_kvmstate_to_list(cpu)) {
            /* This should never fail */
            g_assert_not_reached();
        }

        /*
         * kvm_arm_cpu_pre_save() must be called after
         * write_kvmstate_to_list()
         */
        kvm_arm_cpu_pre_save(cpu);
    } else {
        if (!write_cpustate_to_list(cpu, false)) {
            /* This should never fail. */
            g_assert_not_reached();
        }
    }

    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    memcpy(cpu->cpreg_vmstate_indexes, cpu->cpreg_indexes,
           cpu->cpreg_array_len * sizeof(uint64_t));
    memcpy(cpu->cpreg_vmstate_values, cpu->cpreg_values,
           cpu->cpreg_array_len * sizeof(uint64_t));

    return 0;
}

/* After saving: undo the PMU freeze started in cpu_pre_save() (TCG only). */
static int cpu_post_save(void *opaque)
{
    ARMCPU *cpu = opaque;

    if (!kvm_enabled()) {
        pmu_op_finish(&cpu->env);
    }

    return 0;
}

static int cpu_pre_load(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    /*
     * In an inbound migration where on the source FPSCR/FPSR/FPCR are 0,
     * there will be no fpcr_fpsr subsection so we won't call vfp_set_fpcr()
     * and vfp_set_fpsr() from get_fpcr() and get_fpsr(); also the get_fpscr()
     * function will not call vfp_set_fpscr() because it will see a 0 in the
     * inbound data. Ensure that in this case we have a correctly set up
     * zero FPSCR/FPCR/FPSR.
     *
     * This is not strictly needed because FPSCR is zero out of reset, but
     * it avoids the possibility of future confusing migration bugs if some
     * future architecture change makes the reset value non-zero.
     */
    vfp_set_fpscr(env, 0);

    /*
     * Pre-initialize irq_line_state to a value that's never valid as
     * real data, so cpu_post_load() can tell whether we've seen the
     * irq-line-state subsection in the incoming migration state.
     */
    env->irq_line_state = UINT32_MAX;

    if (!kvm_enabled()) {
        pmu_op_start(env);
    }

    return 0;
}

/*
 * After loading: reconcile the incoming cpreg list with ours, push the
 * values into KVM or TCG cpu state, and sanity-check incoming data.
 * Returns 0 on success, -1 to fail the migration.
 */
static int cpu_post_load(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    int i, v;

    /*
     * Handle migration compatibility from old QEMU which didn't
     * send the irq-line-state subsection. A QEMU without it did not
     * implement the HCR_EL2.{VI,VF} bits as generating interrupts,
     * so for TCG the line state matches the bits set in cs->interrupt_request.
     * For KVM the line state is not stored in cs->interrupt_request
     * and so this will leave irq_line_state as 0, but this is OK because
     * we only need to care about it for TCG.
     */
    if (env->irq_line_state == UINT32_MAX) {
        CPUState *cs = CPU(cpu);

        env->irq_line_state = cs->interrupt_request &
            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ |
             CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VFIQ);
    }

    /* Update the values list from the incoming migration data.
     * Anything in the incoming data which we don't know about is
     * a migration failure; anything we know about but the incoming
     * data doesn't specify retains its current (reset) value.
     * The indexes list remains untouched -- we only inspect the
     * incoming migration index list so we can match the values array
     * entries with the right slots in our own values array.
     */

    for (i = 0, v = 0; i < cpu->cpreg_array_len
             && v < cpu->cpreg_vmstate_array_len; i++) {
        if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) {
            /* register in our list but not incoming : skip it */
            continue;
        }
        if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) {
            /* register in their list but not ours: fail migration */
            return -1;
        }
        /* matching register, copy the value over */
        cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v];
        v++;
    }

    if (kvm_enabled()) {
        if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
            return -1;
        }
        /* Note that it's OK for the TCG side not to know about
         * every register in the list; KVM is authoritative if
         * we're using it.
         */
        write_list_to_cpustate(cpu);
        kvm_arm_cpu_post_load(cpu);
    } else {
        if (!write_list_to_cpustate(cpu)) {
            return -1;
        }
    }

    /*
     * Misaligned thumb pc is architecturally impossible. Fail the
     * incoming migration. For TCG it would trigger the assert in
     * thumb_tr_translate_insn().
     */
    if (!is_a64(env) && env->thumb && (env->regs[15] & 1)) {
        return -1;
    }

    if (tcg_enabled()) {
        hw_breakpoint_update_all(cpu);
        hw_watchpoint_update_all(cpu);
    }

    /*
     * TCG gen_update_fp_context() relies on the invariant that
     * FPDSCR.LTPSIZE is constant 4 for M-profile with the LOB extension;
     * forbid bogus incoming data with some other value.
     */
    if (arm_feature(env, ARM_FEATURE_M) && cpu_isar_feature(aa32_lob, cpu)) {
        if (extract32(env->v7m.fpdscr[M_REG_NS],
                      FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4 ||
            extract32(env->v7m.fpdscr[M_REG_S],
                      FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4) {
            return -1;
        }
    }

    if (!kvm_enabled()) {
        pmu_op_finish(env);
    }

    if (tcg_enabled()) {
        arm_rebuild_hflags(env);
    }

    return 0;
}

/* Top-level ARM CPU migration description. */
const VMStateDescription vmstate_arm_cpu = {
    .name = "cpu",
    .version_id = 22,
    .minimum_version_id = 22,
    .pre_save = cpu_pre_save,
    .post_save = cpu_post_save,
    .pre_load = cpu_pre_load,
    .post_load = cpu_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
        VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
        VMSTATE_UINT64(env.pc, ARMCPU),
        {
            .name = "cpsr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_cpsr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_UINT32(env.spsr, ARMCPU),
        VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
        VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
        VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
        /* The length-check must come before the arrays to avoid
         * incoming data possibly overflowing the array.
         */
        VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len, ARMCPU),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
        VMSTATE_UINT64(env.exclusive_val, ARMCPU),
        VMSTATE_UINT64(env.exclusive_high, ARMCPU),
        VMSTATE_UNUSED(sizeof(uint64_t)),
        VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
        VMSTATE_UINT32(env.exception.fsr, ARMCPU),
        VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
        {
            .name = "power_state",
            .version_id = 0,
            .size = sizeof(bool),
            .info = &vmstate_powered_off,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_vfp,
        &vmstate_iwmmxt,
        &vmstate_m,
        &vmstate_thumb2ee,
        /* pmsav7_rnr must come before pmsav7 so that we have the
         * region number before we test it in the VMSTATE_VALIDATE
         * in vmstate_pmsav7.
         */
        &vmstate_pmsav7_rnr,
        &vmstate_pmsav7,
        &vmstate_pmsav8,
        &vmstate_m_security,
#ifdef TARGET_AARCH64
        &vmstate_sve,
        &vmstate_za,
#endif
        &vmstate_serror,
        &vmstate_irq_line_state,
        &vmstate_wfxt_timer,
        NULL
    }
};