/*
 * x86 CPU migration (VMState) support: descriptors for the CPU register
 * file, FPU/SSE/AVX state, MSRs and optional feature subsections.
 *
 * NOTE(review): every .name string, .version_id and field order below is
 * part of the migration wire format — do not change them, or migration
 * to/from other QEMU versions will break.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "hw/hw.h"
#include "hw/boards.h"
#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
#include "migration/cpu.h"

#include "sysemu/kvm.h"

#include "qemu/error-report.h"

/*
 * Wire format of one segment register cache:
 * selector, base, limit and descriptor flags.
 */
static const VMStateDescription vmstate_segment = {
    .name = "segment",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(selector, SegmentCache),
        VMSTATE_UINTTL(base, SegmentCache),
        VMSTATE_UINT32(limit, SegmentCache),
        VMSTATE_UINT32(flags, SegmentCache),
        VMSTATE_END_OF_LIST()
    }
};

/* Embed a single SegmentCache member of _state using vmstate_segment. */
#define VMSTATE_SEGMENT(_field, _state) {                            \
    .name = (stringify(_field)),                                     \
    .size = sizeof(SegmentCache),                                    \
    .vmsd = &vmstate_segment,                                        \
    .flags = VMS_STRUCT,                                             \
    .offset = offsetof(_state, _field)                               \
            + type_check(SegmentCache,typeof_field(_state, _field))  \
}

/* Embed an array of _n SegmentCache members of _state. */
#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)

/* Low 128 bits (the XMM part, quads 0-1) of a vector register. */
static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_XMM_REGS(_field, _state, _start)                     \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \
                             vmstate_xmm_reg, ZMMReg)

/* YMMH format is the same as XMM, but for bits 128-255 (quads 2-3) */
static const VMStateDescription vmstate_ymmh_reg = {
    .name = "ymmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v)            \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v, \
                             vmstate_ymmh_reg, ZMMReg)
/* Bits 256-511 (quads 4-7) of the low ZMM registers. */
static const VMStateDescription vmstate_zmmh_reg = {
    .name = "zmmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \
                             vmstate_zmmh_reg, ZMMReg)

#ifdef TARGET_X86_64
/* Full 512 bits of the upper registers ZMM16-ZMM31 (64-bit mode only). */
static const VMStateDescription vmstate_hi16_zmm_reg = {
    .name = "hi16_zmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start)           \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \
                             vmstate_hi16_zmm_reg, ZMMReg)
#endif

/* One MPX bound register: lower and upper bound. */
static const VMStateDescription vmstate_bnd_regs = {
    .name = "bnd_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(lb, BNDReg),
        VMSTATE_UINT64(ub, BNDReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_BND_REGS(_field, _state, _n)                         \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)

/* One variable-range MTRR: base and mask MSR pair. */
static const VMStateDescription vmstate_mtrr_var = {
    .name = "mtrr_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, MTRRVar),
        VMSTATE_UINT64(mask, MTRRVar),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_MTRR_VARS(_field, _state, _n, _v)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)

/*
 * Scratch structure used to migrate one 80-bit x87 register as an
 * explicit (mantissa, sign+exponent) pair, independent of the host's
 * long-double representation.
 */
typedef struct x86_FPReg_tmp {
    FPReg *parent;
    uint64_t tmp_mant;
    uint16_t tmp_exp;
} x86_FPReg_tmp;

/* Split a floatx80 into its 64-bit mantissa and 16-bit sign/exponent. */
static void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

/* Recombine mantissa and sign/exponent words into a floatx80. */
static floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}

static void fpreg_pre_save(void *opaque)
{
    x86_FPReg_tmp *tmp = opaque;

    /* we save the real CPU data (in case of MMX usage only 'mant'
       contains the MMX register) */
    cpu_get_fp80(&tmp->tmp_mant, &tmp->tmp_exp, tmp->parent->d);
}

static int fpreg_post_load(void *opaque, int version)
{
    x86_FPReg_tmp *tmp = opaque;

    /* Rebuild the parent FPReg from the migrated mantissa/exponent. */
    tmp->parent->d = cpu_set_fp80(tmp->tmp_mant, tmp->tmp_exp);
    return 0;
}

static const VMStateDescription vmstate_fpreg_tmp = {
    .name = "fpreg_tmp",
    .post_load = fpreg_post_load,
    .pre_save = fpreg_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(tmp_mant, x86_FPReg_tmp),
        VMSTATE_UINT16(tmp_exp, x86_FPReg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

/* Migrates an FPReg via the x86_FPReg_tmp intermediate above. */
static const VMStateDescription vmstate_fpreg = {
    .name = "fpreg",
    .fields = (VMStateField[]) {
        VMSTATE_WITH_TMP(FPReg, x86_FPReg_tmp, vmstate_fpreg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * Fold runtime CPU state into the *_vmstate staging fields just before
 * the device state is written out.
 */
static void cpu_pre_save(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    /* FPU: merge the TOP-of-stack pointer into the status word and pack
       the per-register tag bits (1 = valid) into one byte. */
    env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    env->fptag_vmstate = 0;
    for(i = 0; i < 8; i++) {
        env->fptag_vmstate |= ((!env->fptags[i]) << i);
    }

    /* 0 = softfloat register format; the only format still supported. */
    env->fpregs_format_vmstate = 0;

    /*
     * Real mode guest segments register DPL should be zero.
     * Older KVM version were setting it wrongly.
     * Fixing it will allow live migration to host with unrestricted guest
     * support (otherwise the migration will fail with invalid guest state
     * error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

}

/*
 * Validate and unpack the migrated state into the runtime CPU structures.
 * Returns 0 on success, -EINVAL when the incoming state is unusable.
 */
static int cpu_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    /* A user-requested TSC frequency must match what was migrated in,
       otherwise guest timekeeping would silently change. */
    if (env->tsc_khz && env->user_tsc_khz &&
        env->tsc_khz != env->user_tsc_khz) {
        error_report("Mismatch between user-specified TSC frequency and "
                     "migrated TSC frequency");
        return -EINVAL;
    }

    if (env->fpregs_format_vmstate) {
        error_report("Unsupported old non-softfloat CPU state");
        return -EINVAL;
    }
    /*
     * Real mode guest segments register DPL should be zero.
     * Older KVM version were setting it wrongly.
     * Fixing it will allow live migration from such host that don't have
     * restricted guest support to a host with unrestricted guest support
     * (otherwise the migration will fail with invalid guest state
     * error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
     * running under KVM.  This is wrong for conforming code segments.
     * Luckily, in our implementation the CPL field of hflags is redundant
     * and we can get the right value from the SS descriptor privilege level.
     */
    env->hflags &= ~HF_CPL_MASK;
    env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;

    /* Undo the cpu_pre_save() packing of the FPU status/tag words. */
    env->fpstt = (env->fpus_vmstate >> 11) & 7;
    env->fpus = env->fpus_vmstate & ~0x3800;
    env->fptag_vmstate ^= 0xff;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
    }
    if (tcg_enabled()) {
        target_ulong dr7;
        update_fp_status(env);
        update_mxcsr_status(env);

        cpu_breakpoint_remove_all(cs, BP_CPU);
        cpu_watchpoint_remove_all(cs, BP_CPU);

        /* Indicate all breakpoints disabled, as they are, then
           let the helper re-enable them.  */
        dr7 = env->dr[7];
        env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
        cpu_x86_update_dr7(env, dr7);
    }
    tlb_flush(cs);
    return 0;
}

/*
 * The *_needed() predicates below decide whether each optional subsection
 * is written at all; a subsection is skipped when its state is still at
 * the reset default, which keeps the stream compatible with older QEMUs.
 */
static bool async_pf_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.async_pf_en_msr != 0;
}

static bool pv_eoi_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.pv_eoi_en_msr != 0;
}

static bool steal_time_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.steal_time_msr != 0;
}

/* KVM steal-time accounting MSR. */
static const VMStateDescription vmstate_steal_time_msr = {
    .name = "cpu/steal_time_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = steal_time_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.steal_time_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

/* KVM asynchronous page fault enable MSR. */
static const VMStateDescription vmstate_async_pf_msr = {
    .name = "cpu/async_pf_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = async_pf_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

/* KVM paravirtual EOI MSR.  NOTE(review): the "async_" prefix in the
 * section name looks historical; it cannot be corrected because the
 * name is part of the migration stream. */
static const VMStateDescription vmstate_pv_eoi_msr = {
    .name = "cpu/async_pv_eoi_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pv_eoi_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool fpop_ip_dp_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
}

/* Last x87 opcode and instruction/data pointers. */
static const VMStateDescription vmstate_fpop_ip_dp = {
    .name = "cpu/fpop_ip_dp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpop_ip_dp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(env.fpop, X86CPU),
        VMSTATE_UINT64(env.fpip, X86CPU),
        VMSTATE_UINT64(env.fpdp, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tsc_adjust_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_adjust != 0;
}

static const VMStateDescription vmstate_msr_tsc_adjust = {
    .name = "cpu/msr_tsc_adjust",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_adjust_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_adjust, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tscdeadline_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_deadline != 0;
}

static const VMStateDescription vmstate_msr_tscdeadline = {
    .name = "cpu/msr_tscdeadline",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tscdeadline_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_deadline, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool misc_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
}

static bool feature_control_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_feature_control != 0;
}

static const VMStateDescription vmstate_msr_ia32_misc_enable = {
    .name = "cpu/msr_ia32_misc_enable",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = misc_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_msr_ia32_feature_control = {
    .name = "cpu/msr_ia32_feature_control",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = feature_control_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

/* True when any architectural performance-monitoring MSR is non-zero. */
static bool pmu_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
        env->msr_global_status || env->msr_global_ovf_ctrl) {
        return true;
    }
    for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
        if (env->msr_fixed_counters[i]) {
            return true;
        }
    }
    for (i = 0; i < MAX_GP_COUNTERS; i++) {
        if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_architectural_pmu = {
    .name = "cpu/msr_architectural_pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_status, X86CPU),
        VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_END_OF_LIST()
    }
};

/* True when any MPX bound register, config or status register is live. */
static bool mpx_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < 4; i++) {
        if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
            return true;
        }
    }

    if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
        return true;
    }

    return !!env->msr_bndcfgs;
}

static const VMStateDescription vmstate_mpx = {
    .name = "cpu/mpx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mpx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
        VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
        VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
        VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_hypercall_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
}

/* Hyper-V hypercall page and guest OS identification MSRs. */
static const VMStateDescription vmstate_msr_hypercall_hypercall = {
    .name = "cpu/msr_hyperv_hypercall",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_hypercall_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
        VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_vapic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_vapic != 0;
}

/* Hyper-V virtual APIC assist page MSR. */
static const VMStateDescription vmstate_msr_hyperv_vapic = {
    .name = "cpu/msr_hyperv_vapic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_vapic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_time_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_tsc != 0;
}

/* Hyper-V reference TSC page MSR. */
static const VMStateDescription vmstate_msr_hyperv_time = {
    .name = "cpu/msr_hyperv_time",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_time_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_crash_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < HV_X64_MSR_CRASH_PARAMS; i++) {
        if (env->msr_hv_crash_params[i]) {
            return true;
        }
    }
    return false;
}

/* Hyper-V guest crash notification parameter MSRs. */
static const VMStateDescription vmstate_msr_hyperv_crash = {
    .name = "cpu/msr_hyperv_crash",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_crash_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params,
                             X86CPU, HV_X64_MSR_CRASH_PARAMS),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_runtime_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    /* Only migrate the runtime MSR when the feature was enabled on the
       command line, regardless of the MSR value. */
    if (!cpu->hyperv_runtime) {
        return false;
    }

    return env->msr_hv_runtime != 0;
}

static const VMStateDescription vmstate_msr_hyperv_runtime = {
    .name = "cpu/msr_hyperv_runtime",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_runtime_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_synic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_hv_synic_control != 0 ||
        env->msr_hv_synic_evt_page != 0 ||
        env->msr_hv_synic_msg_page != 0) {
        return true;
    }

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
        if (env->msr_hv_synic_sint[i] != 0) {
            return true;
        }
    }

    return false;
}

/* Hyper-V synthetic interrupt controller (SynIC) MSRs. */
static const VMStateDescription vmstate_msr_hyperv_synic = {
    .name = "cpu/msr_hyperv_synic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_synic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU,
                             HV_SYNIC_SINT_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_stimer_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
        if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
            return true;
        }
    }
    return false;
}

/* Hyper-V synthetic timer configuration and count MSRs. */
static const VMStateDescription vmstate_msr_hyperv_stimer = {
    .name = "cpu/msr_hyperv_stimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_stimer_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config,
                             X86CPU, HV_SYNIC_STIMER_COUNT),
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count,
                             X86CPU, HV_SYNIC_STIMER_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

/* True when any opmask register or any AVX-512-only portion of the
   vector registers holds a non-zero value. */
static bool avx512_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < NB_OPMASK_REGS; i++) {
        if (env->opmask_regs[i]) {
            return true;
        }
    }

    for (i = 0; i < CPU_NB_REGS; i++) {
#define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
        /* Upper 256 bits (quads 4-7) of the low registers... */
        if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
            ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
            return true;
        }
#ifdef TARGET_X86_64
        /* ...and any part of ZMM16-ZMM31. */
        if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
            ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
            ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
            ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
            return true;
        }
#endif
    }

    return false;
}

static const VMStateDescription vmstate_avx512 = {
    .name = "cpu/avx512",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = avx512_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
        VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
#ifdef TARGET_X86_64
        VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
#endif
        VMSTATE_END_OF_LIST()
    }
};

static bool xss_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->xss != 0;
}

/* IA32_XSS (supervisor XSAVE state mask) MSR. */
static const VMStateDescription vmstate_xss = {
    .name = "cpu/xss",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xss_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.xss, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef TARGET_X86_64
static bool pkru_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->pkru != 0;
}

/* Protection-key rights register (PKRU). */
static const VMStateDescription vmstate_pkru = {
    .name = "cpu/pkru",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkru_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT32(env.pkru, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif

static bool tsc_khz_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    /* Gated on the machine-type property so streams to older machine
       types stay compatible. */
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
    PCMachineClass *pcmc = PC_MACHINE_CLASS(mc);
    return env->tsc_khz && pcmc->save_tsc_khz;
}

static const VMStateDescription vmstate_tsc_khz = {
    .name = "cpu/tsc_khz",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_khz_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(env.tsc_khz, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool mcg_ext_ctl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    return cpu->enable_lmce && env->mcg_ext_ctl;
}

/* MCG_EXT_CTL MSR, used with LMCE (local machine check exceptions). */
static const VMStateDescription vmstate_mcg_ext_ctl = {
    .name = "cpu/mcg_ext_ctl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mcg_ext_ctl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * Top-level x86 CPU migration description.  Always-present fields live
 * in .fields (versioned by .version_id); optional state is carried in
 * the .subsections list, each guarded by its *_needed() predicate.
 */
VMStateDescription vmstate_x86_cpu = {
    .name = "cpu",
    .version_id = 12,
    .minimum_version_id = 11,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
        VMSTATE_UINTTL(env.eip, X86CPU),
        VMSTATE_UINTTL(env.eflags, X86CPU),
        VMSTATE_UINT32(env.hflags, X86CPU),
        /* FPU */
        VMSTATE_UINT16(env.fpuc, X86CPU),
        VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
        VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
        VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),

        VMSTATE_STRUCT_ARRAY(env.fpregs, X86CPU, 8, 0, vmstate_fpreg, FPReg),

        VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
        VMSTATE_SEGMENT(env.ldt, X86CPU),
        VMSTATE_SEGMENT(env.tr, X86CPU),
        VMSTATE_SEGMENT(env.gdt, X86CPU),
        VMSTATE_SEGMENT(env.idt, X86CPU),

        VMSTATE_UINT32(env.sysenter_cs, X86CPU),
        VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
        VMSTATE_UINTTL(env.sysenter_eip, X86CPU),

        VMSTATE_UINTTL(env.cr[0], X86CPU),
        VMSTATE_UINTTL(env.cr[2], X86CPU),
        VMSTATE_UINTTL(env.cr[3], X86CPU),
        VMSTATE_UINTTL(env.cr[4], X86CPU),
        VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
        /* MMU */
        VMSTATE_INT32(env.a20_mask, X86CPU),
        /* XMM */
        VMSTATE_UINT32(env.mxcsr, X86CPU),
        VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),

#ifdef TARGET_X86_64
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_UINT64(env.star, X86CPU),
        VMSTATE_UINT64(env.lstar, X86CPU),
        VMSTATE_UINT64(env.cstar, X86CPU),
        VMSTATE_UINT64(env.fmask, X86CPU),
        VMSTATE_UINT64(env.kernelgsbase, X86CPU),
#endif
        VMSTATE_UINT32(env.smbase, X86CPU),

        VMSTATE_UINT64(env.pat, X86CPU),
        VMSTATE_UINT32(env.hflags2, X86CPU),

        /* SVM (nested virtualization) state */
        VMSTATE_UINT64(env.vm_hsave, X86CPU),
        VMSTATE_UINT64(env.vm_vmcb, X86CPU),
        VMSTATE_UINT64(env.tsc_offset, X86CPU),
        VMSTATE_UINT64(env.intercept, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_write, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_write, X86CPU),
        VMSTATE_UINT32(env.intercept_exceptions, X86CPU),
        VMSTATE_UINT8(env.v_tpr, X86CPU),
        /* MTRRs */
        VMSTATE_UINT64_ARRAY(env.mtrr_fixed, X86CPU, 11),
        VMSTATE_UINT64(env.mtrr_deftype, X86CPU),
        VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
        /* KVM-related states */
        VMSTATE_INT32(env.interrupt_injected, X86CPU),
        VMSTATE_UINT32(env.mp_state, X86CPU),
        VMSTATE_UINT64(env.tsc, X86CPU),
        VMSTATE_INT32(env.exception_injected, X86CPU),
        VMSTATE_UINT8(env.soft_interrupt, X86CPU),
        VMSTATE_UINT8(env.nmi_injected, X86CPU),
        VMSTATE_UINT8(env.nmi_pending, X86CPU),
        VMSTATE_UINT8(env.has_error_code, X86CPU),
        VMSTATE_UINT32(env.sipi_vector, X86CPU),
        /* MCE */
        VMSTATE_UINT64(env.mcg_cap, X86CPU),
        VMSTATE_UINT64(env.mcg_status, X86CPU),
        VMSTATE_UINT64(env.mcg_ctl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4),
        /* rdtscp */
        VMSTATE_UINT64(env.tsc_aux, X86CPU),
        /* KVM pvclock msr */
        VMSTATE_UINT64(env.system_time_msr, X86CPU),
        VMSTATE_UINT64(env.wall_clock_msr, X86CPU),
        /* XSAVE related fields */
        VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
        VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
        VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
        VMSTATE_END_OF_LIST()
        /* The above list is not sorted /wrt version numbers, watch out! */
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_async_pf_msr,
        &vmstate_pv_eoi_msr,
        &vmstate_steal_time_msr,
        &vmstate_fpop_ip_dp,
        &vmstate_msr_tsc_adjust,
        &vmstate_msr_tscdeadline,
        &vmstate_msr_ia32_misc_enable,
        &vmstate_msr_ia32_feature_control,
        &vmstate_msr_architectural_pmu,
        &vmstate_mpx,
        &vmstate_msr_hypercall_hypercall,
        &vmstate_msr_hyperv_vapic,
        &vmstate_msr_hyperv_time,
        &vmstate_msr_hyperv_crash,
        &vmstate_msr_hyperv_runtime,
        &vmstate_msr_hyperv_synic,
        &vmstate_msr_hyperv_stimer,
        &vmstate_avx512,
        &vmstate_xss,
        &vmstate_tsc_khz,
#ifdef TARGET_X86_64
        &vmstate_pkru,
#endif
        &vmstate_mcg_ext_ctl,
        NULL
    }
};