1 /* 2 * QEMU System Emulator 3 * 4 * Copyright (c) 2003-2008 Fabrice Bellard 5 * Copyright (c) 2009-2015 Red Hat Inc 6 * 7 * Authors: 8 * Juan Quintela <quintela@redhat.com> 9 * 10 * Permission is hereby granted, free of charge, to any person obtaining a copy 11 * of this software and associated documentation files (the "Software"), to deal 12 * in the Software without restriction, including without limitation the rights 13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 14 * copies of the Software, and to permit persons to whom the Software is 15 * furnished to do so, subject to the following conditions: 16 * 17 * The above copyright notice and this permission notice shall be included in 18 * all copies or substantial portions of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 26 * THE SOFTWARE. 27 */ 28 29 #include "qemu/osdep.h" 30 #include "hw/boards.h" 31 #include "net/net.h" 32 #include "migration.h" 33 #include "migration/snapshot.h" 34 #include "migration-stats.h" 35 #include "migration/vmstate.h" 36 #include "migration/misc.h" 37 #include "migration/register.h" 38 #include "migration/global_state.h" 39 #include "migration/channel-block.h" 40 #include "ram.h" 41 #include "qemu-file.h" 42 #include "savevm.h" 43 #include "postcopy-ram.h" 44 #include "qapi/error.h" 45 #include "qapi/qapi-commands-migration.h" 46 #include "qapi/clone-visitor.h" 47 #include "qapi/qapi-builtin-visit.h" 48 #include "qapi/qmp/qerror.h" 49 #include "qemu/error-report.h" 50 #include "sysemu/cpus.h" 51 #include "exec/memory.h" 52 #include "exec/target_page.h" 53 #include "trace.h" 54 #include "qemu/iov.h" 55 #include "qemu/job.h" 56 #include "qemu/main-loop.h" 57 #include "block/snapshot.h" 58 #include "qemu/cutils.h" 59 #include "io/channel-buffer.h" 60 #include "io/channel-file.h" 61 #include "sysemu/replay.h" 62 #include "sysemu/runstate.h" 63 #include "sysemu/sysemu.h" 64 #include "sysemu/xen.h" 65 #include "migration/colo.h" 66 #include "qemu/bitmap.h" 67 #include "net/announce.h" 68 #include "qemu/yank.h" 69 #include "yank_functions.h" 70 #include "sysemu/qtest.h" 71 #include "options.h" 72 73 const unsigned int postcopy_ram_discard_version; 74 75 /* Subcommands for QEMU_VM_COMMAND */ 76 enum qemu_vm_cmd { 77 MIG_CMD_INVALID = 0, /* Must be 0 */ 78 MIG_CMD_OPEN_RETURN_PATH, /* Tell the dest to open the Return path */ 79 MIG_CMD_PING, /* Request a PONG on the RP */ 80 81 MIG_CMD_POSTCOPY_ADVISE, /* Prior to any page transfers, just 82 warn we might want to do PC */ 83 MIG_CMD_POSTCOPY_LISTEN, /* Start listening for incoming 84 pages as it's running. */ 85 MIG_CMD_POSTCOPY_RUN, /* Start execution */ 86 87 MIG_CMD_POSTCOPY_RAM_DISCARD, /* A list of pages to discard that 88 were previously sent during 89 precopy but are dirty. 
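(See the comment above qemu_savevm_send_postcopy_ram_discard() for the exact payload layout.)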
*/ 90 MIG_CMD_PACKAGED, /* Send a wrapped stream within this stream */ 91 MIG_CMD_ENABLE_COLO, /* Enable COLO */ 92 MIG_CMD_POSTCOPY_RESUME, /* resume postcopy on dest */ 93 MIG_CMD_RECV_BITMAP, /* Request the received bitmap on dst */ 94 MIG_CMD_MAX 95 }; 96 97 #define MAX_VM_CMD_PACKAGED_SIZE UINT32_MAX 98 static struct mig_cmd_args { 99 ssize_t len; /* -1 = variable */ 100 const char *name; 101 } mig_cmd_args[] = { 102 [MIG_CMD_INVALID] = { .len = -1, .name = "INVALID" }, 103 [MIG_CMD_OPEN_RETURN_PATH] = { .len = 0, .name = "OPEN_RETURN_PATH" }, 104 [MIG_CMD_PING] = { .len = sizeof(uint32_t), .name = "PING" }, 105 [MIG_CMD_POSTCOPY_ADVISE] = { .len = -1, .name = "POSTCOPY_ADVISE" }, 106 [MIG_CMD_POSTCOPY_LISTEN] = { .len = 0, .name = "POSTCOPY_LISTEN" }, 107 [MIG_CMD_POSTCOPY_RUN] = { .len = 0, .name = "POSTCOPY_RUN" }, 108 [MIG_CMD_POSTCOPY_RAM_DISCARD] = { 109 .len = -1, .name = "POSTCOPY_RAM_DISCARD" }, 110 [MIG_CMD_POSTCOPY_RESUME] = { .len = 0, .name = "POSTCOPY_RESUME" }, 111 [MIG_CMD_PACKAGED] = { .len = 4, .name = "PACKAGED" }, 112 [MIG_CMD_RECV_BITMAP] = { .len = -1, .name = "RECV_BITMAP" }, 113 [MIG_CMD_MAX] = { .len = -1, .name = "MAX" }, 114 }; 115 116 /* Note for MIG_CMD_POSTCOPY_ADVISE: 117 * The format of the arguments depends on the postcopy mode: 118 * - postcopy RAM only 119 * uint64_t host page size 120 * uint64_t target page size 121 * 122 * - postcopy RAM and postcopy dirty bitmaps 123 * format is the same as for postcopy RAM only 124 * 125 * - postcopy dirty bitmaps only 126 * Nothing. Command length field is 0. 127 * 128 * Be careful: adding a new postcopy entity with some other parameters should 129 * not break the format's self-description ability. A good way is to introduce some 130 * generic extendable format with an exception for the two old entities. 131 */ 132 133 /***********************************************************/ 134 /* savevm/loadvm support */ 135 136 static QEMUFile *qemu_fopen_bdrv(BlockDriverState *bs, int is_writable) 137 { 138 if (is_writable) { 139 return qemu_file_new_output(QIO_CHANNEL(qio_channel_block_new(bs))); 140 } else { 141 return qemu_file_new_input(QIO_CHANNEL(qio_channel_block_new(bs))); 142 } 143 } 144 145 146 /* QEMUFile timer support. 147 * Not in qemu-file.c to not add qemu-timer.c as dependency to qemu-file.c 148 */ 149 150 void timer_put(QEMUFile *f, QEMUTimer *ts) 151 { 152 uint64_t expire_time; 153 154 expire_time = timer_expire_time_ns(ts); 155 qemu_put_be64(f, expire_time); 156 } 157 158 void timer_get(QEMUFile *f, QEMUTimer *ts) 159 { 160 uint64_t expire_time; 161 162 expire_time = qemu_get_be64(f); 163 if (expire_time != -1) { 164 timer_mod_ns(ts, expire_time); 165 } else { 166 timer_del(ts); 167 } 168 } 169 170 171 /* VMState timer support.
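 * (timer_put() above stores the value of timer_expire_time_ns();
 * timer_get() treats (uint64_t)-1 as 'not pending' and deletes the timer
 * instead of re-arming it.)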
172 * Not in vmstate.c to not add qemu-timer.c as dependency to vmstate.c 173 */ 174 175 static int get_timer(QEMUFile *f, void *pv, size_t size, 176 const VMStateField *field) 177 { 178 QEMUTimer *v = pv; 179 timer_get(f, v); 180 return 0; 181 } 182 183 static int put_timer(QEMUFile *f, void *pv, size_t size, 184 const VMStateField *field, JSONWriter *vmdesc) 185 { 186 QEMUTimer *v = pv; 187 timer_put(f, v); 188 189 return 0; 190 } 191 192 const VMStateInfo vmstate_info_timer = { 193 .name = "timer", 194 .get = get_timer, 195 .put = put_timer, 196 }; 197 198 199 typedef struct CompatEntry { 200 char idstr[256]; 201 int instance_id; 202 } CompatEntry; 203 204 typedef struct SaveStateEntry { 205 QTAILQ_ENTRY(SaveStateEntry) entry; 206 char idstr[256]; 207 uint32_t instance_id; 208 int alias_id; 209 int version_id; 210 /* version id read from the stream */ 211 int load_version_id; 212 int section_id; 213 /* section id read from the stream */ 214 int load_section_id; 215 const SaveVMHandlers *ops; 216 const VMStateDescription *vmsd; 217 void *opaque; 218 CompatEntry *compat; 219 int is_ram; 220 } SaveStateEntry; 221 222 typedef struct SaveState { 223 QTAILQ_HEAD(, SaveStateEntry) handlers; 224 SaveStateEntry *handler_pri_head[MIG_PRI_MAX + 1]; 225 int global_section_id; 226 uint32_t len; 227 const char *name; 228 uint32_t target_page_bits; 229 uint32_t caps_count; 230 MigrationCapability *capabilities; 231 QemuUUID uuid; 232 } SaveState; 233 234 static SaveState savevm_state = { 235 .handlers = QTAILQ_HEAD_INITIALIZER(savevm_state.handlers), 236 .handler_pri_head = { [MIG_PRI_DEFAULT ... MIG_PRI_MAX] = NULL }, 237 .global_section_id = 0, 238 }; 239 240 static bool should_validate_capability(int capability) 241 { 242 assert(capability >= 0 && capability < MIGRATION_CAPABILITY__MAX); 243 /* Validate only new capabilities to keep compatibility. 
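Only capabilities accepted here are put on the wire by configuration_pre_save(); checking anything older would needlessly break migration to/from QEMU versions that never sent capability information.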
*/ 244 switch (capability) { 245 case MIGRATION_CAPABILITY_X_IGNORE_SHARED: 246 return true; 247 default: 248 return false; 249 } 250 } 251 252 static uint32_t get_validatable_capabilities_count(void) 253 { 254 MigrationState *s = migrate_get_current(); 255 uint32_t result = 0; 256 int i; 257 for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) { 258 if (should_validate_capability(i) && s->capabilities[i]) { 259 result++; 260 } 261 } 262 return result; 263 } 264 265 static int configuration_pre_save(void *opaque) 266 { 267 SaveState *state = opaque; 268 const char *current_name = MACHINE_GET_CLASS(current_machine)->name; 269 MigrationState *s = migrate_get_current(); 270 int i, j; 271 272 state->len = strlen(current_name); 273 state->name = current_name; 274 state->target_page_bits = qemu_target_page_bits(); 275 276 state->caps_count = get_validatable_capabilities_count(); 277 state->capabilities = g_renew(MigrationCapability, state->capabilities, 278 state->caps_count); 279 for (i = j = 0; i < MIGRATION_CAPABILITY__MAX; i++) { 280 if (should_validate_capability(i) && s->capabilities[i]) { 281 state->capabilities[j++] = i; 282 } 283 } 284 state->uuid = qemu_uuid; 285 286 return 0; 287 } 288 289 static int configuration_post_save(void *opaque) 290 { 291 SaveState *state = opaque; 292 293 g_free(state->capabilities); 294 state->capabilities = NULL; 295 state->caps_count = 0; 296 return 0; 297 } 298 299 static int configuration_pre_load(void *opaque) 300 { 301 SaveState *state = opaque; 302 303 /* If there is no target-page-bits subsection it means the source 304 * predates the variable-target-page-bits support and is using the 305 * minimum possible value for this CPU. 306 */ 307 state->target_page_bits = qemu_target_page_bits_min(); 308 return 0; 309 } 310 311 static bool configuration_validate_capabilities(SaveState *state) 312 { 313 bool ret = true; 314 MigrationState *s = migrate_get_current(); 315 unsigned long *source_caps_bm; 316 int i; 317 318 source_caps_bm = bitmap_new(MIGRATION_CAPABILITY__MAX); 319 for (i = 0; i < state->caps_count; i++) { 320 MigrationCapability capability = state->capabilities[i]; 321 set_bit(capability, source_caps_bm); 322 } 323 324 for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) { 325 bool source_state, target_state; 326 if (!should_validate_capability(i)) { 327 continue; 328 } 329 source_state = test_bit(i, source_caps_bm); 330 target_state = s->capabilities[i]; 331 if (source_state != target_state) { 332 error_report("Capability %s is %s, but received capability is %s", 333 MigrationCapability_str(i), 334 target_state ? "on" : "off", 335 source_state ? 
"on" : "off"); 336 ret = false; 337 /* Don't break here to report all failed capabilities */ 338 } 339 } 340 341 g_free(source_caps_bm); 342 return ret; 343 } 344 345 static int configuration_post_load(void *opaque, int version_id) 346 { 347 SaveState *state = opaque; 348 const char *current_name = MACHINE_GET_CLASS(current_machine)->name; 349 int ret = 0; 350 351 if (strncmp(state->name, current_name, state->len) != 0) { 352 error_report("Machine type received is '%.*s' and local is '%s'", 353 (int) state->len, state->name, current_name); 354 ret = -EINVAL; 355 goto out; 356 } 357 358 if (state->target_page_bits != qemu_target_page_bits()) { 359 error_report("Received TARGET_PAGE_BITS is %d but local is %d", 360 state->target_page_bits, qemu_target_page_bits()); 361 ret = -EINVAL; 362 goto out; 363 } 364 365 if (!configuration_validate_capabilities(state)) { 366 ret = -EINVAL; 367 goto out; 368 } 369 370 out: 371 g_free((void *)state->name); 372 state->name = NULL; 373 state->len = 0; 374 g_free(state->capabilities); 375 state->capabilities = NULL; 376 state->caps_count = 0; 377 378 return ret; 379 } 380 381 static int get_capability(QEMUFile *f, void *pv, size_t size, 382 const VMStateField *field) 383 { 384 MigrationCapability *capability = pv; 385 char capability_str[UINT8_MAX + 1]; 386 uint8_t len; 387 int i; 388 389 len = qemu_get_byte(f); 390 qemu_get_buffer(f, (uint8_t *)capability_str, len); 391 capability_str[len] = '\0'; 392 for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) { 393 if (!strcmp(MigrationCapability_str(i), capability_str)) { 394 *capability = i; 395 return 0; 396 } 397 } 398 error_report("Received unknown capability %s", capability_str); 399 return -EINVAL; 400 } 401 402 static int put_capability(QEMUFile *f, void *pv, size_t size, 403 const VMStateField *field, JSONWriter *vmdesc) 404 { 405 MigrationCapability *capability = pv; 406 const char *capability_str = MigrationCapability_str(*capability); 407 size_t len = strlen(capability_str); 408 assert(len <= UINT8_MAX); 409 410 qemu_put_byte(f, len); 411 qemu_put_buffer(f, (uint8_t *)capability_str, len); 412 return 0; 413 } 414 415 static const VMStateInfo vmstate_info_capability = { 416 .name = "capability", 417 .get = get_capability, 418 .put = put_capability, 419 }; 420 421 /* The target-page-bits subsection is present only if the 422 * target page size is not the same as the default (ie the 423 * minimum page size for a variable-page-size guest CPU). 424 * If it is present then it contains the actual target page 425 * bits for the machine, and migration will fail if the 426 * two ends don't agree about it. 
427 */ 428 static bool vmstate_target_page_bits_needed(void *opaque) 429 { 430 return qemu_target_page_bits() 431 > qemu_target_page_bits_min(); 432 } 433 434 static const VMStateDescription vmstate_target_page_bits = { 435 .name = "configuration/target-page-bits", 436 .version_id = 1, 437 .minimum_version_id = 1, 438 .needed = vmstate_target_page_bits_needed, 439 .fields = (VMStateField[]) { 440 VMSTATE_UINT32(target_page_bits, SaveState), 441 VMSTATE_END_OF_LIST() 442 } 443 }; 444 445 static bool vmstate_capabilites_needed(void *opaque) 446 { 447 return get_validatable_capabilities_count() > 0; 448 } 449 450 static const VMStateDescription vmstate_capabilites = { 451 .name = "configuration/capabilities", 452 .version_id = 1, 453 .minimum_version_id = 1, 454 .needed = vmstate_capabilites_needed, 455 .fields = (VMStateField[]) { 456 VMSTATE_UINT32_V(caps_count, SaveState, 1), 457 VMSTATE_VARRAY_UINT32_ALLOC(capabilities, SaveState, caps_count, 1, 458 vmstate_info_capability, 459 MigrationCapability), 460 VMSTATE_END_OF_LIST() 461 } 462 }; 463 464 static bool vmstate_uuid_needed(void *opaque) 465 { 466 return qemu_uuid_set && migrate_validate_uuid(); 467 } 468 469 static int vmstate_uuid_post_load(void *opaque, int version_id) 470 { 471 SaveState *state = opaque; 472 char uuid_src[UUID_FMT_LEN + 1]; 473 char uuid_dst[UUID_FMT_LEN + 1]; 474 475 if (!qemu_uuid_set) { 476 /* 477 * It's warning because user might not know UUID in some cases, 478 * e.g. load an old snapshot 479 */ 480 qemu_uuid_unparse(&state->uuid, uuid_src); 481 warn_report("UUID is received %s, but local uuid isn't set", 482 uuid_src); 483 return 0; 484 } 485 if (!qemu_uuid_is_equal(&state->uuid, &qemu_uuid)) { 486 qemu_uuid_unparse(&state->uuid, uuid_src); 487 qemu_uuid_unparse(&qemu_uuid, uuid_dst); 488 error_report("UUID received is %s and local is %s", uuid_src, uuid_dst); 489 return -EINVAL; 490 } 491 return 0; 492 } 493 494 static const VMStateDescription vmstate_uuid = { 495 .name = "configuration/uuid", 496 .version_id = 1, 497 .minimum_version_id = 1, 498 .needed = vmstate_uuid_needed, 499 .post_load = vmstate_uuid_post_load, 500 .fields = (VMStateField[]) { 501 VMSTATE_UINT8_ARRAY_V(uuid.data, SaveState, sizeof(QemuUUID), 1), 502 VMSTATE_END_OF_LIST() 503 } 504 }; 505 506 static const VMStateDescription vmstate_configuration = { 507 .name = "configuration", 508 .version_id = 1, 509 .pre_load = configuration_pre_load, 510 .post_load = configuration_post_load, 511 .pre_save = configuration_pre_save, 512 .post_save = configuration_post_save, 513 .fields = (VMStateField[]) { 514 VMSTATE_UINT32(len, SaveState), 515 VMSTATE_VBUFFER_ALLOC_UINT32(name, SaveState, 0, NULL, len), 516 VMSTATE_END_OF_LIST() 517 }, 518 .subsections = (const VMStateDescription *[]) { 519 &vmstate_target_page_bits, 520 &vmstate_capabilites, 521 &vmstate_uuid, 522 NULL 523 } 524 }; 525 526 static void dump_vmstate_vmsd(FILE *out_file, 527 const VMStateDescription *vmsd, int indent, 528 bool is_subsection); 529 530 static void dump_vmstate_vmsf(FILE *out_file, const VMStateField *field, 531 int indent) 532 { 533 fprintf(out_file, "%*s{\n", indent, ""); 534 indent += 2; 535 fprintf(out_file, "%*s\"field\": \"%s\",\n", indent, "", field->name); 536 fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "", 537 field->version_id); 538 fprintf(out_file, "%*s\"field_exists\": %s,\n", indent, "", 539 field->field_exists ? 
"true" : "false"); 540 if (field->flags & VMS_ARRAY) { 541 fprintf(out_file, "%*s\"num\": %d,\n", indent, "", field->num); 542 } 543 fprintf(out_file, "%*s\"size\": %zu", indent, "", field->size); 544 if (field->vmsd != NULL) { 545 fprintf(out_file, ",\n"); 546 dump_vmstate_vmsd(out_file, field->vmsd, indent, false); 547 } 548 fprintf(out_file, "\n%*s}", indent - 2, ""); 549 } 550 551 static void dump_vmstate_vmss(FILE *out_file, 552 const VMStateDescription **subsection, 553 int indent) 554 { 555 if (*subsection != NULL) { 556 dump_vmstate_vmsd(out_file, *subsection, indent, true); 557 } 558 } 559 560 static void dump_vmstate_vmsd(FILE *out_file, 561 const VMStateDescription *vmsd, int indent, 562 bool is_subsection) 563 { 564 if (is_subsection) { 565 fprintf(out_file, "%*s{\n", indent, ""); 566 } else { 567 fprintf(out_file, "%*s\"%s\": {\n", indent, "", "Description"); 568 } 569 indent += 2; 570 fprintf(out_file, "%*s\"name\": \"%s\",\n", indent, "", vmsd->name); 571 fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "", 572 vmsd->version_id); 573 fprintf(out_file, "%*s\"minimum_version_id\": %d", indent, "", 574 vmsd->minimum_version_id); 575 if (vmsd->fields != NULL) { 576 const VMStateField *field = vmsd->fields; 577 bool first; 578 579 fprintf(out_file, ",\n%*s\"Fields\": [\n", indent, ""); 580 first = true; 581 while (field->name != NULL) { 582 if (field->flags & VMS_MUST_EXIST) { 583 /* Ignore VMSTATE_VALIDATE bits; these don't get migrated */ 584 field++; 585 continue; 586 } 587 if (!first) { 588 fprintf(out_file, ",\n"); 589 } 590 dump_vmstate_vmsf(out_file, field, indent + 2); 591 field++; 592 first = false; 593 } 594 assert(field->flags == VMS_END); 595 fprintf(out_file, "\n%*s]", indent, ""); 596 } 597 if (vmsd->subsections != NULL) { 598 const VMStateDescription **subsection = vmsd->subsections; 599 bool first; 600 601 fprintf(out_file, ",\n%*s\"Subsections\": [\n", indent, ""); 602 first = true; 603 while (*subsection != NULL) { 604 if (!first) { 605 fprintf(out_file, ",\n"); 606 } 607 dump_vmstate_vmss(out_file, subsection, indent + 2); 608 subsection++; 609 first = false; 610 } 611 fprintf(out_file, "\n%*s]", indent, ""); 612 } 613 fprintf(out_file, "\n%*s}", indent - 2, ""); 614 } 615 616 static void dump_machine_type(FILE *out_file) 617 { 618 MachineClass *mc; 619 620 mc = MACHINE_GET_CLASS(current_machine); 621 622 fprintf(out_file, " \"vmschkmachine\": {\n"); 623 fprintf(out_file, " \"Name\": \"%s\"\n", mc->name); 624 fprintf(out_file, " },\n"); 625 } 626 627 void dump_vmstate_json_to_file(FILE *out_file) 628 { 629 GSList *list, *elt; 630 bool first; 631 632 fprintf(out_file, "{\n"); 633 dump_machine_type(out_file); 634 635 first = true; 636 list = object_class_get_list(TYPE_DEVICE, true); 637 for (elt = list; elt; elt = elt->next) { 638 DeviceClass *dc = OBJECT_CLASS_CHECK(DeviceClass, elt->data, 639 TYPE_DEVICE); 640 const char *name; 641 int indent = 2; 642 643 if (!dc->vmsd) { 644 continue; 645 } 646 647 if (!first) { 648 fprintf(out_file, ",\n"); 649 } 650 name = object_class_get_name(OBJECT_CLASS(dc)); 651 fprintf(out_file, "%*s\"%s\": {\n", indent, "", name); 652 indent += 2; 653 fprintf(out_file, "%*s\"Name\": \"%s\",\n", indent, "", name); 654 fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "", 655 dc->vmsd->version_id); 656 fprintf(out_file, "%*s\"minimum_version_id\": %d,\n", indent, "", 657 dc->vmsd->minimum_version_id); 658 659 dump_vmstate_vmsd(out_file, dc->vmsd, indent, false); 660 661 fprintf(out_file, "\n%*s}", indent - 2, ""); 662 first = 
false; 663 } 664 fprintf(out_file, "\n}\n"); 665 fclose(out_file); 666 g_slist_free(list); 667 } 668 669 static uint32_t calculate_new_instance_id(const char *idstr) 670 { 671 SaveStateEntry *se; 672 uint32_t instance_id = 0; 673 674 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 675 if (strcmp(idstr, se->idstr) == 0 676 && instance_id <= se->instance_id) { 677 instance_id = se->instance_id + 1; 678 } 679 } 680 /* Make sure we never loop over without being noticed */ 681 assert(instance_id != VMSTATE_INSTANCE_ID_ANY); 682 return instance_id; 683 } 684 685 static int calculate_compat_instance_id(const char *idstr) 686 { 687 SaveStateEntry *se; 688 int instance_id = 0; 689 690 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 691 if (!se->compat) { 692 continue; 693 } 694 695 if (strcmp(idstr, se->compat->idstr) == 0 696 && instance_id <= se->compat->instance_id) { 697 instance_id = se->compat->instance_id + 1; 698 } 699 } 700 return instance_id; 701 } 702 703 static inline MigrationPriority save_state_priority(SaveStateEntry *se) 704 { 705 if (se->vmsd) { 706 return se->vmsd->priority; 707 } 708 return MIG_PRI_DEFAULT; 709 } 710 711 static void savevm_state_handler_insert(SaveStateEntry *nse) 712 { 713 MigrationPriority priority = save_state_priority(nse); 714 SaveStateEntry *se; 715 int i; 716 717 assert(priority <= MIG_PRI_MAX); 718 719 for (i = priority - 1; i >= 0; i--) { 720 se = savevm_state.handler_pri_head[i]; 721 if (se != NULL) { 722 assert(save_state_priority(se) < priority); 723 break; 724 } 725 } 726 727 if (i >= 0) { 728 QTAILQ_INSERT_BEFORE(se, nse, entry); 729 } else { 730 QTAILQ_INSERT_TAIL(&savevm_state.handlers, nse, entry); 731 } 732 733 if (savevm_state.handler_pri_head[priority] == NULL) { 734 savevm_state.handler_pri_head[priority] = nse; 735 } 736 } 737 738 static void savevm_state_handler_remove(SaveStateEntry *se) 739 { 740 SaveStateEntry *next; 741 MigrationPriority priority = save_state_priority(se); 742 743 if (se == savevm_state.handler_pri_head[priority]) { 744 next = QTAILQ_NEXT(se, entry); 745 if (next != NULL && save_state_priority(next) == priority) { 746 savevm_state.handler_pri_head[priority] = next; 747 } else { 748 savevm_state.handler_pri_head[priority] = NULL; 749 } 750 } 751 QTAILQ_REMOVE(&savevm_state.handlers, se, entry); 752 } 753 754 /* TODO: Individual devices generally have very little idea about the rest 755 of the system, so instance_id should be removed/replaced. 756 Meanwhile pass -1 as instance_id if you do not already have a clearly 757 distinguishing id for all instances of your device class. 
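(-1 is VMSTATE_INSTANCE_ID_ANY; calculate_new_instance_id() below then picks the next free id for the given idstr.)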
*/ 758 int register_savevm_live(const char *idstr, 759 uint32_t instance_id, 760 int version_id, 761 const SaveVMHandlers *ops, 762 void *opaque) 763 { 764 SaveStateEntry *se; 765 766 se = g_new0(SaveStateEntry, 1); 767 se->version_id = version_id; 768 se->section_id = savevm_state.global_section_id++; 769 se->ops = ops; 770 se->opaque = opaque; 771 se->vmsd = NULL; 772 /* if this is a live_savem then set is_ram */ 773 if (ops->save_setup != NULL) { 774 se->is_ram = 1; 775 } 776 777 pstrcat(se->idstr, sizeof(se->idstr), idstr); 778 779 if (instance_id == VMSTATE_INSTANCE_ID_ANY) { 780 se->instance_id = calculate_new_instance_id(se->idstr); 781 } else { 782 se->instance_id = instance_id; 783 } 784 assert(!se->compat || se->instance_id == 0); 785 savevm_state_handler_insert(se); 786 return 0; 787 } 788 789 void unregister_savevm(VMStateIf *obj, const char *idstr, void *opaque) 790 { 791 SaveStateEntry *se, *new_se; 792 char id[256] = ""; 793 794 if (obj) { 795 char *oid = vmstate_if_get_id(obj); 796 if (oid) { 797 pstrcpy(id, sizeof(id), oid); 798 pstrcat(id, sizeof(id), "/"); 799 g_free(oid); 800 } 801 } 802 pstrcat(id, sizeof(id), idstr); 803 804 QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) { 805 if (strcmp(se->idstr, id) == 0 && se->opaque == opaque) { 806 savevm_state_handler_remove(se); 807 g_free(se->compat); 808 g_free(se); 809 } 810 } 811 } 812 813 /* 814 * Perform some basic checks on vmsd's at registration 815 * time. 816 */ 817 static void vmstate_check(const VMStateDescription *vmsd) 818 { 819 const VMStateField *field = vmsd->fields; 820 const VMStateDescription **subsection = vmsd->subsections; 821 822 if (field) { 823 while (field->name) { 824 if (field->flags & (VMS_STRUCT | VMS_VSTRUCT)) { 825 /* Recurse to sub structures */ 826 vmstate_check(field->vmsd); 827 } 828 /* Carry on */ 829 field++; 830 } 831 /* Check for the end of field list canary */ 832 if (field->flags != VMS_END) { 833 error_report("VMSTATE not ending with VMS_END: %s", vmsd->name); 834 g_assert_not_reached(); 835 } 836 } 837 838 while (subsection && *subsection) { 839 /* 840 * The name of a subsection should start with the name of the 841 * current object. 842 */ 843 assert(!strncmp(vmsd->name, (*subsection)->name, strlen(vmsd->name))); 844 vmstate_check(*subsection); 845 subsection++; 846 } 847 } 848 849 int vmstate_register_with_alias_id(VMStateIf *obj, uint32_t instance_id, 850 const VMStateDescription *vmsd, 851 void *opaque, int alias_id, 852 int required_for_version, 853 Error **errp) 854 { 855 SaveStateEntry *se; 856 857 /* If this triggers, alias support can be dropped for the vmsd. */ 858 assert(alias_id == -1 || required_for_version >= vmsd->minimum_version_id); 859 860 se = g_new0(SaveStateEntry, 1); 861 se->version_id = vmsd->version_id; 862 se->section_id = savevm_state.global_section_id++; 863 se->opaque = opaque; 864 se->vmsd = vmsd; 865 se->alias_id = alias_id; 866 867 if (obj) { 868 char *id = vmstate_if_get_id(obj); 869 if (id) { 870 if (snprintf(se->idstr, sizeof(se->idstr), "%s/", id) >= 871 sizeof(se->idstr)) { 872 error_setg(errp, "Path too long for VMState (%s)", id); 873 g_free(id); 874 g_free(se); 875 876 return -1; 877 } 878 g_free(id); 879 880 se->compat = g_new0(CompatEntry, 1); 881 pstrcpy(se->compat->idstr, sizeof(se->compat->idstr), vmsd->name); 882 se->compat->instance_id = instance_id == VMSTATE_INSTANCE_ID_ANY ? 
883 calculate_compat_instance_id(vmsd->name) : instance_id; 884 instance_id = VMSTATE_INSTANCE_ID_ANY; 885 } 886 } 887 pstrcat(se->idstr, sizeof(se->idstr), vmsd->name); 888 889 if (instance_id == VMSTATE_INSTANCE_ID_ANY) { 890 se->instance_id = calculate_new_instance_id(se->idstr); 891 } else { 892 se->instance_id = instance_id; 893 } 894 895 /* Perform a recursive sanity check during the test runs */ 896 if (qtest_enabled()) { 897 vmstate_check(vmsd); 898 } 899 assert(!se->compat || se->instance_id == 0); 900 savevm_state_handler_insert(se); 901 return 0; 902 } 903 904 void vmstate_unregister(VMStateIf *obj, const VMStateDescription *vmsd, 905 void *opaque) 906 { 907 SaveStateEntry *se, *new_se; 908 909 QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) { 910 if (se->vmsd == vmsd && se->opaque == opaque) { 911 savevm_state_handler_remove(se); 912 g_free(se->compat); 913 g_free(se); 914 } 915 } 916 } 917 918 static int vmstate_load(QEMUFile *f, SaveStateEntry *se) 919 { 920 trace_vmstate_load(se->idstr, se->vmsd ? se->vmsd->name : "(old)"); 921 if (!se->vmsd) { /* Old style */ 922 return se->ops->load_state(f, se->opaque, se->load_version_id); 923 } 924 return vmstate_load_state(f, se->vmsd, se->opaque, se->load_version_id); 925 } 926 927 static void vmstate_save_old_style(QEMUFile *f, SaveStateEntry *se, 928 JSONWriter *vmdesc) 929 { 930 uint64_t old_offset = qemu_file_transferred_fast(f); 931 se->ops->save_state(f, se->opaque); 932 uint64_t size = qemu_file_transferred_fast(f) - old_offset; 933 934 if (vmdesc) { 935 json_writer_int64(vmdesc, "size", size); 936 json_writer_start_array(vmdesc, "fields"); 937 json_writer_start_object(vmdesc, NULL); 938 json_writer_str(vmdesc, "name", "data"); 939 json_writer_int64(vmdesc, "size", size); 940 json_writer_str(vmdesc, "type", "buffer"); 941 json_writer_end_object(vmdesc); 942 json_writer_end_array(vmdesc); 943 } 944 } 945 946 /* 947 * Write the header for device section (QEMU_VM_SECTION START/END/PART/FULL) 948 */ 949 static void save_section_header(QEMUFile *f, SaveStateEntry *se, 950 uint8_t section_type) 951 { 952 qemu_put_byte(f, section_type); 953 qemu_put_be32(f, se->section_id); 954 955 if (section_type == QEMU_VM_SECTION_FULL || 956 section_type == QEMU_VM_SECTION_START) { 957 /* ID string */ 958 size_t len = strlen(se->idstr); 959 qemu_put_byte(f, len); 960 qemu_put_buffer(f, (uint8_t *)se->idstr, len); 961 962 qemu_put_be32(f, se->instance_id); 963 qemu_put_be32(f, se->version_id); 964 } 965 } 966 967 /* 968 * Write a footer onto device sections that catches cases misformatted device 969 * sections. 970 */ 971 static void save_section_footer(QEMUFile *f, SaveStateEntry *se) 972 { 973 if (migrate_get_current()->send_section_footer) { 974 qemu_put_byte(f, QEMU_VM_SECTION_FOOTER); 975 qemu_put_be32(f, se->section_id); 976 } 977 } 978 979 static int vmstate_save(QEMUFile *f, SaveStateEntry *se, JSONWriter *vmdesc) 980 { 981 int ret; 982 983 if ((!se->ops || !se->ops->save_state) && !se->vmsd) { 984 return 0; 985 } 986 if (se->vmsd && !vmstate_save_needed(se->vmsd, se->opaque)) { 987 trace_savevm_section_skip(se->idstr, se->section_id); 988 return 0; 989 } 990 991 trace_savevm_section_start(se->idstr, se->section_id); 992 save_section_header(f, se, QEMU_VM_SECTION_FULL); 993 if (vmdesc) { 994 json_writer_start_object(vmdesc, NULL); 995 json_writer_str(vmdesc, "name", se->idstr); 996 json_writer_int64(vmdesc, "instance_id", se->instance_id); 997 } 998 999 trace_vmstate_save(se->idstr, se->vmsd ? 
se->vmsd->name : "(old)"); 1000 if (!se->vmsd) { 1001 vmstate_save_old_style(f, se, vmdesc); 1002 } else { 1003 ret = vmstate_save_state(f, se->vmsd, se->opaque, vmdesc); 1004 if (ret) { 1005 return ret; 1006 } 1007 } 1008 1009 trace_savevm_section_end(se->idstr, se->section_id, 0); 1010 save_section_footer(f, se); 1011 if (vmdesc) { 1012 json_writer_end_object(vmdesc); 1013 } 1014 return 0; 1015 } 1016 /** 1017 * qemu_savevm_command_send: Send a 'QEMU_VM_COMMAND' type element with the 1018 * command and associated data. 1019 * 1020 * @f: File to send command on 1021 * @command: Command type to send 1022 * @len: Length of associated data 1023 * @data: Data associated with command. 1024 */ 1025 static void qemu_savevm_command_send(QEMUFile *f, 1026 enum qemu_vm_cmd command, 1027 uint16_t len, 1028 uint8_t *data) 1029 { 1030 trace_savevm_command_send(command, len); 1031 qemu_put_byte(f, QEMU_VM_COMMAND); 1032 qemu_put_be16(f, (uint16_t)command); 1033 qemu_put_be16(f, len); 1034 qemu_put_buffer(f, data, len); 1035 qemu_fflush(f); 1036 } 1037 1038 void qemu_savevm_send_colo_enable(QEMUFile *f) 1039 { 1040 trace_savevm_send_colo_enable(); 1041 qemu_savevm_command_send(f, MIG_CMD_ENABLE_COLO, 0, NULL); 1042 } 1043 1044 void qemu_savevm_send_ping(QEMUFile *f, uint32_t value) 1045 { 1046 uint32_t buf; 1047 1048 trace_savevm_send_ping(value); 1049 buf = cpu_to_be32(value); 1050 qemu_savevm_command_send(f, MIG_CMD_PING, sizeof(value), (uint8_t *)&buf); 1051 } 1052 1053 void qemu_savevm_send_open_return_path(QEMUFile *f) 1054 { 1055 trace_savevm_send_open_return_path(); 1056 qemu_savevm_command_send(f, MIG_CMD_OPEN_RETURN_PATH, 0, NULL); 1057 } 1058 1059 /* We have a buffer of data to send; we don't want that all to be loaded 1060 * by the command itself, so the command contains just the length of the 1061 * extra buffer that we then send straight after it. 1062 * TODO: Must be a better way to organise that 1063 * 1064 * Returns: 1065 * 0 on success 1066 * -ve on error 1067 */ 1068 int qemu_savevm_send_packaged(QEMUFile *f, const uint8_t *buf, size_t len) 1069 { 1070 uint32_t tmp; 1071 1072 if (len > MAX_VM_CMD_PACKAGED_SIZE) { 1073 error_report("%s: Unreasonably large packaged state: %zu", 1074 __func__, len); 1075 return -1; 1076 } 1077 1078 tmp = cpu_to_be32(len); 1079 1080 trace_qemu_savevm_send_packaged(); 1081 qemu_savevm_command_send(f, MIG_CMD_PACKAGED, 4, (uint8_t *)&tmp); 1082 1083 qemu_put_buffer(f, buf, len); 1084 1085 return 0; 1086 } 1087 1088 /* Send prior to any postcopy transfer */ 1089 void qemu_savevm_send_postcopy_advise(QEMUFile *f) 1090 { 1091 if (migrate_postcopy_ram()) { 1092 uint64_t tmp[2]; 1093 tmp[0] = cpu_to_be64(ram_pagesize_summary()); 1094 tmp[1] = cpu_to_be64(qemu_target_page_size()); 1095 1096 trace_qemu_savevm_send_postcopy_advise(); 1097 qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE, 1098 16, (uint8_t *)tmp); 1099 } else { 1100 qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE, 0, NULL); 1101 } 1102 } 1103 1104 /* Sent prior to starting the destination running in postcopy, discard pages 1105 * that have already been sent but redirtied on the source. 
1106 * CMD_POSTCOPY_RAM_DISCARD consist of: 1107 * byte version (0) 1108 * byte Length of name field (not including 0) 1109 * n x byte RAM block name 1110 * byte 0 terminator (just for safety) 1111 * n x Byte ranges within the named RAMBlock 1112 * be64 Start of the range 1113 * be64 Length 1114 * 1115 * name: RAMBlock name that these entries are part of 1116 * len: Number of page entries 1117 * start_list: 'len' addresses 1118 * length_list: 'len' addresses 1119 * 1120 */ 1121 void qemu_savevm_send_postcopy_ram_discard(QEMUFile *f, const char *name, 1122 uint16_t len, 1123 uint64_t *start_list, 1124 uint64_t *length_list) 1125 { 1126 uint8_t *buf; 1127 uint16_t tmplen; 1128 uint16_t t; 1129 size_t name_len = strlen(name); 1130 1131 trace_qemu_savevm_send_postcopy_ram_discard(name, len); 1132 assert(name_len < 256); 1133 buf = g_malloc0(1 + 1 + name_len + 1 + (8 + 8) * len); 1134 buf[0] = postcopy_ram_discard_version; 1135 buf[1] = name_len; 1136 memcpy(buf + 2, name, name_len); 1137 tmplen = 2 + name_len; 1138 buf[tmplen++] = '\0'; 1139 1140 for (t = 0; t < len; t++) { 1141 stq_be_p(buf + tmplen, start_list[t]); 1142 tmplen += 8; 1143 stq_be_p(buf + tmplen, length_list[t]); 1144 tmplen += 8; 1145 } 1146 qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_RAM_DISCARD, tmplen, buf); 1147 g_free(buf); 1148 } 1149 1150 /* Get the destination into a state where it can receive postcopy data. */ 1151 void qemu_savevm_send_postcopy_listen(QEMUFile *f) 1152 { 1153 trace_savevm_send_postcopy_listen(); 1154 qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_LISTEN, 0, NULL); 1155 } 1156 1157 /* Kick the destination into running */ 1158 void qemu_savevm_send_postcopy_run(QEMUFile *f) 1159 { 1160 trace_savevm_send_postcopy_run(); 1161 qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_RUN, 0, NULL); 1162 } 1163 1164 void qemu_savevm_send_postcopy_resume(QEMUFile *f) 1165 { 1166 trace_savevm_send_postcopy_resume(); 1167 qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_RESUME, 0, NULL); 1168 } 1169 1170 void qemu_savevm_send_recv_bitmap(QEMUFile *f, char *block_name) 1171 { 1172 size_t len; 1173 char buf[256]; 1174 1175 trace_savevm_send_recv_bitmap(block_name); 1176 1177 buf[0] = len = strlen(block_name); 1178 memcpy(buf + 1, block_name, len); 1179 1180 qemu_savevm_command_send(f, MIG_CMD_RECV_BITMAP, len + 1, (uint8_t *)buf); 1181 } 1182 1183 bool qemu_savevm_state_blocked(Error **errp) 1184 { 1185 SaveStateEntry *se; 1186 1187 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1188 if (se->vmsd && se->vmsd->unmigratable) { 1189 error_setg(errp, "State blocked by non-migratable device '%s'", 1190 se->idstr); 1191 return true; 1192 } 1193 } 1194 return false; 1195 } 1196 1197 void qemu_savevm_non_migratable_list(strList **reasons) 1198 { 1199 SaveStateEntry *se; 1200 1201 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1202 if (se->vmsd && se->vmsd->unmigratable) { 1203 QAPI_LIST_PREPEND(*reasons, 1204 g_strdup_printf("non-migratable device: %s", 1205 se->idstr)); 1206 } 1207 } 1208 } 1209 1210 void qemu_savevm_state_header(QEMUFile *f) 1211 { 1212 trace_savevm_state_header(); 1213 qemu_put_be32(f, QEMU_VM_FILE_MAGIC); 1214 qemu_put_be32(f, QEMU_VM_FILE_VERSION); 1215 1216 if (migrate_get_current()->send_configuration) { 1217 qemu_put_byte(f, QEMU_VM_CONFIGURATION); 1218 vmstate_save_state(f, &vmstate_configuration, &savevm_state, 0); 1219 } 1220 } 1221 1222 bool qemu_savevm_state_guest_unplug_pending(void) 1223 { 1224 SaveStateEntry *se; 1225 1226 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1227 if (se->vmsd && 
se->vmsd->dev_unplug_pending && 1228 se->vmsd->dev_unplug_pending(se->opaque)) { 1229 return true; 1230 } 1231 } 1232 1233 return false; 1234 } 1235 1236 void qemu_savevm_state_setup(QEMUFile *f) 1237 { 1238 MigrationState *ms = migrate_get_current(); 1239 SaveStateEntry *se; 1240 Error *local_err = NULL; 1241 int ret; 1242 1243 ms->vmdesc = json_writer_new(false); 1244 json_writer_start_object(ms->vmdesc, NULL); 1245 json_writer_int64(ms->vmdesc, "page_size", qemu_target_page_size()); 1246 json_writer_start_array(ms->vmdesc, "devices"); 1247 1248 trace_savevm_state_setup(); 1249 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1250 if (se->vmsd && se->vmsd->early_setup) { 1251 ret = vmstate_save(f, se, ms->vmdesc); 1252 if (ret) { 1253 qemu_file_set_error(f, ret); 1254 break; 1255 } 1256 continue; 1257 } 1258 1259 if (!se->ops || !se->ops->save_setup) { 1260 continue; 1261 } 1262 if (se->ops->is_active) { 1263 if (!se->ops->is_active(se->opaque)) { 1264 continue; 1265 } 1266 } 1267 save_section_header(f, se, QEMU_VM_SECTION_START); 1268 1269 ret = se->ops->save_setup(f, se->opaque); 1270 save_section_footer(f, se); 1271 if (ret < 0) { 1272 qemu_file_set_error(f, ret); 1273 break; 1274 } 1275 } 1276 1277 if (precopy_notify(PRECOPY_NOTIFY_SETUP, &local_err)) { 1278 error_report_err(local_err); 1279 } 1280 } 1281 1282 int qemu_savevm_state_resume_prepare(MigrationState *s) 1283 { 1284 SaveStateEntry *se; 1285 int ret; 1286 1287 trace_savevm_state_resume_prepare(); 1288 1289 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1290 if (!se->ops || !se->ops->resume_prepare) { 1291 continue; 1292 } 1293 if (se->ops->is_active) { 1294 if (!se->ops->is_active(se->opaque)) { 1295 continue; 1296 } 1297 } 1298 ret = se->ops->resume_prepare(s, se->opaque); 1299 if (ret < 0) { 1300 return ret; 1301 } 1302 } 1303 1304 return 0; 1305 } 1306 1307 /* 1308 * This function has three return values: 1309 * negative: there was an error, and we have -errno. 1310 * 0 : We haven't finished, the caller has to go again 1311 * 1 : We have finished, we can go to the complete phase 1312 */ 1313 int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy) 1314 { 1315 SaveStateEntry *se; 1316 int ret = 1; 1317 1318 trace_savevm_state_iterate(); 1319 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1320 if (!se->ops || !se->ops->save_live_iterate) { 1321 continue; 1322 } 1323 if (se->ops->is_active && 1324 !se->ops->is_active(se->opaque)) { 1325 continue; 1326 } 1327 if (se->ops->is_active_iterate && 1328 !se->ops->is_active_iterate(se->opaque)) { 1329 continue; 1330 } 1331 /* 1332 * In the postcopy phase, any device that doesn't know how to 1333 * do postcopy should have saved its state in the _complete 1334 * call that's already run; it might get confused if we call 1335 * iterate afterwards.
1336 */ 1337 if (postcopy && 1338 !(se->ops->has_postcopy && se->ops->has_postcopy(se->opaque))) { 1339 continue; 1340 } 1341 if (migration_rate_exceeded(f)) { 1342 return 0; 1343 } 1344 trace_savevm_section_start(se->idstr, se->section_id); 1345 1346 save_section_header(f, se, QEMU_VM_SECTION_PART); 1347 1348 ret = se->ops->save_live_iterate(f, se->opaque); 1349 trace_savevm_section_end(se->idstr, se->section_id, ret); 1350 save_section_footer(f, se); 1351 1352 if (ret < 0) { 1353 error_report("failed to save SaveStateEntry with id(name): " 1354 "%d(%s): %d", 1355 se->section_id, se->idstr, ret); 1356 qemu_file_set_error(f, ret); 1357 } 1358 if (ret <= 0) { 1359 /* Do not proceed to the next vmstate before this one reported 1360 completion of the current stage. This serializes the migration 1361 and reduces the probability that a faster changing state is 1362 synchronized over and over again. */ 1363 break; 1364 } 1365 } 1366 return ret; 1367 } 1368 1369 static bool should_send_vmdesc(void) 1370 { 1371 MachineState *machine = MACHINE(qdev_get_machine()); 1372 bool in_postcopy = migration_in_postcopy(); 1373 return !machine->suppress_vmdesc && !in_postcopy; 1374 } 1375 1376 /* 1377 * Calls the save_live_complete_postcopy methods 1378 * causing the last few pages to be sent immediately and doing any associated 1379 * cleanup. 1380 * Note postcopy also calls qemu_savevm_state_complete_precopy to complete 1381 * all the other devices, but that happens at the point we switch to postcopy. 1382 */ 1383 void qemu_savevm_state_complete_postcopy(QEMUFile *f) 1384 { 1385 SaveStateEntry *se; 1386 int ret; 1387 1388 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1389 if (!se->ops || !se->ops->save_live_complete_postcopy) { 1390 continue; 1391 } 1392 if (se->ops->is_active) { 1393 if (!se->ops->is_active(se->opaque)) { 1394 continue; 1395 } 1396 } 1397 trace_savevm_section_start(se->idstr, se->section_id); 1398 /* Section type */ 1399 qemu_put_byte(f, QEMU_VM_SECTION_END); 1400 qemu_put_be32(f, se->section_id); 1401 1402 ret = se->ops->save_live_complete_postcopy(f, se->opaque); 1403 trace_savevm_section_end(se->idstr, se->section_id, ret); 1404 save_section_footer(f, se); 1405 if (ret < 0) { 1406 qemu_file_set_error(f, ret); 1407 return; 1408 } 1409 } 1410 1411 qemu_put_byte(f, QEMU_VM_EOF); 1412 qemu_fflush(f); 1413 } 1414 1415 static 1416 int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy) 1417 { 1418 SaveStateEntry *se; 1419 int ret; 1420 1421 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1422 if (!se->ops || 1423 (in_postcopy && se->ops->has_postcopy && 1424 se->ops->has_postcopy(se->opaque)) || 1425 !se->ops->save_live_complete_precopy) { 1426 continue; 1427 } 1428 1429 if (se->ops->is_active) { 1430 if (!se->ops->is_active(se->opaque)) { 1431 continue; 1432 } 1433 } 1434 trace_savevm_section_start(se->idstr, se->section_id); 1435 1436 save_section_header(f, se, QEMU_VM_SECTION_END); 1437 1438 ret = se->ops->save_live_complete_precopy(f, se->opaque); 1439 trace_savevm_section_end(se->idstr, se->section_id, ret); 1440 save_section_footer(f, se); 1441 if (ret < 0) { 1442 qemu_file_set_error(f, ret); 1443 return -1; 1444 } 1445 } 1446 1447 return 0; 1448 } 1449 1450 int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f, 1451 bool in_postcopy, 1452 bool inactivate_disks) 1453 { 1454 MigrationState *ms = migrate_get_current(); 1455 JSONWriter *vmdesc = ms->vmdesc; 1456 int vmdesc_len; 1457 SaveStateEntry *se; 1458 int ret; 1459 1460 QTAILQ_FOREACH(se, 
&savevm_state.handlers, entry) { 1461 if (se->vmsd && se->vmsd->early_setup) { 1462 /* Already saved during qemu_savevm_state_setup(). */ 1463 continue; 1464 } 1465 1466 ret = vmstate_save(f, se, vmdesc); 1467 if (ret) { 1468 qemu_file_set_error(f, ret); 1469 return ret; 1470 } 1471 } 1472 1473 if (inactivate_disks) { 1474 /* Inactivate before sending QEMU_VM_EOF so that the 1475 * bdrv_activate_all() on the other end won't fail. */ 1476 ret = bdrv_inactivate_all(); 1477 if (ret) { 1478 error_report("%s: bdrv_inactivate_all() failed (%d)", 1479 __func__, ret); 1480 qemu_file_set_error(f, ret); 1481 return ret; 1482 } 1483 } 1484 if (!in_postcopy) { 1485 /* Postcopy stream will still be going */ 1486 qemu_put_byte(f, QEMU_VM_EOF); 1487 } 1488 1489 json_writer_end_array(vmdesc); 1490 json_writer_end_object(vmdesc); 1491 vmdesc_len = strlen(json_writer_get(vmdesc)); 1492 1493 if (should_send_vmdesc()) { 1494 qemu_put_byte(f, QEMU_VM_VMDESCRIPTION); 1495 qemu_put_be32(f, vmdesc_len); 1496 qemu_put_buffer(f, (uint8_t *)json_writer_get(vmdesc), vmdesc_len); 1497 } 1498 1499 /* Free it now to detect any inconsistencies. */ 1500 json_writer_free(vmdesc); 1501 ms->vmdesc = NULL; 1502 1503 return 0; 1504 } 1505 1506 int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only, 1507 bool inactivate_disks) 1508 { 1509 int ret; 1510 Error *local_err = NULL; 1511 bool in_postcopy = migration_in_postcopy(); 1512 1513 if (precopy_notify(PRECOPY_NOTIFY_COMPLETE, &local_err)) { 1514 error_report_err(local_err); 1515 } 1516 1517 trace_savevm_state_complete_precopy(); 1518 1519 cpu_synchronize_all_states(); 1520 1521 if (!in_postcopy || iterable_only) { 1522 ret = qemu_savevm_state_complete_precopy_iterable(f, in_postcopy); 1523 if (ret) { 1524 return ret; 1525 } 1526 } 1527 1528 if (iterable_only) { 1529 goto flush; 1530 } 1531 1532 ret = qemu_savevm_state_complete_precopy_non_iterable(f, in_postcopy, 1533 inactivate_disks); 1534 if (ret) { 1535 return ret; 1536 } 1537 1538 flush: 1539 qemu_fflush(f); 1540 return 0; 1541 } 1542 1543 /* Give an estimate of the amount left to be transferred, 1544 * the result is split into the amount for units that can and 1545 * for units that can't do postcopy. 
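 * The _estimate variant is expected to be cheap and may be approximate;
 * qemu_savevm_state_pending_exact() below is used when a precise figure is
 * needed and may do more expensive work in the handlers (e.g. syncing dirty
 * bitmaps).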
1546 */ 1547 void qemu_savevm_state_pending_estimate(uint64_t *must_precopy, 1548 uint64_t *can_postcopy) 1549 { 1550 SaveStateEntry *se; 1551 1552 *must_precopy = 0; 1553 *can_postcopy = 0; 1554 1555 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1556 if (!se->ops || !se->ops->state_pending_estimate) { 1557 continue; 1558 } 1559 if (se->ops->is_active) { 1560 if (!se->ops->is_active(se->opaque)) { 1561 continue; 1562 } 1563 } 1564 se->ops->state_pending_estimate(se->opaque, must_precopy, can_postcopy); 1565 } 1566 } 1567 1568 void qemu_savevm_state_pending_exact(uint64_t *must_precopy, 1569 uint64_t *can_postcopy) 1570 { 1571 SaveStateEntry *se; 1572 1573 *must_precopy = 0; 1574 *can_postcopy = 0; 1575 1576 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1577 if (!se->ops || !se->ops->state_pending_exact) { 1578 continue; 1579 } 1580 if (se->ops->is_active) { 1581 if (!se->ops->is_active(se->opaque)) { 1582 continue; 1583 } 1584 } 1585 se->ops->state_pending_exact(se->opaque, must_precopy, can_postcopy); 1586 } 1587 } 1588 1589 void qemu_savevm_state_cleanup(void) 1590 { 1591 SaveStateEntry *se; 1592 Error *local_err = NULL; 1593 1594 if (precopy_notify(PRECOPY_NOTIFY_CLEANUP, &local_err)) { 1595 error_report_err(local_err); 1596 } 1597 1598 trace_savevm_state_cleanup(); 1599 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1600 if (se->ops && se->ops->save_cleanup) { 1601 se->ops->save_cleanup(se->opaque); 1602 } 1603 } 1604 } 1605 1606 static int qemu_savevm_state(QEMUFile *f, Error **errp) 1607 { 1608 int ret; 1609 MigrationState *ms = migrate_get_current(); 1610 MigrationStatus status; 1611 1612 if (migration_is_running(ms->state)) { 1613 error_setg(errp, QERR_MIGRATION_ACTIVE); 1614 return -EINVAL; 1615 } 1616 1617 if (migrate_block()) { 1618 error_setg(errp, "Block migration and snapshots are incompatible"); 1619 return -EINVAL; 1620 } 1621 1622 migrate_init(ms); 1623 memset(&mig_stats, 0, sizeof(mig_stats)); 1624 memset(&compression_counters, 0, sizeof(compression_counters)); 1625 reset_vfio_bytes_transferred(); 1626 ms->to_dst_file = f; 1627 1628 qemu_mutex_unlock_iothread(); 1629 qemu_savevm_state_header(f); 1630 qemu_savevm_state_setup(f); 1631 qemu_mutex_lock_iothread(); 1632 1633 while (qemu_file_get_error(f) == 0) { 1634 if (qemu_savevm_state_iterate(f, false) > 0) { 1635 break; 1636 } 1637 } 1638 1639 ret = qemu_file_get_error(f); 1640 if (ret == 0) { 1641 qemu_savevm_state_complete_precopy(f, false, false); 1642 ret = qemu_file_get_error(f); 1643 } 1644 qemu_savevm_state_cleanup(); 1645 if (ret != 0) { 1646 error_setg_errno(errp, -ret, "Error while writing VM state"); 1647 } 1648 1649 if (ret != 0) { 1650 status = MIGRATION_STATUS_FAILED; 1651 } else { 1652 status = MIGRATION_STATUS_COMPLETED; 1653 } 1654 migrate_set_state(&ms->state, MIGRATION_STATUS_SETUP, status); 1655 1656 /* f is outer parameter, it should not stay in global migration state after 1657 * this function finished */ 1658 ms->to_dst_file = NULL; 1659 1660 return ret; 1661 } 1662 1663 void qemu_savevm_live_state(QEMUFile *f) 1664 { 1665 /* save QEMU_VM_SECTION_END section */ 1666 qemu_savevm_state_complete_precopy(f, true, false); 1667 qemu_put_byte(f, QEMU_VM_EOF); 1668 } 1669 1670 int qemu_save_device_state(QEMUFile *f) 1671 { 1672 SaveStateEntry *se; 1673 1674 if (!migration_in_colo_state()) { 1675 qemu_put_be32(f, QEMU_VM_FILE_MAGIC); 1676 qemu_put_be32(f, QEMU_VM_FILE_VERSION); 1677 } 1678 cpu_synchronize_all_states(); 1679 1680 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1681 int 
ret; 1682 1683 if (se->is_ram) { 1684 continue; 1685 } 1686 ret = vmstate_save(f, se, NULL); 1687 if (ret) { 1688 return ret; 1689 } 1690 } 1691 1692 qemu_put_byte(f, QEMU_VM_EOF); 1693 1694 return qemu_file_get_error(f); 1695 } 1696 1697 static SaveStateEntry *find_se(const char *idstr, uint32_t instance_id) 1698 { 1699 SaveStateEntry *se; 1700 1701 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1702 if (!strcmp(se->idstr, idstr) && 1703 (instance_id == se->instance_id || 1704 instance_id == se->alias_id)) 1705 return se; 1706 /* Migrating from an older version? */ 1707 if (strstr(se->idstr, idstr) && se->compat) { 1708 if (!strcmp(se->compat->idstr, idstr) && 1709 (instance_id == se->compat->instance_id || 1710 instance_id == se->alias_id)) 1711 return se; 1712 } 1713 } 1714 return NULL; 1715 } 1716 1717 enum LoadVMExitCodes { 1718 /* Allow a command to quit all layers of nested loadvm loops */ 1719 LOADVM_QUIT = 1, 1720 }; 1721 1722 /* ------ incoming postcopy messages ------ */ 1723 /* 'advise' arrives before any transfers just to tell us that a postcopy 1724 * *might* happen - it might be skipped if precopy transferred everything 1725 * quickly. 1726 */ 1727 static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis, 1728 uint16_t len) 1729 { 1730 PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_ADVISE); 1731 uint64_t remote_pagesize_summary, local_pagesize_summary, remote_tps; 1732 size_t page_size = qemu_target_page_size(); 1733 Error *local_err = NULL; 1734 1735 trace_loadvm_postcopy_handle_advise(); 1736 if (ps != POSTCOPY_INCOMING_NONE) { 1737 error_report("CMD_POSTCOPY_ADVISE in wrong postcopy state (%d)", ps); 1738 return -1; 1739 } 1740 1741 switch (len) { 1742 case 0: 1743 if (migrate_postcopy_ram()) { 1744 error_report("RAM postcopy is enabled but have 0 byte advise"); 1745 return -EINVAL; 1746 } 1747 return 0; 1748 case 8 + 8: 1749 if (!migrate_postcopy_ram()) { 1750 error_report("RAM postcopy is disabled but have 16 byte advise"); 1751 return -EINVAL; 1752 } 1753 break; 1754 default: 1755 error_report("CMD_POSTCOPY_ADVISE invalid length (%d)", len); 1756 return -EINVAL; 1757 } 1758 1759 if (!postcopy_ram_supported_by_host(mis, &local_err)) { 1760 error_report_err(local_err); 1761 postcopy_state_set(POSTCOPY_INCOMING_NONE); 1762 return -1; 1763 } 1764 1765 remote_pagesize_summary = qemu_get_be64(mis->from_src_file); 1766 local_pagesize_summary = ram_pagesize_summary(); 1767 1768 if (remote_pagesize_summary != local_pagesize_summary) { 1769 /* 1770 * This detects two potential causes of mismatch: 1771 * a) A mismatch in host page sizes 1772 * Some combinations of mismatch are probably possible but it gets 1773 * a bit more complicated. In particular we need to place whole 1774 * host pages on the dest at once, and we need to ensure that we 1775 * handle dirtying to make sure we never end up sending part of 1776 * a hostpage on it's own. 1777 * b) The use of different huge page sizes on source/destination 1778 * a more fine grain test is performed during RAM block migration 1779 * but this test here causes a nice early clear failure, and 1780 * also fails when passed to an older qemu that doesn't 1781 * do huge pages. 
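 * (ram_pagesize_summary() is effectively a bitmap of the page sizes used by
 * the RAM blocks, so any host page size or hugepage difference shows up as a
 * mismatch here.)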
1782 */ 1783 error_report("Postcopy needs matching RAM page sizes (s=%" PRIx64 1784 " d=%" PRIx64 ")", 1785 remote_pagesize_summary, local_pagesize_summary); 1786 return -1; 1787 } 1788 1789 remote_tps = qemu_get_be64(mis->from_src_file); 1790 if (remote_tps != page_size) { 1791 /* 1792 * Again, some differences could be dealt with, but for now keep it 1793 * simple. 1794 */ 1795 error_report("Postcopy needs matching target page sizes (s=%d d=%zd)", 1796 (int)remote_tps, page_size); 1797 return -1; 1798 } 1799 1800 if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_ADVISE, &local_err)) { 1801 error_report_err(local_err); 1802 return -1; 1803 } 1804 1805 if (ram_postcopy_incoming_init(mis)) { 1806 return -1; 1807 } 1808 1809 return 0; 1810 } 1811 1812 /* After postcopy we will be told to throw some pages away since they're 1813 * dirty and will have to be demand fetched. Must happen before CPU is 1814 * started. 1815 * There can be 0..many of these messages, each encoding multiple pages. 1816 */ 1817 static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis, 1818 uint16_t len) 1819 { 1820 int tmp; 1821 char ramid[256]; 1822 PostcopyState ps = postcopy_state_get(); 1823 1824 trace_loadvm_postcopy_ram_handle_discard(); 1825 1826 switch (ps) { 1827 case POSTCOPY_INCOMING_ADVISE: 1828 /* 1st discard */ 1829 tmp = postcopy_ram_prepare_discard(mis); 1830 if (tmp) { 1831 return tmp; 1832 } 1833 break; 1834 1835 case POSTCOPY_INCOMING_DISCARD: 1836 /* Expected state */ 1837 break; 1838 1839 default: 1840 error_report("CMD_POSTCOPY_RAM_DISCARD in wrong postcopy state (%d)", 1841 ps); 1842 return -1; 1843 } 1844 /* We're expecting a 1845 * Version (0) 1846 * a RAM ID string (length byte, name, 0 term) 1847 * then at least 1 16 byte chunk 1848 */ 1849 if (len < (1 + 1 + 1 + 1 + 2 * 8)) { 1850 error_report("CMD_POSTCOPY_RAM_DISCARD invalid length (%d)", len); 1851 return -1; 1852 } 1853 1854 tmp = qemu_get_byte(mis->from_src_file); 1855 if (tmp != postcopy_ram_discard_version) { 1856 error_report("CMD_POSTCOPY_RAM_DISCARD invalid version (%d)", tmp); 1857 return -1; 1858 } 1859 1860 if (!qemu_get_counted_string(mis->from_src_file, ramid)) { 1861 error_report("CMD_POSTCOPY_RAM_DISCARD Failed to read RAMBlock ID"); 1862 return -1; 1863 } 1864 tmp = qemu_get_byte(mis->from_src_file); 1865 if (tmp != 0) { 1866 error_report("CMD_POSTCOPY_RAM_DISCARD missing nil (%d)", tmp); 1867 return -1; 1868 } 1869 1870 len -= 3 + strlen(ramid); 1871 if (len % 16) { 1872 error_report("CMD_POSTCOPY_RAM_DISCARD invalid length (%d)", len); 1873 return -1; 1874 } 1875 trace_loadvm_postcopy_ram_handle_discard_header(ramid, len); 1876 while (len) { 1877 uint64_t start_addr, block_length; 1878 start_addr = qemu_get_be64(mis->from_src_file); 1879 block_length = qemu_get_be64(mis->from_src_file); 1880 1881 len -= 16; 1882 int ret = ram_discard_range(ramid, start_addr, block_length); 1883 if (ret) { 1884 return ret; 1885 } 1886 } 1887 trace_loadvm_postcopy_ram_handle_discard_end(); 1888 1889 return 0; 1890 } 1891 1892 /* 1893 * Triggered by a postcopy_listen command; this thread takes over reading 1894 * the input stream, leaving the main thread free to carry on loading the rest 1895 * of the device state (from RAM). 
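 * The thread is started from loadvm_postcopy_handle_listen() and keeps
 * feeding qemu_loadvm_state_main() until the source sends QEMU_VM_EOF or an
 * error occurs.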
1896 * (TODO:This could do with being in a postcopy file - but there again it's 1897 * just another input loop, not that postcopy specific) 1898 */ 1899 static void *postcopy_ram_listen_thread(void *opaque) 1900 { 1901 MigrationIncomingState *mis = migration_incoming_get_current(); 1902 QEMUFile *f = mis->from_src_file; 1903 int load_res; 1904 MigrationState *migr = migrate_get_current(); 1905 1906 object_ref(OBJECT(migr)); 1907 1908 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, 1909 MIGRATION_STATUS_POSTCOPY_ACTIVE); 1910 qemu_sem_post(&mis->thread_sync_sem); 1911 trace_postcopy_ram_listen_thread_start(); 1912 1913 rcu_register_thread(); 1914 /* 1915 * Because we're a thread and not a coroutine we can't yield 1916 * in qemu_file, and thus we must be blocking now. 1917 */ 1918 qemu_file_set_blocking(f, true); 1919 load_res = qemu_loadvm_state_main(f, mis); 1920 1921 /* 1922 * This is tricky, but, mis->from_src_file can change after it 1923 * returns, when postcopy recovery happened. In the future, we may 1924 * want a wrapper for the QEMUFile handle. 1925 */ 1926 f = mis->from_src_file; 1927 1928 /* And non-blocking again so we don't block in any cleanup */ 1929 qemu_file_set_blocking(f, false); 1930 1931 trace_postcopy_ram_listen_thread_exit(); 1932 if (load_res < 0) { 1933 qemu_file_set_error(f, load_res); 1934 dirty_bitmap_mig_cancel_incoming(); 1935 if (postcopy_state_get() == POSTCOPY_INCOMING_RUNNING && 1936 !migrate_postcopy_ram() && migrate_dirty_bitmaps()) 1937 { 1938 error_report("%s: loadvm failed during postcopy: %d. All states " 1939 "are migrated except dirty bitmaps. Some dirty " 1940 "bitmaps may be lost, and present migrated dirty " 1941 "bitmaps are correctly migrated and valid.", 1942 __func__, load_res); 1943 load_res = 0; /* prevent further exit() */ 1944 } else { 1945 error_report("%s: loadvm failed: %d", __func__, load_res); 1946 migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 1947 MIGRATION_STATUS_FAILED); 1948 } 1949 } 1950 if (load_res >= 0) { 1951 /* 1952 * This looks good, but it's possible that the device loading in the 1953 * main thread hasn't finished yet, and so we might not be in 'RUN' 1954 * state yet; wait for the end of the main thread. 1955 */ 1956 qemu_event_wait(&mis->main_thread_load_event); 1957 } 1958 postcopy_ram_incoming_cleanup(mis); 1959 1960 if (load_res < 0) { 1961 /* 1962 * If something went wrong then we have a bad state so exit; 1963 * depending how far we got it might be possible at this point 1964 * to leave the guest running and fire MCEs for pages that never 1965 * arrived as a desperate recovery step. 1966 */ 1967 rcu_unregister_thread(); 1968 exit(EXIT_FAILURE); 1969 } 1970 1971 migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 1972 MIGRATION_STATUS_COMPLETED); 1973 /* 1974 * If everything has worked fine, then the main thread has waited 1975 * for us to start, and we're the last use of the mis. 1976 * (If something broke then qemu will have to exit anyway since it's 1977 * got a bad migration state). 
1978 */ 1979 migration_incoming_state_destroy(); 1980 qemu_loadvm_state_cleanup(); 1981 1982 rcu_unregister_thread(); 1983 mis->have_listen_thread = false; 1984 postcopy_state_set(POSTCOPY_INCOMING_END); 1985 1986 object_unref(OBJECT(migr)); 1987 1988 return NULL; 1989 } 1990 1991 /* After this message we must be able to immediately receive postcopy data */ 1992 static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis) 1993 { 1994 PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_LISTENING); 1995 Error *local_err = NULL; 1996 1997 trace_loadvm_postcopy_handle_listen("enter"); 1998 1999 if (ps != POSTCOPY_INCOMING_ADVISE && ps != POSTCOPY_INCOMING_DISCARD) { 2000 error_report("CMD_POSTCOPY_LISTEN in wrong postcopy state (%d)", ps); 2001 return -1; 2002 } 2003 if (ps == POSTCOPY_INCOMING_ADVISE) { 2004 /* 2005 * A rare case, we entered listen without having to do any discards, 2006 * so do the setup that's normally done at the time of the 1st discard. 2007 */ 2008 if (migrate_postcopy_ram()) { 2009 postcopy_ram_prepare_discard(mis); 2010 } 2011 } 2012 2013 trace_loadvm_postcopy_handle_listen("after discard"); 2014 2015 /* 2016 * Sensitise RAM - can now generate requests for blocks that don't exist 2017 * However, at this point the CPU shouldn't be running, and the IO 2018 * shouldn't be doing anything yet so don't actually expect requests 2019 */ 2020 if (migrate_postcopy_ram()) { 2021 if (postcopy_ram_incoming_setup(mis)) { 2022 postcopy_ram_incoming_cleanup(mis); 2023 return -1; 2024 } 2025 } 2026 2027 trace_loadvm_postcopy_handle_listen("after uffd"); 2028 2029 if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_LISTEN, &local_err)) { 2030 error_report_err(local_err); 2031 return -1; 2032 } 2033 2034 mis->have_listen_thread = true; 2035 postcopy_thread_create(mis, &mis->listen_thread, "postcopy/listen", 2036 postcopy_ram_listen_thread, QEMU_THREAD_DETACHED); 2037 trace_loadvm_postcopy_handle_listen("return"); 2038 2039 return 0; 2040 } 2041 2042 static void loadvm_postcopy_handle_run_bh(void *opaque) 2043 { 2044 Error *local_err = NULL; 2045 MigrationIncomingState *mis = opaque; 2046 2047 trace_loadvm_postcopy_handle_run_bh("enter"); 2048 2049 /* TODO we should move all of this lot into postcopy_ram.c or a shared code 2050 * in migration.c 2051 */ 2052 cpu_synchronize_all_post_init(); 2053 2054 trace_loadvm_postcopy_handle_run_bh("after cpu sync"); 2055 2056 qemu_announce_self(&mis->announce_timer, migrate_announce_params()); 2057 2058 trace_loadvm_postcopy_handle_run_bh("after announce"); 2059 2060 /* Make sure all file formats throw away their mutable metadata. 2061 * If we get an error here, just don't restart the VM yet. 
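 * (bdrv_activate_all() is the renamed bdrv_invalidate_cache_all(): it
 * re-reads image metadata and takes write ownership of the images on this
 * side, hence the "after invalidate cache" trace point below.)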
*/ 2062 bdrv_activate_all(&local_err); 2063 if (local_err) { 2064 error_report_err(local_err); 2065 local_err = NULL; 2066 autostart = false; 2067 } 2068 2069 trace_loadvm_postcopy_handle_run_bh("after invalidate cache"); 2070 2071 dirty_bitmap_mig_before_vm_start(); 2072 2073 if (autostart) { 2074 /* Hold onto your hats, starting the CPU */ 2075 vm_start(); 2076 } else { 2077 /* leave it paused and let management decide when to start the CPU */ 2078 runstate_set(RUN_STATE_PAUSED); 2079 } 2080 2081 qemu_bh_delete(mis->bh); 2082 2083 trace_loadvm_postcopy_handle_run_bh("return"); 2084 } 2085 2086 /* After all discards we can start running and asking for pages */ 2087 static int loadvm_postcopy_handle_run(MigrationIncomingState *mis) 2088 { 2089 PostcopyState ps = postcopy_state_get(); 2090 2091 trace_loadvm_postcopy_handle_run(); 2092 if (ps != POSTCOPY_INCOMING_LISTENING) { 2093 error_report("CMD_POSTCOPY_RUN in wrong postcopy state (%d)", ps); 2094 return -1; 2095 } 2096 2097 postcopy_state_set(POSTCOPY_INCOMING_RUNNING); 2098 mis->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, mis); 2099 qemu_bh_schedule(mis->bh); 2100 2101 /* We need to finish reading the stream from the package 2102 * and also stop reading anything more from the stream that loaded the 2103 * package (since it's now being read by the listener thread). 2104 * LOADVM_QUIT will quit all the layers of nested loadvm loops. 2105 */ 2106 return LOADVM_QUIT; 2107 } 2108 2109 /* We must be with page_request_mutex held */ 2110 static gboolean postcopy_sync_page_req(gpointer key, gpointer value, 2111 gpointer data) 2112 { 2113 MigrationIncomingState *mis = data; 2114 void *host_addr = (void *) key; 2115 ram_addr_t rb_offset; 2116 RAMBlock *rb; 2117 int ret; 2118 2119 rb = qemu_ram_block_from_host(host_addr, true, &rb_offset); 2120 if (!rb) { 2121 /* 2122 * This should _never_ happen. However be nice for a migrating VM to 2123 * not crash/assert. Post an error (note: intended to not use *_once 2124 * because we do want to see all the illegal addresses; and this can 2125 * never be triggered by the guest so we're safe) and move on next. 2126 */ 2127 error_report("%s: illegal host addr %p", __func__, host_addr); 2128 /* Try the next entry */ 2129 return FALSE; 2130 } 2131 2132 ret = migrate_send_rp_message_req_pages(mis, rb, rb_offset); 2133 if (ret) { 2134 /* Please refer to above comment. */ 2135 error_report("%s: send rp message failed for addr %p", 2136 __func__, host_addr); 2137 return FALSE; 2138 } 2139 2140 trace_postcopy_page_req_sync(host_addr); 2141 2142 return FALSE; 2143 } 2144 2145 static void migrate_send_rp_req_pages_pending(MigrationIncomingState *mis) 2146 { 2147 WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) { 2148 g_tree_foreach(mis->page_requested, postcopy_sync_page_req, mis); 2149 } 2150 } 2151 2152 static int loadvm_postcopy_handle_resume(MigrationIncomingState *mis) 2153 { 2154 if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) { 2155 error_report("%s: illegal resume received", __func__); 2156 /* Don't fail the load, only for this. */ 2157 return 0; 2158 } 2159 2160 /* 2161 * Reset the last_rb before we resend any page req to source again, since 2162 * the source should have it reset already. 2163 */ 2164 mis->last_rb = NULL; 2165 2166 /* 2167 * This means source VM is ready to resume the postcopy migration. 
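 * The handshake below is: flip back to POSTCOPY_ACTIVE, ack the source over
 * the return path (migrate_send_rp_resume_ack()), re-send every still-pending
 * page request (migrate_send_rp_req_pages_pending()), and only then wake the
 * fault thread (and, with preempt enabled, the fast load thread) so they run
 * against the re-established channels.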
2168 */ 2169 migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_RECOVER, 2170 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2171 2172 trace_loadvm_postcopy_handle_resume(); 2173 2174 /* Tell source that "we are ready" */ 2175 migrate_send_rp_resume_ack(mis, MIGRATION_RESUME_ACK_VALUE); 2176 2177 /* 2178 * After a postcopy recovery, the source should have lost the postcopy 2179 * queue, or potentially the requested pages could have been lost during 2180 * the network down phase. Let's re-sync with the source VM by re-sending 2181 * all the pending pages that we eagerly need, so these threads won't get 2182 * blocked too long due to the recovery. 2183 * 2184 * Without this procedure, the faulted destination VM threads (waiting for 2185 * page requests right before the postcopy is interrupted) can keep hanging 2186 * until the pages are sent by the source during the background copying of 2187 * pages, or another thread faulted on the same address accidentally. 2188 */ 2189 migrate_send_rp_req_pages_pending(mis); 2190 2191 /* 2192 * It's time to switch state and release the fault thread to continue 2193 * service page faults. Note that this should be explicitly after the 2194 * above call to migrate_send_rp_req_pages_pending(). In short: 2195 * migrate_send_rp_message_req_pages() is not thread safe, yet. 2196 */ 2197 qemu_sem_post(&mis->postcopy_pause_sem_fault); 2198 2199 if (migrate_postcopy_preempt()) { 2200 /* 2201 * The preempt channel will be created in async manner, now let's 2202 * wait for it and make sure it's created. 2203 */ 2204 qemu_sem_wait(&mis->postcopy_qemufile_dst_done); 2205 assert(mis->postcopy_qemufile_dst); 2206 /* Kick the fast ram load thread too */ 2207 qemu_sem_post(&mis->postcopy_pause_sem_fast_load); 2208 } 2209 2210 return 0; 2211 } 2212 2213 /** 2214 * Immediately following this command is a blob of data containing an embedded 2215 * chunk of migration stream; read it and load it. 2216 * 2217 * @mis: Incoming state 2218 * @length: Length of packaged data to read 2219 * 2220 * Returns: Negative values on error 2221 * 2222 */ 2223 static int loadvm_handle_cmd_packaged(MigrationIncomingState *mis) 2224 { 2225 int ret; 2226 size_t length; 2227 QIOChannelBuffer *bioc; 2228 2229 length = qemu_get_be32(mis->from_src_file); 2230 trace_loadvm_handle_cmd_packaged(length); 2231 2232 if (length > MAX_VM_CMD_PACKAGED_SIZE) { 2233 error_report("Unreasonably large packaged state: %zu", length); 2234 return -1; 2235 } 2236 2237 bioc = qio_channel_buffer_new(length); 2238 qio_channel_set_name(QIO_CHANNEL(bioc), "migration-loadvm-buffer"); 2239 ret = qemu_get_buffer(mis->from_src_file, 2240 bioc->data, 2241 length); 2242 if (ret != length) { 2243 object_unref(OBJECT(bioc)); 2244 error_report("CMD_PACKAGED: Buffer receive fail ret=%d length=%zu", 2245 ret, length); 2246 return (ret < 0) ? ret : -EAGAIN; 2247 } 2248 bioc->usage += length; 2249 trace_loadvm_handle_cmd_packaged_received(ret); 2250 2251 QEMUFile *packf = qemu_file_new_input(QIO_CHANNEL(bioc)); 2252 2253 ret = qemu_loadvm_state_main(packf, mis); 2254 trace_loadvm_handle_cmd_packaged_main(ret); 2255 qemu_fclose(packf); 2256 object_unref(OBJECT(bioc)); 2257 2258 return ret; 2259 } 2260 2261 /* 2262 * Handle request that source requests for recved_bitmap on 2263 * destination. 
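 * That is, the source asks the destination to report back, over the return
 * path, the bitmap of pages it has already received for one RAMBlock.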
Payload format: 2264 * 2265 * len (1 byte) + ramblock_name (<255 bytes) 2266 */ 2267 static int loadvm_handle_recv_bitmap(MigrationIncomingState *mis, 2268 uint16_t len) 2269 { 2270 QEMUFile *file = mis->from_src_file; 2271 RAMBlock *rb; 2272 char block_name[256]; 2273 size_t cnt; 2274 2275 cnt = qemu_get_counted_string(file, block_name); 2276 if (!cnt) { 2277 error_report("%s: failed to read block name", __func__); 2278 return -EINVAL; 2279 } 2280 2281 /* Validate before using the data */ 2282 if (qemu_file_get_error(file)) { 2283 return qemu_file_get_error(file); 2284 } 2285 2286 if (len != cnt + 1) { 2287 error_report("%s: invalid payload length (%d)", __func__, len); 2288 return -EINVAL; 2289 } 2290 2291 rb = qemu_ram_block_by_name(block_name); 2292 if (!rb) { 2293 error_report("%s: block '%s' not found", __func__, block_name); 2294 return -EINVAL; 2295 } 2296 2297 migrate_send_rp_recv_bitmap(mis, block_name); 2298 2299 trace_loadvm_handle_recv_bitmap(block_name); 2300 2301 return 0; 2302 } 2303 2304 static int loadvm_process_enable_colo(MigrationIncomingState *mis) 2305 { 2306 int ret = migration_incoming_enable_colo(); 2307 2308 if (!ret) { 2309 ret = colo_init_ram_cache(); 2310 if (ret) { 2311 migration_incoming_disable_colo(); 2312 } 2313 } 2314 return ret; 2315 } 2316 2317 /* 2318 * Process an incoming 'QEMU_VM_COMMAND' 2319 * 0 just a normal return 2320 * LOADVM_QUIT All good, but exit the loop 2321 * <0 Error 2322 */ 2323 static int loadvm_process_command(QEMUFile *f) 2324 { 2325 MigrationIncomingState *mis = migration_incoming_get_current(); 2326 uint16_t cmd; 2327 uint16_t len; 2328 uint32_t tmp32; 2329 2330 cmd = qemu_get_be16(f); 2331 len = qemu_get_be16(f); 2332 2333 /* Check validity before continue processing of cmds */ 2334 if (qemu_file_get_error(f)) { 2335 return qemu_file_get_error(f); 2336 } 2337 2338 if (cmd >= MIG_CMD_MAX || cmd == MIG_CMD_INVALID) { 2339 error_report("MIG_CMD 0x%x unknown (len 0x%x)", cmd, len); 2340 return -EINVAL; 2341 } 2342 2343 trace_loadvm_process_command(mig_cmd_args[cmd].name, len); 2344 2345 if (mig_cmd_args[cmd].len != -1 && mig_cmd_args[cmd].len != len) { 2346 error_report("%s received with bad length - expecting %zu, got %d", 2347 mig_cmd_args[cmd].name, 2348 (size_t)mig_cmd_args[cmd].len, len); 2349 return -ERANGE; 2350 } 2351 2352 switch (cmd) { 2353 case MIG_CMD_OPEN_RETURN_PATH: 2354 if (mis->to_src_file) { 2355 error_report("CMD_OPEN_RETURN_PATH called when RP already open"); 2356 /* Not really a problem, so don't give up */ 2357 return 0; 2358 } 2359 mis->to_src_file = qemu_file_get_return_path(f); 2360 if (!mis->to_src_file) { 2361 error_report("CMD_OPEN_RETURN_PATH failed"); 2362 return -1; 2363 } 2364 2365 /* 2366 * Switchover ack is enabled but no device uses it, so send an ACK to 2367 * source that it's OK to switchover. Do it here, after return path has 2368 * been created. 
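 * (When devices do use it, qemu_loadvm_state_switchover_ack_needed() counts
 * them into switchover_ack_pending_num and the ACK is sent instead from
 * qemu_loadvm_approve_switchover() once that count drains to zero.)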
2369 */ 2370 if (migrate_switchover_ack() && !mis->switchover_ack_pending_num) { 2371 int ret = migrate_send_rp_switchover_ack(mis); 2372 if (ret) { 2373 error_report( 2374 "Could not send switchover ack RP MSG, err %d (%s)", ret, 2375 strerror(-ret)); 2376 return ret; 2377 } 2378 } 2379 break; 2380 2381 case MIG_CMD_PING: 2382 tmp32 = qemu_get_be32(f); 2383 trace_loadvm_process_command_ping(tmp32); 2384 if (!mis->to_src_file) { 2385 error_report("CMD_PING (0x%x) received with no return path", 2386 tmp32); 2387 return -1; 2388 } 2389 migrate_send_rp_pong(mis, tmp32); 2390 break; 2391 2392 case MIG_CMD_PACKAGED: 2393 return loadvm_handle_cmd_packaged(mis); 2394 2395 case MIG_CMD_POSTCOPY_ADVISE: 2396 return loadvm_postcopy_handle_advise(mis, len); 2397 2398 case MIG_CMD_POSTCOPY_LISTEN: 2399 return loadvm_postcopy_handle_listen(mis); 2400 2401 case MIG_CMD_POSTCOPY_RUN: 2402 return loadvm_postcopy_handle_run(mis); 2403 2404 case MIG_CMD_POSTCOPY_RAM_DISCARD: 2405 return loadvm_postcopy_ram_handle_discard(mis, len); 2406 2407 case MIG_CMD_POSTCOPY_RESUME: 2408 return loadvm_postcopy_handle_resume(mis); 2409 2410 case MIG_CMD_RECV_BITMAP: 2411 return loadvm_handle_recv_bitmap(mis, len); 2412 2413 case MIG_CMD_ENABLE_COLO: 2414 return loadvm_process_enable_colo(mis); 2415 } 2416 2417 return 0; 2418 } 2419 2420 /* 2421 * Read a footer off the wire and check that it matches the expected section 2422 * 2423 * Returns: true if the footer was good 2424 * false if there is a problem (and calls error_report to say why) 2425 */ 2426 static bool check_section_footer(QEMUFile *f, SaveStateEntry *se) 2427 { 2428 int ret; 2429 uint8_t read_mark; 2430 uint32_t read_section_id; 2431 2432 if (!migrate_get_current()->send_section_footer) { 2433 /* No footer to check */ 2434 return true; 2435 } 2436 2437 read_mark = qemu_get_byte(f); 2438 2439 ret = qemu_file_get_error(f); 2440 if (ret) { 2441 error_report("%s: Read section footer failed: %d", 2442 __func__, ret); 2443 return false; 2444 } 2445 2446 if (read_mark != QEMU_VM_SECTION_FOOTER) { 2447 error_report("Missing section footer for %s", se->idstr); 2448 return false; 2449 } 2450 2451 read_section_id = qemu_get_be32(f); 2452 if (read_section_id != se->load_section_id) { 2453 error_report("Mismatched section id in footer for %s -" 2454 " read 0x%x expected 0x%x", 2455 se->idstr, read_section_id, se->load_section_id); 2456 return false; 2457 } 2458 2459 /* All good */ 2460 return true; 2461 } 2462 2463 static int 2464 qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis) 2465 { 2466 uint32_t instance_id, version_id, section_id; 2467 SaveStateEntry *se; 2468 char idstr[256]; 2469 int ret; 2470 2471 /* Read section start */ 2472 section_id = qemu_get_be32(f); 2473 if (!qemu_get_counted_string(f, idstr)) { 2474 error_report("Unable to read ID string for section %u", 2475 section_id); 2476 return -EINVAL; 2477 } 2478 instance_id = qemu_get_be32(f); 2479 version_id = qemu_get_be32(f); 2480 2481 ret = qemu_file_get_error(f); 2482 if (ret) { 2483 error_report("%s: Failed to read instance/version ID: %d", 2484 __func__, ret); 2485 return ret; 2486 } 2487 2488 trace_qemu_loadvm_state_section_startfull(section_id, idstr, 2489 instance_id, version_id); 2490 /* Find savevm section */ 2491 se = find_se(idstr, instance_id); 2492 if (se == NULL) { 2493 error_report("Unknown savevm section or instance '%s' %"PRIu32". 
" 2494 "Make sure that your current VM setup matches your " 2495 "saved VM setup, including any hotplugged devices", 2496 idstr, instance_id); 2497 return -EINVAL; 2498 } 2499 2500 /* Validate version */ 2501 if (version_id > se->version_id) { 2502 error_report("savevm: unsupported version %d for '%s' v%d", 2503 version_id, idstr, se->version_id); 2504 return -EINVAL; 2505 } 2506 se->load_version_id = version_id; 2507 se->load_section_id = section_id; 2508 2509 /* Validate if it is a device's state */ 2510 if (xen_enabled() && se->is_ram) { 2511 error_report("loadvm: %s RAM loading not allowed on Xen", idstr); 2512 return -EINVAL; 2513 } 2514 2515 ret = vmstate_load(f, se); 2516 if (ret < 0) { 2517 error_report("error while loading state for instance 0x%"PRIx32" of" 2518 " device '%s'", instance_id, idstr); 2519 return ret; 2520 } 2521 if (!check_section_footer(f, se)) { 2522 return -EINVAL; 2523 } 2524 2525 return 0; 2526 } 2527 2528 static int 2529 qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis) 2530 { 2531 uint32_t section_id; 2532 SaveStateEntry *se; 2533 int ret; 2534 2535 section_id = qemu_get_be32(f); 2536 2537 ret = qemu_file_get_error(f); 2538 if (ret) { 2539 error_report("%s: Failed to read section ID: %d", 2540 __func__, ret); 2541 return ret; 2542 } 2543 2544 trace_qemu_loadvm_state_section_partend(section_id); 2545 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 2546 if (se->load_section_id == section_id) { 2547 break; 2548 } 2549 } 2550 if (se == NULL) { 2551 error_report("Unknown savevm section %d", section_id); 2552 return -EINVAL; 2553 } 2554 2555 ret = vmstate_load(f, se); 2556 if (ret < 0) { 2557 error_report("error while loading state section id %d(%s)", 2558 section_id, se->idstr); 2559 return ret; 2560 } 2561 if (!check_section_footer(f, se)) { 2562 return -EINVAL; 2563 } 2564 2565 return 0; 2566 } 2567 2568 static int qemu_loadvm_state_header(QEMUFile *f) 2569 { 2570 unsigned int v; 2571 int ret; 2572 2573 v = qemu_get_be32(f); 2574 if (v != QEMU_VM_FILE_MAGIC) { 2575 error_report("Not a migration stream"); 2576 return -EINVAL; 2577 } 2578 2579 v = qemu_get_be32(f); 2580 if (v == QEMU_VM_FILE_VERSION_COMPAT) { 2581 error_report("SaveVM v2 format is obsolete and don't work anymore"); 2582 return -ENOTSUP; 2583 } 2584 if (v != QEMU_VM_FILE_VERSION) { 2585 error_report("Unsupported migration stream version"); 2586 return -ENOTSUP; 2587 } 2588 2589 if (migrate_get_current()->send_configuration) { 2590 if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) { 2591 error_report("Configuration section missing"); 2592 qemu_loadvm_state_cleanup(); 2593 return -EINVAL; 2594 } 2595 ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0); 2596 2597 if (ret) { 2598 qemu_loadvm_state_cleanup(); 2599 return ret; 2600 } 2601 } 2602 return 0; 2603 } 2604 2605 static void qemu_loadvm_state_switchover_ack_needed(MigrationIncomingState *mis) 2606 { 2607 SaveStateEntry *se; 2608 2609 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 2610 if (!se->ops || !se->ops->switchover_ack_needed) { 2611 continue; 2612 } 2613 2614 if (se->ops->switchover_ack_needed(se->opaque)) { 2615 mis->switchover_ack_pending_num++; 2616 } 2617 } 2618 2619 trace_loadvm_state_switchover_ack_needed(mis->switchover_ack_pending_num); 2620 } 2621 2622 static int qemu_loadvm_state_setup(QEMUFile *f) 2623 { 2624 SaveStateEntry *se; 2625 int ret; 2626 2627 trace_loadvm_state_setup(); 2628 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 2629 if (!se->ops || !se->ops->load_setup) { 
2630 continue; 2631 } 2632 if (se->ops->is_active) { 2633 if (!se->ops->is_active(se->opaque)) { 2634 continue; 2635 } 2636 } 2637 2638 ret = se->ops->load_setup(f, se->opaque); 2639 if (ret < 0) { 2640 qemu_file_set_error(f, ret); 2641 error_report("Load state of device %s failed", se->idstr); 2642 return ret; 2643 } 2644 } 2645 return 0; 2646 } 2647 2648 void qemu_loadvm_state_cleanup(void) 2649 { 2650 SaveStateEntry *se; 2651 2652 trace_loadvm_state_cleanup(); 2653 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 2654 if (se->ops && se->ops->load_cleanup) { 2655 se->ops->load_cleanup(se->opaque); 2656 } 2657 } 2658 } 2659 2660 /* Return true if we should continue the migration, or false. */ 2661 static bool postcopy_pause_incoming(MigrationIncomingState *mis) 2662 { 2663 int i; 2664 2665 trace_postcopy_pause_incoming(); 2666 2667 assert(migrate_postcopy_ram()); 2668 2669 /* 2670 * Unregister yank with either from/to src would work, since ioc behind it 2671 * is the same 2672 */ 2673 migration_ioc_unregister_yank_from_file(mis->from_src_file); 2674 2675 assert(mis->from_src_file); 2676 qemu_file_shutdown(mis->from_src_file); 2677 qemu_fclose(mis->from_src_file); 2678 mis->from_src_file = NULL; 2679 2680 assert(mis->to_src_file); 2681 qemu_file_shutdown(mis->to_src_file); 2682 qemu_mutex_lock(&mis->rp_mutex); 2683 qemu_fclose(mis->to_src_file); 2684 mis->to_src_file = NULL; 2685 qemu_mutex_unlock(&mis->rp_mutex); 2686 2687 /* 2688 * NOTE: this must happen before reset the PostcopyTmpPages below, 2689 * otherwise it's racy to reset those fields when the fast load thread 2690 * can be accessing it in parallel. 2691 */ 2692 if (mis->postcopy_qemufile_dst) { 2693 qemu_file_shutdown(mis->postcopy_qemufile_dst); 2694 /* Take the mutex to make sure the fast ram load thread halted */ 2695 qemu_mutex_lock(&mis->postcopy_prio_thread_mutex); 2696 migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst); 2697 qemu_fclose(mis->postcopy_qemufile_dst); 2698 mis->postcopy_qemufile_dst = NULL; 2699 qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex); 2700 } 2701 2702 migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 2703 MIGRATION_STATUS_POSTCOPY_PAUSED); 2704 2705 /* Notify the fault thread for the invalidated file handle */ 2706 postcopy_fault_thread_notify(mis); 2707 2708 /* 2709 * If network is interrupted, any temp page we received will be useless 2710 * because we didn't mark them as "received" in receivedmap. After a 2711 * proper recovery later (which will sync src dirty bitmap with receivedmap 2712 * on dest) these cached small pages will be resent again. 2713 */ 2714 for (i = 0; i < mis->postcopy_channels; i++) { 2715 postcopy_temp_page_reset(&mis->postcopy_tmp_pages[i]); 2716 } 2717 2718 error_report("Detected IO failure for postcopy. 
" 2719 "Migration paused."); 2720 2721 while (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) { 2722 qemu_sem_wait(&mis->postcopy_pause_sem_dst); 2723 } 2724 2725 trace_postcopy_pause_incoming_continued(); 2726 2727 return true; 2728 } 2729 2730 int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis) 2731 { 2732 uint8_t section_type; 2733 int ret = 0; 2734 2735 retry: 2736 while (true) { 2737 section_type = qemu_get_byte(f); 2738 2739 ret = qemu_file_get_error_obj_any(f, mis->postcopy_qemufile_dst, NULL); 2740 if (ret) { 2741 break; 2742 } 2743 2744 trace_qemu_loadvm_state_section(section_type); 2745 switch (section_type) { 2746 case QEMU_VM_SECTION_START: 2747 case QEMU_VM_SECTION_FULL: 2748 ret = qemu_loadvm_section_start_full(f, mis); 2749 if (ret < 0) { 2750 goto out; 2751 } 2752 break; 2753 case QEMU_VM_SECTION_PART: 2754 case QEMU_VM_SECTION_END: 2755 ret = qemu_loadvm_section_part_end(f, mis); 2756 if (ret < 0) { 2757 goto out; 2758 } 2759 break; 2760 case QEMU_VM_COMMAND: 2761 ret = loadvm_process_command(f); 2762 trace_qemu_loadvm_state_section_command(ret); 2763 if ((ret < 0) || (ret == LOADVM_QUIT)) { 2764 goto out; 2765 } 2766 break; 2767 case QEMU_VM_EOF: 2768 /* This is the end of migration */ 2769 goto out; 2770 default: 2771 error_report("Unknown savevm section type %d", section_type); 2772 ret = -EINVAL; 2773 goto out; 2774 } 2775 } 2776 2777 out: 2778 if (ret < 0) { 2779 qemu_file_set_error(f, ret); 2780 2781 /* Cancel bitmaps incoming regardless of recovery */ 2782 dirty_bitmap_mig_cancel_incoming(); 2783 2784 /* 2785 * If we are during an active postcopy, then we pause instead 2786 * of bail out to at least keep the VM's dirty data. Note 2787 * that POSTCOPY_INCOMING_LISTENING stage is still not enough, 2788 * during which we're still receiving device states and we 2789 * still haven't yet started the VM on destination. 2790 * 2791 * Only RAM postcopy supports recovery. Still, if RAM postcopy is 2792 * enabled, canceled bitmaps postcopy will not affect RAM postcopy 2793 * recovering. 2794 */ 2795 if (postcopy_state_get() == POSTCOPY_INCOMING_RUNNING && 2796 migrate_postcopy_ram() && postcopy_pause_incoming(mis)) { 2797 /* Reset f to point to the newly created channel */ 2798 f = mis->from_src_file; 2799 goto retry; 2800 } 2801 } 2802 return ret; 2803 } 2804 2805 int qemu_loadvm_state(QEMUFile *f) 2806 { 2807 MigrationIncomingState *mis = migration_incoming_get_current(); 2808 Error *local_err = NULL; 2809 int ret; 2810 2811 if (qemu_savevm_state_blocked(&local_err)) { 2812 error_report_err(local_err); 2813 return -EINVAL; 2814 } 2815 2816 ret = qemu_loadvm_state_header(f); 2817 if (ret) { 2818 return ret; 2819 } 2820 2821 if (qemu_loadvm_state_setup(f) != 0) { 2822 return -EINVAL; 2823 } 2824 2825 if (migrate_switchover_ack()) { 2826 qemu_loadvm_state_switchover_ack_needed(mis); 2827 } 2828 2829 cpu_synchronize_all_pre_loadvm(); 2830 2831 ret = qemu_loadvm_state_main(f, mis); 2832 qemu_event_set(&mis->main_thread_load_event); 2833 2834 trace_qemu_loadvm_state_post_main(ret); 2835 2836 if (mis->have_listen_thread) { 2837 /* Listen thread still going, can't clean up yet */ 2838 return ret; 2839 } 2840 2841 if (ret == 0) { 2842 ret = qemu_file_get_error(f); 2843 } 2844 2845 /* 2846 * Try to read in the VMDESC section as well, so that dumping tools that 2847 * intercept our migration stream have the chance to see it. 
2848 */ 2849 2850 /* We've got to be careful; if we don't read the data and just shut the fd 2851 * then the sender can error if we close while it's still sending. 2852 * We also mustn't read data that isn't there; some transports (RDMA) 2853 * will stall waiting for that data when the source has already closed. 2854 */ 2855 if (ret == 0 && should_send_vmdesc()) { 2856 uint8_t *buf; 2857 uint32_t size; 2858 uint8_t section_type = qemu_get_byte(f); 2859 2860 if (section_type != QEMU_VM_VMDESCRIPTION) { 2861 error_report("Expected vmdescription section, but got %d", 2862 section_type); 2863 /* 2864 * It doesn't seem worth failing at this point since 2865 * we apparently have an otherwise valid VM state 2866 */ 2867 } else { 2868 buf = g_malloc(0x1000); 2869 size = qemu_get_be32(f); 2870 2871 while (size > 0) { 2872 uint32_t read_chunk = MIN(size, 0x1000); 2873 qemu_get_buffer(f, buf, read_chunk); 2874 size -= read_chunk; 2875 } 2876 g_free(buf); 2877 } 2878 } 2879 2880 qemu_loadvm_state_cleanup(); 2881 cpu_synchronize_all_post_init(); 2882 2883 return ret; 2884 } 2885 2886 int qemu_load_device_state(QEMUFile *f) 2887 { 2888 MigrationIncomingState *mis = migration_incoming_get_current(); 2889 int ret; 2890 2891 /* Load QEMU_VM_SECTION_FULL section */ 2892 ret = qemu_loadvm_state_main(f, mis); 2893 if (ret < 0) { 2894 error_report("Failed to load device state: %d", ret); 2895 return ret; 2896 } 2897 2898 cpu_synchronize_all_post_init(); 2899 return 0; 2900 } 2901 2902 int qemu_loadvm_approve_switchover(void) 2903 { 2904 MigrationIncomingState *mis = migration_incoming_get_current(); 2905 2906 if (!mis->switchover_ack_pending_num) { 2907 return -EINVAL; 2908 } 2909 2910 mis->switchover_ack_pending_num--; 2911 trace_loadvm_approve_switchover(mis->switchover_ack_pending_num); 2912 2913 if (mis->switchover_ack_pending_num) { 2914 return 0; 2915 } 2916 2917 return migrate_send_rp_switchover_ack(mis); 2918 } 2919 2920 bool save_snapshot(const char *name, bool overwrite, const char *vmstate, 2921 bool has_devices, strList *devices, Error **errp) 2922 { 2923 BlockDriverState *bs; 2924 QEMUSnapshotInfo sn1, *sn = &sn1; 2925 int ret = -1, ret2; 2926 QEMUFile *f; 2927 int saved_vm_running; 2928 uint64_t vm_state_size; 2929 g_autoptr(GDateTime) now = g_date_time_new_now_local(); 2930 AioContext *aio_context; 2931 2932 GLOBAL_STATE_CODE(); 2933 2934 if (migration_is_blocked(errp)) { 2935 return false; 2936 } 2937 2938 if (!replay_can_snapshot()) { 2939 error_setg(errp, "Record/replay does not allow making snapshot " 2940 "right now. 
Try once more later."); 2941 return false; 2942 } 2943 2944 if (!bdrv_all_can_snapshot(has_devices, devices, errp)) { 2945 return false; 2946 } 2947 2948 /* Delete old snapshots of the same name */ 2949 if (name) { 2950 if (overwrite) { 2951 if (bdrv_all_delete_snapshot(name, has_devices, 2952 devices, errp) < 0) { 2953 return false; 2954 } 2955 } else { 2956 ret2 = bdrv_all_has_snapshot(name, has_devices, devices, errp); 2957 if (ret2 < 0) { 2958 return false; 2959 } 2960 if (ret2 == 1) { 2961 error_setg(errp, 2962 "Snapshot '%s' already exists in one or more devices", 2963 name); 2964 return false; 2965 } 2966 } 2967 } 2968 2969 bs = bdrv_all_find_vmstate_bs(vmstate, has_devices, devices, errp); 2970 if (bs == NULL) { 2971 return false; 2972 } 2973 aio_context = bdrv_get_aio_context(bs); 2974 2975 saved_vm_running = runstate_is_running(); 2976 2977 global_state_store(); 2978 vm_stop(RUN_STATE_SAVE_VM); 2979 2980 bdrv_drain_all_begin(); 2981 2982 aio_context_acquire(aio_context); 2983 2984 memset(sn, 0, sizeof(*sn)); 2985 2986 /* fill auxiliary fields */ 2987 sn->date_sec = g_date_time_to_unix(now); 2988 sn->date_nsec = g_date_time_get_microsecond(now) * 1000; 2989 sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); 2990 if (replay_mode != REPLAY_MODE_NONE) { 2991 sn->icount = replay_get_current_icount(); 2992 } else { 2993 sn->icount = -1ULL; 2994 } 2995 2996 if (name) { 2997 pstrcpy(sn->name, sizeof(sn->name), name); 2998 } else { 2999 g_autofree char *autoname = g_date_time_format(now, "vm-%Y%m%d%H%M%S"); 3000 pstrcpy(sn->name, sizeof(sn->name), autoname); 3001 } 3002 3003 /* save the VM state */ 3004 f = qemu_fopen_bdrv(bs, 1); 3005 if (!f) { 3006 error_setg(errp, "Could not open VM state file"); 3007 goto the_end; 3008 } 3009 ret = qemu_savevm_state(f, errp); 3010 vm_state_size = qemu_file_transferred(f); 3011 ret2 = qemu_fclose(f); 3012 if (ret < 0) { 3013 goto the_end; 3014 } 3015 if (ret2 < 0) { 3016 ret = ret2; 3017 goto the_end; 3018 } 3019 3020 /* The bdrv_all_create_snapshot() call that follows acquires the AioContext 3021 * for itself. BDRV_POLL_WHILE() does not support nested locking because 3022 * it only releases the lock once. Therefore synchronous I/O will deadlock 3023 * unless we release the AioContext before bdrv_all_create_snapshot(). 
3024 */ 3025 aio_context_release(aio_context); 3026 aio_context = NULL; 3027 3028 ret = bdrv_all_create_snapshot(sn, bs, vm_state_size, 3029 has_devices, devices, errp); 3030 if (ret < 0) { 3031 bdrv_all_delete_snapshot(sn->name, has_devices, devices, NULL); 3032 goto the_end; 3033 } 3034 3035 ret = 0; 3036 3037 the_end: 3038 if (aio_context) { 3039 aio_context_release(aio_context); 3040 } 3041 3042 bdrv_drain_all_end(); 3043 3044 if (saved_vm_running) { 3045 vm_start(); 3046 } 3047 return ret == 0; 3048 } 3049 3050 void qmp_xen_save_devices_state(const char *filename, bool has_live, bool live, 3051 Error **errp) 3052 { 3053 QEMUFile *f; 3054 QIOChannelFile *ioc; 3055 int saved_vm_running; 3056 int ret; 3057 3058 if (!has_live) { 3059 /* live default to true so old version of Xen tool stack can have a 3060 * successful live migration */ 3061 live = true; 3062 } 3063 3064 saved_vm_running = runstate_is_running(); 3065 vm_stop(RUN_STATE_SAVE_VM); 3066 global_state_store_running(); 3067 3068 ioc = qio_channel_file_new_path(filename, O_WRONLY | O_CREAT | O_TRUNC, 3069 0660, errp); 3070 if (!ioc) { 3071 goto the_end; 3072 } 3073 qio_channel_set_name(QIO_CHANNEL(ioc), "migration-xen-save-state"); 3074 f = qemu_file_new_output(QIO_CHANNEL(ioc)); 3075 object_unref(OBJECT(ioc)); 3076 ret = qemu_save_device_state(f); 3077 if (ret < 0 || qemu_fclose(f) < 0) { 3078 error_setg(errp, QERR_IO_ERROR); 3079 } else { 3080 /* libxl calls the QMP command "stop" before calling 3081 * "xen-save-devices-state" and in case of migration failure, libxl 3082 * would call "cont". 3083 * So call bdrv_inactivate_all (release locks) here to let the other 3084 * side of the migration take control of the images. 3085 */ 3086 if (live && !saved_vm_running) { 3087 ret = bdrv_inactivate_all(); 3088 if (ret) { 3089 error_setg(errp, "%s: bdrv_inactivate_all() failed (%d)", 3090 __func__, ret); 3091 } 3092 } 3093 } 3094 3095 the_end: 3096 if (saved_vm_running) { 3097 vm_start(); 3098 } 3099 } 3100 3101 void qmp_xen_load_devices_state(const char *filename, Error **errp) 3102 { 3103 QEMUFile *f; 3104 QIOChannelFile *ioc; 3105 int ret; 3106 3107 /* Guest must be paused before loading the device state; the RAM state 3108 * will already have been loaded by xc 3109 */ 3110 if (runstate_is_running()) { 3111 error_setg(errp, "Cannot update device state while vm is running"); 3112 return; 3113 } 3114 vm_stop(RUN_STATE_RESTORE_VM); 3115 3116 ioc = qio_channel_file_new_path(filename, O_RDONLY | O_BINARY, 0, errp); 3117 if (!ioc) { 3118 return; 3119 } 3120 qio_channel_set_name(QIO_CHANNEL(ioc), "migration-xen-load-state"); 3121 f = qemu_file_new_input(QIO_CHANNEL(ioc)); 3122 object_unref(OBJECT(ioc)); 3123 3124 ret = qemu_loadvm_state(f); 3125 qemu_fclose(f); 3126 if (ret < 0) { 3127 error_setg(errp, QERR_IO_ERROR); 3128 } 3129 migration_incoming_state_destroy(); 3130 } 3131 3132 bool load_snapshot(const char *name, const char *vmstate, 3133 bool has_devices, strList *devices, Error **errp) 3134 { 3135 BlockDriverState *bs_vm_state; 3136 QEMUSnapshotInfo sn; 3137 QEMUFile *f; 3138 int ret; 3139 AioContext *aio_context; 3140 MigrationIncomingState *mis = migration_incoming_get_current(); 3141 3142 if (!bdrv_all_can_snapshot(has_devices, devices, errp)) { 3143 return false; 3144 } 3145 ret = bdrv_all_has_snapshot(name, has_devices, devices, errp); 3146 if (ret < 0) { 3147 return false; 3148 } 3149 if (ret == 0) { 3150 error_setg(errp, "Snapshot '%s' does not exist in one or more devices", 3151 name); 3152 return false; 3153 } 3154 3155 
bs_vm_state = bdrv_all_find_vmstate_bs(vmstate, has_devices, devices, errp); 3156 if (!bs_vm_state) { 3157 return false; 3158 } 3159 aio_context = bdrv_get_aio_context(bs_vm_state); 3160 3161 /* Don't even try to load empty VM states */ 3162 aio_context_acquire(aio_context); 3163 ret = bdrv_snapshot_find(bs_vm_state, &sn, name); 3164 aio_context_release(aio_context); 3165 if (ret < 0) { 3166 return false; 3167 } else if (sn.vm_state_size == 0) { 3168 error_setg(errp, "This is a disk-only snapshot. Revert to it " 3169 " offline using qemu-img"); 3170 return false; 3171 } 3172 3173 /* 3174 * Flush the record/replay queue. Now the VM state is going 3175 * to change. Therefore we don't need to preserve its consistency 3176 */ 3177 replay_flush_events(); 3178 3179 /* Flush all IO requests so they don't interfere with the new state. */ 3180 bdrv_drain_all_begin(); 3181 3182 ret = bdrv_all_goto_snapshot(name, has_devices, devices, errp); 3183 if (ret < 0) { 3184 goto err_drain; 3185 } 3186 3187 /* restore the VM state */ 3188 f = qemu_fopen_bdrv(bs_vm_state, 0); 3189 if (!f) { 3190 error_setg(errp, "Could not open VM state file"); 3191 goto err_drain; 3192 } 3193 3194 qemu_system_reset(SHUTDOWN_CAUSE_SNAPSHOT_LOAD); 3195 mis->from_src_file = f; 3196 3197 if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) { 3198 ret = -EINVAL; 3199 goto err_drain; 3200 } 3201 aio_context_acquire(aio_context); 3202 ret = qemu_loadvm_state(f); 3203 migration_incoming_state_destroy(); 3204 aio_context_release(aio_context); 3205 3206 bdrv_drain_all_end(); 3207 3208 if (ret < 0) { 3209 error_setg(errp, "Error %d while loading VM state", ret); 3210 return false; 3211 } 3212 3213 return true; 3214 3215 err_drain: 3216 bdrv_drain_all_end(); 3217 return false; 3218 } 3219 3220 bool delete_snapshot(const char *name, bool has_devices, 3221 strList *devices, Error **errp) 3222 { 3223 if (!bdrv_all_can_snapshot(has_devices, devices, errp)) { 3224 return false; 3225 } 3226 3227 if (bdrv_all_delete_snapshot(name, has_devices, devices, errp) < 0) { 3228 return false; 3229 } 3230 3231 return true; 3232 } 3233 3234 void vmstate_register_ram(MemoryRegion *mr, DeviceState *dev) 3235 { 3236 qemu_ram_set_idstr(mr->ram_block, 3237 memory_region_name(mr), dev); 3238 qemu_ram_set_migratable(mr->ram_block); 3239 } 3240 3241 void vmstate_unregister_ram(MemoryRegion *mr, DeviceState *dev) 3242 { 3243 qemu_ram_unset_idstr(mr->ram_block); 3244 qemu_ram_unset_migratable(mr->ram_block); 3245 } 3246 3247 void vmstate_register_ram_global(MemoryRegion *mr) 3248 { 3249 vmstate_register_ram(mr, NULL); 3250 } 3251 3252 bool vmstate_check_only_migratable(const VMStateDescription *vmsd) 3253 { 3254 /* check needed if --only-migratable is specified */ 3255 if (!only_migratable) { 3256 return true; 3257 } 3258 3259 return !(vmsd && vmsd->unmigratable); 3260 } 3261 3262 typedef struct SnapshotJob { 3263 Job common; 3264 char *tag; 3265 char *vmstate; 3266 strList *devices; 3267 Coroutine *co; 3268 Error **errp; 3269 bool ret; 3270 } SnapshotJob; 3271 3272 static void qmp_snapshot_job_free(SnapshotJob *s) 3273 { 3274 g_free(s->tag); 3275 g_free(s->vmstate); 3276 qapi_free_strList(s->devices); 3277 } 3278 3279 3280 static void snapshot_load_job_bh(void *opaque) 3281 { 3282 Job *job = opaque; 3283 SnapshotJob *s = container_of(job, SnapshotJob, common); 3284 int orig_vm_running; 3285 3286 job_progress_set_remaining(&s->common, 1); 3287 3288 orig_vm_running = runstate_is_running(); 3289 vm_stop(RUN_STATE_RESTORE_VM); 3290 3291 s->ret = 
load_snapshot(s->tag, s->vmstate, true, s->devices, s->errp); 3292 if (s->ret && orig_vm_running) { 3293 vm_start(); 3294 } 3295 3296 job_progress_update(&s->common, 1); 3297 3298 qmp_snapshot_job_free(s); 3299 aio_co_wake(s->co); 3300 } 3301 3302 static void snapshot_save_job_bh(void *opaque) 3303 { 3304 Job *job = opaque; 3305 SnapshotJob *s = container_of(job, SnapshotJob, common); 3306 3307 job_progress_set_remaining(&s->common, 1); 3308 s->ret = save_snapshot(s->tag, false, s->vmstate, 3309 true, s->devices, s->errp); 3310 job_progress_update(&s->common, 1); 3311 3312 qmp_snapshot_job_free(s); 3313 aio_co_wake(s->co); 3314 } 3315 3316 static void snapshot_delete_job_bh(void *opaque) 3317 { 3318 Job *job = opaque; 3319 SnapshotJob *s = container_of(job, SnapshotJob, common); 3320 3321 job_progress_set_remaining(&s->common, 1); 3322 s->ret = delete_snapshot(s->tag, true, s->devices, s->errp); 3323 job_progress_update(&s->common, 1); 3324 3325 qmp_snapshot_job_free(s); 3326 aio_co_wake(s->co); 3327 } 3328 3329 static int coroutine_fn snapshot_save_job_run(Job *job, Error **errp) 3330 { 3331 SnapshotJob *s = container_of(job, SnapshotJob, common); 3332 s->errp = errp; 3333 s->co = qemu_coroutine_self(); 3334 aio_bh_schedule_oneshot(qemu_get_aio_context(), 3335 snapshot_save_job_bh, job); 3336 qemu_coroutine_yield(); 3337 return s->ret ? 0 : -1; 3338 } 3339 3340 static int coroutine_fn snapshot_load_job_run(Job *job, Error **errp) 3341 { 3342 SnapshotJob *s = container_of(job, SnapshotJob, common); 3343 s->errp = errp; 3344 s->co = qemu_coroutine_self(); 3345 aio_bh_schedule_oneshot(qemu_get_aio_context(), 3346 snapshot_load_job_bh, job); 3347 qemu_coroutine_yield(); 3348 return s->ret ? 0 : -1; 3349 } 3350 3351 static int coroutine_fn snapshot_delete_job_run(Job *job, Error **errp) 3352 { 3353 SnapshotJob *s = container_of(job, SnapshotJob, common); 3354 s->errp = errp; 3355 s->co = qemu_coroutine_self(); 3356 aio_bh_schedule_oneshot(qemu_get_aio_context(), 3357 snapshot_delete_job_bh, job); 3358 qemu_coroutine_yield(); 3359 return s->ret ? 
0 : -1; 3360 } 3361 3362 3363 static const JobDriver snapshot_load_job_driver = { 3364 .instance_size = sizeof(SnapshotJob), 3365 .job_type = JOB_TYPE_SNAPSHOT_LOAD, 3366 .run = snapshot_load_job_run, 3367 }; 3368 3369 static const JobDriver snapshot_save_job_driver = { 3370 .instance_size = sizeof(SnapshotJob), 3371 .job_type = JOB_TYPE_SNAPSHOT_SAVE, 3372 .run = snapshot_save_job_run, 3373 }; 3374 3375 static const JobDriver snapshot_delete_job_driver = { 3376 .instance_size = sizeof(SnapshotJob), 3377 .job_type = JOB_TYPE_SNAPSHOT_DELETE, 3378 .run = snapshot_delete_job_run, 3379 }; 3380 3381 3382 void qmp_snapshot_save(const char *job_id, 3383 const char *tag, 3384 const char *vmstate, 3385 strList *devices, 3386 Error **errp) 3387 { 3388 SnapshotJob *s; 3389 3390 s = job_create(job_id, &snapshot_save_job_driver, NULL, 3391 qemu_get_aio_context(), JOB_MANUAL_DISMISS, 3392 NULL, NULL, errp); 3393 if (!s) { 3394 return; 3395 } 3396 3397 s->tag = g_strdup(tag); 3398 s->vmstate = g_strdup(vmstate); 3399 s->devices = QAPI_CLONE(strList, devices); 3400 3401 job_start(&s->common); 3402 } 3403 3404 void qmp_snapshot_load(const char *job_id, 3405 const char *tag, 3406 const char *vmstate, 3407 strList *devices, 3408 Error **errp) 3409 { 3410 SnapshotJob *s; 3411 3412 s = job_create(job_id, &snapshot_load_job_driver, NULL, 3413 qemu_get_aio_context(), JOB_MANUAL_DISMISS, 3414 NULL, NULL, errp); 3415 if (!s) { 3416 return; 3417 } 3418 3419 s->tag = g_strdup(tag); 3420 s->vmstate = g_strdup(vmstate); 3421 s->devices = QAPI_CLONE(strList, devices); 3422 3423 job_start(&s->common); 3424 } 3425 3426 void qmp_snapshot_delete(const char *job_id, 3427 const char *tag, 3428 strList *devices, 3429 Error **errp) 3430 { 3431 SnapshotJob *s; 3432 3433 s = job_create(job_id, &snapshot_delete_job_driver, NULL, 3434 qemu_get_aio_context(), JOB_MANUAL_DISMISS, 3435 NULL, NULL, errp); 3436 if (!s) { 3437 return; 3438 } 3439 3440 s->tag = g_strdup(tag); 3441 s->devices = QAPI_CLONE(strList, devices); 3442 3443 job_start(&s->common); 3444 } 3445
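/*
 * Illustrative QMP usage of the snapshot jobs above ("snap0", "my-snap" and
 * the node names are placeholders; the authoritative contract is the QAPI
 * schema in qapi/migration.json):
 *
 *   -> { "execute": "snapshot-save",
 *        "arguments": { "job-id": "snap0", "tag": "my-snap",
 *                       "vmstate": "disk0", "devices": [ "disk0", "disk1" ] } }
 *   <- { "return": {} }
 *
 * The job is created with JOB_MANUAL_DISMISS, so poll "query-jobs" until it
 * reaches "concluded", check for an "error" member, then "job-dismiss" it.
 */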