/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/boards.h"
#include "net/net.h"
#include "migration.h"
#include "migration/snapshot.h"
#include "migration/vmstate.h"
#include "migration/misc.h"
#include "migration/register.h"
#include "migration/global_state.h"
#include "migration/channel-block.h"
#include "ram.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-builtin-visit.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "sysemu/cpus.h"
#include "exec/memory.h"
#include "exec/target_page.h"
#include "trace.h"
#include "qemu/iov.h"
#include "qemu/job.h"
#include "qemu/main-loop.h"
#include "block/snapshot.h"
#include "qemu/cutils.h"
#include "io/channel-buffer.h"
#include "io/channel-file.h"
#include "sysemu/replay.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/xen.h"
#include "migration/colo.h"
#include "qemu/bitmap.h"
#include "net/announce.h"
#include "qemu/yank.h"
#include "yank_functions.h"
#include "sysemu/qtest.h"

const unsigned int postcopy_ram_discard_version;

/* Subcommands for QEMU_VM_COMMAND */
enum qemu_vm_cmd {
    MIG_CMD_INVALID = 0,       /* Must be 0 */
    MIG_CMD_OPEN_RETURN_PATH,  /* Tell the dest to open the Return path */
    MIG_CMD_PING,              /* Request a PONG on the RP */

    MIG_CMD_POSTCOPY_ADVISE,       /* Prior to any page transfers, just
                                      warn we might want to do PC */
    MIG_CMD_POSTCOPY_LISTEN,       /* Start listening for incoming
                                      pages as it's running. */
    MIG_CMD_POSTCOPY_RUN,          /* Start execution */

    MIG_CMD_POSTCOPY_RAM_DISCARD,  /* A list of pages to discard that
                                      were previously sent during
                                      precopy but are dirty. */
    MIG_CMD_PACKAGED,          /* Send a wrapped stream within this stream */
    MIG_CMD_ENABLE_COLO,       /* Enable COLO */
    MIG_CMD_POSTCOPY_RESUME,   /* resume postcopy on dest */
    MIG_CMD_RECV_BITMAP,       /* Request for recved bitmap on dst */
    MIG_CMD_MAX
};

#define MAX_VM_CMD_PACKAGED_SIZE UINT32_MAX
static struct mig_cmd_args {
    ssize_t len; /* -1 = variable */
    const char *name;
} mig_cmd_args[] = {
    [MIG_CMD_INVALID]          = { .len = -1, .name = "INVALID" },
    [MIG_CMD_OPEN_RETURN_PATH] = { .len =  0, .name = "OPEN_RETURN_PATH" },
    [MIG_CMD_PING]             = { .len = sizeof(uint32_t), .name = "PING" },
    [MIG_CMD_POSTCOPY_ADVISE]  = { .len = -1, .name = "POSTCOPY_ADVISE" },
    [MIG_CMD_POSTCOPY_LISTEN]  = { .len =  0, .name = "POSTCOPY_LISTEN" },
    [MIG_CMD_POSTCOPY_RUN]     = { .len =  0, .name = "POSTCOPY_RUN" },
    [MIG_CMD_POSTCOPY_RAM_DISCARD] = {
                                   .len = -1, .name = "POSTCOPY_RAM_DISCARD" },
    [MIG_CMD_POSTCOPY_RESUME]  = { .len =  0, .name = "POSTCOPY_RESUME" },
    [MIG_CMD_PACKAGED]         = { .len =  4, .name = "PACKAGED" },
    [MIG_CMD_RECV_BITMAP]      = { .len = -1, .name = "RECV_BITMAP" },
    [MIG_CMD_MAX]              = { .len = -1, .name = "MAX" },
};
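
/*
 * Illustrative sketch (not part of the stream code itself): every
 * QEMU_VM_COMMAND element sent by qemu_savevm_command_send() below shares
 * the same framing.  A MIG_CMD_PING carrying the value 42, for example,
 * is laid out as:
 *
 *   byte   QEMU_VM_COMMAND
 *   be16   MIG_CMD_PING        command
 *   be16   4                   length of associated data
 *   be32   42                  value echoed back in the PONG
 *
 * mig_cmd_args[] records the expected payload length per command, with
 * .len == -1 marking variable-length payloads.
 */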

/* Note for MIG_CMD_POSTCOPY_ADVISE:
 * The format of the arguments depends on the postcopy mode:
 * - postcopy RAM only
 *   uint64_t host page size
 *   uint64_t target page size
 *
 * - postcopy RAM and postcopy dirty bitmaps
 *   format is the same as for postcopy RAM only
 *
 * - postcopy dirty bitmaps only
 *   Nothing. Command length field is 0.
 *
 * Be careful: adding a new postcopy entity with some other parameters should
 * not break format self-description ability. A good way is to introduce some
 * generic extendable format with an exception for the two old entities.
 */
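
/*
 * Worked example (illustrative): with postcopy RAM enabled,
 * qemu_savevm_send_postcopy_advise() sends a 16-byte payload of two be64
 * values - ram_pagesize_summary() and qemu_target_page_size().  On a
 * typical x86_64 host using only 4K pages that is:
 *
 *   be64   0x1000   page size summary (assumed here to be the bitwise OR
 *                   of all RAMBlock page sizes; see ram.c)
 *   be64   0x1000   target page size
 */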

/***********************************************************/
/* savevm/loadvm support */

static QEMUFile *qemu_fopen_bdrv(BlockDriverState *bs, int is_writable)
{
    if (is_writable) {
        return qemu_file_new_output(QIO_CHANNEL(qio_channel_block_new(bs)));
    } else {
        return qemu_file_new_input(QIO_CHANNEL(qio_channel_block_new(bs)));
    }
}


/* QEMUFile timer support.
 * Not in qemu-file.c so as not to add qemu-timer.c as a dependency of
 * qemu-file.c
 */

void timer_put(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    expire_time = timer_expire_time_ns(ts);
    qemu_put_be64(f, expire_time);
}

void timer_get(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    expire_time = qemu_get_be64(f);
    if (expire_time != -1) {
        timer_mod_ns(ts, expire_time);
    } else {
        timer_del(ts);
    }
}
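
/*
 * Hypothetical usage sketch (not built): pairing timer_put()/timer_get()
 * in old-style save/load handlers.  The wire encoding is one be64 absolute
 * expiry time in ns; (uint64_t)-1 means "not pending", in which case
 * timer_get() deletes the timer instead of re-arming it.
 */
#if 0
static void my_dev_save(QEMUFile *f, void *opaque)
{
    MyDevState *s = opaque;         /* hypothetical device state */

    timer_put(f, s->poll_timer);    /* one be64 on the wire */
}

static int my_dev_load(QEMUFile *f, void *opaque, int version_id)
{
    MyDevState *s = opaque;

    timer_get(f, s->poll_timer);    /* re-arms or deletes the timer */
    return 0;
}
#endif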

/* VMState timer support.
 * Not in vmstate.c so as not to add qemu-timer.c as a dependency of vmstate.c
 */

static int get_timer(QEMUFile *f, void *pv, size_t size,
                     const VMStateField *field)
{
    QEMUTimer *v = pv;
    timer_get(f, v);
    return 0;
}

static int put_timer(QEMUFile *f, void *pv, size_t size,
                     const VMStateField *field, JSONWriter *vmdesc)
{
    QEMUTimer *v = pv;
    timer_put(f, v);

    return 0;
}

const VMStateInfo vmstate_info_timer = {
    .name = "timer",
    .get  = get_timer,
    .put  = put_timer,
};


typedef struct CompatEntry {
    char idstr[256];
    int instance_id;
} CompatEntry;

typedef struct SaveStateEntry {
    QTAILQ_ENTRY(SaveStateEntry) entry;
    char idstr[256];
    uint32_t instance_id;
    int alias_id;
    int version_id;
    /* version id read from the stream */
    int load_version_id;
    int section_id;
    /* section id read from the stream */
    int load_section_id;
    const SaveVMHandlers *ops;
    const VMStateDescription *vmsd;
    void *opaque;
    CompatEntry *compat;
    int is_ram;
} SaveStateEntry;

typedef struct SaveState {
    QTAILQ_HEAD(, SaveStateEntry) handlers;
    SaveStateEntry *handler_pri_head[MIG_PRI_MAX + 1];
    int global_section_id;
    uint32_t len;
    const char *name;
    uint32_t target_page_bits;
    uint32_t caps_count;
    MigrationCapability *capabilities;
    QemuUUID uuid;
} SaveState;

static SaveState savevm_state = {
    .handlers = QTAILQ_HEAD_INITIALIZER(savevm_state.handlers),
    .handler_pri_head = { [MIG_PRI_DEFAULT ... MIG_PRI_MAX] = NULL },
    .global_section_id = 0,
};

static bool should_validate_capability(int capability)
{
    assert(capability >= 0 && capability < MIGRATION_CAPABILITY__MAX);
    /* Validate only new capabilities to keep compatibility. */
    switch (capability) {
    case MIGRATION_CAPABILITY_X_IGNORE_SHARED:
        return true;
    default:
        return false;
    }
}

static uint32_t get_validatable_capabilities_count(void)
{
    MigrationState *s = migrate_get_current();
    uint32_t result = 0;
    int i;
    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (should_validate_capability(i) && s->enabled_capabilities[i]) {
            result++;
        }
    }
    return result;
}

static int configuration_pre_save(void *opaque)
{
    SaveState *state = opaque;
    const char *current_name = MACHINE_GET_CLASS(current_machine)->name;
    MigrationState *s = migrate_get_current();
    int i, j;

    state->len = strlen(current_name);
    state->name = current_name;
    state->target_page_bits = qemu_target_page_bits();

    state->caps_count = get_validatable_capabilities_count();
    state->capabilities = g_renew(MigrationCapability, state->capabilities,
                                  state->caps_count);
    for (i = j = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (should_validate_capability(i) && s->enabled_capabilities[i]) {
            state->capabilities[j++] = i;
        }
    }
    state->uuid = qemu_uuid;

    return 0;
}

static int configuration_post_save(void *opaque)
{
    SaveState *state = opaque;

    g_free(state->capabilities);
    state->capabilities = NULL;
    state->caps_count = 0;
    return 0;
}

static int configuration_pre_load(void *opaque)
{
    SaveState *state = opaque;

    /* If there is no target-page-bits subsection it means the source
     * predates the variable-target-page-bits support and is using the
     * minimum possible value for this CPU.
     */
    state->target_page_bits = qemu_target_page_bits_min();
    return 0;
}

static bool configuration_validate_capabilities(SaveState *state)
{
    bool ret = true;
    MigrationState *s = migrate_get_current();
    unsigned long *source_caps_bm;
    int i;

    source_caps_bm = bitmap_new(MIGRATION_CAPABILITY__MAX);
    for (i = 0; i < state->caps_count; i++) {
        MigrationCapability capability = state->capabilities[i];
        set_bit(capability, source_caps_bm);
    }

    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        bool source_state, target_state;
        if (!should_validate_capability(i)) {
            continue;
        }
        source_state = test_bit(i, source_caps_bm);
        target_state = s->enabled_capabilities[i];
        if (source_state != target_state) {
            error_report("Capability %s is %s, but received capability is %s",
                         MigrationCapability_str(i),
                         target_state ? "on" : "off",
                         source_state ? "on" : "off");
            ret = false;
            /* Don't break here to report all failed capabilities */
        }
    }

    g_free(source_caps_bm);
    return ret;
}

static int configuration_post_load(void *opaque, int version_id)
{
    SaveState *state = opaque;
    const char *current_name = MACHINE_GET_CLASS(current_machine)->name;
    int ret = 0;

    if (strncmp(state->name, current_name, state->len) != 0) {
        error_report("Machine type received is '%.*s' and local is '%s'",
                     (int) state->len, state->name, current_name);
        ret = -EINVAL;
        goto out;
    }

    if (state->target_page_bits != qemu_target_page_bits()) {
        error_report("Received TARGET_PAGE_BITS is %d but local is %d",
                     state->target_page_bits, qemu_target_page_bits());
        ret = -EINVAL;
        goto out;
    }

    if (!configuration_validate_capabilities(state)) {
        ret = -EINVAL;
        goto out;
    }

out:
    g_free((void *)state->name);
    state->name = NULL;
    state->len = 0;
    g_free(state->capabilities);
    state->capabilities = NULL;
    state->caps_count = 0;

    return ret;
}

static int get_capability(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field)
{
    MigrationCapability *capability = pv;
    char capability_str[UINT8_MAX + 1];
    uint8_t len;
    int i;

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)capability_str, len);
    capability_str[len] = '\0';
    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (!strcmp(MigrationCapability_str(i), capability_str)) {
            *capability = i;
            return 0;
        }
    }
    error_report("Received unknown capability %s", capability_str);
    return -EINVAL;
}

static int put_capability(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field, JSONWriter *vmdesc)
{
    MigrationCapability *capability = pv;
    const char *capability_str = MigrationCapability_str(*capability);
    size_t len = strlen(capability_str);
    assert(len <= UINT8_MAX);

    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)capability_str, len);
    return 0;
}

static const VMStateInfo vmstate_info_capability = {
    .name = "capability",
    .get  = get_capability,
    .put  = put_capability,
};

/* The target-page-bits subsection is present only if the
 * target page size is not the same as the default (i.e. the
 * minimum page size for a variable-page-size guest CPU).
 * If it is present then it contains the actual target page
 * bits for the machine, and migration will fail if the
 * two ends don't agree about it.
 */
static bool vmstate_target_page_bits_needed(void *opaque)
{
    return qemu_target_page_bits()
        > qemu_target_page_bits_min();
}

static const VMStateDescription vmstate_target_page_bits = {
    .name = "configuration/target-page-bits",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmstate_target_page_bits_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(target_page_bits, SaveState),
        VMSTATE_END_OF_LIST()
    }
};

static bool vmstate_capabilites_needed(void *opaque)
{
    return get_validatable_capabilities_count() > 0;
}

static const VMStateDescription vmstate_capabilites = {
    .name = "configuration/capabilities",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmstate_capabilites_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_V(caps_count, SaveState, 1),
        VMSTATE_VARRAY_UINT32_ALLOC(capabilities, SaveState, caps_count, 1,
                                    vmstate_info_capability,
                                    MigrationCapability),
        VMSTATE_END_OF_LIST()
    }
};

static bool vmstate_uuid_needed(void *opaque)
{
    return qemu_uuid_set && migrate_validate_uuid();
}

static int vmstate_uuid_post_load(void *opaque, int version_id)
{
    SaveState *state = opaque;
    char uuid_src[UUID_FMT_LEN + 1];
    char uuid_dst[UUID_FMT_LEN + 1];

    if (!qemu_uuid_set) {
        /*
         * This is only a warning because the user might not know the UUID
         * in some cases, e.g. when loading an old snapshot.
         */
        qemu_uuid_unparse(&state->uuid, uuid_src);
        warn_report("UUID is received %s, but local uuid isn't set",
                     uuid_src);
        return 0;
    }
    if (!qemu_uuid_is_equal(&state->uuid, &qemu_uuid)) {
        qemu_uuid_unparse(&state->uuid, uuid_src);
        qemu_uuid_unparse(&qemu_uuid, uuid_dst);
        error_report("UUID received is %s and local is %s", uuid_src, uuid_dst);
        return -EINVAL;
    }
    return 0;
}

static const VMStateDescription vmstate_uuid = {
    .name = "configuration/uuid",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmstate_uuid_needed,
    .post_load = vmstate_uuid_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY_V(uuid.data, SaveState, sizeof(QemuUUID), 1),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_configuration = {
    .name = "configuration",
    .version_id = 1,
    .pre_load = configuration_pre_load,
    .post_load = configuration_post_load,
    .pre_save = configuration_pre_save,
    .post_save = configuration_post_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(len, SaveState),
        VMSTATE_VBUFFER_ALLOC_UINT32(name, SaveState, 0, NULL, len),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription *[]) {
        &vmstate_target_page_bits,
        &vmstate_capabilites,
        &vmstate_uuid,
        NULL
    }
};

static void dump_vmstate_vmsd(FILE *out_file,
                              const VMStateDescription *vmsd, int indent,
                              bool is_subsection);

static void dump_vmstate_vmsf(FILE *out_file, const VMStateField *field,
                              int indent)
{
    fprintf(out_file, "%*s{\n", indent, "");
    indent += 2;
    fprintf(out_file, "%*s\"field\": \"%s\",\n", indent, "", field->name);
    fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "",
            field->version_id);
    fprintf(out_file, "%*s\"field_exists\": %s,\n", indent, "",
            field->field_exists ? "true" : "false");
    fprintf(out_file, "%*s\"size\": %zu", indent, "", field->size);
    if (field->vmsd != NULL) {
        fprintf(out_file, ",\n");
        dump_vmstate_vmsd(out_file, field->vmsd, indent, false);
    }
    fprintf(out_file, "\n%*s}", indent - 2, "");
}

static void dump_vmstate_vmss(FILE *out_file,
                              const VMStateDescription **subsection,
                              int indent)
{
    if (*subsection != NULL) {
        dump_vmstate_vmsd(out_file, *subsection, indent, true);
    }
}

static void dump_vmstate_vmsd(FILE *out_file,
                              const VMStateDescription *vmsd, int indent,
                              bool is_subsection)
{
    if (is_subsection) {
        fprintf(out_file, "%*s{\n", indent, "");
    } else {
        fprintf(out_file, "%*s\"%s\": {\n", indent, "", "Description");
    }
    indent += 2;
    fprintf(out_file, "%*s\"name\": \"%s\",\n", indent, "", vmsd->name);
    fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "",
            vmsd->version_id);
    fprintf(out_file, "%*s\"minimum_version_id\": %d", indent, "",
            vmsd->minimum_version_id);
    if (vmsd->fields != NULL) {
        const VMStateField *field = vmsd->fields;
        bool first;

        fprintf(out_file, ",\n%*s\"Fields\": [\n", indent, "");
        first = true;
        while (field->name != NULL) {
            if (field->flags & VMS_MUST_EXIST) {
                /* Ignore VMSTATE_VALIDATE bits; these don't get migrated */
                field++;
                continue;
            }
            if (!first) {
                fprintf(out_file, ",\n");
            }
            dump_vmstate_vmsf(out_file, field, indent + 2);
            field++;
            first = false;
        }
        assert(field->flags == VMS_END);
        fprintf(out_file, "\n%*s]", indent, "");
    }
    if (vmsd->subsections != NULL) {
        const VMStateDescription **subsection = vmsd->subsections;
        bool first;

        fprintf(out_file, ",\n%*s\"Subsections\": [\n", indent, "");
        first = true;
        while (*subsection != NULL) {
            if (!first) {
                fprintf(out_file, ",\n");
            }
            dump_vmstate_vmss(out_file, subsection, indent + 2);
            subsection++;
            first = false;
        }
        fprintf(out_file, "\n%*s]", indent, "");
    }
    fprintf(out_file, "\n%*s}", indent - 2, "");
}

static void dump_machine_type(FILE *out_file)
{
    MachineClass *mc;

    mc = MACHINE_GET_CLASS(current_machine);

    fprintf(out_file, "  \"vmschkmachine\": {\n");
    fprintf(out_file, "    \"Name\": \"%s\"\n", mc->name);
    fprintf(out_file, "  },\n");
}

void dump_vmstate_json_to_file(FILE *out_file)
{
    GSList *list, *elt;
    bool first;

    fprintf(out_file, "{\n");
    dump_machine_type(out_file);

    first = true;
    list = object_class_get_list(TYPE_DEVICE, true);
    for (elt = list; elt; elt = elt->next) {
        DeviceClass *dc = OBJECT_CLASS_CHECK(DeviceClass, elt->data,
                                             TYPE_DEVICE);
        const char *name;
        int indent = 2;

        if (!dc->vmsd) {
            continue;
        }

        if (!first) {
            fprintf(out_file, ",\n");
        }
        name = object_class_get_name(OBJECT_CLASS(dc));
        fprintf(out_file, "%*s\"%s\": {\n", indent, "", name);
        indent += 2;
        fprintf(out_file, "%*s\"Name\": \"%s\",\n", indent, "", name);
        fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "",
                dc->vmsd->version_id);
        fprintf(out_file, "%*s\"minimum_version_id\": %d,\n", indent, "",
                dc->vmsd->minimum_version_id);

        dump_vmstate_vmsd(out_file, dc->vmsd, indent, false);

        fprintf(out_file, "\n%*s}", indent - 2, "");
        first = false;
    }
    fprintf(out_file, "\n}\n");
    fclose(out_file);
    g_slist_free(list);
}

static uint32_t calculate_new_instance_id(const char *idstr)
{
    SaveStateEntry *se;
    uint32_t instance_id = 0;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (strcmp(idstr, se->idstr) == 0
            && instance_id <= se->instance_id) {
            instance_id = se->instance_id + 1;
        }
    }
    /* Make sure we never wrap around without noticing */
    assert(instance_id != VMSTATE_INSTANCE_ID_ANY);
    return instance_id;
}

static int calculate_compat_instance_id(const char *idstr)
{
    SaveStateEntry *se;
    int instance_id = 0;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->compat) {
            continue;
        }

        if (strcmp(idstr, se->compat->idstr) == 0
            && instance_id <= se->compat->instance_id) {
            instance_id = se->compat->instance_id + 1;
        }
    }
    return instance_id;
}

static inline MigrationPriority save_state_priority(SaveStateEntry *se)
{
    if (se->vmsd) {
        return se->vmsd->priority;
    }
    return MIG_PRI_DEFAULT;
}

static void savevm_state_handler_insert(SaveStateEntry *nse)
{
    MigrationPriority priority = save_state_priority(nse);
    SaveStateEntry *se;
    int i;

    assert(priority <= MIG_PRI_MAX);

    for (i = priority - 1; i >= 0; i--) {
        se = savevm_state.handler_pri_head[i];
        if (se != NULL) {
            assert(save_state_priority(se) < priority);
            break;
        }
    }

    if (i >= 0) {
        QTAILQ_INSERT_BEFORE(se, nse, entry);
    } else {
        QTAILQ_INSERT_TAIL(&savevm_state.handlers, nse, entry);
    }

    if (savevm_state.handler_pri_head[priority] == NULL) {
        savevm_state.handler_pri_head[priority] = nse;
    }
}

static void savevm_state_handler_remove(SaveStateEntry *se)
{
    SaveStateEntry *next;
    MigrationPriority priority = save_state_priority(se);

    if (se == savevm_state.handler_pri_head[priority]) {
        next = QTAILQ_NEXT(se, entry);
        if (next != NULL && save_state_priority(next) == priority) {
            savevm_state.handler_pri_head[priority] = next;
        } else {
            savevm_state.handler_pri_head[priority] = NULL;
        }
    }
    QTAILQ_REMOVE(&savevm_state.handlers, se, entry);
}
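
/*
 * Worked example (illustrative): the handlers list is kept sorted by
 * descending priority and handler_pri_head[] tracks the first entry of
 * each priority class.  With A(pri 2), B(pri 2) and C(pri 0) registered:
 *
 *   handlers:         A(2) -> B(2) -> C(0)
 *   handler_pri_head: [2] = A, [0] = C
 *
 * Inserting N with priority 1 scans the heads of the lower priorities
 * (here just priority 0), finds C, and inserts N before it:
 *
 *   handlers:         A(2) -> B(2) -> N(1) -> C(0)
 *   handler_pri_head: [2] = A, [1] = N, [0] = C
 */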

/* TODO: Individual devices generally have very little idea about the rest
   of the system, so instance_id should be removed/replaced.
   Meanwhile pass -1 as instance_id if you do not already have a clearly
   distinguishing id for all instances of your device class. */
int register_savevm_live(const char *idstr,
                         uint32_t instance_id,
                         int version_id,
                         const SaveVMHandlers *ops,
                         void *opaque)
{
    SaveStateEntry *se;

    se = g_new0(SaveStateEntry, 1);
    se->version_id = version_id;
    se->section_id = savevm_state.global_section_id++;
    se->ops = ops;
    se->opaque = opaque;
    se->vmsd = NULL;
    /* if this is a live_savevm then set is_ram */
    if (ops->save_setup != NULL) {
        se->is_ram = 1;
    }

    pstrcat(se->idstr, sizeof(se->idstr), idstr);

    if (instance_id == VMSTATE_INSTANCE_ID_ANY) {
        se->instance_id = calculate_new_instance_id(se->idstr);
    } else {
        se->instance_id = instance_id;
    }
    assert(!se->compat || se->instance_id == 0);
    savevm_state_handler_insert(se);
    return 0;
}
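
/*
 * Hypothetical registration sketch (not built): a subsystem with iterable
 * ("live") state registers a SaveVMHandlers table.  All names here are
 * placeholders; only the callback fields are taken from this file's usage.
 */
#if 0
static SaveVMHandlers my_live_handlers = {
    .save_setup                 = my_save_setup,   /* marks the entry is_ram */
    .save_live_iterate          = my_save_live_iterate,
    .save_live_complete_precopy = my_save_live_complete_precopy,
    .load_state                 = my_load_state,
    .save_cleanup               = my_save_cleanup,
};

register_savevm_live("my-subsys", VMSTATE_INSTANCE_ID_ANY, 1,
                     &my_live_handlers, opaque);
#endif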

void unregister_savevm(VMStateIf *obj, const char *idstr, void *opaque)
{
    SaveStateEntry *se, *new_se;
    char id[256] = "";

    if (obj) {
        char *oid = vmstate_if_get_id(obj);
        if (oid) {
            pstrcpy(id, sizeof(id), oid);
            pstrcat(id, sizeof(id), "/");
            g_free(oid);
        }
    }
    pstrcat(id, sizeof(id), idstr);

    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) {
        if (strcmp(se->idstr, id) == 0 && se->opaque == opaque) {
            savevm_state_handler_remove(se);
            g_free(se->compat);
            g_free(se);
        }
    }
}

/*
 * Perform some basic checks on vmsd's at registration
 * time.
 */
static void vmstate_check(const VMStateDescription *vmsd)
{
    const VMStateField *field = vmsd->fields;
    const VMStateDescription **subsection = vmsd->subsections;

    if (field) {
        while (field->name) {
            if (field->flags & (VMS_STRUCT | VMS_VSTRUCT)) {
                /* Recurse to sub structures */
                vmstate_check(field->vmsd);
            }
            /* Carry on */
            field++;
        }
        /* Check for the end of field list canary */
        if (field->flags != VMS_END) {
            error_report("VMSTATE not ending with VMS_END: %s", vmsd->name);
            g_assert_not_reached();
        }
    }

    while (subsection && *subsection) {
        /*
         * The name of a subsection should start with the name of the
         * current object.
         */
        assert(!strncmp(vmsd->name, (*subsection)->name, strlen(vmsd->name)));
        vmstate_check(*subsection);
        subsection++;
    }
}

int vmstate_register_with_alias_id(VMStateIf *obj, uint32_t instance_id,
                                   const VMStateDescription *vmsd,
                                   void *opaque, int alias_id,
                                   int required_for_version,
                                   Error **errp)
{
    SaveStateEntry *se;

    /* If this triggers, alias support can be dropped for the vmsd. */
    assert(alias_id == -1 || required_for_version >= vmsd->minimum_version_id);

    se = g_new0(SaveStateEntry, 1);
    se->version_id = vmsd->version_id;
    se->section_id = savevm_state.global_section_id++;
    se->opaque = opaque;
    se->vmsd = vmsd;
    se->alias_id = alias_id;

    if (obj) {
        char *id = vmstate_if_get_id(obj);
        if (id) {
            if (snprintf(se->idstr, sizeof(se->idstr), "%s/", id) >=
                sizeof(se->idstr)) {
                error_setg(errp, "Path too long for VMState (%s)", id);
                g_free(id);
                g_free(se);

                return -1;
            }
            g_free(id);

            se->compat = g_new0(CompatEntry, 1);
            pstrcpy(se->compat->idstr, sizeof(se->compat->idstr), vmsd->name);
            se->compat->instance_id = instance_id == VMSTATE_INSTANCE_ID_ANY ?
                calculate_compat_instance_id(vmsd->name) : instance_id;
            instance_id = VMSTATE_INSTANCE_ID_ANY;
        }
    }
    pstrcat(se->idstr, sizeof(se->idstr), vmsd->name);

    if (instance_id == VMSTATE_INSTANCE_ID_ANY) {
        se->instance_id = calculate_new_instance_id(se->idstr);
    } else {
        se->instance_id = instance_id;
    }

    /* Perform a recursive sanity check during the test runs */
    if (qtest_enabled()) {
        vmstate_check(vmsd);
    }
    assert(!se->compat || se->instance_id == 0);
    savevm_state_handler_insert(se);
    return 0;
}

void vmstate_unregister(VMStateIf *obj, const VMStateDescription *vmsd,
                        void *opaque)
{
    SaveStateEntry *se, *new_se;

    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) {
        if (se->vmsd == vmsd && se->opaque == opaque) {
            savevm_state_handler_remove(se);
            g_free(se->compat);
            g_free(se);
        }
    }
}

static int vmstate_load(QEMUFile *f, SaveStateEntry *se)
{
    trace_vmstate_load(se->idstr, se->vmsd ? se->vmsd->name : "(old)");
    if (!se->vmsd) { /* Old style */
        return se->ops->load_state(f, se->opaque, se->load_version_id);
    }
    return vmstate_load_state(f, se->vmsd, se->opaque, se->load_version_id);
}

static void vmstate_save_old_style(QEMUFile *f, SaveStateEntry *se,
                                   JSONWriter *vmdesc)
{
    int64_t old_offset, size;

    old_offset = qemu_file_total_transferred_fast(f);
    se->ops->save_state(f, se->opaque);
    size = qemu_file_total_transferred_fast(f) - old_offset;

    if (vmdesc) {
        json_writer_int64(vmdesc, "size", size);
        json_writer_start_array(vmdesc, "fields");
        json_writer_start_object(vmdesc, NULL);
        json_writer_str(vmdesc, "name", "data");
        json_writer_int64(vmdesc, "size", size);
        json_writer_str(vmdesc, "type", "buffer");
        json_writer_end_object(vmdesc);
        json_writer_end_array(vmdesc);
    }
}

/*
 * Write the header for a device section (QEMU_VM_SECTION START/END/PART/FULL)
 */
static void save_section_header(QEMUFile *f, SaveStateEntry *se,
                                uint8_t section_type)
{
    qemu_put_byte(f, section_type);
    qemu_put_be32(f, se->section_id);

    if (section_type == QEMU_VM_SECTION_FULL ||
        section_type == QEMU_VM_SECTION_START) {
        /* ID string */
        size_t len = strlen(se->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)se->idstr, len);

        qemu_put_be32(f, se->instance_id);
        qemu_put_be32(f, se->version_id);
    }
}

/*
 * Write a footer onto device sections that catches cases of misformatted
 * device sections.
 */
static void save_section_footer(QEMUFile *f, SaveStateEntry *se)
{
    if (migrate_get_current()->send_section_footer) {
        qemu_put_byte(f, QEMU_VM_SECTION_FOOTER);
        qemu_put_be32(f, se->section_id);
    }
}
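
/*
 * Illustrative wire layout (derived from save_section_header() and
 * save_section_footer() above): a QEMU_VM_SECTION_FULL section for a
 * hypothetical "my-dev", instance 0, version 1:
 *
 *   byte       QEMU_VM_SECTION_FULL
 *   be32       section_id
 *   byte       6                       strlen("my-dev")
 *   6 x byte   "my-dev"
 *   be32       0                       instance_id
 *   be32       1                       version_id
 *   ...        device payload
 *   byte       QEMU_VM_SECTION_FOOTER  only if send_section_footer
 *   be32       section_id
 *
 * SECTION_PART/END headers carry only the type byte and the section_id.
 */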

static int vmstate_save(QEMUFile *f, SaveStateEntry *se, JSONWriter *vmdesc)
{
    int ret;

    if ((!se->ops || !se->ops->save_state) && !se->vmsd) {
        return 0;
    }
    if (se->vmsd && !vmstate_save_needed(se->vmsd, se->opaque)) {
        trace_savevm_section_skip(se->idstr, se->section_id);
        return 0;
    }

    trace_savevm_section_start(se->idstr, se->section_id);
    save_section_header(f, se, QEMU_VM_SECTION_FULL);
    if (vmdesc) {
        json_writer_start_object(vmdesc, NULL);
        json_writer_str(vmdesc, "name", se->idstr);
        json_writer_int64(vmdesc, "instance_id", se->instance_id);
    }

    trace_vmstate_save(se->idstr, se->vmsd ? se->vmsd->name : "(old)");
    if (!se->vmsd) {
        vmstate_save_old_style(f, se, vmdesc);
    } else {
        ret = vmstate_save_state(f, se->vmsd, se->opaque, vmdesc);
        if (ret) {
            return ret;
        }
    }

    trace_savevm_section_end(se->idstr, se->section_id, 0);
    save_section_footer(f, se);
    if (vmdesc) {
        json_writer_end_object(vmdesc);
    }
    return 0;
}

/**
 * qemu_savevm_command_send: Send a 'QEMU_VM_COMMAND' type element with the
 * command and associated data.
 *
 * @f: File to send command on
 * @command: Command type to send
 * @len: Length of associated data
 * @data: Data associated with command.
 */
static void qemu_savevm_command_send(QEMUFile *f,
                                     enum qemu_vm_cmd command,
                                     uint16_t len,
                                     uint8_t *data)
{
    trace_savevm_command_send(command, len);
    qemu_put_byte(f, QEMU_VM_COMMAND);
    qemu_put_be16(f, (uint16_t)command);
    qemu_put_be16(f, len);
    qemu_put_buffer(f, data, len);
    qemu_fflush(f);
}

void qemu_savevm_send_colo_enable(QEMUFile *f)
{
    trace_savevm_send_colo_enable();
    qemu_savevm_command_send(f, MIG_CMD_ENABLE_COLO, 0, NULL);
}

void qemu_savevm_send_ping(QEMUFile *f, uint32_t value)
{
    uint32_t buf;

    trace_savevm_send_ping(value);
    buf = cpu_to_be32(value);
    qemu_savevm_command_send(f, MIG_CMD_PING, sizeof(value), (uint8_t *)&buf);
}

void qemu_savevm_send_open_return_path(QEMUFile *f)
{
    trace_savevm_send_open_return_path();
    qemu_savevm_command_send(f, MIG_CMD_OPEN_RETURN_PATH, 0, NULL);
}

/* We have a buffer of data to send; we don't want that all to be loaded
 * by the command itself, so the command contains just the length of the
 * extra buffer that we then send straight after it.
 * TODO: Must be a better way to organise that
 *
 * Returns:
 *    0 on success
 *    -ve on error
 */
int qemu_savevm_send_packaged(QEMUFile *f, const uint8_t *buf, size_t len)
{
    uint32_t tmp;

    if (len > MAX_VM_CMD_PACKAGED_SIZE) {
        error_report("%s: Unreasonably large packaged state: %zu",
                     __func__, len);
        return -1;
    }

    tmp = cpu_to_be32(len);

    trace_qemu_savevm_send_packaged();
    qemu_savevm_command_send(f, MIG_CMD_PACKAGED, 4, (uint8_t *)&tmp);

    qemu_put_buffer(f, buf, len);

    return 0;
}
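
/*
 * Illustrative layout of a PACKAGED element (from the function above): the
 * command carries only a be32 byte count, and the wrapped stream follows
 * immediately outside the command framing:
 *
 *   byte        QEMU_VM_COMMAND
 *   be16        MIG_CMD_PACKAGED
 *   be16        4               command data length
 *   be32        len             size of the package
 *   len x byte                  the packaged sub-stream
 */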

/* Send prior to any postcopy transfer */
void qemu_savevm_send_postcopy_advise(QEMUFile *f)
{
    if (migrate_postcopy_ram()) {
        uint64_t tmp[2];
        tmp[0] = cpu_to_be64(ram_pagesize_summary());
        tmp[1] = cpu_to_be64(qemu_target_page_size());

        trace_qemu_savevm_send_postcopy_advise();
        qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE,
                                 16, (uint8_t *)tmp);
    } else {
        qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE, 0, NULL);
    }
}

/* Sent prior to starting the destination running in postcopy; discard pages
 * that have already been sent but redirtied on the source.
 * CMD_POSTCOPY_RAM_DISCARD consists of:
 *      byte   version (0)
 *      byte   Length of name field (not including 0)
 *  n x byte   RAM block name
 *      byte   0 terminator (just for safety)
 *  n x        Byte ranges within the named RAMBlock
 *      be64   Start of the range
 *      be64   Length
 *
 *  name:  RAMBlock name that these entries are part of
 *  len: Number of page entries
 *  start_list: 'len' addresses
 *  length_list: 'len' addresses
 */
void qemu_savevm_send_postcopy_ram_discard(QEMUFile *f, const char *name,
                                           uint16_t len,
                                           uint64_t *start_list,
                                           uint64_t *length_list)
{
    uint8_t *buf;
    uint16_t tmplen;
    uint16_t t;
    size_t name_len = strlen(name);

    trace_qemu_savevm_send_postcopy_ram_discard(name, len);
    assert(name_len < 256);
    buf = g_malloc0(1 + 1 + name_len + 1 + (8 + 8) * len);
    buf[0] = postcopy_ram_discard_version;
    buf[1] = name_len;
    memcpy(buf + 2, name, name_len);
    tmplen = 2 + name_len;
    buf[tmplen++] = '\0';

    for (t = 0; t < len; t++) {
        stq_be_p(buf + tmplen, start_list[t]);
        tmplen += 8;
        stq_be_p(buf + tmplen, length_list[t]);
        tmplen += 8;
    }
    qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_RAM_DISCARD, tmplen, buf);
    g_free(buf);
}
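
/*
 * Worked example (illustrative): one discard message for RAMBlock "pc.ram"
 * with a single range starting at 0x200000 of length 0x4000 encodes as a
 * 25-byte command payload:
 *
 *   byte       0           version
 *   byte       6           strlen("pc.ram")
 *   6 x byte   "pc.ram"
 *   byte       0           terminator
 *   be64       0x200000    start of range
 *   be64       0x4000      length
 *
 * i.e. 1 + 1 + 6 + 1 + 16 = 25 bytes, matching the g_malloc0() sizing above.
 */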

/* Get the destination into a state where it can receive postcopy data. */
void qemu_savevm_send_postcopy_listen(QEMUFile *f)
{
    trace_savevm_send_postcopy_listen();
    qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_LISTEN, 0, NULL);
}

/* Kick the destination into running */
void qemu_savevm_send_postcopy_run(QEMUFile *f)
{
    trace_savevm_send_postcopy_run();
    qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_RUN, 0, NULL);
}

void qemu_savevm_send_postcopy_resume(QEMUFile *f)
{
    trace_savevm_send_postcopy_resume();
    qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_RESUME, 0, NULL);
}

void qemu_savevm_send_recv_bitmap(QEMUFile *f, char *block_name)
{
    size_t len;
    char buf[256];

    trace_savevm_send_recv_bitmap(block_name);

    buf[0] = len = strlen(block_name);
    memcpy(buf + 1, block_name, len);

    qemu_savevm_command_send(f, MIG_CMD_RECV_BITMAP, len + 1, (uint8_t *)buf);
}

bool qemu_savevm_state_blocked(Error **errp)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->unmigratable) {
            error_setg(errp, "State blocked by non-migratable device '%s'",
                       se->idstr);
            return true;
        }
    }
    return false;
}

void qemu_savevm_non_migratable_list(strList **reasons)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->unmigratable) {
            QAPI_LIST_PREPEND(*reasons,
                              g_strdup_printf("non-migratable device: %s",
                                              se->idstr));
        }
    }
}

void qemu_savevm_state_header(QEMUFile *f)
{
    trace_savevm_state_header();
    qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
    qemu_put_be32(f, QEMU_VM_FILE_VERSION);

    if (migrate_get_current()->send_configuration) {
        qemu_put_byte(f, QEMU_VM_CONFIGURATION);
        vmstate_save_state(f, &vmstate_configuration, &savevm_state, 0);
    }
}

bool qemu_savevm_state_guest_unplug_pending(void)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->dev_unplug_pending &&
            se->vmsd->dev_unplug_pending(se->opaque)) {
            return true;
        }
    }

    return false;
}
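
/*
 * Illustrative payload of MIG_CMD_RECV_BITMAP (from
 * qemu_savevm_send_recv_bitmap() above), for block name "pc.ram":
 *
 *   byte       6           strlen("pc.ram"), no NUL on the wire
 *   6 x byte   "pc.ram"
 *
 * The command's length field is len + 1 = 7.
 */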

void qemu_savevm_state_setup(QEMUFile *f)
{
    MigrationState *ms = migrate_get_current();
    SaveStateEntry *se;
    Error *local_err = NULL;
    int ret;

    ms->vmdesc = json_writer_new(false);
    json_writer_start_object(ms->vmdesc, NULL);
    json_writer_int64(ms->vmdesc, "page_size", qemu_target_page_size());
    json_writer_start_array(ms->vmdesc, "devices");

    trace_savevm_state_setup();
    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->early_setup) {
            ret = vmstate_save(f, se, ms->vmdesc);
            if (ret) {
                qemu_file_set_error(f, ret);
                break;
            }
            continue;
        }

        if (!se->ops || !se->ops->save_setup) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        save_section_header(f, se, QEMU_VM_SECTION_START);

        ret = se->ops->save_setup(f, se->opaque);
        save_section_footer(f, se);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            break;
        }
    }

    if (precopy_notify(PRECOPY_NOTIFY_SETUP, &local_err)) {
        error_report_err(local_err);
    }
}

int qemu_savevm_state_resume_prepare(MigrationState *s)
{
    SaveStateEntry *se;
    int ret;

    trace_savevm_state_resume_prepare();

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->resume_prepare) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        ret = se->ops->resume_prepare(s, se->opaque);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

/*
 * This function has three return values:
 *   negative: there was an error, and we have -errno.
 *   0 : We haven't finished, the caller has to go again
 *   1 : We have finished, we can go to the complete phase
 */
int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy)
{
    SaveStateEntry *se;
    int ret = 1;

    trace_savevm_state_iterate();
    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->save_live_iterate) {
            continue;
        }
        if (se->ops->is_active &&
            !se->ops->is_active(se->opaque)) {
            continue;
        }
        if (se->ops->is_active_iterate &&
            !se->ops->is_active_iterate(se->opaque)) {
            continue;
        }
        /*
         * In the postcopy phase, any device that doesn't know how to
         * do postcopy should have saved its state in the _complete
         * call that's already run; it might get confused if we call
         * iterate afterwards.
         */
        if (postcopy &&
            !(se->ops->has_postcopy && se->ops->has_postcopy(se->opaque))) {
            continue;
        }
        if (qemu_file_rate_limit(f)) {
            return 0;
        }
        trace_savevm_section_start(se->idstr, se->section_id);

        save_section_header(f, se, QEMU_VM_SECTION_PART);

        ret = se->ops->save_live_iterate(f, se->opaque);
        trace_savevm_section_end(se->idstr, se->section_id, ret);
        save_section_footer(f, se);

        if (ret < 0) {
            error_report("failed to save SaveStateEntry with id(name): "
                         "%d(%s): %d",
                         se->section_id, se->idstr, ret);
            qemu_file_set_error(f, ret);
        }
        if (ret <= 0) {
            /* Do not proceed to the next vmstate before this one reported
               completion of the current stage. This serializes the migration
               and reduces the probability that a faster changing state is
               synchronized over and over again. */
            break;
        }
    }
    return ret;
}

static bool should_send_vmdesc(void)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    bool in_postcopy = migration_in_postcopy();
    return !machine->suppress_vmdesc && !in_postcopy;
}

/*
 * Calls the save_live_complete_postcopy methods
 * causing the last few pages to be sent immediately and doing any associated
 * cleanup.
 * Note postcopy also calls qemu_savevm_state_complete_precopy to complete
 * all the other devices, but that happens at the point we switch to postcopy.
 */
void qemu_savevm_state_complete_postcopy(QEMUFile *f)
{
    SaveStateEntry *se;
    int ret;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->save_live_complete_postcopy) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        trace_savevm_section_start(se->idstr, se->section_id);
        /* Section type */
        qemu_put_byte(f, QEMU_VM_SECTION_END);
        qemu_put_be32(f, se->section_id);

        ret = se->ops->save_live_complete_postcopy(f, se->opaque);
        trace_savevm_section_end(se->idstr, se->section_id, ret);
        save_section_footer(f, se);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            return;
        }
    }

    qemu_put_byte(f, QEMU_VM_EOF);
    qemu_fflush(f);
}

static
int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy)
{
    SaveStateEntry *se;
    int ret;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops ||
            (in_postcopy && se->ops->has_postcopy &&
             se->ops->has_postcopy(se->opaque)) ||
            !se->ops->save_live_complete_precopy) {
            continue;
        }

        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        trace_savevm_section_start(se->idstr, se->section_id);

        save_section_header(f, se, QEMU_VM_SECTION_END);

        ret = se->ops->save_live_complete_precopy(f, se->opaque);
        trace_savevm_section_end(se->idstr, se->section_id, ret);
        save_section_footer(f, se);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            return -1;
        }
    }

    return 0;
}

int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
                                                    bool in_postcopy,
                                                    bool inactivate_disks)
{
    MigrationState *ms = migrate_get_current();
    JSONWriter *vmdesc = ms->vmdesc;
    int vmdesc_len;
    SaveStateEntry *se;
    int ret;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->early_setup) {
            /* Already saved during qemu_savevm_state_setup(). */
            continue;
        }

        ret = vmstate_save(f, se, vmdesc);
        if (ret) {
            qemu_file_set_error(f, ret);
            return ret;
        }
    }

    if (inactivate_disks) {
        /* Inactivate before sending QEMU_VM_EOF so that the
         * bdrv_activate_all() on the other end won't fail. */
        ret = bdrv_inactivate_all();
        if (ret) {
            error_report("%s: bdrv_inactivate_all() failed (%d)",
                         __func__, ret);
            qemu_file_set_error(f, ret);
            return ret;
        }
    }
    if (!in_postcopy) {
        /* Postcopy stream will still be going */
        qemu_put_byte(f, QEMU_VM_EOF);
    }

    json_writer_end_array(vmdesc);
    json_writer_end_object(vmdesc);
    vmdesc_len = strlen(json_writer_get(vmdesc));

    if (should_send_vmdesc()) {
        qemu_put_byte(f, QEMU_VM_VMDESCRIPTION);
        qemu_put_be32(f, vmdesc_len);
        qemu_put_buffer(f, (uint8_t *)json_writer_get(vmdesc), vmdesc_len);
    }

    /* Free it now to detect any inconsistencies. */
    json_writer_free(vmdesc);
    ms->vmdesc = NULL;

    return 0;
}
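
/*
 * Illustrative shape of the vmdesc JSON assembled in
 * qemu_savevm_state_setup() and vmstate_save() and emitted above, shown
 * here for a hypothetical old-style handler (see vmstate_save_old_style()):
 *
 * {
 *   "page_size": 4096,
 *   "devices": [
 *     {
 *       "name": "my-dev",
 *       "instance_id": 0,
 *       "size": 24,
 *       "fields": [
 *         { "name": "data", "size": 24, "type": "buffer" }
 *       ]
 *     }
 *   ]
 * }
 *
 * On the wire it is framed as a QEMU_VM_VMDESCRIPTION element: a type byte,
 * a be32 length, then the JSON text.
 */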

int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
                                       bool inactivate_disks)
{
    int ret;
    Error *local_err = NULL;
    bool in_postcopy = migration_in_postcopy();

    if (precopy_notify(PRECOPY_NOTIFY_COMPLETE, &local_err)) {
        error_report_err(local_err);
    }

    trace_savevm_state_complete_precopy();

    cpu_synchronize_all_states();

    if (!in_postcopy || iterable_only) {
        ret = qemu_savevm_state_complete_precopy_iterable(f, in_postcopy);
        if (ret) {
            return ret;
        }
    }

    if (iterable_only) {
        goto flush;
    }

    ret = qemu_savevm_state_complete_precopy_non_iterable(f, in_postcopy,
                                                          inactivate_disks);
    if (ret) {
        return ret;
    }

flush:
    qemu_fflush(f);
    return 0;
}

/* Give an estimate of the amount left to be transferred; the result is split
 * into the amount for units that can and for units that can't do postcopy.
 */
void qemu_savevm_state_pending_estimate(uint64_t *res_precopy_only,
                                        uint64_t *res_compatible,
                                        uint64_t *res_postcopy_only)
{
    SaveStateEntry *se;

    *res_precopy_only = 0;
    *res_compatible = 0;
    *res_postcopy_only = 0;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->state_pending_estimate) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        se->ops->state_pending_estimate(se->opaque,
                                        res_precopy_only, res_compatible,
                                        res_postcopy_only);
    }
}

void qemu_savevm_state_pending_exact(uint64_t *res_precopy_only,
                                     uint64_t *res_compatible,
                                     uint64_t *res_postcopy_only)
{
    SaveStateEntry *se;

    *res_precopy_only = 0;
    *res_compatible = 0;
    *res_postcopy_only = 0;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->state_pending_exact) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        se->ops->state_pending_exact(se->opaque,
                                     res_precopy_only, res_compatible,
                                     res_postcopy_only);
    }
}

void qemu_savevm_state_cleanup(void)
{
    SaveStateEntry *se;
    Error *local_err = NULL;

    if (precopy_notify(PRECOPY_NOTIFY_CLEANUP, &local_err)) {
        error_report_err(local_err);
    }

    trace_savevm_state_cleanup();
    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->ops && se->ops->save_cleanup) {
            se->ops->save_cleanup(se->opaque);
        }
    }
}

static int qemu_savevm_state(QEMUFile *f, Error **errp)
{
    int ret;
    MigrationState *ms = migrate_get_current();
    MigrationStatus status;

    if (migration_is_running(ms->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return -EINVAL;
    }

    if (migrate_use_block()) {
        error_setg(errp, "Block migration and snapshots are incompatible");
        return -EINVAL;
    }

    migrate_init(ms);
    memset(&ram_counters, 0, sizeof(ram_counters));
    memset(&compression_counters, 0, sizeof(compression_counters));
    ms->to_dst_file = f;

    qemu_mutex_unlock_iothread();
    qemu_savevm_state_header(f);
    qemu_savevm_state_setup(f);
    qemu_mutex_lock_iothread();

    while (qemu_file_get_error(f) == 0) {
        if (qemu_savevm_state_iterate(f, false) > 0) {
            break;
        }
    }

    ret = qemu_file_get_error(f);
    if (ret == 0) {
        qemu_savevm_state_complete_precopy(f, false, false);
        ret = qemu_file_get_error(f);
    }
    qemu_savevm_state_cleanup();
    if (ret != 0) {
        error_setg_errno(errp, -ret, "Error while writing VM state");
    }

    if (ret != 0) {
        status = MIGRATION_STATUS_FAILED;
    } else {
        status = MIGRATION_STATUS_COMPLETED;
    }
    migrate_set_state(&ms->state, MIGRATION_STATUS_SETUP, status);

    /* f is an outer parameter; it should not stay in the global migration
     * state after this function finishes */
    ms->to_dst_file = NULL;

    return ret;
}

void qemu_savevm_live_state(QEMUFile *f)
{
    /* save QEMU_VM_SECTION_END section */
    qemu_savevm_state_complete_precopy(f, true, false);
    qemu_put_byte(f, QEMU_VM_EOF);
}

int qemu_save_device_state(QEMUFile *f)
{
    SaveStateEntry *se;

    if (!migration_in_colo_state()) {
        qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
        qemu_put_be32(f, QEMU_VM_FILE_VERSION);
    }
    cpu_synchronize_all_states();

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        int ret;

        if (se->is_ram) {
            continue;
        }
        ret = vmstate_save(f, se, NULL);
        if (ret) {
            return ret;
        }
    }

    qemu_put_byte(f, QEMU_VM_EOF);

    return qemu_file_get_error(f);
}

static SaveStateEntry *find_se(const char *idstr, uint32_t instance_id)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!strcmp(se->idstr, idstr) &&
            (instance_id == se->instance_id ||
             instance_id == se->alias_id))
            return se;
        /* Migrating from an older version? */
        if (strstr(se->idstr, idstr) && se->compat) {
            if (!strcmp(se->compat->idstr, idstr) &&
                (instance_id == se->compat->instance_id ||
                 instance_id == se->alias_id))
                return se;
        }
    }
    return NULL;
}

enum LoadVMExitCodes {
    /* Allow a command to quit all layers of nested loadvm loops */
    LOADVM_QUIT = 1,
};

/* ------ incoming postcopy messages ------ */
/* 'advise' arrives before any transfers just to tell us that a postcopy
 * *might* happen - it might be skipped if precopy transferred everything
 * quickly.
 */
static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis,
                                         uint16_t len)
{
    PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_ADVISE);
    uint64_t remote_pagesize_summary, local_pagesize_summary, remote_tps;
    size_t page_size = qemu_target_page_size();
    Error *local_err = NULL;

    trace_loadvm_postcopy_handle_advise();
    if (ps != POSTCOPY_INCOMING_NONE) {
        error_report("CMD_POSTCOPY_ADVISE in wrong postcopy state (%d)", ps);
        return -1;
    }

    switch (len) {
    case 0:
        if (migrate_postcopy_ram()) {
            error_report("RAM postcopy is enabled but have 0 byte advise");
            return -EINVAL;
        }
        return 0;
    case 8 + 8:
        if (!migrate_postcopy_ram()) {
            error_report("RAM postcopy is disabled but have 16 byte advise");
            return -EINVAL;
        }
        break;
    default:
        error_report("CMD_POSTCOPY_ADVISE invalid length (%d)", len);
        return -EINVAL;
    }

    if (!postcopy_ram_supported_by_host(mis)) {
        postcopy_state_set(POSTCOPY_INCOMING_NONE);
        return -1;
    }

    remote_pagesize_summary = qemu_get_be64(mis->from_src_file);
    local_pagesize_summary = ram_pagesize_summary();

    if (remote_pagesize_summary != local_pagesize_summary) {
        /*
         * This detects two potential causes of mismatch:
         *   a) A mismatch in host page sizes
         *      Some combinations of mismatch are probably possible but it
         *      gets a bit more complicated.  In particular we need to place
         *      whole host pages on the dest at once, and we need to ensure
         *      that we handle dirtying to make sure we never end up sending
         *      part of a hostpage on its own.
         *   b) The use of different huge page sizes on source/destination.
         *      A finer-grained test is performed during RAM block migration,
         *      but this test here causes a nice early clear failure, and
         *      also fails when passed to an older qemu that doesn't
         *      do huge pages.
         */
        error_report("Postcopy needs matching RAM page sizes (s=%" PRIx64
                     " d=%" PRIx64 ")",
                     remote_pagesize_summary, local_pagesize_summary);
        return -1;
    }

    remote_tps = qemu_get_be64(mis->from_src_file);
    if (remote_tps != page_size) {
        /*
         * Again, some differences could be dealt with, but for now keep it
         * simple.
         */
        error_report("Postcopy needs matching target page sizes (s=%d d=%zd)",
                     (int)remote_tps, page_size);
        return -1;
    }

    if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_ADVISE, &local_err)) {
        error_report_err(local_err);
        return -1;
    }

    if (ram_postcopy_incoming_init(mis)) {
        return -1;
    }

    return 0;
}
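
/*
 * Illustrative mismatch case for the page-size check above (values assumed):
 * a source whose RAM blocks all use 4K pages reports a summary of 0x1000,
 * while a destination backing guest RAM with 2M hugepages would also fold
 * 0x200000 into its summary, so the advise stage fails early with
 * "Postcopy needs matching RAM page sizes (s=1000 d=201000)".
 */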

/* After postcopy we will be told to throw some pages away since they're
 * dirty and will have to be demand fetched.  Must happen before CPU is
 * started.
 * There can be 0..many of these messages, each encoding multiple pages.
 */
static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis,
                                              uint16_t len)
{
    int tmp;
    char ramid[256];
    PostcopyState ps = postcopy_state_get();

    trace_loadvm_postcopy_ram_handle_discard();

    switch (ps) {
    case POSTCOPY_INCOMING_ADVISE:
        /* 1st discard */
        tmp = postcopy_ram_prepare_discard(mis);
        if (tmp) {
            return tmp;
        }
        break;

    case POSTCOPY_INCOMING_DISCARD:
        /* Expected state */
        break;

    default:
        error_report("CMD_POSTCOPY_RAM_DISCARD in wrong postcopy state (%d)",
                     ps);
        return -1;
    }
    /* We're expecting a
     *    Version (0)
     *    a RAM ID string (length byte, name, 0 term)
     *    then at least 1 16 byte chunk
     */
    if (len < (1 + 1 + 1 + 1 + 2 * 8)) {
        error_report("CMD_POSTCOPY_RAM_DISCARD invalid length (%d)", len);
        return -1;
    }

    tmp = qemu_get_byte(mis->from_src_file);
    if (tmp != postcopy_ram_discard_version) {
        error_report("CMD_POSTCOPY_RAM_DISCARD invalid version (%d)", tmp);
        return -1;
    }

    if (!qemu_get_counted_string(mis->from_src_file, ramid)) {
        error_report("CMD_POSTCOPY_RAM_DISCARD Failed to read RAMBlock ID");
        return -1;
    }
    tmp = qemu_get_byte(mis->from_src_file);
    if (tmp != 0) {
        error_report("CMD_POSTCOPY_RAM_DISCARD missing nil (%d)", tmp);
        return -1;
    }

    len -= 3 + strlen(ramid);
    if (len % 16) {
        error_report("CMD_POSTCOPY_RAM_DISCARD invalid length (%d)", len);
        return -1;
    }
    trace_loadvm_postcopy_ram_handle_discard_header(ramid, len);
    while (len) {
        uint64_t start_addr, block_length;
        start_addr = qemu_get_be64(mis->from_src_file);
        block_length = qemu_get_be64(mis->from_src_file);

        len -= 16;
        int ret = ram_discard_range(ramid, start_addr, block_length);
        if (ret) {
            return ret;
        }
    }
    trace_loadvm_postcopy_ram_handle_discard_end();

    return 0;
}
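
/*
 * Worked example (illustrative) of the length bookkeeping above: a discard
 * message for "pc.ram" (6 chars) carrying two ranges arrives with
 * len = 1 + 1 + 6 + 1 + 2 * 16 = 41.  After the version byte, counted
 * string and NUL terminator are consumed, len -= 3 + strlen("pc.ram")
 * leaves 32, an exact multiple of the 16-byte (start, length) records.
 */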

/*
 * Triggered by a postcopy_listen command; this thread takes over reading
 * the input stream, leaving the main thread free to carry on loading the rest
 * of the device state (from RAM).
 * (TODO: This could do with being in a postcopy file - but there again it's
 * just another input loop, not that postcopy specific)
 */
static void *postcopy_ram_listen_thread(void *opaque)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    QEMUFile *f = mis->from_src_file;
    int load_res;
    MigrationState *migr = migrate_get_current();

    object_ref(OBJECT(migr));

    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                                   MIGRATION_STATUS_POSTCOPY_ACTIVE);
    qemu_sem_post(&mis->thread_sync_sem);
    trace_postcopy_ram_listen_thread_start();

    rcu_register_thread();
    /*
     * Because we're a thread and not a coroutine we can't yield
     * in qemu_file, and thus we must be blocking now.
     */
    qemu_file_set_blocking(f, true);
    load_res = qemu_loadvm_state_main(f, mis);

    /*
     * This is tricky, but, mis->from_src_file can change after it
     * returns, if postcopy recovery happens.  In the future, we may
     * want a wrapper for the QEMUFile handle.
     */
    f = mis->from_src_file;

    /* And non-blocking again so we don't block in any cleanup */
    qemu_file_set_blocking(f, false);

    trace_postcopy_ram_listen_thread_exit();
    if (load_res < 0) {
        qemu_file_set_error(f, load_res);
        dirty_bitmap_mig_cancel_incoming();
        if (postcopy_state_get() == POSTCOPY_INCOMING_RUNNING &&
            !migrate_postcopy_ram() && migrate_dirty_bitmaps())
        {
            error_report("%s: loadvm failed during postcopy: %d. All states "
                         "are migrated except dirty bitmaps. Some dirty "
                         "bitmaps may be lost, and present migrated dirty "
                         "bitmaps are correctly migrated and valid.",
                         __func__, load_res);
            load_res = 0; /* prevent further exit() */
        } else {
            error_report("%s: loadvm failed: %d", __func__, load_res);
            migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                                           MIGRATION_STATUS_FAILED);
        }
    }
    if (load_res >= 0) {
        /*
         * This looks good, but it's possible that the device loading in the
         * main thread hasn't finished yet, and so we might not be in 'RUN'
         * state yet; wait for the end of the main thread.
         */
        qemu_event_wait(&mis->main_thread_load_event);
    }
    postcopy_ram_incoming_cleanup(mis);

    if (load_res < 0) {
        /*
         * If something went wrong then we have a bad state so exit;
         * depending how far we got it might be possible at this point
         * to leave the guest running and fire MCEs for pages that never
         * arrived as a desperate recovery step.
         */
        rcu_unregister_thread();
        exit(EXIT_FAILURE);
    }

    migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                                   MIGRATION_STATUS_COMPLETED);
    /*
     * If everything has worked fine, then the main thread has waited
     * for us to start, and we're the last use of the mis.
     * (If something broke then qemu will have to exit anyway since it's
     * got a bad migration state).
     */
    migration_incoming_state_destroy();
    qemu_loadvm_state_cleanup();

    rcu_unregister_thread();
    mis->have_listen_thread = false;
    postcopy_state_set(POSTCOPY_INCOMING_END);

    object_unref(OBJECT(migr));

    return NULL;
}
/* After this message we must be able to immediately receive postcopy data */
static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
{
    PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_LISTENING);
    Error *local_err = NULL;

    trace_loadvm_postcopy_handle_listen("enter");

    if (ps != POSTCOPY_INCOMING_ADVISE && ps != POSTCOPY_INCOMING_DISCARD) {
        error_report("CMD_POSTCOPY_LISTEN in wrong postcopy state (%d)", ps);
        return -1;
    }
    if (ps == POSTCOPY_INCOMING_ADVISE) {
        /*
         * A rare case: we entered listen without having to do any discards,
         * so do the setup that's normally done at the time of the 1st
         * discard.
         */
        if (migrate_postcopy_ram()) {
            postcopy_ram_prepare_discard(mis);
        }
    }

    trace_loadvm_postcopy_handle_listen("after discard");

    /*
     * Sensitise RAM - can now generate requests for blocks that don't exist.
     * However, at this point the CPU shouldn't be running, and the IO
     * shouldn't be doing anything yet, so don't actually expect requests.
     */
    if (migrate_postcopy_ram()) {
        if (postcopy_ram_incoming_setup(mis)) {
            postcopy_ram_incoming_cleanup(mis);
            return -1;
        }
    }

    trace_loadvm_postcopy_handle_listen("after uffd");

    if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_LISTEN, &local_err)) {
        error_report_err(local_err);
        return -1;
    }

    mis->have_listen_thread = true;
    postcopy_thread_create(mis, &mis->listen_thread, "postcopy/listen",
                           postcopy_ram_listen_thread, QEMU_THREAD_DETACHED);
    trace_loadvm_postcopy_handle_listen("return");

    return 0;
}
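/*
 * A rough sketch of the incoming postcopy states as enforced by the handlers
 * in this file (each handler checks or advances PostcopyState):
 *
 *   POSTCOPY_INCOMING_ADVISE        after CMD_POSTCOPY_ADVISE
 *     -> POSTCOPY_INCOMING_DISCARD      on the 1st CMD_POSTCOPY_RAM_DISCARD
 *                                       (via postcopy_ram_prepare_discard())
 *     -> POSTCOPY_INCOMING_LISTENING    on CMD_POSTCOPY_LISTEN (discards are
 *                                       optional)
 *   POSTCOPY_INCOMING_LISTENING
 *     -> POSTCOPY_INCOMING_RUNNING      on CMD_POSTCOPY_RUN
 *   POSTCOPY_INCOMING_RUNNING
 *     -> POSTCOPY_INCOMING_END          when the listen thread finishes
 */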
static void loadvm_postcopy_handle_run_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    trace_loadvm_postcopy_handle_run_bh("enter");

    /*
     * TODO: we should move all of this lot into postcopy_ram.c or shared
     * code in migration.c
     */
    cpu_synchronize_all_post_init();

    trace_loadvm_postcopy_handle_run_bh("after cpu sync");

    qemu_announce_self(&mis->announce_timer, migrate_announce_params());

    trace_loadvm_postcopy_handle_run_bh("after announce");

    /*
     * Make sure all file formats throw away their mutable metadata.
     * If we get an error here, just don't restart the VM yet.
     */
    bdrv_activate_all(&local_err);
    if (local_err) {
        error_report_err(local_err);
        local_err = NULL;
        autostart = false;
    }

    trace_loadvm_postcopy_handle_run_bh("after invalidate cache");

    dirty_bitmap_mig_before_vm_start();

    if (autostart) {
        /* Hold onto your hats, starting the CPU */
        vm_start();
    } else {
        /* Leave it paused and let management decide when to start the CPU */
        runstate_set(RUN_STATE_PAUSED);
    }

    qemu_bh_delete(mis->bh);

    trace_loadvm_postcopy_handle_run_bh("return");
}

/* After all discards we can start running and asking for pages */
static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
{
    PostcopyState ps = postcopy_state_get();

    trace_loadvm_postcopy_handle_run();
    if (ps != POSTCOPY_INCOMING_LISTENING) {
        error_report("CMD_POSTCOPY_RUN in wrong postcopy state (%d)", ps);
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_RUNNING);
    mis->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, mis);
    qemu_bh_schedule(mis->bh);

    /*
     * We need to finish reading the stream from the package
     * and also stop reading anything more from the stream that loaded the
     * package (since it's now being read by the listener thread).
     * LOADVM_QUIT will quit all the layers of nested loadvm loops.
     */
    return LOADVM_QUIT;
}
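/*
 * To illustrate why LOADVM_QUIT has to unwind *nested* loops: during
 * postcopy the device state typically arrives inside MIG_CMD_PACKAGED, so
 * the call chain at the point CMD_POSTCOPY_RUN is processed is roughly
 *
 *   qemu_loadvm_state_main()          <- outer loop, on the live channel
 *     loadvm_process_command()        <- MIG_CMD_PACKAGED
 *       loadvm_handle_cmd_packaged()
 *         qemu_loadvm_state_main()    <- inner loop, on the buffered blob
 *           loadvm_process_command()  <- MIG_CMD_POSTCOPY_RUN
 *
 * Returning LOADVM_QUIT pops both loops: the inner one stops reading the
 * package, and the outer one stops reading the live channel, which is now
 * owned by the listen thread.
 */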
/* Must be called with the page_request_mutex held */
static gboolean postcopy_sync_page_req(gpointer key, gpointer value,
                                       gpointer data)
{
    MigrationIncomingState *mis = data;
    void *host_addr = (void *) key;
    ram_addr_t rb_offset;
    RAMBlock *rb;
    int ret;

    rb = qemu_ram_block_from_host(host_addr, true, &rb_offset);
    if (!rb) {
        /*
         * This should _never_ happen. However be nice for a migrating VM to
         * not crash/assert. Post an error (note: intended to not use *_once
         * because we do want to see all the illegal addresses; and this can
         * never be triggered by the guest so we're safe) and move on to the
         * next entry.
         */
        error_report("%s: illegal host addr %p", __func__, host_addr);
        /* Try the next entry */
        return FALSE;
    }

    ret = migrate_send_rp_message_req_pages(mis, rb, rb_offset);
    if (ret) {
        /* Please refer to the comment above. */
        error_report("%s: send rp message failed for addr %p",
                     __func__, host_addr);
        return FALSE;
    }

    trace_postcopy_page_req_sync(host_addr);

    return FALSE;
}

static void migrate_send_rp_req_pages_pending(MigrationIncomingState *mis)
{
    WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
        g_tree_foreach(mis->page_requested, postcopy_sync_page_req, mis);
    }
}

static int loadvm_postcopy_handle_resume(MigrationIncomingState *mis)
{
    if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: illegal resume received", __func__);
        /* Don't fail the load, only for this. */
        return 0;
    }

    /*
     * Reset last_rb before we resend any page req to the source again, since
     * the source should have it reset already.
     */
    mis->last_rb = NULL;

    /* This means the source VM is ready to resume the postcopy migration. */
    migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_RECOVER,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    trace_loadvm_postcopy_handle_resume();

    /* Tell source that "we are ready" */
    migrate_send_rp_resume_ack(mis, MIGRATION_RESUME_ACK_VALUE);

    /*
     * After a postcopy recovery, the source should have lost the postcopy
     * queue, or potentially the requested pages could have been lost during
     * the network down phase. Let's re-sync with the source VM by re-sending
     * all the pending pages that we eagerly need, so these threads won't get
     * blocked too long due to the recovery.
     *
     * Without this procedure, the faulted destination VM threads (waiting
     * for page requests right before the postcopy was interrupted) can keep
     * hanging until the pages are sent by the source during the background
     * copying of pages, or until another thread faults on the same address
     * by accident.
     */
    migrate_send_rp_req_pages_pending(mis);

    /*
     * It's time to switch state and release the fault thread to continue
     * servicing page faults. Note that this should be explicitly after the
     * above call to migrate_send_rp_req_pages_pending(). In short:
     * migrate_send_rp_message_req_pages() is not thread safe, yet.
     */
    qemu_sem_post(&mis->postcopy_pause_sem_fault);

    if (migrate_postcopy_preempt()) {
        /*
         * The preempt channel is created asynchronously; wait for it here
         * and make sure it has been created.
         */
        qemu_sem_wait(&mis->postcopy_qemufile_dst_done);
        assert(mis->postcopy_qemufile_dst);
        /* Kick the fast ram load thread too */
        qemu_sem_post(&mis->postcopy_pause_sem_fast_load);
    }

    return 0;
}

/**
 * Immediately following this command is a blob of data containing an
 * embedded chunk of migration stream; read it and load it.  The length of
 * the blob is read from the stream itself.
 *
 * @mis: Incoming state
 *
 * Returns: Negative values on error
 */
static int loadvm_handle_cmd_packaged(MigrationIncomingState *mis)
{
    int ret;
    size_t length;
    QIOChannelBuffer *bioc;

    length = qemu_get_be32(mis->from_src_file);
    trace_loadvm_handle_cmd_packaged(length);

    if (length > MAX_VM_CMD_PACKAGED_SIZE) {
        error_report("Unreasonably large packaged state: %zu", length);
        return -1;
    }

    bioc = qio_channel_buffer_new(length);
    qio_channel_set_name(QIO_CHANNEL(bioc), "migration-loadvm-buffer");
    ret = qemu_get_buffer(mis->from_src_file,
                          bioc->data,
                          length);
    if (ret != length) {
        object_unref(OBJECT(bioc));
        error_report("CMD_PACKAGED: Buffer receive fail ret=%d length=%zu",
                     ret, length);
        return (ret < 0) ? ret : -EAGAIN;
    }
    bioc->usage += length;
    trace_loadvm_handle_cmd_packaged_received(ret);

    QEMUFile *packf = qemu_file_new_input(QIO_CHANNEL(bioc));

    ret = qemu_loadvm_state_main(packf, mis);
    trace_loadvm_handle_cmd_packaged_main(ret);
    qemu_fclose(packf);
    object_unref(OBJECT(bioc));

    return ret;
}
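/*
 * Sketch of the MIG_CMD_PACKAGED payload as read above (informational):
 *
 *   4 bytes    blob length, big endian (bounded by MAX_VM_CMD_PACKAGED_SIZE)
 *   N bytes    the blob itself: a complete migration (sub)stream that is
 *              replayed through qemu_loadvm_state_main() from an in-memory
 *              QIOChannelBuffer instead of the live channel
 */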
/*
 * Handle the source's request for the destination's received bitmap
 * (recved_bitmap) of a RAMBlock. Payload format:
 *
 *   len (1 byte) + ramblock_name (<255 bytes)
 */
static int loadvm_handle_recv_bitmap(MigrationIncomingState *mis,
                                     uint16_t len)
{
    QEMUFile *file = mis->from_src_file;
    RAMBlock *rb;
    char block_name[256];
    size_t cnt;

    cnt = qemu_get_counted_string(file, block_name);
    if (!cnt) {
        error_report("%s: failed to read block name", __func__);
        return -EINVAL;
    }

    /* Validate before using the data */
    if (qemu_file_get_error(file)) {
        return qemu_file_get_error(file);
    }

    if (len != cnt + 1) {
        error_report("%s: invalid payload length (%d)", __func__, len);
        return -EINVAL;
    }

    rb = qemu_ram_block_by_name(block_name);
    if (!rb) {
        error_report("%s: block '%s' not found", __func__, block_name);
        return -EINVAL;
    }

    migrate_send_rp_recv_bitmap(mis, block_name);

    trace_loadvm_handle_recv_bitmap(block_name);

    return 0;
}

static int loadvm_process_enable_colo(MigrationIncomingState *mis)
{
    int ret = migration_incoming_enable_colo();

    if (!ret) {
        ret = colo_init_ram_cache();
        if (ret) {
            migration_incoming_disable_colo();
        }
    }
    return ret;
}
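/*
 * Every QEMU_VM_COMMAND on the wire shares the envelope parsed by
 * loadvm_process_command() below (informational sketch):
 *
 *   2 bytes    command number, big endian (enum qemu_vm_cmd)
 *   2 bytes    payload length in bytes, big endian
 *   N bytes    payload, command specific; the length is validated against
 *              mig_cmd_args[cmd].len when that is not -1 (variable length)
 */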
/*
 * Process an incoming 'QEMU_VM_COMMAND'
 * 0           just a normal return
 * LOADVM_QUIT All good, but exit the loop
 * <0          Error
 */
static int loadvm_process_command(QEMUFile *f)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    uint16_t cmd;
    uint16_t len;
    uint32_t tmp32;

    cmd = qemu_get_be16(f);
    len = qemu_get_be16(f);

    /* Check validity before continuing to process the cmd */
    if (qemu_file_get_error(f)) {
        return qemu_file_get_error(f);
    }

    if (cmd >= MIG_CMD_MAX || cmd == MIG_CMD_INVALID) {
        error_report("MIG_CMD 0x%x unknown (len 0x%x)", cmd, len);
        return -EINVAL;
    }

    trace_loadvm_process_command(mig_cmd_args[cmd].name, len);

    if (mig_cmd_args[cmd].len != -1 && mig_cmd_args[cmd].len != len) {
        error_report("%s received with bad length - expecting %zu, got %d",
                     mig_cmd_args[cmd].name,
                     (size_t)mig_cmd_args[cmd].len, len);
        return -ERANGE;
    }

    switch (cmd) {
    case MIG_CMD_OPEN_RETURN_PATH:
        if (mis->to_src_file) {
            error_report("CMD_OPEN_RETURN_PATH called when RP already open");
            /* Not really a problem, so don't give up */
            return 0;
        }
        mis->to_src_file = qemu_file_get_return_path(f);
        if (!mis->to_src_file) {
            error_report("CMD_OPEN_RETURN_PATH failed");
            return -1;
        }
        break;

    case MIG_CMD_PING:
        tmp32 = qemu_get_be32(f);
        trace_loadvm_process_command_ping(tmp32);
        if (!mis->to_src_file) {
            error_report("CMD_PING (0x%x) received with no return path",
                         tmp32);
            return -1;
        }
        migrate_send_rp_pong(mis, tmp32);
        break;

    case MIG_CMD_PACKAGED:
        return loadvm_handle_cmd_packaged(mis);

    case MIG_CMD_POSTCOPY_ADVISE:
        return loadvm_postcopy_handle_advise(mis, len);

    case MIG_CMD_POSTCOPY_LISTEN:
        return loadvm_postcopy_handle_listen(mis);

    case MIG_CMD_POSTCOPY_RUN:
        return loadvm_postcopy_handle_run(mis);

    case MIG_CMD_POSTCOPY_RAM_DISCARD:
        return loadvm_postcopy_ram_handle_discard(mis, len);

    case MIG_CMD_POSTCOPY_RESUME:
        return loadvm_postcopy_handle_resume(mis);

    case MIG_CMD_RECV_BITMAP:
        return loadvm_handle_recv_bitmap(mis, len);

    case MIG_CMD_ENABLE_COLO:
        return loadvm_process_enable_colo(mis);
    }

    return 0;
}

/*
 * Read a footer off the wire and check that it matches the expected section
 *
 * Returns: true if the footer was good
 *          false if there is a problem (and calls error_report to say why)
 */
static bool check_section_footer(QEMUFile *f, SaveStateEntry *se)
{
    int ret;
    uint8_t read_mark;
    uint32_t read_section_id;

    if (!migrate_get_current()->send_section_footer) {
        /* No footer to check */
        return true;
    }

    read_mark = qemu_get_byte(f);

    ret = qemu_file_get_error(f);
    if (ret) {
        error_report("%s: Read section footer failed: %d",
                     __func__, ret);
        return false;
    }

    if (read_mark != QEMU_VM_SECTION_FOOTER) {
        error_report("Missing section footer for %s", se->idstr);
        return false;
    }

    read_section_id = qemu_get_be32(f);
    if (read_section_id != se->load_section_id) {
        error_report("Mismatched section id in footer for %s -"
                     " read 0x%x expected 0x%x",
                     se->idstr, read_section_id, se->load_section_id);
        return false;
    }

    /* All good */
    return true;
}
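/*
 * The section footer checked above is, on the wire (informational sketch):
 *
 *   1 byte     QEMU_VM_SECTION_FOOTER marker
 *   4 bytes    section id, big endian; must match the id from the section
 *              header that preceded the section data
 *
 * It is only present when the source's send_section_footer flag is set
 * (checked via migrate_get_current() above).
 */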
" 2482 "Make sure that your current VM setup matches your " 2483 "saved VM setup, including any hotplugged devices", 2484 idstr, instance_id); 2485 return -EINVAL; 2486 } 2487 2488 /* Validate version */ 2489 if (version_id > se->version_id) { 2490 error_report("savevm: unsupported version %d for '%s' v%d", 2491 version_id, idstr, se->version_id); 2492 return -EINVAL; 2493 } 2494 se->load_version_id = version_id; 2495 se->load_section_id = section_id; 2496 2497 /* Validate if it is a device's state */ 2498 if (xen_enabled() && se->is_ram) { 2499 error_report("loadvm: %s RAM loading not allowed on Xen", idstr); 2500 return -EINVAL; 2501 } 2502 2503 ret = vmstate_load(f, se); 2504 if (ret < 0) { 2505 error_report("error while loading state for instance 0x%"PRIx32" of" 2506 " device '%s'", instance_id, idstr); 2507 return ret; 2508 } 2509 if (!check_section_footer(f, se)) { 2510 return -EINVAL; 2511 } 2512 2513 return 0; 2514 } 2515 2516 static int 2517 qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis) 2518 { 2519 uint32_t section_id; 2520 SaveStateEntry *se; 2521 int ret; 2522 2523 section_id = qemu_get_be32(f); 2524 2525 ret = qemu_file_get_error(f); 2526 if (ret) { 2527 error_report("%s: Failed to read section ID: %d", 2528 __func__, ret); 2529 return ret; 2530 } 2531 2532 trace_qemu_loadvm_state_section_partend(section_id); 2533 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 2534 if (se->load_section_id == section_id) { 2535 break; 2536 } 2537 } 2538 if (se == NULL) { 2539 error_report("Unknown savevm section %d", section_id); 2540 return -EINVAL; 2541 } 2542 2543 ret = vmstate_load(f, se); 2544 if (ret < 0) { 2545 error_report("error while loading state section id %d(%s)", 2546 section_id, se->idstr); 2547 return ret; 2548 } 2549 if (!check_section_footer(f, se)) { 2550 return -EINVAL; 2551 } 2552 2553 return 0; 2554 } 2555 2556 static int qemu_loadvm_state_header(QEMUFile *f) 2557 { 2558 unsigned int v; 2559 int ret; 2560 2561 v = qemu_get_be32(f); 2562 if (v != QEMU_VM_FILE_MAGIC) { 2563 error_report("Not a migration stream"); 2564 return -EINVAL; 2565 } 2566 2567 v = qemu_get_be32(f); 2568 if (v == QEMU_VM_FILE_VERSION_COMPAT) { 2569 error_report("SaveVM v2 format is obsolete and don't work anymore"); 2570 return -ENOTSUP; 2571 } 2572 if (v != QEMU_VM_FILE_VERSION) { 2573 error_report("Unsupported migration stream version"); 2574 return -ENOTSUP; 2575 } 2576 2577 if (migrate_get_current()->send_configuration) { 2578 if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) { 2579 error_report("Configuration section missing"); 2580 qemu_loadvm_state_cleanup(); 2581 return -EINVAL; 2582 } 2583 ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0); 2584 2585 if (ret) { 2586 qemu_loadvm_state_cleanup(); 2587 return ret; 2588 } 2589 } 2590 return 0; 2591 } 2592 2593 static int qemu_loadvm_state_setup(QEMUFile *f) 2594 { 2595 SaveStateEntry *se; 2596 int ret; 2597 2598 trace_loadvm_state_setup(); 2599 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 2600 if (!se->ops || !se->ops->load_setup) { 2601 continue; 2602 } 2603 if (se->ops->is_active) { 2604 if (!se->ops->is_active(se->opaque)) { 2605 continue; 2606 } 2607 } 2608 2609 ret = se->ops->load_setup(f, se->opaque); 2610 if (ret < 0) { 2611 qemu_file_set_error(f, ret); 2612 error_report("Load state of device %s failed", se->idstr); 2613 return ret; 2614 } 2615 } 2616 return 0; 2617 } 2618 2619 void qemu_loadvm_state_cleanup(void) 2620 { 2621 SaveStateEntry *se; 2622 2623 trace_loadvm_state_cleanup(); 2624 
void qemu_loadvm_state_cleanup(void)
{
    SaveStateEntry *se;

    trace_loadvm_state_cleanup();
    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->ops && se->ops->load_cleanup) {
            se->ops->load_cleanup(se->opaque);
        }
    }
}

/* Return true if we should continue the migration, or false. */
static bool postcopy_pause_incoming(MigrationIncomingState *mis)
{
    int i;

    trace_postcopy_pause_incoming();

    assert(migrate_postcopy_ram());

    /*
     * Unregistering yank with either the from or to src file would work,
     * since the ioc behind them is the same.
     */
    migration_ioc_unregister_yank_from_file(mis->from_src_file);

    assert(mis->from_src_file);
    qemu_file_shutdown(mis->from_src_file);
    qemu_fclose(mis->from_src_file);
    mis->from_src_file = NULL;

    assert(mis->to_src_file);
    qemu_file_shutdown(mis->to_src_file);
    qemu_mutex_lock(&mis->rp_mutex);
    qemu_fclose(mis->to_src_file);
    mis->to_src_file = NULL;
    qemu_mutex_unlock(&mis->rp_mutex);

    /*
     * NOTE: this must happen before resetting the PostcopyTmpPages below,
     * otherwise it's racy to reset those fields while the fast load thread
     * may still be accessing them in parallel.
     */
    if (mis->postcopy_qemufile_dst) {
        qemu_file_shutdown(mis->postcopy_qemufile_dst);
        /* Take the mutex to make sure the fast ram load thread has halted */
        qemu_mutex_lock(&mis->postcopy_prio_thread_mutex);
        migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst);
        qemu_fclose(mis->postcopy_qemufile_dst);
        mis->postcopy_qemufile_dst = NULL;
        qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex);
    }

    migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                      MIGRATION_STATUS_POSTCOPY_PAUSED);

    /* Notify the fault thread for the invalidated file handle */
    postcopy_fault_thread_notify(mis);

    /*
     * If the network is interrupted, any temp page we received will be
     * useless because we didn't mark it as "received" in receivedmap.
     * After a proper recovery later (which will sync the src dirty bitmap
     * with receivedmap on dest) these cached small pages will be resent.
     */
    for (i = 0; i < mis->postcopy_channels; i++) {
        postcopy_temp_page_reset(&mis->postcopy_tmp_pages[i]);
    }

    error_report("Detected IO failure for postcopy. "
                 "Migration paused.");

    while (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
        qemu_sem_wait(&mis->postcopy_pause_sem_dst);
    }

    trace_postcopy_pause_incoming_continued();

    return true;
}
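/*
 * The wait loop above parks the channel-reading thread while the migration
 * sits in MIGRATION_STATUS_POSTCOPY_PAUSED; something else (typically the
 * recovery path that re-establishes the channels and then posts
 * postcopy_pause_sem_dst) has to move the state on before this returns true
 * and qemu_loadvm_state_main() retries on the new from_src_file.
 */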
" 2690 "Migration paused."); 2691 2692 while (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) { 2693 qemu_sem_wait(&mis->postcopy_pause_sem_dst); 2694 } 2695 2696 trace_postcopy_pause_incoming_continued(); 2697 2698 return true; 2699 } 2700 2701 int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis) 2702 { 2703 uint8_t section_type; 2704 int ret = 0; 2705 2706 retry: 2707 while (true) { 2708 section_type = qemu_get_byte(f); 2709 2710 ret = qemu_file_get_error_obj_any(f, mis->postcopy_qemufile_dst, NULL); 2711 if (ret) { 2712 break; 2713 } 2714 2715 trace_qemu_loadvm_state_section(section_type); 2716 switch (section_type) { 2717 case QEMU_VM_SECTION_START: 2718 case QEMU_VM_SECTION_FULL: 2719 ret = qemu_loadvm_section_start_full(f, mis); 2720 if (ret < 0) { 2721 goto out; 2722 } 2723 break; 2724 case QEMU_VM_SECTION_PART: 2725 case QEMU_VM_SECTION_END: 2726 ret = qemu_loadvm_section_part_end(f, mis); 2727 if (ret < 0) { 2728 goto out; 2729 } 2730 break; 2731 case QEMU_VM_COMMAND: 2732 ret = loadvm_process_command(f); 2733 trace_qemu_loadvm_state_section_command(ret); 2734 if ((ret < 0) || (ret == LOADVM_QUIT)) { 2735 goto out; 2736 } 2737 break; 2738 case QEMU_VM_EOF: 2739 /* This is the end of migration */ 2740 goto out; 2741 default: 2742 error_report("Unknown savevm section type %d", section_type); 2743 ret = -EINVAL; 2744 goto out; 2745 } 2746 } 2747 2748 out: 2749 if (ret < 0) { 2750 qemu_file_set_error(f, ret); 2751 2752 /* Cancel bitmaps incoming regardless of recovery */ 2753 dirty_bitmap_mig_cancel_incoming(); 2754 2755 /* 2756 * If we are during an active postcopy, then we pause instead 2757 * of bail out to at least keep the VM's dirty data. Note 2758 * that POSTCOPY_INCOMING_LISTENING stage is still not enough, 2759 * during which we're still receiving device states and we 2760 * still haven't yet started the VM on destination. 2761 * 2762 * Only RAM postcopy supports recovery. Still, if RAM postcopy is 2763 * enabled, canceled bitmaps postcopy will not affect RAM postcopy 2764 * recovering. 2765 */ 2766 if (postcopy_state_get() == POSTCOPY_INCOMING_RUNNING && 2767 migrate_postcopy_ram() && postcopy_pause_incoming(mis)) { 2768 /* Reset f to point to the newly created channel */ 2769 f = mis->from_src_file; 2770 goto retry; 2771 } 2772 } 2773 return ret; 2774 } 2775 2776 int qemu_loadvm_state(QEMUFile *f) 2777 { 2778 MigrationIncomingState *mis = migration_incoming_get_current(); 2779 Error *local_err = NULL; 2780 int ret; 2781 2782 if (qemu_savevm_state_blocked(&local_err)) { 2783 error_report_err(local_err); 2784 return -EINVAL; 2785 } 2786 2787 ret = qemu_loadvm_state_header(f); 2788 if (ret) { 2789 return ret; 2790 } 2791 2792 if (qemu_loadvm_state_setup(f) != 0) { 2793 return -EINVAL; 2794 } 2795 2796 cpu_synchronize_all_pre_loadvm(); 2797 2798 ret = qemu_loadvm_state_main(f, mis); 2799 qemu_event_set(&mis->main_thread_load_event); 2800 2801 trace_qemu_loadvm_state_post_main(ret); 2802 2803 if (mis->have_listen_thread) { 2804 /* Listen thread still going, can't clean up yet */ 2805 return ret; 2806 } 2807 2808 if (ret == 0) { 2809 ret = qemu_file_get_error(f); 2810 } 2811 2812 /* 2813 * Try to read in the VMDESC section as well, so that dumping tools that 2814 * intercept our migration stream have the chance to see it. 2815 */ 2816 2817 /* We've got to be careful; if we don't read the data and just shut the fd 2818 * then the sender can error if we close while it's still sending. 
int qemu_loadvm_state(QEMUFile *f)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    Error *local_err = NULL;
    int ret;

    if (qemu_savevm_state_blocked(&local_err)) {
        error_report_err(local_err);
        return -EINVAL;
    }

    ret = qemu_loadvm_state_header(f);
    if (ret) {
        return ret;
    }

    if (qemu_loadvm_state_setup(f) != 0) {
        return -EINVAL;
    }

    cpu_synchronize_all_pre_loadvm();

    ret = qemu_loadvm_state_main(f, mis);
    qemu_event_set(&mis->main_thread_load_event);

    trace_qemu_loadvm_state_post_main(ret);

    if (mis->have_listen_thread) {
        /* Listen thread still going, can't clean up yet */
        return ret;
    }

    if (ret == 0) {
        ret = qemu_file_get_error(f);
    }

    /*
     * Try to read in the VMDESC section as well, so that dumping tools that
     * intercept our migration stream have the chance to see it.
     */

    /*
     * We've got to be careful; if we don't read the data and just shut the
     * fd then the sender can error if we close while it's still sending.
     * We also mustn't read data that isn't there; some transports (RDMA)
     * will stall waiting for that data when the source has already closed.
     */
    if (ret == 0 && should_send_vmdesc()) {
        uint8_t *buf;
        uint32_t size;
        uint8_t section_type = qemu_get_byte(f);

        if (section_type != QEMU_VM_VMDESCRIPTION) {
            error_report("Expected vmdescription section, but got %d",
                         section_type);
            /*
             * It doesn't seem worth failing at this point since
             * we apparently have an otherwise valid VM state
             */
        } else {
            buf = g_malloc(0x1000);
            size = qemu_get_be32(f);

            while (size > 0) {
                uint32_t read_chunk = MIN(size, 0x1000);
                qemu_get_buffer(f, buf, read_chunk);
                size -= read_chunk;
            }
            g_free(buf);
        }
    }

    qemu_loadvm_state_cleanup();
    cpu_synchronize_all_post_init();

    return ret;
}

int qemu_load_device_state(QEMUFile *f)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    int ret;

    /* Load QEMU_VM_SECTION_FULL section */
    ret = qemu_loadvm_state_main(f, mis);
    if (ret < 0) {
        error_report("Failed to load device state: %d", ret);
        return ret;
    }

    cpu_synchronize_all_post_init();
    return 0;
}
bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
                   bool has_devices, strList *devices, Error **errp)
{
    BlockDriverState *bs;
    QEMUSnapshotInfo sn1, *sn = &sn1;
    int ret = -1, ret2;
    QEMUFile *f;
    int saved_vm_running;
    uint64_t vm_state_size;
    g_autoptr(GDateTime) now = g_date_time_new_now_local();
    AioContext *aio_context;

    GLOBAL_STATE_CODE();

    if (migration_is_blocked(errp)) {
        return false;
    }

    if (!replay_can_snapshot()) {
        error_setg(errp, "Record/replay does not allow making a snapshot "
                   "right now. Try again later.");
        return false;
    }

    if (!bdrv_all_can_snapshot(has_devices, devices, errp)) {
        return false;
    }

    /* Delete old snapshots of the same name */
    if (name) {
        if (overwrite) {
            if (bdrv_all_delete_snapshot(name, has_devices,
                                         devices, errp) < 0) {
                return false;
            }
        } else {
            ret2 = bdrv_all_has_snapshot(name, has_devices, devices, errp);
            if (ret2 < 0) {
                return false;
            }
            if (ret2 == 1) {
                error_setg(errp,
                           "Snapshot '%s' already exists in one or more devices",
                           name);
                return false;
            }
        }
    }

    bs = bdrv_all_find_vmstate_bs(vmstate, has_devices, devices, errp);
    if (bs == NULL) {
        return false;
    }
    aio_context = bdrv_get_aio_context(bs);

    saved_vm_running = runstate_is_running();

    ret = global_state_store();
    if (ret) {
        error_setg(errp, "Error saving global state");
        return false;
    }
    vm_stop(RUN_STATE_SAVE_VM);

    bdrv_drain_all_begin();

    aio_context_acquire(aio_context);

    memset(sn, 0, sizeof(*sn));

    /* Fill auxiliary fields */
    sn->date_sec = g_date_time_to_unix(now);
    sn->date_nsec = g_date_time_get_microsecond(now) * 1000;
    sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    if (replay_mode != REPLAY_MODE_NONE) {
        sn->icount = replay_get_current_icount();
    } else {
        sn->icount = -1ULL;
    }

    if (name) {
        pstrcpy(sn->name, sizeof(sn->name), name);
    } else {
        g_autofree char *autoname = g_date_time_format(now, "vm-%Y%m%d%H%M%S");
        pstrcpy(sn->name, sizeof(sn->name), autoname);
    }

    /* Save the VM state */
    f = qemu_fopen_bdrv(bs, 1);
    if (!f) {
        error_setg(errp, "Could not open VM state file");
        goto the_end;
    }
    ret = qemu_savevm_state(f, errp);
    vm_state_size = qemu_file_total_transferred(f);
    ret2 = qemu_fclose(f);
    if (ret < 0) {
        goto the_end;
    }
    if (ret2 < 0) {
        ret = ret2;
        goto the_end;
    }

    /*
     * The bdrv_all_create_snapshot() call that follows acquires the
     * AioContext for itself. BDRV_POLL_WHILE() does not support nested
     * locking because it only releases the lock once. Therefore synchronous
     * I/O will deadlock unless we release the AioContext before
     * bdrv_all_create_snapshot().
     */
    aio_context_release(aio_context);
    aio_context = NULL;

    ret = bdrv_all_create_snapshot(sn, bs, vm_state_size,
                                   has_devices, devices, errp);
    if (ret < 0) {
        bdrv_all_delete_snapshot(sn->name, has_devices, devices, NULL);
        goto the_end;
    }

    ret = 0;

the_end:
    if (aio_context) {
        aio_context_release(aio_context);
    }

    bdrv_drain_all_end();

    if (saved_vm_running) {
        vm_start();
    }
    return ret == 0;
}
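/*
 * A hypothetical caller of save_snapshot() above (for illustration only;
 * real callers include the HMP "savevm" command and the snapshot-save job
 * further down):
 *
 *     Error *err = NULL;
 *
 *     if (!save_snapshot("checkpoint-1", false, NULL, false, NULL, &err)) {
 *         error_report_err(err);
 *     }
 */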
void qmp_xen_save_devices_state(const char *filename, bool has_live, bool live,
                                Error **errp)
{
    QEMUFile *f;
    QIOChannelFile *ioc;
    int saved_vm_running;
    int ret;

    if (!has_live) {
        /*
         * live defaults to true so that old versions of the Xen tool stack
         * can still have a successful live migration.
         */
        live = true;
    }

    saved_vm_running = runstate_is_running();
    vm_stop(RUN_STATE_SAVE_VM);
    global_state_store_running();

    ioc = qio_channel_file_new_path(filename, O_WRONLY | O_CREAT | O_TRUNC,
                                    0660, errp);
    if (!ioc) {
        goto the_end;
    }
    qio_channel_set_name(QIO_CHANNEL(ioc), "migration-xen-save-state");
    f = qemu_file_new_output(QIO_CHANNEL(ioc));
    object_unref(OBJECT(ioc));
    ret = qemu_save_device_state(f);
    if (ret < 0 || qemu_fclose(f) < 0) {
        error_setg(errp, QERR_IO_ERROR);
    } else {
        /*
         * libxl calls the QMP command "stop" before calling
         * "xen-save-devices-state" and, in case of migration failure,
         * libxl would call "cont".
         * So call bdrv_inactivate_all (release locks) here to let the other
         * side of the migration take control of the images.
         */
        if (live && !saved_vm_running) {
            ret = bdrv_inactivate_all();
            if (ret) {
                error_setg(errp, "%s: bdrv_inactivate_all() failed (%d)",
                           __func__, ret);
            }
        }
    }

the_end:
    if (saved_vm_running) {
        vm_start();
    }
}

void qmp_xen_load_devices_state(const char *filename, Error **errp)
{
    QEMUFile *f;
    QIOChannelFile *ioc;
    int ret;

    /*
     * The guest must be paused before loading the device state; the RAM
     * state will already have been loaded by xc.
     */
    if (runstate_is_running()) {
        error_setg(errp, "Cannot update device state while vm is running");
        return;
    }
    vm_stop(RUN_STATE_RESTORE_VM);

    ioc = qio_channel_file_new_path(filename, O_RDONLY | O_BINARY, 0, errp);
    if (!ioc) {
        return;
    }
    qio_channel_set_name(QIO_CHANNEL(ioc), "migration-xen-load-state");
    f = qemu_file_new_input(QIO_CHANNEL(ioc));
    object_unref(OBJECT(ioc));

    ret = qemu_loadvm_state(f);
    qemu_fclose(f);
    if (ret < 0) {
        error_setg(errp, QERR_IO_ERROR);
    }
    migration_incoming_state_destroy();
}
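/*
 * A hypothetical caller of load_snapshot() below (for illustration only;
 * real callers include the HMP "loadvm" command, -loadvm at startup, and
 * the snapshot-load job further down):
 *
 *     Error *err = NULL;
 *
 *     if (!load_snapshot("checkpoint-1", NULL, false, NULL, &err)) {
 *         error_report_err(err);
 *     }
 */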
bool load_snapshot(const char *name, const char *vmstate,
                   bool has_devices, strList *devices, Error **errp)
{
    BlockDriverState *bs_vm_state;
    QEMUSnapshotInfo sn;
    QEMUFile *f;
    int ret;
    AioContext *aio_context;
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!bdrv_all_can_snapshot(has_devices, devices, errp)) {
        return false;
    }
    ret = bdrv_all_has_snapshot(name, has_devices, devices, errp);
    if (ret < 0) {
        return false;
    }
    if (ret == 0) {
        error_setg(errp, "Snapshot '%s' does not exist in one or more devices",
                   name);
        return false;
    }

    bs_vm_state = bdrv_all_find_vmstate_bs(vmstate, has_devices,
                                           devices, errp);
    if (!bs_vm_state) {
        return false;
    }
    aio_context = bdrv_get_aio_context(bs_vm_state);

    /* Don't even try to load empty VM states */
    aio_context_acquire(aio_context);
    ret = bdrv_snapshot_find(bs_vm_state, &sn, name);
    aio_context_release(aio_context);
    if (ret < 0) {
        return false;
    } else if (sn.vm_state_size == 0) {
        error_setg(errp, "This is a disk-only snapshot. Revert to it "
                   "offline using qemu-img");
        return false;
    }

    /*
     * Flush the record/replay queue. The VM state is going to change;
     * we therefore don't need to preserve its consistency.
     */
    replay_flush_events();

    /* Flush all IO requests so they don't interfere with the new state. */
    bdrv_drain_all_begin();

    ret = bdrv_all_goto_snapshot(name, has_devices, devices, errp);
    if (ret < 0) {
        goto err_drain;
    }

    /* Restore the VM state */
    f = qemu_fopen_bdrv(bs_vm_state, 0);
    if (!f) {
        error_setg(errp, "Could not open VM state file");
        goto err_drain;
    }

    qemu_system_reset(SHUTDOWN_CAUSE_SNAPSHOT_LOAD);
    mis->from_src_file = f;

    if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
        ret = -EINVAL;
        goto err_drain;
    }
    aio_context_acquire(aio_context);
    ret = qemu_loadvm_state(f);
    migration_incoming_state_destroy();
    aio_context_release(aio_context);

    bdrv_drain_all_end();

    if (ret < 0) {
        error_setg(errp, "Error %d while loading VM state", ret);
        return false;
    }

    return true;

err_drain:
    bdrv_drain_all_end();
    return false;
}

bool delete_snapshot(const char *name, bool has_devices,
                     strList *devices, Error **errp)
{
    if (!bdrv_all_can_snapshot(has_devices, devices, errp)) {
        return false;
    }

    if (bdrv_all_delete_snapshot(name, has_devices, devices, errp) < 0) {
        return false;
    }

    return true;
}

void vmstate_register_ram(MemoryRegion *mr, DeviceState *dev)
{
    qemu_ram_set_idstr(mr->ram_block,
                       memory_region_name(mr), dev);
    qemu_ram_set_migratable(mr->ram_block);
}

void vmstate_unregister_ram(MemoryRegion *mr, DeviceState *dev)
{
    qemu_ram_unset_idstr(mr->ram_block);
    qemu_ram_unset_migratable(mr->ram_block);
}

void vmstate_register_ram_global(MemoryRegion *mr)
{
    vmstate_register_ram(mr, NULL);
}

bool vmstate_check_only_migratable(const VMStateDescription *vmsd)
{
    /* This check is only needed if --only-migratable was specified */
    if (!only_migratable) {
        return true;
    }

    return !(vmsd && vmsd->unmigratable);
}

typedef struct SnapshotJob {
    Job common;
    char *tag;
    char *vmstate;
    strList *devices;
    Coroutine *co;
    Error **errp;
    bool ret;
} SnapshotJob;

static void qmp_snapshot_job_free(SnapshotJob *s)
{
    g_free(s->tag);
    g_free(s->vmstate);
    qapi_free_strList(s->devices);
}
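/*
 * The snapshot jobs below all follow the same pattern: the job's .run
 * callback executes in a coroutine, but save/load/delete_snapshot() are
 * synchronous main-loop code, so each _run function schedules a bottom half
 * on the main AioContext, yields, and lets the BH do the real work before
 * waking the coroutine again with aio_co_wake().
 */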
static void snapshot_load_job_bh(void *opaque)
{
    Job *job = opaque;
    SnapshotJob *s = container_of(job, SnapshotJob, common);
    int orig_vm_running;

    job_progress_set_remaining(&s->common, 1);

    orig_vm_running = runstate_is_running();
    vm_stop(RUN_STATE_RESTORE_VM);

    s->ret = load_snapshot(s->tag, s->vmstate, true, s->devices, s->errp);
    if (s->ret && orig_vm_running) {
        vm_start();
    }

    job_progress_update(&s->common, 1);

    qmp_snapshot_job_free(s);
    aio_co_wake(s->co);
}

static void snapshot_save_job_bh(void *opaque)
{
    Job *job = opaque;
    SnapshotJob *s = container_of(job, SnapshotJob, common);

    job_progress_set_remaining(&s->common, 1);
    s->ret = save_snapshot(s->tag, false, s->vmstate,
                           true, s->devices, s->errp);
    job_progress_update(&s->common, 1);

    qmp_snapshot_job_free(s);
    aio_co_wake(s->co);
}

static void snapshot_delete_job_bh(void *opaque)
{
    Job *job = opaque;
    SnapshotJob *s = container_of(job, SnapshotJob, common);

    job_progress_set_remaining(&s->common, 1);
    s->ret = delete_snapshot(s->tag, true, s->devices, s->errp);
    job_progress_update(&s->common, 1);

    qmp_snapshot_job_free(s);
    aio_co_wake(s->co);
}

static int coroutine_fn snapshot_save_job_run(Job *job, Error **errp)
{
    SnapshotJob *s = container_of(job, SnapshotJob, common);
    s->errp = errp;
    s->co = qemu_coroutine_self();
    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            snapshot_save_job_bh, job);
    qemu_coroutine_yield();
    return s->ret ? 0 : -1;
}

static int coroutine_fn snapshot_load_job_run(Job *job, Error **errp)
{
    SnapshotJob *s = container_of(job, SnapshotJob, common);
    s->errp = errp;
    s->co = qemu_coroutine_self();
    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            snapshot_load_job_bh, job);
    qemu_coroutine_yield();
    return s->ret ? 0 : -1;
}

static int coroutine_fn snapshot_delete_job_run(Job *job, Error **errp)
{
    SnapshotJob *s = container_of(job, SnapshotJob, common);
    s->errp = errp;
    s->co = qemu_coroutine_self();
    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            snapshot_delete_job_bh, job);
    qemu_coroutine_yield();
    return s->ret ? 0 : -1;
}
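/*
 * Illustrative QMP usage of the jobs defined below (the job id, tag and
 * device list are made-up values):
 *
 *   -> { "execute": "snapshot-save",
 *        "arguments": { "job-id": "snapsave0", "tag": "my-snap",
 *                       "vmstate": "disk0",
 *                       "devices": ["disk0", "disk1"] } }
 *   <- { "return": {} }
 *
 * Completion is reported through the usual job infrastructure, and the job
 * has to be dismissed explicitly because it is created with
 * JOB_MANUAL_DISMISS.
 */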
static const JobDriver snapshot_load_job_driver = {
    .instance_size = sizeof(SnapshotJob),
    .job_type = JOB_TYPE_SNAPSHOT_LOAD,
    .run = snapshot_load_job_run,
};

static const JobDriver snapshot_save_job_driver = {
    .instance_size = sizeof(SnapshotJob),
    .job_type = JOB_TYPE_SNAPSHOT_SAVE,
    .run = snapshot_save_job_run,
};

static const JobDriver snapshot_delete_job_driver = {
    .instance_size = sizeof(SnapshotJob),
    .job_type = JOB_TYPE_SNAPSHOT_DELETE,
    .run = snapshot_delete_job_run,
};

void qmp_snapshot_save(const char *job_id,
                       const char *tag,
                       const char *vmstate,
                       strList *devices,
                       Error **errp)
{
    SnapshotJob *s;

    s = job_create(job_id, &snapshot_save_job_driver, NULL,
                   qemu_get_aio_context(), JOB_MANUAL_DISMISS,
                   NULL, NULL, errp);
    if (!s) {
        return;
    }

    s->tag = g_strdup(tag);
    s->vmstate = g_strdup(vmstate);
    s->devices = QAPI_CLONE(strList, devices);

    job_start(&s->common);
}

void qmp_snapshot_load(const char *job_id,
                       const char *tag,
                       const char *vmstate,
                       strList *devices,
                       Error **errp)
{
    SnapshotJob *s;

    s = job_create(job_id, &snapshot_load_job_driver, NULL,
                   qemu_get_aio_context(), JOB_MANUAL_DISMISS,
                   NULL, NULL, errp);
    if (!s) {
        return;
    }

    s->tag = g_strdup(tag);
    s->vmstate = g_strdup(vmstate);
    s->devices = QAPI_CLONE(strList, devices);

    job_start(&s->common);
}

void qmp_snapshot_delete(const char *job_id,
                         const char *tag,
                         strList *devices,
                         Error **errp)
{
    SnapshotJob *s;

    s = job_create(job_id, &snapshot_delete_job_driver, NULL,
                   qemu_get_aio_context(), JOB_MANUAL_DISMISS,
                   NULL, NULL, errp);
    if (!s) {
        return;
    }

    s->tag = g_strdup(tag);
    s->devices = QAPI_CLONE(strList, devices);

    job_start(&s->common);
}