/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/boards.h"
#include "net/net.h"
#include "migration.h"
#include "migration/snapshot.h"
#include "migration-stats.h"
#include "migration/vmstate.h"
#include "migration/misc.h"
#include "migration/register.h"
#include "migration/global_state.h"
#include "migration/channel-block.h"
#include "ram.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-builtin-visit.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "sysemu/cpus.h"
#include "exec/memory.h"
#include "exec/target_page.h"
#include "trace.h"
#include "qemu/iov.h"
#include "qemu/job.h"
#include "qemu/main-loop.h"
#include "block/snapshot.h"
#include "qemu/cutils.h"
#include "io/channel-buffer.h"
#include "io/channel-file.h"
#include "sysemu/replay.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/xen.h"
#include "migration/colo.h"
#include "qemu/bitmap.h"
#include "net/announce.h"
#include "qemu/yank.h"
#include "yank_functions.h"
#include "sysemu/qtest.h"
#include "options.h"

const unsigned int postcopy_ram_discard_version;

/* Subcommands for QEMU_VM_COMMAND */
enum qemu_vm_cmd {
    MIG_CMD_INVALID = 0,       /* Must be 0 */
    MIG_CMD_OPEN_RETURN_PATH,  /* Tell the dest to open the Return path */
    MIG_CMD_PING,              /* Request a PONG on the RP */

    MIG_CMD_POSTCOPY_ADVISE,       /* Prior to any page transfers, just
                                      warn we might want to do PC */
    MIG_CMD_POSTCOPY_LISTEN,       /* Start listening for incoming
                                      pages as it's running. */
    MIG_CMD_POSTCOPY_RUN,          /* Start execution */

    MIG_CMD_POSTCOPY_RAM_DISCARD,  /* A list of pages to discard that
                                      were previously sent during
                                      precopy but are dirty. */
    MIG_CMD_PACKAGED,          /* Send a wrapped stream within this stream */
    MIG_CMD_ENABLE_COLO,       /* Enable COLO */
    MIG_CMD_POSTCOPY_RESUME,   /* resume postcopy on dest */
    MIG_CMD_RECV_BITMAP,       /* Request the received bitmap on the dst */
    MIG_CMD_MAX
};

#define MAX_VM_CMD_PACKAGED_SIZE UINT32_MAX
static struct mig_cmd_args {
    ssize_t len; /* -1 = variable */
    const char *name;
} mig_cmd_args[] = {
    [MIG_CMD_INVALID]          = { .len = -1, .name = "INVALID" },
    [MIG_CMD_OPEN_RETURN_PATH] = { .len =  0, .name = "OPEN_RETURN_PATH" },
    [MIG_CMD_PING]             = { .len = sizeof(uint32_t), .name = "PING" },
    [MIG_CMD_POSTCOPY_ADVISE]  = { .len = -1, .name = "POSTCOPY_ADVISE" },
    [MIG_CMD_POSTCOPY_LISTEN]  = { .len =  0, .name = "POSTCOPY_LISTEN" },
    [MIG_CMD_POSTCOPY_RUN]     = { .len =  0, .name = "POSTCOPY_RUN" },
    [MIG_CMD_POSTCOPY_RAM_DISCARD] = {
                                   .len = -1, .name = "POSTCOPY_RAM_DISCARD" },
    [MIG_CMD_POSTCOPY_RESUME]  = { .len =  0, .name = "POSTCOPY_RESUME" },
    [MIG_CMD_PACKAGED]         = { .len =  4, .name = "PACKAGED" },
    [MIG_CMD_RECV_BITMAP]      = { .len = -1, .name = "RECV_BITMAP" },
    [MIG_CMD_MAX]              = { .len = -1, .name = "MAX" },
};

/* Note for MIG_CMD_POSTCOPY_ADVISE:
 * The format of the arguments depends on the postcopy mode:
 * - postcopy RAM only
 *   uint64_t host page size
 *   uint64_t target page size
 *
 * - postcopy RAM and postcopy dirty bitmaps
 *   format is the same as for postcopy RAM only
 *
 * - postcopy dirty bitmaps only
 *   Nothing. Command length field is 0.
 *
 * Be careful: adding a new postcopy entity with some other parameters should
 * not break the format's ability to describe itself. A good way is to
 * introduce some generic extendable format, with an exception for the two
 * old entities.
 */

/***********************************************************/
/* savevm/loadvm support */

static QEMUFile *qemu_fopen_bdrv(BlockDriverState *bs, int is_writable)
{
    if (is_writable) {
        return qemu_file_new_output(QIO_CHANNEL(qio_channel_block_new(bs)));
    } else {
        return qemu_file_new_input(QIO_CHANNEL(qio_channel_block_new(bs)));
    }
}


/* QEMUFile timer support.
 * Not in qemu-file.c to avoid adding qemu-timer.c as a dependency of
 * qemu-file.c
 */

void timer_put(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    expire_time = timer_expire_time_ns(ts);
    qemu_put_be64(f, expire_time);
}

void timer_get(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    expire_time = qemu_get_be64(f);
    if (expire_time != -1) {
        timer_mod_ns(ts, expire_time);
    } else {
        timer_del(ts);
    }
}


/* VMState timer support.
 * Not in vmstate.c to avoid adding qemu-timer.c as a dependency of vmstate.c
 */

static int get_timer(QEMUFile *f, void *pv, size_t size,
                     const VMStateField *field)
{
    QEMUTimer *v = pv;
    timer_get(f, v);
    return 0;
}

static int put_timer(QEMUFile *f, void *pv, size_t size,
                     const VMStateField *field, JSONWriter *vmdesc)
{
    QEMUTimer *v = pv;
    timer_put(f, v);

    return 0;
}

const VMStateInfo vmstate_info_timer = {
    .name = "timer",
    .get  = get_timer,
    .put  = put_timer,
};
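
/*
 * Usage sketch (illustrative, not part of this file): a device vmsd would
 * normally reach vmstate_info_timer through the VMSTATE_TIMER_PTR() family
 * of helpers, e.g.:
 *
 *     static const VMStateDescription vmstate_mydev = {
 *         .name = "mydev",
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         .fields = (VMStateField[]) {
 *             VMSTATE_TIMER_PTR(alarm_timer, MyDevState),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 *
 * "mydev", "alarm_timer" and "MyDevState" are hypothetical names; the timer
 * itself travels as the big-endian expiry time written by timer_put() above.
 */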

typedef struct CompatEntry {
    char idstr[256];
    int instance_id;
} CompatEntry;

typedef struct SaveStateEntry {
    QTAILQ_ENTRY(SaveStateEntry) entry;
    char idstr[256];
    uint32_t instance_id;
    int alias_id;
    int version_id;
    /* version id read from the stream */
    int load_version_id;
    int section_id;
    /* section id read from the stream */
    int load_section_id;
    const SaveVMHandlers *ops;
    const VMStateDescription *vmsd;
    void *opaque;
    CompatEntry *compat;
    int is_ram;
} SaveStateEntry;

typedef struct SaveState {
    QTAILQ_HEAD(, SaveStateEntry) handlers;
    SaveStateEntry *handler_pri_head[MIG_PRI_MAX + 1];
    int global_section_id;
    uint32_t len;
    const char *name;
    uint32_t target_page_bits;
    uint32_t caps_count;
    MigrationCapability *capabilities;
    QemuUUID uuid;
} SaveState;

static SaveState savevm_state = {
    .handlers = QTAILQ_HEAD_INITIALIZER(savevm_state.handlers),
    .handler_pri_head = { [MIG_PRI_DEFAULT ... MIG_PRI_MAX] = NULL },
    .global_section_id = 0,
};

static bool should_validate_capability(int capability)
{
    assert(capability >= 0 && capability < MIGRATION_CAPABILITY__MAX);
    /* Validate only new capabilities to keep compatibility. */
    switch (capability) {
    case MIGRATION_CAPABILITY_X_IGNORE_SHARED:
        return true;
    default:
        return false;
    }
}

static uint32_t get_validatable_capabilities_count(void)
{
    MigrationState *s = migrate_get_current();
    uint32_t result = 0;
    int i;
    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (should_validate_capability(i) && s->capabilities[i]) {
            result++;
        }
    }
    return result;
}

static int configuration_pre_save(void *opaque)
{
    SaveState *state = opaque;
    const char *current_name = MACHINE_GET_CLASS(current_machine)->name;
    MigrationState *s = migrate_get_current();
    int i, j;

    state->len = strlen(current_name);
    state->name = current_name;
    state->target_page_bits = qemu_target_page_bits();

    state->caps_count = get_validatable_capabilities_count();
    state->capabilities = g_renew(MigrationCapability, state->capabilities,
                                  state->caps_count);
    for (i = j = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (should_validate_capability(i) && s->capabilities[i]) {
            state->capabilities[j++] = i;
        }
    }
    state->uuid = qemu_uuid;

    return 0;
}

static int configuration_post_save(void *opaque)
{
    SaveState *state = opaque;

    g_free(state->capabilities);
    state->capabilities = NULL;
    state->caps_count = 0;
    return 0;
}

static int configuration_pre_load(void *opaque)
{
    SaveState *state = opaque;

    /* If there is no target-page-bits subsection it means the source
     * predates the variable-target-page-bits support and is using the
     * minimum possible value for this CPU.
     */
    state->target_page_bits = qemu_target_page_bits_min();
    return 0;
}
"on" : "off"); 336 ret = false; 337 /* Don't break here to report all failed capabilities */ 338 } 339 } 340 341 g_free(source_caps_bm); 342 return ret; 343 } 344 345 static int configuration_post_load(void *opaque, int version_id) 346 { 347 SaveState *state = opaque; 348 const char *current_name = MACHINE_GET_CLASS(current_machine)->name; 349 int ret = 0; 350 351 if (strncmp(state->name, current_name, state->len) != 0) { 352 error_report("Machine type received is '%.*s' and local is '%s'", 353 (int) state->len, state->name, current_name); 354 ret = -EINVAL; 355 goto out; 356 } 357 358 if (state->target_page_bits != qemu_target_page_bits()) { 359 error_report("Received TARGET_PAGE_BITS is %d but local is %d", 360 state->target_page_bits, qemu_target_page_bits()); 361 ret = -EINVAL; 362 goto out; 363 } 364 365 if (!configuration_validate_capabilities(state)) { 366 ret = -EINVAL; 367 goto out; 368 } 369 370 out: 371 g_free((void *)state->name); 372 state->name = NULL; 373 state->len = 0; 374 g_free(state->capabilities); 375 state->capabilities = NULL; 376 state->caps_count = 0; 377 378 return ret; 379 } 380 381 static int get_capability(QEMUFile *f, void *pv, size_t size, 382 const VMStateField *field) 383 { 384 MigrationCapability *capability = pv; 385 char capability_str[UINT8_MAX + 1]; 386 uint8_t len; 387 int i; 388 389 len = qemu_get_byte(f); 390 qemu_get_buffer(f, (uint8_t *)capability_str, len); 391 capability_str[len] = '\0'; 392 for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) { 393 if (!strcmp(MigrationCapability_str(i), capability_str)) { 394 *capability = i; 395 return 0; 396 } 397 } 398 error_report("Received unknown capability %s", capability_str); 399 return -EINVAL; 400 } 401 402 static int put_capability(QEMUFile *f, void *pv, size_t size, 403 const VMStateField *field, JSONWriter *vmdesc) 404 { 405 MigrationCapability *capability = pv; 406 const char *capability_str = MigrationCapability_str(*capability); 407 size_t len = strlen(capability_str); 408 assert(len <= UINT8_MAX); 409 410 qemu_put_byte(f, len); 411 qemu_put_buffer(f, (uint8_t *)capability_str, len); 412 return 0; 413 } 414 415 static const VMStateInfo vmstate_info_capability = { 416 .name = "capability", 417 .get = get_capability, 418 .put = put_capability, 419 }; 420 421 /* The target-page-bits subsection is present only if the 422 * target page size is not the same as the default (ie the 423 * minimum page size for a variable-page-size guest CPU). 424 * If it is present then it contains the actual target page 425 * bits for the machine, and migration will fail if the 426 * two ends don't agree about it. 
427 */ 428 static bool vmstate_target_page_bits_needed(void *opaque) 429 { 430 return qemu_target_page_bits() 431 > qemu_target_page_bits_min(); 432 } 433 434 static const VMStateDescription vmstate_target_page_bits = { 435 .name = "configuration/target-page-bits", 436 .version_id = 1, 437 .minimum_version_id = 1, 438 .needed = vmstate_target_page_bits_needed, 439 .fields = (VMStateField[]) { 440 VMSTATE_UINT32(target_page_bits, SaveState), 441 VMSTATE_END_OF_LIST() 442 } 443 }; 444 445 static bool vmstate_capabilites_needed(void *opaque) 446 { 447 return get_validatable_capabilities_count() > 0; 448 } 449 450 static const VMStateDescription vmstate_capabilites = { 451 .name = "configuration/capabilities", 452 .version_id = 1, 453 .minimum_version_id = 1, 454 .needed = vmstate_capabilites_needed, 455 .fields = (VMStateField[]) { 456 VMSTATE_UINT32_V(caps_count, SaveState, 1), 457 VMSTATE_VARRAY_UINT32_ALLOC(capabilities, SaveState, caps_count, 1, 458 vmstate_info_capability, 459 MigrationCapability), 460 VMSTATE_END_OF_LIST() 461 } 462 }; 463 464 static bool vmstate_uuid_needed(void *opaque) 465 { 466 return qemu_uuid_set && migrate_validate_uuid(); 467 } 468 469 static int vmstate_uuid_post_load(void *opaque, int version_id) 470 { 471 SaveState *state = opaque; 472 char uuid_src[UUID_FMT_LEN + 1]; 473 char uuid_dst[UUID_FMT_LEN + 1]; 474 475 if (!qemu_uuid_set) { 476 /* 477 * It's warning because user might not know UUID in some cases, 478 * e.g. load an old snapshot 479 */ 480 qemu_uuid_unparse(&state->uuid, uuid_src); 481 warn_report("UUID is received %s, but local uuid isn't set", 482 uuid_src); 483 return 0; 484 } 485 if (!qemu_uuid_is_equal(&state->uuid, &qemu_uuid)) { 486 qemu_uuid_unparse(&state->uuid, uuid_src); 487 qemu_uuid_unparse(&qemu_uuid, uuid_dst); 488 error_report("UUID received is %s and local is %s", uuid_src, uuid_dst); 489 return -EINVAL; 490 } 491 return 0; 492 } 493 494 static const VMStateDescription vmstate_uuid = { 495 .name = "configuration/uuid", 496 .version_id = 1, 497 .minimum_version_id = 1, 498 .needed = vmstate_uuid_needed, 499 .post_load = vmstate_uuid_post_load, 500 .fields = (VMStateField[]) { 501 VMSTATE_UINT8_ARRAY_V(uuid.data, SaveState, sizeof(QemuUUID), 1), 502 VMSTATE_END_OF_LIST() 503 } 504 }; 505 506 static const VMStateDescription vmstate_configuration = { 507 .name = "configuration", 508 .version_id = 1, 509 .pre_load = configuration_pre_load, 510 .post_load = configuration_post_load, 511 .pre_save = configuration_pre_save, 512 .post_save = configuration_post_save, 513 .fields = (VMStateField[]) { 514 VMSTATE_UINT32(len, SaveState), 515 VMSTATE_VBUFFER_ALLOC_UINT32(name, SaveState, 0, NULL, len), 516 VMSTATE_END_OF_LIST() 517 }, 518 .subsections = (const VMStateDescription *[]) { 519 &vmstate_target_page_bits, 520 &vmstate_capabilites, 521 &vmstate_uuid, 522 NULL 523 } 524 }; 525 526 static void dump_vmstate_vmsd(FILE *out_file, 527 const VMStateDescription *vmsd, int indent, 528 bool is_subsection); 529 530 static void dump_vmstate_vmsf(FILE *out_file, const VMStateField *field, 531 int indent) 532 { 533 fprintf(out_file, "%*s{\n", indent, ""); 534 indent += 2; 535 fprintf(out_file, "%*s\"field\": \"%s\",\n", indent, "", field->name); 536 fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "", 537 field->version_id); 538 fprintf(out_file, "%*s\"field_exists\": %s,\n", indent, "", 539 field->field_exists ? 
"true" : "false"); 540 if (field->flags & VMS_ARRAY) { 541 fprintf(out_file, "%*s\"num\": %d,\n", indent, "", field->num); 542 } 543 fprintf(out_file, "%*s\"size\": %zu", indent, "", field->size); 544 if (field->vmsd != NULL) { 545 fprintf(out_file, ",\n"); 546 dump_vmstate_vmsd(out_file, field->vmsd, indent, false); 547 } 548 fprintf(out_file, "\n%*s}", indent - 2, ""); 549 } 550 551 static void dump_vmstate_vmss(FILE *out_file, 552 const VMStateDescription **subsection, 553 int indent) 554 { 555 if (*subsection != NULL) { 556 dump_vmstate_vmsd(out_file, *subsection, indent, true); 557 } 558 } 559 560 static void dump_vmstate_vmsd(FILE *out_file, 561 const VMStateDescription *vmsd, int indent, 562 bool is_subsection) 563 { 564 if (is_subsection) { 565 fprintf(out_file, "%*s{\n", indent, ""); 566 } else { 567 fprintf(out_file, "%*s\"%s\": {\n", indent, "", "Description"); 568 } 569 indent += 2; 570 fprintf(out_file, "%*s\"name\": \"%s\",\n", indent, "", vmsd->name); 571 fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "", 572 vmsd->version_id); 573 fprintf(out_file, "%*s\"minimum_version_id\": %d", indent, "", 574 vmsd->minimum_version_id); 575 if (vmsd->fields != NULL) { 576 const VMStateField *field = vmsd->fields; 577 bool first; 578 579 fprintf(out_file, ",\n%*s\"Fields\": [\n", indent, ""); 580 first = true; 581 while (field->name != NULL) { 582 if (field->flags & VMS_MUST_EXIST) { 583 /* Ignore VMSTATE_VALIDATE bits; these don't get migrated */ 584 field++; 585 continue; 586 } 587 if (!first) { 588 fprintf(out_file, ",\n"); 589 } 590 dump_vmstate_vmsf(out_file, field, indent + 2); 591 field++; 592 first = false; 593 } 594 assert(field->flags == VMS_END); 595 fprintf(out_file, "\n%*s]", indent, ""); 596 } 597 if (vmsd->subsections != NULL) { 598 const VMStateDescription **subsection = vmsd->subsections; 599 bool first; 600 601 fprintf(out_file, ",\n%*s\"Subsections\": [\n", indent, ""); 602 first = true; 603 while (*subsection != NULL) { 604 if (!first) { 605 fprintf(out_file, ",\n"); 606 } 607 dump_vmstate_vmss(out_file, subsection, indent + 2); 608 subsection++; 609 first = false; 610 } 611 fprintf(out_file, "\n%*s]", indent, ""); 612 } 613 fprintf(out_file, "\n%*s}", indent - 2, ""); 614 } 615 616 static void dump_machine_type(FILE *out_file) 617 { 618 MachineClass *mc; 619 620 mc = MACHINE_GET_CLASS(current_machine); 621 622 fprintf(out_file, " \"vmschkmachine\": {\n"); 623 fprintf(out_file, " \"Name\": \"%s\"\n", mc->name); 624 fprintf(out_file, " },\n"); 625 } 626 627 void dump_vmstate_json_to_file(FILE *out_file) 628 { 629 GSList *list, *elt; 630 bool first; 631 632 fprintf(out_file, "{\n"); 633 dump_machine_type(out_file); 634 635 first = true; 636 list = object_class_get_list(TYPE_DEVICE, true); 637 for (elt = list; elt; elt = elt->next) { 638 DeviceClass *dc = OBJECT_CLASS_CHECK(DeviceClass, elt->data, 639 TYPE_DEVICE); 640 const char *name; 641 int indent = 2; 642 643 if (!dc->vmsd) { 644 continue; 645 } 646 647 if (!first) { 648 fprintf(out_file, ",\n"); 649 } 650 name = object_class_get_name(OBJECT_CLASS(dc)); 651 fprintf(out_file, "%*s\"%s\": {\n", indent, "", name); 652 indent += 2; 653 fprintf(out_file, "%*s\"Name\": \"%s\",\n", indent, "", name); 654 fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "", 655 dc->vmsd->version_id); 656 fprintf(out_file, "%*s\"minimum_version_id\": %d,\n", indent, "", 657 dc->vmsd->minimum_version_id); 658 659 dump_vmstate_vmsd(out_file, dc->vmsd, indent, false); 660 661 fprintf(out_file, "\n%*s}", indent - 2, ""); 662 first = 

static uint32_t calculate_new_instance_id(const char *idstr)
{
    SaveStateEntry *se;
    uint32_t instance_id = 0;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (strcmp(idstr, se->idstr) == 0
            && instance_id <= se->instance_id) {
            instance_id = se->instance_id + 1;
        }
    }
    /* Make sure we never wrap around without it being noticed */
    assert(instance_id != VMSTATE_INSTANCE_ID_ANY);
    return instance_id;
}

static int calculate_compat_instance_id(const char *idstr)
{
    SaveStateEntry *se;
    int instance_id = 0;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->compat) {
            continue;
        }

        if (strcmp(idstr, se->compat->idstr) == 0
            && instance_id <= se->compat->instance_id) {
            instance_id = se->compat->instance_id + 1;
        }
    }
    return instance_id;
}

static inline MigrationPriority save_state_priority(SaveStateEntry *se)
{
    if (se->vmsd) {
        return se->vmsd->priority;
    }
    return MIG_PRI_DEFAULT;
}

static void savevm_state_handler_insert(SaveStateEntry *nse)
{
    MigrationPriority priority = save_state_priority(nse);
    SaveStateEntry *se;
    int i;

    assert(priority <= MIG_PRI_MAX);

    for (i = priority - 1; i >= 0; i--) {
        se = savevm_state.handler_pri_head[i];
        if (se != NULL) {
            assert(save_state_priority(se) < priority);
            break;
        }
    }

    if (i >= 0) {
        QTAILQ_INSERT_BEFORE(se, nse, entry);
    } else {
        QTAILQ_INSERT_TAIL(&savevm_state.handlers, nse, entry);
    }

    if (savevm_state.handler_pri_head[priority] == NULL) {
        savevm_state.handler_pri_head[priority] = nse;
    }
}

static void savevm_state_handler_remove(SaveStateEntry *se)
{
    SaveStateEntry *next;
    MigrationPriority priority = save_state_priority(se);

    if (se == savevm_state.handler_pri_head[priority]) {
        next = QTAILQ_NEXT(se, entry);
        if (next != NULL && save_state_priority(next) == priority) {
            savevm_state.handler_pri_head[priority] = next;
        } else {
            savevm_state.handler_pri_head[priority] = NULL;
        }
    }
    QTAILQ_REMOVE(&savevm_state.handlers, se, entry);
}
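
/*
 * Shape sketch (illustrative): the two helpers above keep the handlers list
 * sorted by descending priority, with handler_pri_head[p] pointing at the
 * first entry of priority p (or NULL), e.g.:
 *
 *     handlers:  [pri 2] -> [pri 2] -> [pri 1] -> [pri 0] -> [pri 0]
 *                 ^head[2]            ^head[1]   ^head[0]
 *
 * so an insertion scans at most MIG_PRI_MAX head slots instead of walking
 * the whole list.
 */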

/* TODO: Individual devices generally have very little idea about the rest
   of the system, so instance_id should be removed/replaced.
   Meanwhile pass -1 as instance_id if you do not already have a clearly
   distinguishing id for all instances of your device class. */
int register_savevm_live(const char *idstr,
                         uint32_t instance_id,
                         int version_id,
                         const SaveVMHandlers *ops,
                         void *opaque)
{
    SaveStateEntry *se;

    se = g_new0(SaveStateEntry, 1);
    se->version_id = version_id;
    se->section_id = savevm_state.global_section_id++;
    se->ops = ops;
    se->opaque = opaque;
    se->vmsd = NULL;
    /* if this is a live_savevm then set is_ram */
    if (ops->save_setup != NULL) {
        se->is_ram = 1;
    }

    pstrcat(se->idstr, sizeof(se->idstr), idstr);

    if (instance_id == VMSTATE_INSTANCE_ID_ANY) {
        se->instance_id = calculate_new_instance_id(se->idstr);
    } else {
        se->instance_id = instance_id;
    }
    assert(!se->compat || se->instance_id == 0);
    savevm_state_handler_insert(se);
    return 0;
}

void unregister_savevm(VMStateIf *obj, const char *idstr, void *opaque)
{
    SaveStateEntry *se, *new_se;
    char id[256] = "";

    if (obj) {
        char *oid = vmstate_if_get_id(obj);
        if (oid) {
            pstrcpy(id, sizeof(id), oid);
            pstrcat(id, sizeof(id), "/");
            g_free(oid);
        }
    }
    pstrcat(id, sizeof(id), idstr);

    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) {
        if (strcmp(se->idstr, id) == 0 && se->opaque == opaque) {
            savevm_state_handler_remove(se);
            g_free(se->compat);
            g_free(se);
        }
    }
}

/*
 * Perform some basic checks on vmsd's at registration
 * time.
 */
static void vmstate_check(const VMStateDescription *vmsd)
{
    const VMStateField *field = vmsd->fields;
    const VMStateDescription **subsection = vmsd->subsections;

    if (field) {
        while (field->name) {
            if (field->flags & (VMS_STRUCT | VMS_VSTRUCT)) {
                /* Recurse to sub structures */
                vmstate_check(field->vmsd);
            }
            /* Carry on */
            field++;
        }
        /* Check for the end of field list canary */
        if (field->flags != VMS_END) {
            error_report("VMSTATE not ending with VMS_END: %s", vmsd->name);
            g_assert_not_reached();
        }
    }

    while (subsection && *subsection) {
        /*
         * The name of a subsection should start with the name of the
         * current object.
         */
        assert(!strncmp(vmsd->name, (*subsection)->name, strlen(vmsd->name)));
        vmstate_check(*subsection);
        subsection++;
    }
}

int vmstate_register_with_alias_id(VMStateIf *obj, uint32_t instance_id,
                                   const VMStateDescription *vmsd,
                                   void *opaque, int alias_id,
                                   int required_for_version,
                                   Error **errp)
{
    SaveStateEntry *se;

    /* If this triggers, alias support can be dropped for the vmsd. */
    assert(alias_id == -1 || required_for_version >= vmsd->minimum_version_id);

    se = g_new0(SaveStateEntry, 1);
    se->version_id = vmsd->version_id;
    se->section_id = savevm_state.global_section_id++;
    se->opaque = opaque;
    se->vmsd = vmsd;
    se->alias_id = alias_id;

    if (obj) {
        char *id = vmstate_if_get_id(obj);
        if (id) {
            if (snprintf(se->idstr, sizeof(se->idstr), "%s/", id) >=
                sizeof(se->idstr)) {
                error_setg(errp, "Path too long for VMState (%s)", id);
                g_free(id);
                g_free(se);

                return -1;
            }
            g_free(id);

            se->compat = g_new0(CompatEntry, 1);
            pstrcpy(se->compat->idstr, sizeof(se->compat->idstr), vmsd->name);
            se->compat->instance_id = instance_id == VMSTATE_INSTANCE_ID_ANY ?
                         calculate_compat_instance_id(vmsd->name) : instance_id;
            instance_id = VMSTATE_INSTANCE_ID_ANY;
        }
    }
    pstrcat(se->idstr, sizeof(se->idstr), vmsd->name);

    if (instance_id == VMSTATE_INSTANCE_ID_ANY) {
        se->instance_id = calculate_new_instance_id(se->idstr);
    } else {
        se->instance_id = instance_id;
    }

    /* Perform a recursive sanity check during the test runs */
    if (qtest_enabled()) {
        vmstate_check(vmsd);
    }
    assert(!se->compat || se->instance_id == 0);
    savevm_state_handler_insert(se);
    return 0;
}

void vmstate_unregister(VMStateIf *obj, const VMStateDescription *vmsd,
                        void *opaque)
{
    SaveStateEntry *se, *new_se;

    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) {
        if (se->vmsd == vmsd && se->opaque == opaque) {
            savevm_state_handler_remove(se);
            g_free(se->compat);
            g_free(se);
        }
    }
}

static int vmstate_load(QEMUFile *f, SaveStateEntry *se)
{
    trace_vmstate_load(se->idstr, se->vmsd ? se->vmsd->name : "(old)");
    if (!se->vmsd) {         /* Old style */
        return se->ops->load_state(f, se->opaque, se->load_version_id);
    }
    return vmstate_load_state(f, se->vmsd, se->opaque, se->load_version_id);
}

static void vmstate_save_old_style(QEMUFile *f, SaveStateEntry *se,
                                   JSONWriter *vmdesc)
{
    int64_t old_offset, size;

    old_offset = qemu_file_total_transferred_fast(f);
    se->ops->save_state(f, se->opaque);
    size = qemu_file_total_transferred_fast(f) - old_offset;

    if (vmdesc) {
        json_writer_int64(vmdesc, "size", size);
        json_writer_start_array(vmdesc, "fields");
        json_writer_start_object(vmdesc, NULL);
        json_writer_str(vmdesc, "name", "data");
        json_writer_int64(vmdesc, "size", size);
        json_writer_str(vmdesc, "type", "buffer");
        json_writer_end_object(vmdesc);
        json_writer_end_array(vmdesc);
    }
}

/*
 * Write the header for a device section (QEMU_VM_SECTION START/END/PART/FULL)
 */
static void save_section_header(QEMUFile *f, SaveStateEntry *se,
                                uint8_t section_type)
{
    qemu_put_byte(f, section_type);
    qemu_put_be32(f, se->section_id);

    if (section_type == QEMU_VM_SECTION_FULL ||
        section_type == QEMU_VM_SECTION_START) {
        /* ID string */
        size_t len = strlen(se->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)se->idstr, len);

        qemu_put_be32(f, se->instance_id);
        qemu_put_be32(f, se->version_id);
    }
}

/*
 * Write a footer onto device sections that catches cases of misformatted
 * device sections.
 */
static void save_section_footer(QEMUFile *f, SaveStateEntry *se)
{
    if (migrate_get_current()->send_section_footer) {
        qemu_put_byte(f, QEMU_VM_SECTION_FOOTER);
        qemu_put_be32(f, se->section_id);
    }
}
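
/*
 * Wire format sketch (illustrative): for a QEMU_VM_SECTION_FULL section
 * with the hypothetical idstr "example-dev", instance 0 and version 1,
 * save_section_header() emits:
 *
 *     byte  QEMU_VM_SECTION_FULL
 *     be32  section_id
 *     byte  11                      (strlen("example-dev"))
 *           "example-dev"
 *     be32  0                       (instance_id)
 *     be32  1                       (version_id)
 *
 * and, when send_section_footer is set, save_section_footer() closes the
 * section with a QEMU_VM_SECTION_FOOTER byte plus the be32 section_id.
 */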

static int vmstate_save(QEMUFile *f, SaveStateEntry *se, JSONWriter *vmdesc)
{
    int ret;

    if ((!se->ops || !se->ops->save_state) && !se->vmsd) {
        return 0;
    }
    if (se->vmsd && !vmstate_save_needed(se->vmsd, se->opaque)) {
        trace_savevm_section_skip(se->idstr, se->section_id);
        return 0;
    }

    trace_savevm_section_start(se->idstr, se->section_id);
    save_section_header(f, se, QEMU_VM_SECTION_FULL);
    if (vmdesc) {
        json_writer_start_object(vmdesc, NULL);
        json_writer_str(vmdesc, "name", se->idstr);
        json_writer_int64(vmdesc, "instance_id", se->instance_id);
    }

    trace_vmstate_save(se->idstr, se->vmsd ? se->vmsd->name : "(old)");
    if (!se->vmsd) {
        vmstate_save_old_style(f, se, vmdesc);
    } else {
        ret = vmstate_save_state(f, se->vmsd, se->opaque, vmdesc);
        if (ret) {
            return ret;
        }
    }

    trace_savevm_section_end(se->idstr, se->section_id, 0);
    save_section_footer(f, se);
    if (vmdesc) {
        json_writer_end_object(vmdesc);
    }
    return 0;
}

/**
 * qemu_savevm_command_send: Send a 'QEMU_VM_COMMAND' type element with the
 *                           command and associated data.
 *
 * @f: File to send command on
 * @command: Command type to send
 * @len: Length of associated data
 * @data: Data associated with command.
 */
static void qemu_savevm_command_send(QEMUFile *f,
                                     enum qemu_vm_cmd command,
                                     uint16_t len,
                                     uint8_t *data)
{
    trace_savevm_command_send(command, len);
    qemu_put_byte(f, QEMU_VM_COMMAND);
    qemu_put_be16(f, (uint16_t)command);
    qemu_put_be16(f, len);
    qemu_put_buffer(f, data, len);
    qemu_fflush(f);
}

void qemu_savevm_send_colo_enable(QEMUFile *f)
{
    trace_savevm_send_colo_enable();
    qemu_savevm_command_send(f, MIG_CMD_ENABLE_COLO, 0, NULL);
}

void qemu_savevm_send_ping(QEMUFile *f, uint32_t value)
{
    uint32_t buf;

    trace_savevm_send_ping(value);
    buf = cpu_to_be32(value);
    qemu_savevm_command_send(f, MIG_CMD_PING, sizeof(value), (uint8_t *)&buf);
}

void qemu_savevm_send_open_return_path(QEMUFile *f)
{
    trace_savevm_send_open_return_path();
    qemu_savevm_command_send(f, MIG_CMD_OPEN_RETURN_PATH, 0, NULL);
}

/* We have a buffer of data to send; we don't want that all to be loaded
 * by the command itself, so the command contains just the length of the
 * extra buffer that we then send straight after it.
 * TODO: There must be a better way to organise this.
 *
 * Returns:
 *    0 on success
 *    -ve on error
 */
int qemu_savevm_send_packaged(QEMUFile *f, const uint8_t *buf, size_t len)
{
    uint32_t tmp;

    if (len > MAX_VM_CMD_PACKAGED_SIZE) {
        error_report("%s: Unreasonably large packaged state: %zu",
                     __func__, len);
        return -1;
    }

    tmp = cpu_to_be32(len);

    trace_qemu_savevm_send_packaged();
    qemu_savevm_command_send(f, MIG_CMD_PACKAGED, 4, (uint8_t *)&tmp);

    qemu_put_buffer(f, buf, len);

    return 0;
}

/* Send prior to any postcopy transfer */
void qemu_savevm_send_postcopy_advise(QEMUFile *f)
{
    if (migrate_postcopy_ram()) {
        uint64_t tmp[2];
        tmp[0] = cpu_to_be64(ram_pagesize_summary());
        tmp[1] = cpu_to_be64(qemu_target_page_size());

        trace_qemu_savevm_send_postcopy_advise();
        qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE,
                                 16, (uint8_t *)tmp);
    } else {
        qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE, 0, NULL);
    }
}
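
/*
 * Payload sketch (illustrative): with postcopy RAM enabled, the ADVISE
 * command body is exactly two big-endian 64-bit values,
 *
 *     be64  ram_pagesize_summary()
 *     be64  qemu_target_page_size()
 *
 * i.e. 16 bytes, matching the "8 + 8" length accepted by
 * loadvm_postcopy_handle_advise() on the destination; with only dirty
 * bitmaps the body is empty (len == 0).
 */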

/* Sent prior to starting the destination running in postcopy, to discard
 * pages that have already been sent but redirtied on the source.
 * CMD_POSTCOPY_RAM_DISCARD consists of:
 *      byte   version (0)
 *      byte   Length of name field (not including 0)
 *  n x byte   RAM block name
 *      byte   0 terminator (just for safety)
 *  n x        Byte ranges within the named RAMBlock
 *      be64   Start of the range
 *      be64   Length
 *
 *  name:  RAMBlock name that these entries are part of
 *  len: Number of page entries
 *  start_list: 'len' addresses
 *  length_list: 'len' addresses
 *
 */
void qemu_savevm_send_postcopy_ram_discard(QEMUFile *f, const char *name,
                                           uint16_t len,
                                           uint64_t *start_list,
                                           uint64_t *length_list)
{
    uint8_t *buf;
    uint16_t tmplen;
    uint16_t t;
    size_t name_len = strlen(name);

    trace_qemu_savevm_send_postcopy_ram_discard(name, len);
    assert(name_len < 256);
    buf = g_malloc0(1 + 1 + name_len + 1 + (8 + 8) * len);
    buf[0] = postcopy_ram_discard_version;
    buf[1] = name_len;
    memcpy(buf + 2, name, name_len);
    tmplen = 2 + name_len;
    buf[tmplen++] = '\0';

    for (t = 0; t < len; t++) {
        stq_be_p(buf + tmplen, start_list[t]);
        tmplen += 8;
        stq_be_p(buf + tmplen, length_list[t]);
        tmplen += 8;
    }
    qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_RAM_DISCARD, tmplen, buf);
    g_free(buf);
}

/* Get the destination into a state where it can receive postcopy data. */
void qemu_savevm_send_postcopy_listen(QEMUFile *f)
{
    trace_savevm_send_postcopy_listen();
    qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_LISTEN, 0, NULL);
}

/* Kick the destination into running */
void qemu_savevm_send_postcopy_run(QEMUFile *f)
{
    trace_savevm_send_postcopy_run();
    qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_RUN, 0, NULL);
}

void qemu_savevm_send_postcopy_resume(QEMUFile *f)
{
    trace_savevm_send_postcopy_resume();
    qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_RESUME, 0, NULL);
}

void qemu_savevm_send_recv_bitmap(QEMUFile *f, char *block_name)
{
    size_t len;
    char buf[256];

    trace_savevm_send_recv_bitmap(block_name);

    buf[0] = len = strlen(block_name);
    memcpy(buf + 1, block_name, len);

    qemu_savevm_command_send(f, MIG_CMD_RECV_BITMAP, len + 1, (uint8_t *)buf);
}

bool qemu_savevm_state_blocked(Error **errp)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->unmigratable) {
            error_setg(errp, "State blocked by non-migratable device '%s'",
                       se->idstr);
            return true;
        }
    }
    return false;
}

void qemu_savevm_non_migratable_list(strList **reasons)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->unmigratable) {
            QAPI_LIST_PREPEND(*reasons,
                              g_strdup_printf("non-migratable device: %s",
                                              se->idstr));
        }
    }
}

void qemu_savevm_state_header(QEMUFile *f)
{
    trace_savevm_state_header();
    qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
    qemu_put_be32(f, QEMU_VM_FILE_VERSION);

    if (migrate_get_current()->send_configuration) {
        qemu_put_byte(f, QEMU_VM_CONFIGURATION);
        vmstate_save_state(f, &vmstate_configuration, &savevm_state, 0);
    }
}
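
/*
 * Stream layout sketch (illustrative): every migration stream therefore
 * begins with
 *
 *     be32  QEMU_VM_FILE_MAGIC
 *     be32  QEMU_VM_FILE_VERSION
 *     byte  QEMU_VM_CONFIGURATION          (if send_configuration)
 *     ...vmstate_configuration body...
 *
 * before the first device section.
 */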

bool qemu_savevm_state_guest_unplug_pending(void)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->dev_unplug_pending &&
            se->vmsd->dev_unplug_pending(se->opaque)) {
            return true;
        }
    }

    return false;
}

void qemu_savevm_state_setup(QEMUFile *f)
{
    MigrationState *ms = migrate_get_current();
    SaveStateEntry *se;
    Error *local_err = NULL;
    int ret;

    ms->vmdesc = json_writer_new(false);
    json_writer_start_object(ms->vmdesc, NULL);
    json_writer_int64(ms->vmdesc, "page_size", qemu_target_page_size());
    json_writer_start_array(ms->vmdesc, "devices");

    trace_savevm_state_setup();
    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->early_setup) {
            ret = vmstate_save(f, se, ms->vmdesc);
            if (ret) {
                qemu_file_set_error(f, ret);
                break;
            }
            continue;
        }

        if (!se->ops || !se->ops->save_setup) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        save_section_header(f, se, QEMU_VM_SECTION_START);

        ret = se->ops->save_setup(f, se->opaque);
        save_section_footer(f, se);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            break;
        }
    }

    if (precopy_notify(PRECOPY_NOTIFY_SETUP, &local_err)) {
        error_report_err(local_err);
    }
}

int qemu_savevm_state_resume_prepare(MigrationState *s)
{
    SaveStateEntry *se;
    int ret;

    trace_savevm_state_resume_prepare();

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->resume_prepare) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        ret = se->ops->resume_prepare(s, se->opaque);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}
1338 */ 1339 if (postcopy && 1340 !(se->ops->has_postcopy && se->ops->has_postcopy(se->opaque))) { 1341 continue; 1342 } 1343 if (qemu_file_rate_limit(f)) { 1344 return 0; 1345 } 1346 trace_savevm_section_start(se->idstr, se->section_id); 1347 1348 save_section_header(f, se, QEMU_VM_SECTION_PART); 1349 1350 ret = se->ops->save_live_iterate(f, se->opaque); 1351 trace_savevm_section_end(se->idstr, se->section_id, ret); 1352 save_section_footer(f, se); 1353 1354 if (ret < 0) { 1355 error_report("failed to save SaveStateEntry with id(name): " 1356 "%d(%s): %d", 1357 se->section_id, se->idstr, ret); 1358 qemu_file_set_error(f, ret); 1359 } 1360 if (ret <= 0) { 1361 /* Do not proceed to the next vmstate before this one reported 1362 completion of the current stage. This serializes the migration 1363 and reduces the probability that a faster changing state is 1364 synchronized over and over again. */ 1365 break; 1366 } 1367 } 1368 return ret; 1369 } 1370 1371 static bool should_send_vmdesc(void) 1372 { 1373 MachineState *machine = MACHINE(qdev_get_machine()); 1374 bool in_postcopy = migration_in_postcopy(); 1375 return !machine->suppress_vmdesc && !in_postcopy; 1376 } 1377 1378 /* 1379 * Calls the save_live_complete_postcopy methods 1380 * causing the last few pages to be sent immediately and doing any associated 1381 * cleanup. 1382 * Note postcopy also calls qemu_savevm_state_complete_precopy to complete 1383 * all the other devices, but that happens at the point we switch to postcopy. 1384 */ 1385 void qemu_savevm_state_complete_postcopy(QEMUFile *f) 1386 { 1387 SaveStateEntry *se; 1388 int ret; 1389 1390 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1391 if (!se->ops || !se->ops->save_live_complete_postcopy) { 1392 continue; 1393 } 1394 if (se->ops->is_active) { 1395 if (!se->ops->is_active(se->opaque)) { 1396 continue; 1397 } 1398 } 1399 trace_savevm_section_start(se->idstr, se->section_id); 1400 /* Section type */ 1401 qemu_put_byte(f, QEMU_VM_SECTION_END); 1402 qemu_put_be32(f, se->section_id); 1403 1404 ret = se->ops->save_live_complete_postcopy(f, se->opaque); 1405 trace_savevm_section_end(se->idstr, se->section_id, ret); 1406 save_section_footer(f, se); 1407 if (ret < 0) { 1408 qemu_file_set_error(f, ret); 1409 return; 1410 } 1411 } 1412 1413 qemu_put_byte(f, QEMU_VM_EOF); 1414 qemu_fflush(f); 1415 } 1416 1417 static 1418 int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy) 1419 { 1420 SaveStateEntry *se; 1421 int ret; 1422 1423 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1424 if (!se->ops || 1425 (in_postcopy && se->ops->has_postcopy && 1426 se->ops->has_postcopy(se->opaque)) || 1427 !se->ops->save_live_complete_precopy) { 1428 continue; 1429 } 1430 1431 if (se->ops->is_active) { 1432 if (!se->ops->is_active(se->opaque)) { 1433 continue; 1434 } 1435 } 1436 trace_savevm_section_start(se->idstr, se->section_id); 1437 1438 save_section_header(f, se, QEMU_VM_SECTION_END); 1439 1440 ret = se->ops->save_live_complete_precopy(f, se->opaque); 1441 trace_savevm_section_end(se->idstr, se->section_id, ret); 1442 save_section_footer(f, se); 1443 if (ret < 0) { 1444 qemu_file_set_error(f, ret); 1445 return -1; 1446 } 1447 } 1448 1449 return 0; 1450 } 1451 1452 int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f, 1453 bool in_postcopy, 1454 bool inactivate_disks) 1455 { 1456 MigrationState *ms = migrate_get_current(); 1457 JSONWriter *vmdesc = ms->vmdesc; 1458 int vmdesc_len; 1459 SaveStateEntry *se; 1460 int ret; 1461 1462 QTAILQ_FOREACH(se, 

int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
                                                    bool in_postcopy,
                                                    bool inactivate_disks)
{
    MigrationState *ms = migrate_get_current();
    JSONWriter *vmdesc = ms->vmdesc;
    int vmdesc_len;
    SaveStateEntry *se;
    int ret;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->early_setup) {
            /* Already saved during qemu_savevm_state_setup(). */
            continue;
        }

        ret = vmstate_save(f, se, vmdesc);
        if (ret) {
            qemu_file_set_error(f, ret);
            return ret;
        }
    }

    if (inactivate_disks) {
        /* Inactivate before sending QEMU_VM_EOF so that the
         * bdrv_activate_all() on the other end won't fail. */
        ret = bdrv_inactivate_all();
        if (ret) {
            error_report("%s: bdrv_inactivate_all() failed (%d)",
                         __func__, ret);
            qemu_file_set_error(f, ret);
            return ret;
        }
    }
    if (!in_postcopy) {
        /* Postcopy stream will still be going */
        qemu_put_byte(f, QEMU_VM_EOF);
    }

    json_writer_end_array(vmdesc);
    json_writer_end_object(vmdesc);
    vmdesc_len = strlen(json_writer_get(vmdesc));

    if (should_send_vmdesc()) {
        qemu_put_byte(f, QEMU_VM_VMDESCRIPTION);
        qemu_put_be32(f, vmdesc_len);
        qemu_put_buffer(f, (uint8_t *)json_writer_get(vmdesc), vmdesc_len);
    }

    /* Free it now to detect any inconsistencies. */
    json_writer_free(vmdesc);
    ms->vmdesc = NULL;

    return 0;
}

int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
                                       bool inactivate_disks)
{
    int ret;
    Error *local_err = NULL;
    bool in_postcopy = migration_in_postcopy();

    if (precopy_notify(PRECOPY_NOTIFY_COMPLETE, &local_err)) {
        error_report_err(local_err);
    }

    trace_savevm_state_complete_precopy();

    cpu_synchronize_all_states();

    if (!in_postcopy || iterable_only) {
        ret = qemu_savevm_state_complete_precopy_iterable(f, in_postcopy);
        if (ret) {
            return ret;
        }
    }

    if (iterable_only) {
        goto flush;
    }

    ret = qemu_savevm_state_complete_precopy_non_iterable(f, in_postcopy,
                                                          inactivate_disks);
    if (ret) {
        return ret;
    }

flush:
    qemu_fflush(f);
    return 0;
}
1548 */ 1549 void qemu_savevm_state_pending_estimate(uint64_t *must_precopy, 1550 uint64_t *can_postcopy) 1551 { 1552 SaveStateEntry *se; 1553 1554 *must_precopy = 0; 1555 *can_postcopy = 0; 1556 1557 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1558 if (!se->ops || !se->ops->state_pending_estimate) { 1559 continue; 1560 } 1561 if (se->ops->is_active) { 1562 if (!se->ops->is_active(se->opaque)) { 1563 continue; 1564 } 1565 } 1566 se->ops->state_pending_estimate(se->opaque, must_precopy, can_postcopy); 1567 } 1568 } 1569 1570 void qemu_savevm_state_pending_exact(uint64_t *must_precopy, 1571 uint64_t *can_postcopy) 1572 { 1573 SaveStateEntry *se; 1574 1575 *must_precopy = 0; 1576 *can_postcopy = 0; 1577 1578 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1579 if (!se->ops || !se->ops->state_pending_exact) { 1580 continue; 1581 } 1582 if (se->ops->is_active) { 1583 if (!se->ops->is_active(se->opaque)) { 1584 continue; 1585 } 1586 } 1587 se->ops->state_pending_exact(se->opaque, must_precopy, can_postcopy); 1588 } 1589 } 1590 1591 void qemu_savevm_state_cleanup(void) 1592 { 1593 SaveStateEntry *se; 1594 Error *local_err = NULL; 1595 1596 if (precopy_notify(PRECOPY_NOTIFY_CLEANUP, &local_err)) { 1597 error_report_err(local_err); 1598 } 1599 1600 trace_savevm_state_cleanup(); 1601 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1602 if (se->ops && se->ops->save_cleanup) { 1603 se->ops->save_cleanup(se->opaque); 1604 } 1605 } 1606 } 1607 1608 static int qemu_savevm_state(QEMUFile *f, Error **errp) 1609 { 1610 int ret; 1611 MigrationState *ms = migrate_get_current(); 1612 MigrationStatus status; 1613 1614 if (migration_is_running(ms->state)) { 1615 error_setg(errp, QERR_MIGRATION_ACTIVE); 1616 return -EINVAL; 1617 } 1618 1619 if (migrate_block()) { 1620 error_setg(errp, "Block migration and snapshots are incompatible"); 1621 return -EINVAL; 1622 } 1623 1624 migrate_init(ms); 1625 memset(&mig_stats, 0, sizeof(mig_stats)); 1626 memset(&compression_counters, 0, sizeof(compression_counters)); 1627 ms->to_dst_file = f; 1628 1629 qemu_mutex_unlock_iothread(); 1630 qemu_savevm_state_header(f); 1631 qemu_savevm_state_setup(f); 1632 qemu_mutex_lock_iothread(); 1633 1634 while (qemu_file_get_error(f) == 0) { 1635 if (qemu_savevm_state_iterate(f, false) > 0) { 1636 break; 1637 } 1638 } 1639 1640 ret = qemu_file_get_error(f); 1641 if (ret == 0) { 1642 qemu_savevm_state_complete_precopy(f, false, false); 1643 ret = qemu_file_get_error(f); 1644 } 1645 qemu_savevm_state_cleanup(); 1646 if (ret != 0) { 1647 error_setg_errno(errp, -ret, "Error while writing VM state"); 1648 } 1649 1650 if (ret != 0) { 1651 status = MIGRATION_STATUS_FAILED; 1652 } else { 1653 status = MIGRATION_STATUS_COMPLETED; 1654 } 1655 migrate_set_state(&ms->state, MIGRATION_STATUS_SETUP, status); 1656 1657 /* f is outer parameter, it should not stay in global migration state after 1658 * this function finished */ 1659 ms->to_dst_file = NULL; 1660 1661 return ret; 1662 } 1663 1664 void qemu_savevm_live_state(QEMUFile *f) 1665 { 1666 /* save QEMU_VM_SECTION_END section */ 1667 qemu_savevm_state_complete_precopy(f, true, false); 1668 qemu_put_byte(f, QEMU_VM_EOF); 1669 } 1670 1671 int qemu_save_device_state(QEMUFile *f) 1672 { 1673 SaveStateEntry *se; 1674 1675 if (!migration_in_colo_state()) { 1676 qemu_put_be32(f, QEMU_VM_FILE_MAGIC); 1677 qemu_put_be32(f, QEMU_VM_FILE_VERSION); 1678 } 1679 cpu_synchronize_all_states(); 1680 1681 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1682 int ret; 1683 1684 if (se->is_ram) { 1685 

int qemu_save_device_state(QEMUFile *f)
{
    SaveStateEntry *se;

    if (!migration_in_colo_state()) {
        qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
        qemu_put_be32(f, QEMU_VM_FILE_VERSION);
    }
    cpu_synchronize_all_states();

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        int ret;

        if (se->is_ram) {
            continue;
        }
        ret = vmstate_save(f, se, NULL);
        if (ret) {
            return ret;
        }
    }

    qemu_put_byte(f, QEMU_VM_EOF);

    return qemu_file_get_error(f);
}

static SaveStateEntry *find_se(const char *idstr, uint32_t instance_id)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!strcmp(se->idstr, idstr) &&
            (instance_id == se->instance_id ||
             instance_id == se->alias_id))
            return se;
        /* Migrating from an older version? */
        if (strstr(se->idstr, idstr) && se->compat) {
            if (!strcmp(se->compat->idstr, idstr) &&
                (instance_id == se->compat->instance_id ||
                 instance_id == se->alias_id))
                return se;
        }
    }
    return NULL;
}

enum LoadVMExitCodes {
    /* Allow a command to quit all layers of nested loadvm loops */
    LOADVM_QUIT     =  1,
};

/* ------ incoming postcopy messages ------ */
/* 'advise' arrives before any transfers just to tell us that a postcopy
 * *might* happen - it might be skipped if precopy transferred everything
 * quickly.
 */
static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis,
                                         uint16_t len)
{
    PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_ADVISE);
    uint64_t remote_pagesize_summary, local_pagesize_summary, remote_tps;
    size_t page_size = qemu_target_page_size();
    Error *local_err = NULL;

    trace_loadvm_postcopy_handle_advise();
    if (ps != POSTCOPY_INCOMING_NONE) {
        error_report("CMD_POSTCOPY_ADVISE in wrong postcopy state (%d)", ps);
        return -1;
    }

    switch (len) {
    case 0:
        if (migrate_postcopy_ram()) {
            error_report("RAM postcopy is enabled but have 0 byte advise");
            return -EINVAL;
        }
        return 0;
    case 8 + 8:
        if (!migrate_postcopy_ram()) {
            error_report("RAM postcopy is disabled but have 16 byte advise");
            return -EINVAL;
        }
        break;
    default:
        error_report("CMD_POSTCOPY_ADVISE invalid length (%d)", len);
        return -EINVAL;
    }

    if (!postcopy_ram_supported_by_host(mis, &local_err)) {
        error_report_err(local_err);
        postcopy_state_set(POSTCOPY_INCOMING_NONE);
        return -1;
    }

    remote_pagesize_summary = qemu_get_be64(mis->from_src_file);
    local_pagesize_summary = ram_pagesize_summary();

    if (remote_pagesize_summary != local_pagesize_summary) {
        /*
         * This detects two potential causes of mismatch:
         *   a) A mismatch in host page sizes
         *      Some combinations of mismatch are probably possible but it
         *      gets a bit more complicated. In particular we need to place
         *      whole host pages on the dest at once, and we need to ensure
         *      that we handle dirtying to make sure we never end up sending
         *      part of a hostpage on its own.
         *   b) The use of different huge page sizes on source/destination
         *      a finer-grained test is performed during RAM block migration,
         *      but this test here gives a nice early, clear failure, and
         *      also fails when passed to an older qemu that doesn't
         *      do huge pages.
         */
        error_report("Postcopy needs matching RAM page sizes (s=%" PRIx64
                                                             " d=%" PRIx64 ")",
                     remote_pagesize_summary, local_pagesize_summary);
        return -1;
    }
1795 */ 1796 error_report("Postcopy needs matching target page sizes (s=%d d=%zd)", 1797 (int)remote_tps, page_size); 1798 return -1; 1799 } 1800 1801 if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_ADVISE, &local_err)) { 1802 error_report_err(local_err); 1803 return -1; 1804 } 1805 1806 if (ram_postcopy_incoming_init(mis)) { 1807 return -1; 1808 } 1809 1810 return 0; 1811 } 1812 1813 /* After postcopy we will be told to throw some pages away since they're 1814 * dirty and will have to be demand fetched. Must happen before CPU is 1815 * started. 1816 * There can be 0..many of these messages, each encoding multiple pages. 1817 */ 1818 static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis, 1819 uint16_t len) 1820 { 1821 int tmp; 1822 char ramid[256]; 1823 PostcopyState ps = postcopy_state_get(); 1824 1825 trace_loadvm_postcopy_ram_handle_discard(); 1826 1827 switch (ps) { 1828 case POSTCOPY_INCOMING_ADVISE: 1829 /* 1st discard */ 1830 tmp = postcopy_ram_prepare_discard(mis); 1831 if (tmp) { 1832 return tmp; 1833 } 1834 break; 1835 1836 case POSTCOPY_INCOMING_DISCARD: 1837 /* Expected state */ 1838 break; 1839 1840 default: 1841 error_report("CMD_POSTCOPY_RAM_DISCARD in wrong postcopy state (%d)", 1842 ps); 1843 return -1; 1844 } 1845 /* We're expecting a 1846 * Version (0) 1847 * a RAM ID string (length byte, name, 0 term) 1848 * then at least 1 16 byte chunk 1849 */ 1850 if (len < (1 + 1 + 1 + 1 + 2 * 8)) { 1851 error_report("CMD_POSTCOPY_RAM_DISCARD invalid length (%d)", len); 1852 return -1; 1853 } 1854 1855 tmp = qemu_get_byte(mis->from_src_file); 1856 if (tmp != postcopy_ram_discard_version) { 1857 error_report("CMD_POSTCOPY_RAM_DISCARD invalid version (%d)", tmp); 1858 return -1; 1859 } 1860 1861 if (!qemu_get_counted_string(mis->from_src_file, ramid)) { 1862 error_report("CMD_POSTCOPY_RAM_DISCARD Failed to read RAMBlock ID"); 1863 return -1; 1864 } 1865 tmp = qemu_get_byte(mis->from_src_file); 1866 if (tmp != 0) { 1867 error_report("CMD_POSTCOPY_RAM_DISCARD missing nil (%d)", tmp); 1868 return -1; 1869 } 1870 1871 len -= 3 + strlen(ramid); 1872 if (len % 16) { 1873 error_report("CMD_POSTCOPY_RAM_DISCARD invalid length (%d)", len); 1874 return -1; 1875 } 1876 trace_loadvm_postcopy_ram_handle_discard_header(ramid, len); 1877 while (len) { 1878 uint64_t start_addr, block_length; 1879 start_addr = qemu_get_be64(mis->from_src_file); 1880 block_length = qemu_get_be64(mis->from_src_file); 1881 1882 len -= 16; 1883 int ret = ram_discard_range(ramid, start_addr, block_length); 1884 if (ret) { 1885 return ret; 1886 } 1887 } 1888 trace_loadvm_postcopy_ram_handle_discard_end(); 1889 1890 return 0; 1891 } 1892 1893 /* 1894 * Triggered by a postcopy_listen command; this thread takes over reading 1895 * the input stream, leaving the main thread free to carry on loading the rest 1896 * of the device state (from RAM). 

/*
 * Triggered by a postcopy_listen command; this thread takes over reading
 * the input stream, leaving the main thread free to carry on loading the rest
 * of the device state (from RAM).
 * (TODO: This could do with being in a postcopy file - but then again it's
 *  just another input loop, not that postcopy specific)
 */
static void *postcopy_ram_listen_thread(void *opaque)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    QEMUFile *f = mis->from_src_file;
    int load_res;
    MigrationState *migr = migrate_get_current();

    object_ref(OBJECT(migr));

    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                                   MIGRATION_STATUS_POSTCOPY_ACTIVE);
    qemu_sem_post(&mis->thread_sync_sem);
    trace_postcopy_ram_listen_thread_start();

    rcu_register_thread();
    /*
     * Because we're a thread and not a coroutine we can't yield
     * in qemu_file, and thus we must be blocking now.
     */
    qemu_file_set_blocking(f, true);
    load_res = qemu_loadvm_state_main(f, mis);

    /*
     * This is tricky, but, mis->from_src_file can change after it
     * returns, when postcopy recovery happened. In the future, we may
     * want a wrapper for the QEMUFile handle.
     */
    f = mis->from_src_file;

    /* And non-blocking again so we don't block in any cleanup */
    qemu_file_set_blocking(f, false);

    trace_postcopy_ram_listen_thread_exit();
    if (load_res < 0) {
        qemu_file_set_error(f, load_res);
        dirty_bitmap_mig_cancel_incoming();
        if (postcopy_state_get() == POSTCOPY_INCOMING_RUNNING &&
            !migrate_postcopy_ram() && migrate_dirty_bitmaps())
        {
            error_report("%s: loadvm failed during postcopy: %d. All states "
                         "are migrated except dirty bitmaps. Some dirty "
                         "bitmaps may be lost, and present migrated dirty "
                         "bitmaps are correctly migrated and valid.",
                         __func__, load_res);
            load_res = 0; /* prevent further exit() */
        } else {
            error_report("%s: loadvm failed: %d", __func__, load_res);
            migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                                           MIGRATION_STATUS_FAILED);
        }
    }
    if (load_res >= 0) {
        /*
         * This looks good, but it's possible that the device loading in the
         * main thread hasn't finished yet, and so we might not be in 'RUN'
         * state yet; wait for the end of the main thread.
         */
        qemu_event_wait(&mis->main_thread_load_event);
    }
    postcopy_ram_incoming_cleanup(mis);

    if (load_res < 0) {
        /*
         * If something went wrong then we have a bad state so exit;
         * depending how far we got it might be possible at this point
         * to leave the guest running and fire MCEs for pages that never
         * arrived as a desperate recovery step.
         */
        rcu_unregister_thread();
        exit(EXIT_FAILURE);
    }
/* After this message we must be able to immediately receive postcopy data */
static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
{
    PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_LISTENING);
    Error *local_err = NULL;

    trace_loadvm_postcopy_handle_listen("enter");

    if (ps != POSTCOPY_INCOMING_ADVISE && ps != POSTCOPY_INCOMING_DISCARD) {
        error_report("CMD_POSTCOPY_LISTEN in wrong postcopy state (%d)", ps);
        return -1;
    }
    if (ps == POSTCOPY_INCOMING_ADVISE) {
        /*
         * A rare case: we entered listen without having to do any discards,
         * so do the setup that's normally done at the time of the 1st discard.
         */
        if (migrate_postcopy_ram()) {
            postcopy_ram_prepare_discard(mis);
        }
    }

    trace_loadvm_postcopy_handle_listen("after discard");

    /*
     * Sensitise RAM - we can now generate requests for blocks that don't
     * exist.  However, at this point the CPU shouldn't be running, and the
     * IO shouldn't be doing anything yet, so we don't actually expect
     * requests.
     */
    if (migrate_postcopy_ram()) {
        if (postcopy_ram_incoming_setup(mis)) {
            postcopy_ram_incoming_cleanup(mis);
            return -1;
        }
    }

    trace_loadvm_postcopy_handle_listen("after uffd");

    if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_LISTEN, &local_err)) {
        error_report_err(local_err);
        return -1;
    }

    mis->have_listen_thread = true;
    postcopy_thread_create(mis, &mis->listen_thread, "postcopy/listen",
                           postcopy_ram_listen_thread, QEMU_THREAD_DETACHED);
    trace_loadvm_postcopy_handle_listen("return");

    return 0;
}
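/*
 * Illustrative ordering (a sketch of the usual source-side layout, assuming
 * RAM postcopy; the source wraps the LISTEN command and the remaining device
 * state in a single MIG_CMD_PACKAGED).  The destination typically sees:
 *
 *   POSTCOPY_ADVISE
 *   POSTCOPY_RAM_DISCARD ...           (0..many)
 *   PACKAGED {
 *       POSTCOPY_LISTEN                <- spawns the listen thread above
 *       remaining device state sections
 *       POSTCOPY_RUN                   <- handled below, returns LOADVM_QUIT
 *   }
 *   postcopy pages, read by the listen thread
 */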
static void loadvm_postcopy_handle_run_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    trace_loadvm_postcopy_handle_run_bh("enter");

    /*
     * TODO: we should move all of this lot into postcopy_ram.c or
     * shared code in migration.c.
     */
    cpu_synchronize_all_post_init();

    trace_loadvm_postcopy_handle_run_bh("after cpu sync");

    qemu_announce_self(&mis->announce_timer, migrate_announce_params());

    trace_loadvm_postcopy_handle_run_bh("after announce");

    /*
     * Make sure all file formats throw away their mutable metadata.
     * If we get an error here, just don't restart the VM yet.
     */
    bdrv_activate_all(&local_err);
    if (local_err) {
        error_report_err(local_err);
        local_err = NULL;
        autostart = false;
    }

    trace_loadvm_postcopy_handle_run_bh("after invalidate cache");

    dirty_bitmap_mig_before_vm_start();

    if (autostart) {
        /* Hold onto your hats, starting the CPU */
        vm_start();
    } else {
        /* Leave it paused and let management decide when to start the CPU */
        runstate_set(RUN_STATE_PAUSED);
    }

    qemu_bh_delete(mis->bh);

    trace_loadvm_postcopy_handle_run_bh("return");
}

/* After all discards we can start running and asking for pages */
static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
{
    PostcopyState ps = postcopy_state_get();

    trace_loadvm_postcopy_handle_run();
    if (ps != POSTCOPY_INCOMING_LISTENING) {
        error_report("CMD_POSTCOPY_RUN in wrong postcopy state (%d)", ps);
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_RUNNING);
    mis->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, mis);
    qemu_bh_schedule(mis->bh);

    /*
     * We need to finish reading the stream from the package
     * and also stop reading anything more from the stream that loaded the
     * package (since it's now being read by the listener thread).
     * LOADVM_QUIT will quit all the layers of nested loadvm loops.
     */
    return LOADVM_QUIT;
}

/* Must be called with page_request_mutex held */
static gboolean postcopy_sync_page_req(gpointer key, gpointer value,
                                       gpointer data)
{
    MigrationIncomingState *mis = data;
    void *host_addr = (void *) key;
    ram_addr_t rb_offset;
    RAMBlock *rb;
    int ret;

    rb = qemu_ram_block_from_host(host_addr, true, &rb_offset);
    if (!rb) {
        /*
         * This should _never_ happen.  However be nice for a migrating VM
         * not to crash/assert.  Post an error (intentionally not using
         * *_once, because we do want to see all the illegal addresses; the
         * guest can never trigger this, so we're safe) and move on to the
         * next entry.
         */
        error_report("%s: illegal host addr %p", __func__, host_addr);
        /* Try the next entry */
        return FALSE;
    }

    ret = migrate_send_rp_message_req_pages(mis, rb, rb_offset);
    if (ret) {
        /* Please refer to the comment above. */
        error_report("%s: send rp message failed for addr %p",
                     __func__, host_addr);
        return FALSE;
    }

    trace_postcopy_page_req_sync(host_addr);

    return FALSE;
}

static void migrate_send_rp_req_pages_pending(MigrationIncomingState *mis)
{
    WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
        g_tree_foreach(mis->page_requested, postcopy_sync_page_req, mis);
    }
}
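/*
 * Sketch of the recovery handshake implemented by the handler below
 * (assuming a single vCPU thread that faulted on page P when the network
 * dropped):
 *
 *   destination                            source
 *   -----------                            ------
 *   POSTCOPY_RESUME received
 *   RESUME_ACK          ------------------>
 *   re-request page P   ------------------>  (its request queue was lost)
 *                       <------------------  page P
 *   faulted vCPU thread wakes up
 *
 * Without the re-request, the vCPU could stay blocked until the background
 * copy happened to reach P.
 */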
static int loadvm_postcopy_handle_resume(MigrationIncomingState *mis)
{
    if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: illegal resume received", __func__);
        /* Don't fail the whole load just because of this */
        return 0;
    }

    /*
     * Reset last_rb before we resend any page request to the source, since
     * the source should have reset it already.
     */
    mis->last_rb = NULL;

    /* This means the source VM is ready to resume the postcopy migration. */
    migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_RECOVER,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    trace_loadvm_postcopy_handle_resume();

    /* Tell source that "we are ready" */
    migrate_send_rp_resume_ack(mis, MIGRATION_RESUME_ACK_VALUE);

    /*
     * After a postcopy recovery, the source should have lost the postcopy
     * queue, or potentially the requested pages could have been lost during
     * the network down phase.  Let's re-sync with the source VM by re-sending
     * all the pending pages that we eagerly need, so these threads won't get
     * blocked too long due to the recovery.
     *
     * Without this procedure, the faulted destination VM threads (waiting
     * for page requests right before the postcopy was interrupted) can keep
     * hanging until the pages are sent by the source during the background
     * copying of pages, or until another thread happens to fault on the
     * same address.
     */
    migrate_send_rp_req_pages_pending(mis);

    /*
     * It's time to switch state and release the fault thread to continue
     * servicing page faults.  Note that this should happen explicitly after
     * the above call to migrate_send_rp_req_pages_pending().  In short:
     * migrate_send_rp_message_req_pages() is not thread safe, yet.
     */
    qemu_sem_post(&mis->postcopy_pause_sem_fault);

    if (migrate_postcopy_preempt()) {
        /*
         * The preempt channel is created asynchronously, so wait here
         * until it exists.
         */
        qemu_sem_wait(&mis->postcopy_qemufile_dst_done);
        assert(mis->postcopy_qemufile_dst);
        /* Kick the fast ram load thread too */
        qemu_sem_post(&mis->postcopy_pause_sem_fast_load);
    }

    return 0;
}

/**
 * Immediately following this command is a blob of data containing an embedded
 * chunk of migration stream; read it and load it.  The length of the blob is
 * read off the wire, not passed in.
 *
 * @mis: Incoming state
 *
 * Returns: Negative values on error
 */
static int loadvm_handle_cmd_packaged(MigrationIncomingState *mis)
{
    int ret;
    size_t length;
    QIOChannelBuffer *bioc;

    length = qemu_get_be32(mis->from_src_file);
    trace_loadvm_handle_cmd_packaged(length);

    if (length > MAX_VM_CMD_PACKAGED_SIZE) {
        error_report("Unreasonably large packaged state: %zu", length);
        return -1;
    }

    bioc = qio_channel_buffer_new(length);
    qio_channel_set_name(QIO_CHANNEL(bioc), "migration-loadvm-buffer");
    ret = qemu_get_buffer(mis->from_src_file,
                          bioc->data,
                          length);
    if (ret != length) {
        object_unref(OBJECT(bioc));
        error_report("CMD_PACKAGED: Buffer receive fail ret=%d length=%zu",
                     ret, length);
        return (ret < 0) ? ret : -EAGAIN;
    }
    bioc->usage += length;
    trace_loadvm_handle_cmd_packaged_received(ret);

    QEMUFile *packf = qemu_file_new_input(QIO_CHANNEL(bioc));

    ret = qemu_loadvm_state_main(packf, mis);
    trace_loadvm_handle_cmd_packaged_main(ret);
    qemu_fclose(packf);
    object_unref(OBJECT(bioc));

    return ret;
}
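/*
 * Illustrative framing (a sketch, derived from the handler above and the
 * command dispatcher below): a PACKAGED command whose embedded stream is
 * 0x1000 bytes long arrives as
 *
 *   byte  QEMU_VM_COMMAND
 *   be16  MIG_CMD_PACKAGED
 *   be16  4                  (fixed argument length from mig_cmd_args[])
 *   be32  0x00001000         (the blob length read above)
 *   ...   0x1000 bytes of nested migration stream
 *
 * The nested stream is then fed back through qemu_loadvm_state_main() via
 * an in-memory QIOChannelBuffer.
 */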
/*
 * Handle a request from the source for the received-pages bitmap
 * (recved_bitmap) of a RAMBlock on the destination.
 *
 * Payload format:
 *
 * len (1 byte) + ramblock_name (<255 bytes)
 */
static int loadvm_handle_recv_bitmap(MigrationIncomingState *mis,
                                     uint16_t len)
{
    QEMUFile *file = mis->from_src_file;
    RAMBlock *rb;
    char block_name[256];
    size_t cnt;

    cnt = qemu_get_counted_string(file, block_name);
    if (!cnt) {
        error_report("%s: failed to read block name", __func__);
        return -EINVAL;
    }

    /* Validate before using the data */
    if (qemu_file_get_error(file)) {
        return qemu_file_get_error(file);
    }

    if (len != cnt + 1) {
        error_report("%s: invalid payload length (%d)", __func__, len);
        return -EINVAL;
    }

    rb = qemu_ram_block_by_name(block_name);
    if (!rb) {
        error_report("%s: block '%s' not found", __func__, block_name);
        return -EINVAL;
    }

    migrate_send_rp_recv_bitmap(mis, block_name);

    trace_loadvm_handle_recv_bitmap(block_name);

    return 0;
}
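/*
 * Worked example for the RECV_BITMAP payload above (illustrative): for a
 * RAMBlock named "pc.ram", cnt == 6, so the command's len must be
 * 6 + 1 == 7: one length byte followed by the six name bytes, with no
 * terminating NUL.
 */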
static int loadvm_process_enable_colo(MigrationIncomingState *mis)
{
    int ret = migration_incoming_enable_colo();

    if (!ret) {
        ret = colo_init_ram_cache();
        if (ret) {
            migration_incoming_disable_colo();
        }
    }
    return ret;
}

/*
 * Process an incoming 'QEMU_VM_COMMAND'
 * Returns:
 *   0            just a normal return
 *   LOADVM_QUIT  all good, but exit the loop
 *   <0           error
 */
static int loadvm_process_command(QEMUFile *f)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    uint16_t cmd;
    uint16_t len;
    uint32_t tmp32;

    cmd = qemu_get_be16(f);
    len = qemu_get_be16(f);

    /* Check validity before continuing to process the commands */
    if (qemu_file_get_error(f)) {
        return qemu_file_get_error(f);
    }

    if (cmd >= MIG_CMD_MAX || cmd == MIG_CMD_INVALID) {
        error_report("MIG_CMD 0x%x unknown (len 0x%x)", cmd, len);
        return -EINVAL;
    }

    trace_loadvm_process_command(mig_cmd_args[cmd].name, len);

    if (mig_cmd_args[cmd].len != -1 && mig_cmd_args[cmd].len != len) {
        error_report("%s received with bad length - expecting %zu, got %d",
                     mig_cmd_args[cmd].name,
                     (size_t)mig_cmd_args[cmd].len, len);
        return -ERANGE;
    }

    switch (cmd) {
    case MIG_CMD_OPEN_RETURN_PATH:
        if (mis->to_src_file) {
            error_report("CMD_OPEN_RETURN_PATH called when RP already open");
            /* Not really a problem, so don't give up */
            return 0;
        }
        mis->to_src_file = qemu_file_get_return_path(f);
        if (!mis->to_src_file) {
            error_report("CMD_OPEN_RETURN_PATH failed");
            return -1;
        }
        break;

    case MIG_CMD_PING:
        tmp32 = qemu_get_be32(f);
        trace_loadvm_process_command_ping(tmp32);
        if (!mis->to_src_file) {
            error_report("CMD_PING (0x%x) received with no return path",
                         tmp32);
            return -1;
        }
        migrate_send_rp_pong(mis, tmp32);
        break;

    case MIG_CMD_PACKAGED:
        return loadvm_handle_cmd_packaged(mis);

    case MIG_CMD_POSTCOPY_ADVISE:
        return loadvm_postcopy_handle_advise(mis, len);

    case MIG_CMD_POSTCOPY_LISTEN:
        return loadvm_postcopy_handle_listen(mis);

    case MIG_CMD_POSTCOPY_RUN:
        return loadvm_postcopy_handle_run(mis);

    case MIG_CMD_POSTCOPY_RAM_DISCARD:
        return loadvm_postcopy_ram_handle_discard(mis, len);

    case MIG_CMD_POSTCOPY_RESUME:
        return loadvm_postcopy_handle_resume(mis);

    case MIG_CMD_RECV_BITMAP:
        return loadvm_handle_recv_bitmap(mis, len);

    case MIG_CMD_ENABLE_COLO:
        return loadvm_process_enable_colo(mis);
    }

    return 0;
}

/*
 * Read a footer off the wire and check that it matches the expected section
 *
 * Returns: true if the footer was good
 *          false if there is a problem (and calls error_report to say why)
 */
static bool check_section_footer(QEMUFile *f, SaveStateEntry *se)
{
    int ret;
    uint8_t read_mark;
    uint32_t read_section_id;

    if (!migrate_get_current()->send_section_footer) {
        /* No footer to check */
        return true;
    }

    read_mark = qemu_get_byte(f);

    ret = qemu_file_get_error(f);
    if (ret) {
        error_report("%s: Read section footer failed: %d",
                     __func__, ret);
        return false;
    }

    if (read_mark != QEMU_VM_SECTION_FOOTER) {
        error_report("Missing section footer for %s", se->idstr);
        return false;
    }

    read_section_id = qemu_get_be32(f);
    if (read_section_id != se->load_section_id) {
        error_report("Mismatched section id in footer for %s -"
                     " read 0x%x expected 0x%x",
                     se->idstr, read_section_id, se->load_section_id);
        return false;
    }

    /* All good */
    return true;
}
static int
qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis)
{
    uint32_t instance_id, version_id, section_id;
    SaveStateEntry *se;
    char idstr[256];
    int ret;

    /* Read section start */
    section_id = qemu_get_be32(f);
    if (!qemu_get_counted_string(f, idstr)) {
        error_report("Unable to read ID string for section %u",
                     section_id);
        return -EINVAL;
    }
    instance_id = qemu_get_be32(f);
    version_id = qemu_get_be32(f);

    ret = qemu_file_get_error(f);
    if (ret) {
        error_report("%s: Failed to read instance/version ID: %d",
                     __func__, ret);
        return ret;
    }

    trace_qemu_loadvm_state_section_startfull(section_id, idstr,
                                              instance_id, version_id);
    /* Find savevm section */
    se = find_se(idstr, instance_id);
    if (se == NULL) {
        error_report("Unknown savevm section or instance '%s' %"PRIu32". "
                     "Make sure that your current VM setup matches your "
                     "saved VM setup, including any hotplugged devices",
                     idstr, instance_id);
        return -EINVAL;
    }

    /* Validate version */
    if (version_id > se->version_id) {
        error_report("savevm: unsupported version %d for '%s' v%d",
                     version_id, idstr, se->version_id);
        return -EINVAL;
    }
    se->load_version_id = version_id;
    se->load_section_id = section_id;

    /* Under Xen, RAM is migrated by the toolstack, not via this stream */
    if (xen_enabled() && se->is_ram) {
        error_report("loadvm: %s RAM loading not allowed on Xen", idstr);
        return -EINVAL;
    }

    ret = vmstate_load(f, se);
    if (ret < 0) {
        error_report("error while loading state for instance 0x%"PRIx32" of"
                     " device '%s'", instance_id, idstr);
        return ret;
    }
    if (!check_section_footer(f, se)) {
        return -EINVAL;
    }

    return 0;
}

static int
qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
{
    uint32_t section_id;
    SaveStateEntry *se;
    int ret;

    section_id = qemu_get_be32(f);

    ret = qemu_file_get_error(f);
    if (ret) {
        error_report("%s: Failed to read section ID: %d",
                     __func__, ret);
        return ret;
    }

    trace_qemu_loadvm_state_section_partend(section_id);
    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->load_section_id == section_id) {
            break;
        }
    }
    if (se == NULL) {
        error_report("Unknown savevm section %d", section_id);
        return -EINVAL;
    }

    ret = vmstate_load(f, se);
    if (ret < 0) {
        error_report("error while loading state section id %d(%s)",
                     section_id, se->idstr);
        return ret;
    }
    if (!check_section_footer(f, se)) {
        return -EINVAL;
    }

    return 0;
}

static int qemu_loadvm_state_header(QEMUFile *f)
{
    unsigned int v;
    int ret;

    v = qemu_get_be32(f);
    if (v != QEMU_VM_FILE_MAGIC) {
        error_report("Not a migration stream");
        return -EINVAL;
    }

    v = qemu_get_be32(f);
    if (v == QEMU_VM_FILE_VERSION_COMPAT) {
        error_report("SaveVM v2 format is obsolete and doesn't work anymore");
        return -ENOTSUP;
    }
    if (v != QEMU_VM_FILE_VERSION) {
        error_report("Unsupported migration stream version");
        return -ENOTSUP;
    }

    if (migrate_get_current()->send_configuration) {
        if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
            error_report("Configuration section missing");
            qemu_loadvm_state_cleanup();
            return -EINVAL;
        }
        ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0);

        if (ret) {
            qemu_loadvm_state_cleanup();
            return ret;
        }
    }
    return 0;
}

static int qemu_loadvm_state_setup(QEMUFile *f)
{
    SaveStateEntry *se;
    int ret;

    trace_loadvm_state_setup();
    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->load_setup) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }

        ret = se->ops->load_setup(f, se->opaque);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            error_report("Load state of device %s failed", se->idstr);
            return ret;
        }
    }
    return 0;
}

void qemu_loadvm_state_cleanup(void)
{
    SaveStateEntry *se;

    trace_loadvm_state_cleanup();
    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->ops && se->ops->load_cleanup) {
            se->ops->load_cleanup(se->opaque);
        }
    }
}

/* Return true if we should continue the migration, or false. */
static bool postcopy_pause_incoming(MigrationIncomingState *mis)
{
    int i;

    trace_postcopy_pause_incoming();

    assert(migrate_postcopy_ram());

    /*
     * Unregistering yank with either the from-src or to-src file would
     * work, since the ioc behind them is the same.
     */
    migration_ioc_unregister_yank_from_file(mis->from_src_file);

    assert(mis->from_src_file);
    qemu_file_shutdown(mis->from_src_file);
    qemu_fclose(mis->from_src_file);
    mis->from_src_file = NULL;

    assert(mis->to_src_file);
    qemu_file_shutdown(mis->to_src_file);
    qemu_mutex_lock(&mis->rp_mutex);
    qemu_fclose(mis->to_src_file);
    mis->to_src_file = NULL;
    qemu_mutex_unlock(&mis->rp_mutex);

    /*
     * NOTE: this must happen before resetting the PostcopyTmpPages below;
     * otherwise resetting those fields races with the fast load thread,
     * which may still be accessing them in parallel.
     */
    if (mis->postcopy_qemufile_dst) {
        qemu_file_shutdown(mis->postcopy_qemufile_dst);
        /* Take the mutex to make sure the fast ram load thread has halted */
        qemu_mutex_lock(&mis->postcopy_prio_thread_mutex);
        migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst);
        qemu_fclose(mis->postcopy_qemufile_dst);
        mis->postcopy_qemufile_dst = NULL;
        qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex);
    }

    migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                      MIGRATION_STATUS_POSTCOPY_PAUSED);

    /* Notify the fault thread for the invalidated file handle */
    postcopy_fault_thread_notify(mis);

    /*
     * If the network is interrupted, any temp pages we received will be
     * useless because we didn't mark them as "received" in receivedmap.
     * After a proper recovery later (which will sync the src dirty bitmap
     * with receivedmap on dest) these cached small pages will be resent.
     */
    for (i = 0; i < mis->postcopy_channels; i++) {
        postcopy_temp_page_reset(&mis->postcopy_tmp_pages[i]);
    }

    error_report("Detected IO failure for postcopy. "
                 "Migration paused.");

    while (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
        qemu_sem_wait(&mis->postcopy_pause_sem_dst);
    }

    trace_postcopy_pause_incoming_continued();

    return true;
}
int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis)
{
    uint8_t section_type;
    int ret = 0;

retry:
    while (true) {
        section_type = qemu_get_byte(f);

        ret = qemu_file_get_error_obj_any(f, mis->postcopy_qemufile_dst, NULL);
        if (ret) {
            break;
        }

        trace_qemu_loadvm_state_section(section_type);
        switch (section_type) {
        case QEMU_VM_SECTION_START:
        case QEMU_VM_SECTION_FULL:
            ret = qemu_loadvm_section_start_full(f, mis);
            if (ret < 0) {
                goto out;
            }
            break;
        case QEMU_VM_SECTION_PART:
        case QEMU_VM_SECTION_END:
            ret = qemu_loadvm_section_part_end(f, mis);
            if (ret < 0) {
                goto out;
            }
            break;
        case QEMU_VM_COMMAND:
            ret = loadvm_process_command(f);
            trace_qemu_loadvm_state_section_command(ret);
            if ((ret < 0) || (ret == LOADVM_QUIT)) {
                goto out;
            }
            break;
        case QEMU_VM_EOF:
            /* This is the end of migration */
            goto out;
        default:
            error_report("Unknown savevm section type %d", section_type);
            ret = -EINVAL;
            goto out;
        }
    }

out:
    if (ret < 0) {
        qemu_file_set_error(f, ret);

        /* Cancel bitmaps incoming regardless of recovery */
        dirty_bitmap_mig_cancel_incoming();

        /*
         * If we are in the middle of an active postcopy, pause instead of
         * bailing out, to at least keep the VM's dirty data.  Note that the
         * POSTCOPY_INCOMING_LISTENING stage is still not enough, since at
         * that point we're still receiving device state and haven't yet
         * started the VM on the destination.
         *
         * Only RAM postcopy supports recovery.  Still, if RAM postcopy is
         * enabled, a cancelled dirty-bitmap postcopy will not affect RAM
         * postcopy recovery.
         */
        if (postcopy_state_get() == POSTCOPY_INCOMING_RUNNING &&
            migrate_postcopy_ram() && postcopy_pause_incoming(mis)) {
            /* Reset f to point to the newly created channel */
            f = mis->from_src_file;
            goto retry;
        }
    }
    return ret;
}

int qemu_loadvm_state(QEMUFile *f)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    Error *local_err = NULL;
    int ret;

    if (qemu_savevm_state_blocked(&local_err)) {
        error_report_err(local_err);
        return -EINVAL;
    }

    ret = qemu_loadvm_state_header(f);
    if (ret) {
        return ret;
    }

    if (qemu_loadvm_state_setup(f) != 0) {
        return -EINVAL;
    }

    cpu_synchronize_all_pre_loadvm();

    ret = qemu_loadvm_state_main(f, mis);
    qemu_event_set(&mis->main_thread_load_event);

    trace_qemu_loadvm_state_post_main(ret);

    if (mis->have_listen_thread) {
        /* Listen thread still going, can't clean up yet */
        return ret;
    }

    if (ret == 0) {
        ret = qemu_file_get_error(f);
    }

    /*
     * Try to read in the VMDESC section as well, so that dumping tools that
     * intercept our migration stream have the chance to see it.
     */

    /*
     * We've got to be careful; if we don't read the data and just shut the
     * fd then the sender can get an error if we close while it's still
     * sending.  We also mustn't read data that isn't there; some transports
     * (RDMA) will stall waiting for that data when the source has already
     * closed.
     */
    if (ret == 0 && should_send_vmdesc()) {
        uint8_t *buf;
        uint32_t size;
        uint8_t section_type = qemu_get_byte(f);

        if (section_type != QEMU_VM_VMDESCRIPTION) {
            error_report("Expected vmdescription section, but got %d",
                         section_type);
            /*
             * It doesn't seem worth failing at this point since
             * we apparently have an otherwise valid VM state
             */
        } else {
            buf = g_malloc(0x1000);
            size = qemu_get_be32(f);

            while (size > 0) {
                uint32_t read_chunk = MIN(size, 0x1000);
                qemu_get_buffer(f, buf, read_chunk);
                size -= read_chunk;
            }
            g_free(buf);
        }
    }

    qemu_loadvm_state_cleanup();
    cpu_synchronize_all_post_init();

    return ret;
}

int qemu_load_device_state(QEMUFile *f)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    int ret;

    /* Load QEMU_VM_SECTION_FULL section */
    ret = qemu_loadvm_state_main(f, mis);
    if (ret < 0) {
        error_report("Failed to load device state: %d", ret);
        return ret;
    }

    cpu_synchronize_all_post_init();
    return 0;
}
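/*
 * Illustrative caller of save_snapshot() below (a sketch of what the HMP
 * "savevm" command roughly does; exact call sites may differ):
 *
 *   Error *err = NULL;
 *   if (!save_snapshot("tag0", true, NULL, false, NULL, &err)) {
 *       error_report_err(err);
 *   }
 *
 * i.e. overwrite any existing "tag0", pick the default vmstate block
 * device, and snapshot all snapshottable devices.
 */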
bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
                   bool has_devices, strList *devices, Error **errp)
{
    BlockDriverState *bs;
    QEMUSnapshotInfo sn1, *sn = &sn1;
    int ret = -1, ret2;
    QEMUFile *f;
    int saved_vm_running;
    uint64_t vm_state_size;
    g_autoptr(GDateTime) now = g_date_time_new_now_local();
    AioContext *aio_context;

    GLOBAL_STATE_CODE();

    if (migration_is_blocked(errp)) {
        return false;
    }

    if (!replay_can_snapshot()) {
        error_setg(errp, "Record/replay does not allow making a snapshot "
                   "right now. Try again later.");
        return false;
    }

    if (!bdrv_all_can_snapshot(has_devices, devices, errp)) {
        return false;
    }

    /* Delete old snapshots of the same name */
    if (name) {
        if (overwrite) {
            if (bdrv_all_delete_snapshot(name, has_devices,
                                         devices, errp) < 0) {
                return false;
            }
        } else {
            ret2 = bdrv_all_has_snapshot(name, has_devices, devices, errp);
            if (ret2 < 0) {
                return false;
            }
            if (ret2 == 1) {
                error_setg(errp,
                           "Snapshot '%s' already exists in one or more devices",
                           name);
                return false;
            }
        }
    }

    bs = bdrv_all_find_vmstate_bs(vmstate, has_devices, devices, errp);
    if (bs == NULL) {
        return false;
    }
    aio_context = bdrv_get_aio_context(bs);

    saved_vm_running = runstate_is_running();

    ret = global_state_store();
    if (ret) {
        error_setg(errp, "Error saving global state");
        return false;
    }
    vm_stop(RUN_STATE_SAVE_VM);

    bdrv_drain_all_begin();

    aio_context_acquire(aio_context);

    memset(sn, 0, sizeof(*sn));

    /* fill auxiliary fields */
    sn->date_sec = g_date_time_to_unix(now);
    sn->date_nsec = g_date_time_get_microsecond(now) * 1000;
    sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    if (replay_mode != REPLAY_MODE_NONE) {
        sn->icount = replay_get_current_icount();
    } else {
        sn->icount = -1ULL;
    }

    if (name) {
        pstrcpy(sn->name, sizeof(sn->name), name);
    } else {
        g_autofree char *autoname = g_date_time_format(now, "vm-%Y%m%d%H%M%S");
        pstrcpy(sn->name, sizeof(sn->name), autoname);
    }

    /* save the VM state */
    f = qemu_fopen_bdrv(bs, 1);
    if (!f) {
        error_setg(errp, "Could not open VM state file");
        goto the_end;
    }
    ret = qemu_savevm_state(f, errp);
    vm_state_size = qemu_file_total_transferred(f);
    ret2 = qemu_fclose(f);
    if (ret < 0) {
        goto the_end;
    }
    if (ret2 < 0) {
        ret = ret2;
        goto the_end;
    }

    /*
     * The bdrv_all_create_snapshot() call that follows acquires the
     * AioContext for itself.  BDRV_POLL_WHILE() does not support nested
     * locking because it only releases the lock once.  Therefore
     * synchronous I/O will deadlock unless we release the AioContext
     * before bdrv_all_create_snapshot().
     */
    aio_context_release(aio_context);
    aio_context = NULL;

    ret = bdrv_all_create_snapshot(sn, bs, vm_state_size,
                                   has_devices, devices, errp);
    if (ret < 0) {
        bdrv_all_delete_snapshot(sn->name, has_devices, devices, NULL);
        goto the_end;
    }

    ret = 0;

 the_end:
    if (aio_context) {
        aio_context_release(aio_context);
    }

    bdrv_drain_all_end();

    if (saved_vm_running) {
        vm_start();
    }
    return ret == 0;
}
void qmp_xen_save_devices_state(const char *filename, bool has_live, bool live,
                                Error **errp)
{
    QEMUFile *f;
    QIOChannelFile *ioc;
    int saved_vm_running;
    int ret;

    if (!has_live) {
        /*
         * live defaults to true so that old versions of the Xen tool stack
         * can have a successful live migration.
         */
        live = true;
    }

    saved_vm_running = runstate_is_running();
    vm_stop(RUN_STATE_SAVE_VM);
    global_state_store_running();

    ioc = qio_channel_file_new_path(filename, O_WRONLY | O_CREAT | O_TRUNC,
                                    0660, errp);
    if (!ioc) {
        goto the_end;
    }
    qio_channel_set_name(QIO_CHANNEL(ioc), "migration-xen-save-state");
    f = qemu_file_new_output(QIO_CHANNEL(ioc));
    object_unref(OBJECT(ioc));
    ret = qemu_save_device_state(f);
    if (ret < 0 || qemu_fclose(f) < 0) {
        error_setg(errp, QERR_IO_ERROR);
    } else {
        /*
         * libxl calls the QMP command "stop" before calling
         * "xen-save-devices-state" and in case of migration failure, libxl
         * would call "cont".
         * So call bdrv_inactivate_all (release locks) here to let the other
         * side of the migration take control of the images.
         */
        if (live && !saved_vm_running) {
            ret = bdrv_inactivate_all();
            if (ret) {
                error_setg(errp, "%s: bdrv_inactivate_all() failed (%d)",
                           __func__, ret);
            }
        }
    }

 the_end:
    if (saved_vm_running) {
        vm_start();
    }
}

void qmp_xen_load_devices_state(const char *filename, Error **errp)
{
    QEMUFile *f;
    QIOChannelFile *ioc;
    int ret;

    /*
     * The guest must be paused before loading the device state; the RAM
     * state will already have been loaded by xc.
     */
    if (runstate_is_running()) {
        error_setg(errp, "Cannot update device state while vm is running");
        return;
    }
    vm_stop(RUN_STATE_RESTORE_VM);

    ioc = qio_channel_file_new_path(filename, O_RDONLY | O_BINARY, 0, errp);
    if (!ioc) {
        return;
    }
    qio_channel_set_name(QIO_CHANNEL(ioc), "migration-xen-load-state");
    f = qemu_file_new_input(QIO_CHANNEL(ioc));
    object_unref(OBJECT(ioc));

    ret = qemu_loadvm_state(f);
    qemu_fclose(f);
    if (ret < 0) {
        error_setg(errp, QERR_IO_ERROR);
    }
    migration_incoming_state_destroy();
}
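/*
 * Illustrative caller of load_snapshot() below (a sketch of what the HMP
 * "loadvm" command roughly does; exact call sites may differ):
 *
 *   bool saved_vm_running = runstate_is_running();
 *   Error *err = NULL;
 *
 *   vm_stop(RUN_STATE_RESTORE_VM);
 *   if (load_snapshot("tag0", NULL, false, NULL, &err) && saved_vm_running) {
 *       vm_start();
 *   }
 */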
bool load_snapshot(const char *name, const char *vmstate,
                   bool has_devices, strList *devices, Error **errp)
{
    BlockDriverState *bs_vm_state;
    QEMUSnapshotInfo sn;
    QEMUFile *f;
    int ret;
    AioContext *aio_context;
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!bdrv_all_can_snapshot(has_devices, devices, errp)) {
        return false;
    }
    ret = bdrv_all_has_snapshot(name, has_devices, devices, errp);
    if (ret < 0) {
        return false;
    }
    if (ret == 0) {
        error_setg(errp, "Snapshot '%s' does not exist in one or more devices",
                   name);
        return false;
    }

    bs_vm_state = bdrv_all_find_vmstate_bs(vmstate, has_devices, devices, errp);
    if (!bs_vm_state) {
        return false;
    }
    aio_context = bdrv_get_aio_context(bs_vm_state);

    /* Don't even try to load empty VM states */
    aio_context_acquire(aio_context);
    ret = bdrv_snapshot_find(bs_vm_state, &sn, name);
    aio_context_release(aio_context);
    if (ret < 0) {
        return false;
    } else if (sn.vm_state_size == 0) {
        error_setg(errp, "This is a disk-only snapshot. Revert to it "
                   "offline using qemu-img");
        return false;
    }

    /*
     * Flush the record/replay queue. The VM state is about to change,
     * so we no longer need to preserve its consistency.
     */
    replay_flush_events();

    /* Flush all IO requests so they don't interfere with the new state. */
    bdrv_drain_all_begin();

    ret = bdrv_all_goto_snapshot(name, has_devices, devices, errp);
    if (ret < 0) {
        goto err_drain;
    }

    /* restore the VM state */
    f = qemu_fopen_bdrv(bs_vm_state, 0);
    if (!f) {
        error_setg(errp, "Could not open VM state file");
        goto err_drain;
    }

    qemu_system_reset(SHUTDOWN_CAUSE_SNAPSHOT_LOAD);
    mis->from_src_file = f;

    if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
        ret = -EINVAL;
        goto err_drain;
    }
    aio_context_acquire(aio_context);
    ret = qemu_loadvm_state(f);
    migration_incoming_state_destroy();
    aio_context_release(aio_context);

    bdrv_drain_all_end();

    if (ret < 0) {
        error_setg(errp, "Error %d while loading VM state", ret);
        return false;
    }

    return true;

err_drain:
    bdrv_drain_all_end();
    return false;
}

bool delete_snapshot(const char *name, bool has_devices,
                     strList *devices, Error **errp)
{
    if (!bdrv_all_can_snapshot(has_devices, devices, errp)) {
        return false;
    }

    if (bdrv_all_delete_snapshot(name, has_devices, devices, errp) < 0) {
        return false;
    }

    return true;
}

void vmstate_register_ram(MemoryRegion *mr, DeviceState *dev)
{
    qemu_ram_set_idstr(mr->ram_block,
                       memory_region_name(mr), dev);
    qemu_ram_set_migratable(mr->ram_block);
}

void vmstate_unregister_ram(MemoryRegion *mr, DeviceState *dev)
{
    qemu_ram_unset_idstr(mr->ram_block);
    qemu_ram_unset_migratable(mr->ram_block);
}

void vmstate_register_ram_global(MemoryRegion *mr)
{
    vmstate_register_ram(mr, NULL);
}

bool vmstate_check_only_migratable(const VMStateDescription *vmsd)
{
    /* This check is only needed when --only-migratable is specified */
    if (!only_migratable) {
        return true;
    }

    return !(vmsd && vmsd->unmigratable);
}

typedef struct SnapshotJob {
    Job common;
    char *tag;
    char *vmstate;
    strList *devices;
    Coroutine *co;
    Error **errp;
    bool ret;
} SnapshotJob;

static void qmp_snapshot_job_free(SnapshotJob *s)
{
    g_free(s->tag);
    g_free(s->vmstate);
    qapi_free_strList(s->devices);
}
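/*
 * Control flow shared by the three snapshot jobs below (a sketch):
 *
 *   job_start()
 *     -> snapshot_*_job_run()          (coroutine context)
 *          aio_bh_schedule_oneshot()   (defer the work to a main-loop BH)
 *          qemu_coroutine_yield()
 *     -> snapshot_*_job_bh()           (main loop, outside coroutine context)
 *          save_snapshot()/load_snapshot()/delete_snapshot()
 *          aio_co_wake()               (resume the job coroutine)
 *     -> run() returns 0 on success, or -1 with errp set
 *
 * The work is bounced to a BH because the snapshot functions aren't meant
 * to run in coroutine context.
 */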
static void snapshot_load_job_bh(void *opaque)
{
    Job *job = opaque;
    SnapshotJob *s = container_of(job, SnapshotJob, common);
    int orig_vm_running;

    job_progress_set_remaining(&s->common, 1);

    orig_vm_running = runstate_is_running();
    vm_stop(RUN_STATE_RESTORE_VM);

    s->ret = load_snapshot(s->tag, s->vmstate, true, s->devices, s->errp);
    if (s->ret && orig_vm_running) {
        vm_start();
    }

    job_progress_update(&s->common, 1);

    qmp_snapshot_job_free(s);
    aio_co_wake(s->co);
}

static void snapshot_save_job_bh(void *opaque)
{
    Job *job = opaque;
    SnapshotJob *s = container_of(job, SnapshotJob, common);

    job_progress_set_remaining(&s->common, 1);
    s->ret = save_snapshot(s->tag, false, s->vmstate,
                           true, s->devices, s->errp);
    job_progress_update(&s->common, 1);

    qmp_snapshot_job_free(s);
    aio_co_wake(s->co);
}

static void snapshot_delete_job_bh(void *opaque)
{
    Job *job = opaque;
    SnapshotJob *s = container_of(job, SnapshotJob, common);

    job_progress_set_remaining(&s->common, 1);
    s->ret = delete_snapshot(s->tag, true, s->devices, s->errp);
    job_progress_update(&s->common, 1);

    qmp_snapshot_job_free(s);
    aio_co_wake(s->co);
}

static int coroutine_fn snapshot_save_job_run(Job *job, Error **errp)
{
    SnapshotJob *s = container_of(job, SnapshotJob, common);
    s->errp = errp;
    s->co = qemu_coroutine_self();
    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            snapshot_save_job_bh, job);
    qemu_coroutine_yield();
    return s->ret ? 0 : -1;
}

static int coroutine_fn snapshot_load_job_run(Job *job, Error **errp)
{
    SnapshotJob *s = container_of(job, SnapshotJob, common);
    s->errp = errp;
    s->co = qemu_coroutine_self();
    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            snapshot_load_job_bh, job);
    qemu_coroutine_yield();
    return s->ret ? 0 : -1;
}

static int coroutine_fn snapshot_delete_job_run(Job *job, Error **errp)
{
    SnapshotJob *s = container_of(job, SnapshotJob, common);
    s->errp = errp;
    s->co = qemu_coroutine_self();
    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            snapshot_delete_job_bh, job);
    qemu_coroutine_yield();
    return s->ret ? 0 : -1;
}
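/*
 * Example QMP usage of the commands below (illustrative; the job-id, tag
 * and device names are made up):
 *
 *   { "execute": "snapshot-save",
 *     "arguments": { "job-id": "save0", "tag": "my-snap",
 *                    "vmstate": "disk0", "devices": ["disk0", "disk1"] } }
 *
 * The jobs are created with JOB_MANUAL_DISMISS, so management is expected
 * to query and dismiss them once they have concluded.
 */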
static const JobDriver snapshot_load_job_driver = {
    .instance_size = sizeof(SnapshotJob),
    .job_type      = JOB_TYPE_SNAPSHOT_LOAD,
    .run           = snapshot_load_job_run,
};

static const JobDriver snapshot_save_job_driver = {
    .instance_size = sizeof(SnapshotJob),
    .job_type      = JOB_TYPE_SNAPSHOT_SAVE,
    .run           = snapshot_save_job_run,
};

static const JobDriver snapshot_delete_job_driver = {
    .instance_size = sizeof(SnapshotJob),
    .job_type      = JOB_TYPE_SNAPSHOT_DELETE,
    .run           = snapshot_delete_job_run,
};

void qmp_snapshot_save(const char *job_id,
                       const char *tag,
                       const char *vmstate,
                       strList *devices,
                       Error **errp)
{
    SnapshotJob *s;

    s = job_create(job_id, &snapshot_save_job_driver, NULL,
                   qemu_get_aio_context(), JOB_MANUAL_DISMISS,
                   NULL, NULL, errp);
    if (!s) {
        return;
    }

    s->tag = g_strdup(tag);
    s->vmstate = g_strdup(vmstate);
    s->devices = QAPI_CLONE(strList, devices);

    job_start(&s->common);
}

void qmp_snapshot_load(const char *job_id,
                       const char *tag,
                       const char *vmstate,
                       strList *devices,
                       Error **errp)
{
    SnapshotJob *s;

    s = job_create(job_id, &snapshot_load_job_driver, NULL,
                   qemu_get_aio_context(), JOB_MANUAL_DISMISS,
                   NULL, NULL, errp);
    if (!s) {
        return;
    }

    s->tag = g_strdup(tag);
    s->vmstate = g_strdup(vmstate);
    s->devices = QAPI_CLONE(strList, devices);

    job_start(&s->common);
}

void qmp_snapshot_delete(const char *job_id,
                         const char *tag,
                         strList *devices,
                         Error **errp)
{
    SnapshotJob *s;

    s = job_create(job_id, &snapshot_delete_job_driver, NULL,
                   qemu_get_aio_context(), JOB_MANUAL_DISMISS,
                   NULL, NULL, errp);
    if (!s) {
        return;
    }

    s->tag = g_strdup(tag);
    s->devices = QAPI_CLONE(strList, devices);

    job_start(&s->common);
}