/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/blocker.h"
#include "exec.h"
#include "fd.h"
#include "file.h"
#include "socket.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpu-throttle.h"
#include "rdma.h"
#include "ram.h"
#include "migration/global_state.h"
#include "migration/misc.h"
#include "migration.h"
#include "migration-stats.h"
#include "savevm.h"
#include "qemu-file.h"
#include "channel.h"
#include "migration/vmstate.h"
#include "block/block.h"
#include "qapi/error.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-migration.h"
#include "qapi/qapi-visit-sockets.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qnull.h"
#include "qemu/rcu.h"
#include "postcopy-ram.h"
#include "qemu/thread.h"
#include "trace.h"
#include "exec/target_page.h"
#include "io/channel-buffer.h"
#include "io/channel-tls.h"
#include "migration/colo.h"
#include "hw/boards.h"
#include "monitor/monitor.h"
#include "net/announce.h"
#include "qemu/queue.h"
#include "multifd.h"
#include "threadinfo.h"
#include "qemu/yank.h"
#include "sysemu/cpus.h"
#include "yank_functions.h"
#include "sysemu/qtest.h"
#include "options.h"
#include "sysemu/dirtylimit.h"
#include "qemu/sockets.h"
#include "sysemu/kvm.h"

#define NOTIFIER_ELEM_INIT(array, elem)    \
    [elem] = NOTIFIER_WITH_RETURN_LIST_INITIALIZER((array)[elem])

#define INMIGRATE_DEFAULT_EXIT_ON_ERROR true

static NotifierWithReturnList migration_state_notifiers[] = {
    NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_NORMAL),
    NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_CPR_REBOOT),
};

/* Messages sent on the return path from destination to source */
enum mig_rp_message_type {
    MIG_RP_MSG_INVALID = 0,  /* Must be 0 */
    MIG_RP_MSG_SHUT,         /* sibling will not send any more RP messages */
    MIG_RP_MSG_PONG,         /* Response to a PING; data (seq: be32 ) */

    MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */
    MIG_RP_MSG_REQ_PAGES,    /* data (start: be64, len: be32) */
    MIG_RP_MSG_RECV_BITMAP,  /* send recved_bitmap back to source */
    MIG_RP_MSG_RESUME_ACK,   /* tell source that we are ready to resume */
    MIG_RP_MSG_SWITCHOVER_ACK, /* Tell source it's OK to do switchover */

    MIG_RP_MSG_MAX
};
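/*
 * Illustrative only: the on-wire framing used by migrate_send_rp_message()
 * below is a be16 message type, a be16 payload length, then the payload.
 * For example, a PONG carrying sequence number 5 is 8 bytes
 * (MIG_RP_MSG_PONG == 2 per the enum above):
 *
 *   00 02 | 00 04 | 00 00 00 05
 *   type    len     seq (be32)
 */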
/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migrations. */

static MigrationState *current_migration;
static MigrationIncomingState *current_incoming;

static GSList *migration_blockers[MIG_MODE__MAX];

static bool migration_object_check(MigrationState *ms, Error **errp);
static int migration_maybe_pause(MigrationState *s,
                                 int *current_active_state,
                                 int new_state);
static void migrate_fd_cancel(MigrationState *s);
static bool close_return_path_on_source(MigrationState *s);
static void migration_completion_end(MigrationState *s);

static void migration_downtime_start(MigrationState *s)
{
    trace_vmstate_downtime_checkpoint("src-downtime-start");
    s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
}

static void migration_downtime_end(MigrationState *s)
{
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /*
     * If downtime is already set, postcopy should have set it,
     * and that should be the real downtime already.
     */
    if (!s->downtime) {
        s->downtime = now - s->downtime_start;
    }

    trace_vmstate_downtime_checkpoint("src-downtime-end");
}

static bool migration_needs_multiple_sockets(void)
{
    return migrate_multifd() || migrate_postcopy_preempt();
}

static bool transport_supports_multi_channels(MigrationAddress *addr)
{
    if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) {
        SocketAddress *saddr = &addr->u.socket;

        return (saddr->type == SOCKET_ADDRESS_TYPE_INET ||
                saddr->type == SOCKET_ADDRESS_TYPE_UNIX ||
                saddr->type == SOCKET_ADDRESS_TYPE_VSOCK);
    } else if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) {
        return migrate_mapped_ram();
    } else {
        return false;
    }
}

static bool migration_needs_seekable_channel(void)
{
    return migrate_mapped_ram();
}

static bool migration_needs_extra_fds(void)
{
    /*
     * When doing direct-io, multifd requires two different,
     * non-duplicated file descriptors so we can use one of them for
     * unaligned IO.
     */
    return migrate_multifd() && migrate_direct_io();
}

static bool transport_supports_seeking(MigrationAddress *addr)
{
    if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) {
        return true;
    }

    return false;
}

static bool transport_supports_extra_fds(MigrationAddress *addr)
{
    /* file: works because QEMU can open it multiple times */
    return addr->transport == MIGRATION_ADDRESS_TYPE_FILE;
}

static bool
migration_channels_and_transport_compatible(MigrationAddress *addr,
                                            Error **errp)
{
    if (migration_needs_seekable_channel() &&
        !transport_supports_seeking(addr)) {
        error_setg(errp, "Migration requires seekable transport (e.g. file)");
        return false;
    }

    if (migration_needs_multiple_sockets() &&
        !transport_supports_multi_channels(addr)) {
        error_setg(errp, "Migration requires multi-channel URIs (e.g. tcp)");
        return false;
    }

    if (migration_needs_extra_fds() &&
        !transport_supports_extra_fds(addr)) {
        error_setg(errp,
                   "Migration requires a transport that allows for extra fds (e.g. file)");
        return false;
    }

    return true;
}
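/*
 * A few illustrative outcomes of the checks above (not an exhaustive
 * matrix): multifd over "tcp:" passes, since sockets support multiple
 * channels, while multifd over "exec:" is rejected; mapped-ram requires
 * a seekable "file:" transport; and multifd with direct-io additionally
 * needs a transport that can provide extra file descriptors, which
 * currently only "file:" can.
 */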
file)"); 203 return false; 204 } 205 206 return true; 207 } 208 209 static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp) 210 { 211 uintptr_t a = (uintptr_t) ap, b = (uintptr_t) bp; 212 213 return (a > b) - (a < b); 214 } 215 216 static int migration_stop_vm(MigrationState *s, RunState state) 217 { 218 int ret; 219 220 migration_downtime_start(s); 221 222 s->vm_old_state = runstate_get(); 223 global_state_store(); 224 225 ret = vm_stop_force_state(state); 226 227 trace_vmstate_downtime_checkpoint("src-vm-stopped"); 228 trace_migration_completion_vm_stop(ret); 229 230 return ret; 231 } 232 233 void migration_object_init(void) 234 { 235 /* This can only be called once. */ 236 assert(!current_migration); 237 current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION)); 238 239 /* 240 * Init the migrate incoming object as well no matter whether 241 * we'll use it or not. 242 */ 243 assert(!current_incoming); 244 current_incoming = g_new0(MigrationIncomingState, 1); 245 current_incoming->state = MIGRATION_STATUS_NONE; 246 current_incoming->postcopy_remote_fds = 247 g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD)); 248 qemu_mutex_init(¤t_incoming->rp_mutex); 249 qemu_mutex_init(¤t_incoming->postcopy_prio_thread_mutex); 250 qemu_event_init(¤t_incoming->main_thread_load_event, false); 251 qemu_sem_init(¤t_incoming->postcopy_pause_sem_dst, 0); 252 qemu_sem_init(¤t_incoming->postcopy_pause_sem_fault, 0); 253 qemu_sem_init(¤t_incoming->postcopy_pause_sem_fast_load, 0); 254 qemu_sem_init(¤t_incoming->postcopy_qemufile_dst_done, 0); 255 256 qemu_mutex_init(¤t_incoming->page_request_mutex); 257 qemu_cond_init(¤t_incoming->page_request_cond); 258 current_incoming->page_requested = g_tree_new(page_request_addr_cmp); 259 260 current_incoming->exit_on_error = INMIGRATE_DEFAULT_EXIT_ON_ERROR; 261 262 migration_object_check(current_migration, &error_fatal); 263 264 ram_mig_init(); 265 dirty_bitmap_mig_init(); 266 } 267 268 typedef struct { 269 QEMUBH *bh; 270 QEMUBHFunc *cb; 271 void *opaque; 272 } MigrationBH; 273 274 static void migration_bh_dispatch_bh(void *opaque) 275 { 276 MigrationState *s = migrate_get_current(); 277 MigrationBH *migbh = opaque; 278 279 /* cleanup this BH */ 280 qemu_bh_delete(migbh->bh); 281 migbh->bh = NULL; 282 283 /* dispatch the other one */ 284 migbh->cb(migbh->opaque); 285 object_unref(OBJECT(s)); 286 287 g_free(migbh); 288 } 289 290 void migration_bh_schedule(QEMUBHFunc *cb, void *opaque) 291 { 292 MigrationState *s = migrate_get_current(); 293 MigrationBH *migbh = g_new0(MigrationBH, 1); 294 QEMUBH *bh = qemu_bh_new(migration_bh_dispatch_bh, migbh); 295 296 /* Store these to dispatch when the BH runs */ 297 migbh->bh = bh; 298 migbh->cb = cb; 299 migbh->opaque = opaque; 300 301 /* 302 * Ref the state for bh, because it may be called when 303 * there're already no other refs 304 */ 305 object_ref(OBJECT(s)); 306 qemu_bh_schedule(bh); 307 } 308 309 void migration_cancel(const Error *error) 310 { 311 if (error) { 312 migrate_set_error(current_migration, error); 313 } 314 if (migrate_dirty_limit()) { 315 qmp_cancel_vcpu_dirty_limit(false, -1, NULL); 316 } 317 migrate_fd_cancel(current_migration); 318 } 319 320 void migration_shutdown(void) 321 { 322 /* 323 * When the QEMU main thread exit, the COLO thread 324 * may wait a semaphore. So, we should wakeup the 325 * COLO thread before migration shutdown. 
    colo_shutdown();
    /*
     * Cancel the current migration - that will (eventually)
     * stop the migration using this structure
     */
    migration_cancel(NULL);
    object_unref(OBJECT(current_migration));

    /*
     * Cancel outgoing migration of dirty bitmaps. It should
     * at least unref used block nodes.
     */
    dirty_bitmap_mig_cancel_outgoing();

    /*
     * Cancel incoming migration of dirty bitmaps. Dirty bitmaps
     * are non-critical data, and their loss is never considered
     * serious.
     */
    dirty_bitmap_mig_cancel_incoming();
}

/* For outgoing */
MigrationState *migrate_get_current(void)
{
    /* This can only be called after the object is created. */
    assert(current_migration);
    return current_migration;
}

MigrationIncomingState *migration_incoming_get_current(void)
{
    assert(current_incoming);
    return current_incoming;
}

void migration_incoming_transport_cleanup(MigrationIncomingState *mis)
{
    if (mis->socket_address_list) {
        qapi_free_SocketAddressList(mis->socket_address_list);
        mis->socket_address_list = NULL;
    }

    if (mis->transport_cleanup) {
        mis->transport_cleanup(mis->transport_data);
        mis->transport_data = mis->transport_cleanup = NULL;
    }
}

void migration_incoming_state_destroy(void)
{
    struct MigrationIncomingState *mis = migration_incoming_get_current();

    multifd_recv_cleanup();

    if (mis->to_src_file) {
        /* Tell source that we are done */
        migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
        qemu_fclose(mis->to_src_file);
        mis->to_src_file = NULL;
    }

    if (mis->from_src_file) {
        migration_ioc_unregister_yank_from_file(mis->from_src_file);
        qemu_fclose(mis->from_src_file);
        mis->from_src_file = NULL;
    }
    if (mis->postcopy_remote_fds) {
        g_array_free(mis->postcopy_remote_fds, TRUE);
        mis->postcopy_remote_fds = NULL;
    }

    migration_incoming_transport_cleanup(mis);
    qemu_event_reset(&mis->main_thread_load_event);

    if (mis->page_requested) {
        g_tree_destroy(mis->page_requested);
        mis->page_requested = NULL;
    }

    if (mis->postcopy_qemufile_dst) {
        migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst);
        qemu_fclose(mis->postcopy_qemufile_dst);
        mis->postcopy_qemufile_dst = NULL;
    }

    yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}

static void migrate_generate_event(MigrationStatus new_state)
{
    if (migrate_events()) {
        qapi_event_send_migration(new_state);
    }
}

/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
static int migrate_send_rp_message(MigrationIncomingState *mis,
                                   enum mig_rp_message_type message_type,
                                   uint16_t len, void *data)
{
    int ret = 0;

    trace_migrate_send_rp_message((int)message_type, len);
    QEMU_LOCK_GUARD(&mis->rp_mutex);

    /*
     * It's possible that the file handle got lost due to network
     * failures.
     */
    if (!mis->to_src_file) {
        ret = -EIO;
        return ret;
    }

    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    return qemu_fflush(mis->to_src_file);
}
/* Request one page from the source VM at the given start address.
 *    rb: the RAMBlock to request the page in
 * Start: Address offset within the RB
 * Len: Length in bytes required - must be a multiple of pagesize
 */
int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
                                      RAMBlock *rb, ram_addr_t start)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
    size_t msglen = 12; /* start + len */
    size_t len = qemu_ram_pagesize(rb);
    enum mig_rp_message_type msg_type;
    const char *rbname;
    int rbname_len;

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    /*
     * We maintain the last ramblock that we requested a page for. Note that
     * we don't need locking because this function will only be called within
     * the postcopy ram fault thread.
     */
    if (rb != mis->last_rb) {
        mis->last_rb = rb;

        rbname = qemu_ram_get_idstr(rb);
        rbname_len = strlen(rbname);

        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        msg_type = MIG_RP_MSG_REQ_PAGES_ID;
    } else {
        msg_type = MIG_RP_MSG_REQ_PAGES;
    }

    return migrate_send_rp_message(mis, msg_type, msglen, bufc);
}
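/*
 * Illustrative payload layouts built by the function above (matching the
 * per-message comments on the mig_rp_message_type enum):
 *
 *   REQ_PAGES:    | start (be64) | len (be32) |
 *   REQ_PAGES_ID: | start (be64) | len (be32) | idstr-len (1) | idstr ... |
 *
 * The rbname is only appended when the requested block differs from the
 * one used by the previous request, so the common case stays at 12 bytes.
 */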
int migrate_send_rp_req_pages(MigrationIncomingState *mis,
                              RAMBlock *rb, ram_addr_t start, uint64_t haddr)
{
    void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
    bool received = false;

    WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
        received = ramblock_recv_bitmap_test_byte_offset(rb, start);
        if (!received && !g_tree_lookup(mis->page_requested, aligned)) {
            /*
             * The page has not been received, and it's not yet in the page
             * request list. Queue it. Set the value of element to 1, so that
             * things like g_tree_lookup() will return TRUE (1) when found.
             */
            g_tree_insert(mis->page_requested, aligned, (gpointer)1);
            qatomic_inc(&mis->page_requested_count);
            trace_postcopy_page_req_add(aligned, mis->page_requested_count);
        }
    }

    /*
     * If the page is there, skip sending the message. We don't even need the
     * lock because as long as the page arrived, it'll be there forever.
     */
    if (received) {
        return 0;
    }

    return migrate_send_rp_message_req_pages(mis, rb, start);
}

static bool migration_colo_enabled;
bool migration_incoming_colo_enabled(void)
{
    return migration_colo_enabled;
}

void migration_incoming_disable_colo(void)
{
    ram_block_discard_disable(false);
    migration_colo_enabled = false;
}

int migration_incoming_enable_colo(void)
{
#ifndef CONFIG_REPLICATION
    error_report("ENABLE_COLO command came in the migration stream, but the "
                 "replication module is not built in");
    return -ENOTSUP;
#endif

    if (!migrate_colo()) {
        error_report("ENABLE_COLO command came in the migration stream, but "
                     "x-colo capability is not set");
        return -EINVAL;
    }

    if (ram_block_discard_disable(true)) {
        error_report("COLO: cannot disable RAM discard");
        return -EBUSY;
    }
    migration_colo_enabled = true;
    return 0;
}

void migrate_add_address(SocketAddress *address)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    QAPI_LIST_PREPEND(mis->socket_address_list,
                      QAPI_CLONE(SocketAddress, address));
}

bool migrate_uri_parse(const char *uri, MigrationChannel **channel,
                       Error **errp)
{
    g_autoptr(MigrationChannel) val = g_new0(MigrationChannel, 1);
    g_autoptr(MigrationAddress) addr = g_new0(MigrationAddress, 1);
    InetSocketAddress *isock = &addr->u.rdma;
    strList **tail = &addr->u.exec.args;

    if (strstart(uri, "exec:", NULL)) {
        addr->transport = MIGRATION_ADDRESS_TYPE_EXEC;
#ifdef WIN32
        QAPI_LIST_APPEND(tail, g_strdup(exec_get_cmd_path()));
        QAPI_LIST_APPEND(tail, g_strdup("/c"));
#else
        QAPI_LIST_APPEND(tail, g_strdup("/bin/sh"));
        QAPI_LIST_APPEND(tail, g_strdup("-c"));
#endif
        QAPI_LIST_APPEND(tail, g_strdup(uri + strlen("exec:")));
    } else if (strstart(uri, "rdma:", NULL)) {
        if (inet_parse(isock, uri + strlen("rdma:"), errp)) {
            qapi_free_InetSocketAddress(isock);
            return false;
        }
        addr->transport = MIGRATION_ADDRESS_TYPE_RDMA;
    } else if (strstart(uri, "tcp:", NULL) ||
               strstart(uri, "unix:", NULL) ||
               strstart(uri, "vsock:", NULL) ||
               strstart(uri, "fd:", NULL)) {
        addr->transport = MIGRATION_ADDRESS_TYPE_SOCKET;
        SocketAddress *saddr = socket_parse(uri, errp);
        if (!saddr) {
            return false;
        }
        addr->u.socket.type = saddr->type;
        addr->u.socket.u = saddr->u;
        /* Don't free the objects inside; their ownership moved to "addr" */
        g_free(saddr);
    } else if (strstart(uri, "file:", NULL)) {
        addr->transport = MIGRATION_ADDRESS_TYPE_FILE;
        addr->u.file.filename = g_strdup(uri + strlen("file:"));
        if (file_parse_offset(addr->u.file.filename, &addr->u.file.offset,
                              errp)) {
            return false;
        }
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
        return false;
    }

    val->channel_type = MIGRATION_CHANNEL_TYPE_MAIN;
    val->addr = g_steal_pointer(&addr);
    *channel = g_steal_pointer(&val);
    return true;
}
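/*
 * Example URIs accepted by migrate_uri_parse() (values illustrative):
 *
 *   tcp:192.168.0.10:4444             socket (likewise unix:, vsock:, fd:)
 *   rdma:192.168.0.10:4444            RDMA transport
 *   exec:cat > /tmp/mig.out           run via /bin/sh -c (cmd /c on Windows)
 *   file:/tmp/mig.out,offset=0x1000   file transport, optional offset suffix
 */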
static void qemu_start_incoming_migration(const char *uri, bool has_channels,
                                          MigrationChannelList *channels,
                                          Error **errp)
{
    g_autoptr(MigrationChannel) channel = NULL;
    MigrationAddress *addr = NULL;
    MigrationIncomingState *mis = migration_incoming_get_current();

    /*
     * Preliminary checks: exactly one of 'uri' and 'channels' must be
     * supplied.
     */
    if (!uri == !channels) {
        error_setg(errp, "need either 'uri' or 'channels' argument");
        return;
    }

    if (channels) {
        /* Verify that the migrate channel list has only one item */
        if (channels->next) {
            error_setg(errp, "Channel list has more than one entry");
            return;
        }
        addr = channels->value->addr;
    }

    if (uri) {
        /* caller uses the old URI syntax */
        if (!migrate_uri_parse(uri, &channel, errp)) {
            return;
        }
        addr = channel->addr;
    }

    /* transport mechanism not suitable for migration? */
    if (!migration_channels_and_transport_compatible(addr, errp)) {
        return;
    }

    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_SETUP);

    if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) {
        SocketAddress *saddr = &addr->u.socket;
        if (saddr->type == SOCKET_ADDRESS_TYPE_INET ||
            saddr->type == SOCKET_ADDRESS_TYPE_UNIX ||
            saddr->type == SOCKET_ADDRESS_TYPE_VSOCK) {
            socket_start_incoming_migration(saddr, errp);
        } else if (saddr->type == SOCKET_ADDRESS_TYPE_FD) {
            fd_start_incoming_migration(saddr->u.fd.str, errp);
        }
#ifdef CONFIG_RDMA
    } else if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) {
        if (migrate_xbzrle()) {
            error_setg(errp, "RDMA and XBZRLE can't be used together");
            return;
        }
        if (migrate_multifd()) {
            error_setg(errp, "RDMA and multifd can't be used together");
            return;
        }
        rdma_start_incoming_migration(&addr->u.rdma, errp);
#endif
    } else if (addr->transport == MIGRATION_ADDRESS_TYPE_EXEC) {
        exec_start_incoming_migration(addr->u.exec.args, errp);
    } else if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) {
        file_start_incoming_migration(&addr->u.file, errp);
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}

static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    trace_vmstate_downtime_checkpoint("dst-precopy-bh-enter");

    /* If capability late_block_activate is set:
     * Only fire up the block code now if we're going to restart the
     * VM, else 'cont' will do it.
     * This causes file locking to happen; so we don't want it to happen
     * unless we really are starting the VM.
     */
    if (!migrate_late_block_activate() ||
         (autostart && (!global_state_received() ||
            runstate_is_live(global_state_get_runstate())))) {
        /* Make sure all file formats throw away their mutable metadata.
         * If we get an error here, just don't restart the VM yet. */
        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
            local_err = NULL;
            autostart = false;
        }
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self(&mis->announce_timer, migrate_announce_params());

    trace_vmstate_downtime_checkpoint("dst-precopy-bh-announced");

    multifd_recv_shutdown();

    dirty_bitmap_mig_before_vm_start();

    if (!global_state_received() ||
        runstate_is_live(global_state_get_runstate())) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else if (migration_incoming_colo_enabled()) {
        migration_incoming_disable_colo();
        vm_start();
    } else {
        runstate_set(global_state_get_runstate());
    }
    trace_vmstate_downtime_checkpoint("dst-precopy-bh-vm-started");
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    migration_incoming_state_destroy();
}

static void coroutine_fn
process_incoming_migration_co(void *opaque)
{
    MigrationState *s = migrate_get_current();
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyState ps;
    int ret;
    Error *local_err = NULL;

    assert(mis->from_src_file);

    mis->largest_page_size = qemu_ram_pagesize_largest();
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_ACTIVE);

    mis->loadvm_co = qemu_coroutine_self();
    ret = qemu_loadvm_state(mis->from_src_file);
    mis->loadvm_co = NULL;

    trace_vmstate_downtime_checkpoint("dst-precopy-loadvm-completed");

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    if (ret < 0) {
        error_setg(&local_err, "load of migration failed: %s", strerror(-ret));
        goto fail;
    }

    if (migration_incoming_colo_enabled()) {
        /* yield until COLO exit */
        colo_incoming_co();
    }

    migration_bh_schedule(process_incoming_migration_bh, mis);
    return;
fail:
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    migrate_set_error(s, local_err);
    error_free(local_err);

    migration_incoming_state_destroy();

    if (mis->exit_on_error) {
        WITH_QEMU_LOCK_GUARD(&s->error_mutex) {
            error_report_err(s->error);
            s->error = NULL;
        }

        exit(EXIT_FAILURE);
    }
}

/**
 * migration_incoming_setup: Setup incoming migration
 * @f: file for main migration channel
 */
static void migration_incoming_setup(QEMUFile *f)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!mis->from_src_file) {
        mis->from_src_file = f;
    }
    qemu_file_set_blocking(f, false);
}

void migration_incoming_process(void)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, NULL);
    qemu_coroutine_enter(co);
}

/* Returns true if recovered from a paused migration, otherwise false */
static bool postcopy_try_recover(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
        /* Resumed from a paused postcopy migration */

        /* This should be set already in migration_incoming_setup() */
        assert(mis->from_src_file);
        /* Postcopy has standalone thread to do vm load */
        qemu_file_set_blocking(mis->from_src_file, true);

        /* Re-configure the return path */
        mis->to_src_file = qemu_file_get_return_path(mis->from_src_file);

        migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
                          MIGRATION_STATUS_POSTCOPY_RECOVER);

        /*
         * Here, we only wake up the main loading thread (while the
         * other threads will still be waiting), so that we can receive
         * commands from the source now, and answer them if needed. The
         * remaining threads will be woken up afterwards, once we are
         * sure that the source is ready to reply to page requests.
         */
        qemu_sem_post(&mis->postcopy_pause_sem_dst);
        return true;
    }

    return false;
}

void migration_fd_process_incoming(QEMUFile *f)
{
    migration_incoming_setup(f);
    if (postcopy_try_recover()) {
        return;
    }
    migration_incoming_process();
}
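/*
 * A sketch of the flow above for a freshly connected main channel:
 * migration_incoming_setup() records the QEMUFile as from_src_file;
 * postcopy_try_recover() only intercepts the case where a paused
 * postcopy migration is being resumed; otherwise
 * migration_incoming_process() enters the load coroutine, which runs
 * qemu_loadvm_state() and, on success, eventually schedules
 * process_incoming_migration_bh().
 */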
/*
 * Returns true when we want to start a new incoming migration process,
 * false otherwise.
 */
static bool migration_should_start_incoming(bool main_channel)
{
    /* Multifd doesn't start unless all channels are established */
    if (migrate_multifd()) {
        return migration_has_all_channels();
    }

    /* Preempt channel only starts when the main channel is created */
    if (migrate_postcopy_preempt()) {
        return main_channel;
    }

    /*
     * For all remaining types of migration, we should only reach here
     * when it's the main channel that's being created, and we should
     * always proceed with this channel.
     */
    assert(main_channel);
    return true;
}

void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    Error *local_err = NULL;
    QEMUFile *f;
    bool default_channel = true;
    uint32_t channel_magic = 0;
    int ret = 0;

    if (migrate_multifd() && !migrate_mapped_ram() &&
        !migrate_postcopy_ram() &&
        qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) {
        /*
         * With multiple channels, it is possible that we receive channels
         * out of order on the destination side, causing incorrect mapping
         * of source channels on the destination side. Check the channel
         * MAGIC to decide the type of channel. Please note this is best
         * effort: the postcopy preempt channel does not send any magic
         * number, so avoid it for postcopy live migration. Also, TLS live
         * migration already does a TLS handshake while initializing the
         * main channel, so with TLS this issue is not possible.
         */
        ret = migration_channel_read_peek(ioc, (void *)&channel_magic,
                                          sizeof(channel_magic), errp);

        if (ret != 0) {
            return;
        }

        default_channel = (channel_magic == cpu_to_be32(QEMU_VM_FILE_MAGIC));
    } else {
        default_channel = !mis->from_src_file;
    }

    if (multifd_recv_setup(errp) != 0) {
        return;
    }

    if (default_channel) {
        f = qemu_file_new_input(ioc);
        migration_incoming_setup(f);
    } else {
        /* Multiple connections */
        assert(migration_needs_multiple_sockets());
        if (migrate_multifd()) {
            multifd_recv_new_channel(ioc, &local_err);
        } else {
            assert(migrate_postcopy_preempt());
            f = qemu_file_new_input(ioc);
            postcopy_preempt_new_channel(mis, f);
        }
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if (migration_should_start_incoming(default_channel)) {
        /* If it's a recovery, we're done */
        if (postcopy_try_recover()) {
            return;
        }
        migration_incoming_process();
    }
}

/**
 * @migration_has_all_channels: We have received all channels that we need
 *
 * Returns true when we have got connections to all the channels that
 * we need for migration.
 */
bool migration_has_all_channels(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!mis->from_src_file) {
        return false;
    }

    if (migrate_multifd()) {
        return multifd_recv_all_channels_created();
    }

    if (migrate_postcopy_preempt()) {
        return mis->postcopy_qemufile_dst != NULL;
    }

    return true;
}

int migrate_send_rp_switchover_ack(MigrationIncomingState *mis)
{
    return migrate_send_rp_message(mis, MIG_RP_MSG_SWITCHOVER_ACK, 0, NULL);
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP. A non-zero value
 * indicates an error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
                                 char *block_name)
{
    char buf[512];
    int len;
    int64_t res;

    /*
     * First, we send the header part. It contains only the len of
     * idstr, and the idstr itself.
     */
    len = strlen(block_name);
    buf[0] = len;
    memcpy(buf + 1, block_name, len);

    if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: MSG_RP_RECV_BITMAP only used for recovery",
                     __func__);
        return;
    }

    migrate_send_rp_message(mis, MIG_RP_MSG_RECV_BITMAP, len + 1, buf);

    /*
     * Next, we dump the received bitmap to the stream.
     *
     * TODO: currently we are safe since we are the only one that is
     * using the to_src_file handle (fault thread is still paused),
     * and it's OK even without taking the mutex. However the best way
     * is to take the lock before sending the message header, and
     * release the lock after sending the bitmap.
     */
    qemu_mutex_lock(&mis->rp_mutex);
    res = ramblock_recv_bitmap_send(mis->to_src_file, block_name);
    qemu_mutex_unlock(&mis->rp_mutex);

    trace_migrate_send_rp_recv_bitmap(block_name, res);
}
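/*
 * Illustrative RECV_BITMAP header for a block named "pc.ram" (the
 * variable-length payload is why rp_cmd_args below lists its .len as -1):
 *
 *   | 06 | 'p' 'c' '.' 'r' 'a' 'm' |
 *     len   idstr (not NUL terminated)
 *
 * The received bitmap itself follows separately on the stream via
 * ramblock_recv_bitmap_send().
 */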
void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf);
}

/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
bool migration_is_setup_or_active(void)
{
    MigrationState *s = current_migration;

    switch (s->state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_COLO:
        return true;

    default:
        return false;

    }
}

bool migration_is_running(void)
{
    MigrationState *s = current_migration;

    switch (s->state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_CANCELLING:
        return true;

    default:
        return false;

    }
}
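/*
 * The two predicates above differ in exactly two states: CANCELLING is
 * "running" (there is still a migration thread to wind down) but not
 * "setup or active", while COLO is "setup or active" but not "running".
 * All other states are classified identically by both.
 */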
static bool migrate_show_downtime(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_COMPLETED) || migration_in_postcopy();
}

static void populate_time_info(MigrationInfo *info, MigrationState *s)
{
    info->has_status = true;
    info->has_setup_time = true;
    info->setup_time = s->setup_time;

    if (s->state == MIGRATION_STATUS_COMPLETED) {
        info->has_total_time = true;
        info->total_time = s->total_time;
    } else {
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                           s->start_time;
    }

    if (migrate_show_downtime(s)) {
        info->has_downtime = true;
        info->downtime = s->downtime;
    } else {
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
    }
}

static void populate_ram_info(MigrationInfo *info, MigrationState *s)
{
    size_t page_size = qemu_target_page_size();

    info->ram = g_malloc0(sizeof(*info->ram));
    info->ram->transferred = migration_transferred_bytes();
    info->ram->total = ram_bytes_total();
    info->ram->duplicate = stat64_get(&mig_stats.zero_pages);
    info->ram->normal = stat64_get(&mig_stats.normal_pages);
    info->ram->normal_bytes = info->ram->normal * page_size;
    info->ram->mbps = s->mbps;
    info->ram->dirty_sync_count =
        stat64_get(&mig_stats.dirty_sync_count);
    info->ram->dirty_sync_missed_zero_copy =
        stat64_get(&mig_stats.dirty_sync_missed_zero_copy);
    info->ram->postcopy_requests =
        stat64_get(&mig_stats.postcopy_requests);
    info->ram->page_size = page_size;
    info->ram->multifd_bytes = stat64_get(&mig_stats.multifd_bytes);
    info->ram->pages_per_second = s->pages_per_second;
    info->ram->precopy_bytes = stat64_get(&mig_stats.precopy_bytes);
    info->ram->downtime_bytes = stat64_get(&mig_stats.downtime_bytes);
    info->ram->postcopy_bytes = stat64_get(&mig_stats.postcopy_bytes);

    if (migrate_xbzrle()) {
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_counters.bytes;
        info->xbzrle_cache->pages = xbzrle_counters.pages;
        info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss;
        info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate;
        info->xbzrle_cache->encoding_rate = xbzrle_counters.encoding_rate;
        info->xbzrle_cache->overflow = xbzrle_counters.overflow;
    }

    if (cpu_throttle_active()) {
        info->has_cpu_throttle_percentage = true;
        info->cpu_throttle_percentage = cpu_throttle_get_percentage();
    }

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        info->ram->remaining = ram_bytes_remaining();
        info->ram->dirty_pages_rate =
            stat64_get(&mig_stats.dirty_pages_rate);
    }

    if (migrate_dirty_limit() && dirtylimit_in_service()) {
        info->has_dirty_limit_throttle_time_per_round = true;
        info->dirty_limit_throttle_time_per_round =
            dirtylimit_throttle_time_per_round();

        info->has_dirty_limit_ring_full_time = true;
        info->dirty_limit_ring_full_time = dirtylimit_ring_full_time();
    }
}

static void fill_source_migration_info(MigrationInfo *info)
{
    MigrationState *s = migrate_get_current();
    int state = qatomic_read(&s->state);
    GSList *cur_blocker = migration_blockers[migrate_mode()];

    info->blocked_reasons = NULL;

    /*
     * There are two types of reasons a migration might be blocked;
     * a) devices marked in VMState as non-migratable, and
     * b) explicit migration blockers
     * We need to add both of them here.
     */
    qemu_savevm_non_migratable_list(&info->blocked_reasons);

    while (cur_blocker) {
        QAPI_LIST_PREPEND(info->blocked_reasons,
                          g_strdup(error_get_pretty(cur_blocker->data)));
        cur_blocker = g_slist_next(cur_blocker);
    }
    info->has_blocked_reasons = info->blocked_reasons != NULL;

    switch (state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        /* do not overwrite destination migration status */
        return;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        /* TODO add some postcopy stats */
        populate_time_info(info, s);
        populate_ram_info(info, s);
        migration_populate_vfio_info(info);
        break;
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        /* TODO: display COLO specific information (checkpoint info etc.) */
        break;
    case MIGRATION_STATUS_COMPLETED:
        populate_time_info(info, s);
        populate_ram_info(info, s);
        migration_populate_vfio_info(info);
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_WAIT_UNPLUG:
        info->has_status = true;
        break;
    }
    info->status = state;

    QEMU_LOCK_GUARD(&s->error_mutex);
    if (s->error) {
        info->error_desc = g_strdup(error_get_pretty(s->error));
    }
}

static void fill_destination_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->socket_address_list) {
        info->has_socket_address = true;
        info->socket_address =
            QAPI_CLONE(SocketAddressList, mis->socket_address_list);
    }

    switch (mis->state) {
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_FAILED:
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_COMPLETED:
        info->has_status = true;
        fill_destination_postcopy_migration_info(info);
        break;
    default:
        return;
    }
    info->status = mis->state;

    if (!info->error_desc) {
        MigrationState *s = migrate_get_current();
        QEMU_LOCK_GUARD(&s->error_mutex);

        if (s->error) {
            info->error_desc = g_strdup(error_get_pretty(s->error));
        }
    }
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));

    fill_destination_migration_info(info);
    fill_source_migration_info(info);

    return info;
}

void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                         " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                         " started");
        return;
    }
    /*
     * We don't error if migration has finished since that would be racy
     * with issuing this command.
     */
    qatomic_set(&s->start_postcopy, true);
}

/* shared migration helpers */

void migrate_set_state(MigrationStatus *state, MigrationStatus old_state,
                       MigrationStatus new_state)
{
    assert(new_state < MIGRATION_STATUS__MAX);
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        trace_migrate_set_state(MigrationStatus_str(new_state));
        migrate_generate_event(new_state);
    }
}

static void migrate_fd_cleanup(MigrationState *s)
{
    MigrationEventType type;

    g_free(s->hostname);
    s->hostname = NULL;
    json_writer_free(s->vmdesc);
    s->vmdesc = NULL;

    qemu_savevm_state_cleanup();

    close_return_path_on_source(s);

    if (s->to_dst_file) {
        QEMUFile *tmp;

        trace_migrate_fd_cleanup();
        bql_unlock();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        bql_lock();

        multifd_send_shutdown();
        qemu_mutex_lock(&s->qemu_file_lock);
        tmp = s->to_dst_file;
        s->to_dst_file = NULL;
        qemu_mutex_unlock(&s->qemu_file_lock);
        /*
         * Close the file handle without the lock to make sure the
         * critical section won't block for long.
         */
        migration_ioc_unregister_yank_from_file(tmp);
        qemu_fclose(tmp);
    }

    assert(!migration_is_active());

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    if (s->error) {
        /* It is used on info migrate. We can't free it */
        error_report_err(error_copy(s->error));
    }
    type = migration_has_failed(s) ? MIG_EVENT_PRECOPY_FAILED :
                                     MIG_EVENT_PRECOPY_DONE;
    migration_call_notifiers(s, type, NULL);
    yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}

static void migrate_fd_cleanup_bh(void *opaque)
{
    migrate_fd_cleanup(opaque);
}

void migrate_set_error(MigrationState *s, const Error *error)
{
    QEMU_LOCK_GUARD(&s->error_mutex);

    trace_migrate_error(error_get_pretty(error));

    if (!s->error) {
        s->error = error_copy(error);
    }
}

bool migrate_has_error(MigrationState *s)
{
    /* The lock is not helpful here, but still follow the rule */
    QEMU_LOCK_GUARD(&s->error_mutex);
    return qatomic_read(&s->error);
}

static void migrate_error_free(MigrationState *s)
{
    QEMU_LOCK_GUARD(&s->error_mutex);
    if (s->error) {
        error_free(s->error);
        s->error = NULL;
    }
}

static void migrate_fd_error(MigrationState *s, const Error *error)
{
    assert(s->to_dst_file == NULL);
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_FAILED);
    migrate_set_error(s, error);
}

static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;

    trace_migrate_fd_cancel();

    WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
        if (s->rp_state.from_dst_file) {
            /* shutdown the rp socket, causing the rp thread to shut down */
            qemu_file_shutdown(s->rp_state.from_dst_file);
        }
    }

    do {
        old_state = s->state;
        if (!migration_is_running()) {
            break;
        }
        /* If the migration is paused, kick it out of the pause */
        if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) {
            qemu_sem_post(&s->pause_sem);
        }
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING) {
        WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
            if (s->to_dst_file) {
                qemu_file_shutdown(s->to_dst_file);
            }
        }
    }
    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
        Error *local_err = NULL;

        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
    }
}

void migration_add_notifier_mode(NotifierWithReturn *notify,
                                 MigrationNotifyFunc func, MigMode mode)
{
    notify->notify = (NotifierWithReturnFunc)func;
    notifier_with_return_list_add(&migration_state_notifiers[mode], notify);
}

void migration_add_notifier(NotifierWithReturn *notify,
                            MigrationNotifyFunc func)
{
    migration_add_notifier_mode(notify, func, MIG_MODE_NORMAL);
}

void migration_remove_notifier(NotifierWithReturn *notify)
{
    if (notify->notify) {
        notifier_with_return_remove(notify);
        notify->notify = NULL;
    }
}

int migration_call_notifiers(MigrationState *s, MigrationEventType type,
                             Error **errp)
{
    MigMode mode = s->parameters.mode;
    MigrationEvent e;
    int ret;

    e.type = type;
    ret = notifier_with_return_list_notify(&migration_state_notifiers[mode],
                                           &e, errp);
    assert(!ret || type == MIG_EVENT_PRECOPY_SETUP);
    return ret;
}

bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}

bool migration_in_postcopy(void)
{
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        return true;
    default:
        return false;
    }
}

bool migration_postcopy_is_alive(MigrationStatus state)
{
    switch (state) {
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        return true;
    default:
        return false;
    }
}

bool migration_in_incoming_postcopy(void)
{
    PostcopyState ps = postcopy_state_get();

    return ps >= POSTCOPY_INCOMING_DISCARD && ps < POSTCOPY_INCOMING_END;
}

bool migration_incoming_postcopy_advised(void)
{
    PostcopyState ps = postcopy_state_get();

    return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
}

bool migration_in_bg_snapshot(void)
{
    return migrate_background_snapshot() &&
           migration_is_setup_or_active();
}

bool migration_is_idle(void)
{
    MigrationState *s = current_migration;

    if (!s) {
        return true;
    }

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_COMPLETED:
    case MIGRATION_STATUS_FAILED:
        return true;
    default:
        return false;
    }
}
bool migration_is_active(void)
{
    MigrationState *s = current_migration;

    return (s->state == MIGRATION_STATUS_ACTIVE ||
            s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

bool migration_is_device(void)
{
    MigrationState *s = current_migration;

    return s->state == MIGRATION_STATUS_DEVICE;
}

bool migration_thread_is_self(void)
{
    MigrationState *s = current_migration;

    return qemu_thread_is_self(&s->thread);
}

bool migrate_mode_is_cpr(MigrationState *s)
{
    return s->parameters.mode == MIG_MODE_CPR_REBOOT;
}

int migrate_init(MigrationState *s, Error **errp)
{
    int ret;

    ret = qemu_savevm_state_prepare(errp);
    if (ret) {
        return ret;
    }

    /*
     * Reinitialise all migration state, except
     * parameters/capabilities that the user set, and
     * locks.
     */
    s->to_dst_file = NULL;
    s->state = MIGRATION_STATUS_NONE;
    s->rp_state.from_dst_file = NULL;
    s->mbps = 0.0;
    s->pages_per_second = 0.0;
    s->downtime = 0;
    s->expected_downtime = 0;
    s->setup_time = 0;
    s->start_postcopy = false;
    s->migration_thread_running = false;
    error_free(s->error);
    s->error = NULL;
    s->vmdesc = NULL;

    migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    s->total_time = 0;
    s->vm_old_state = -1;
    s->iteration_initial_bytes = 0;
    s->threshold_size = 0;
    s->switchover_acked = false;
    s->rdma_migration = false;
    /*
     * set mig_stats memory to zero for a new migration
     */
    memset(&mig_stats, 0, sizeof(mig_stats));
    migration_reset_vfio_bytes_transferred();

    return 0;
}
static bool is_busy(Error **reasonp, Error **errp)
{
    ERRP_GUARD();

    /* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. */
    if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) {
        error_propagate_prepend(errp, *reasonp,
                                "disallowing migration blocker "
                                "(migration/snapshot in progress) for: ");
        *reasonp = NULL;
        return true;
    }
    return false;
}

static bool is_only_migratable(Error **reasonp, Error **errp, int modes)
{
    ERRP_GUARD();

    if (only_migratable && (modes & BIT(MIG_MODE_NORMAL))) {
        error_propagate_prepend(errp, *reasonp,
                                "disallowing migration blocker "
                                "(--only-migratable) for: ");
        *reasonp = NULL;
        return true;
    }
    return false;
}

static int get_modes(MigMode mode, va_list ap)
{
    int modes = 0;

    while (mode != -1 && mode != MIG_MODE_ALL) {
        assert(mode >= MIG_MODE_NORMAL && mode < MIG_MODE__MAX);
        modes |= BIT(mode);
        mode = va_arg(ap, MigMode);
    }
    if (mode == MIG_MODE_ALL) {
        modes = BIT(MIG_MODE__MAX) - 1;
    }
    return modes;
}

static int add_blockers(Error **reasonp, Error **errp, int modes)
{
    for (MigMode mode = 0; mode < MIG_MODE__MAX; mode++) {
        if (modes & BIT(mode)) {
            migration_blockers[mode] = g_slist_prepend(migration_blockers[mode],
                                                       *reasonp);
        }
    }
    return 0;
}

int migrate_add_blocker(Error **reasonp, Error **errp)
{
    return migrate_add_blocker_modes(reasonp, errp, MIG_MODE_ALL);
}

int migrate_add_blocker_normal(Error **reasonp, Error **errp)
{
    return migrate_add_blocker_modes(reasonp, errp, MIG_MODE_NORMAL, -1);
}

int migrate_add_blocker_modes(Error **reasonp, Error **errp, MigMode mode, ...)
{
    int modes;
    va_list ap;

    va_start(ap, mode);
    modes = get_modes(mode, ap);
    va_end(ap);

    if (is_only_migratable(reasonp, errp, modes)) {
        return -EACCES;
    } else if (is_busy(reasonp, errp)) {
        return -EBUSY;
    }
    return add_blockers(reasonp, errp, modes);
}
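/*
 * Illustrative use of the varargs API above: the mode list must be
 * terminated by -1 (or cut short by MIG_MODE_ALL, which selects every
 * mode). For example, blocking only normal and cpr-reboot migration
 * (the error message is hypothetical):
 *
 *   Error *reason = NULL;
 *   error_setg(&reason, "feature X is not migratable");
 *   migrate_add_blocker_modes(&reason, errp,
 *                             MIG_MODE_NORMAL, MIG_MODE_CPR_REBOOT, -1);
 */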
int migrate_add_blocker_internal(Error **reasonp, Error **errp)
{
    int modes = BIT(MIG_MODE__MAX) - 1;

    if (is_busy(reasonp, errp)) {
        return -EBUSY;
    }
    return add_blockers(reasonp, errp, modes);
}

void migrate_del_blocker(Error **reasonp)
{
    if (*reasonp) {
        for (MigMode mode = 0; mode < MIG_MODE__MAX; mode++) {
            migration_blockers[mode] = g_slist_remove(migration_blockers[mode],
                                                      *reasonp);
        }
        error_free(*reasonp);
        *reasonp = NULL;
    }
}

void qmp_migrate_incoming(const char *uri, bool has_channels,
                          MigrationChannelList *channels,
                          bool has_exit_on_error, bool exit_on_error,
                          Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
        return;
    }
    if (!runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "'-incoming' was not specified on the command line");
        return;
    }

    if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
        return;
    }

    mis->exit_on_error =
        has_exit_on_error ? exit_on_error : INMIGRATE_DEFAULT_EXIT_ON_ERROR;

    qemu_start_incoming_migration(uri, has_channels, channels, &local_err);

    if (local_err) {
        yank_unregister_instance(MIGRATION_YANK_INSTANCE);
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}

void qmp_migrate_recover(const char *uri, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    /*
     * Don't even bother to use ERRP_GUARD() as it _must_ always be set by
     * callers (no one should ignore a recover failure); if one does, it's
     * a programming error.
     */
    assert(errp);

    if (mis->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
        error_setg(errp, "Migrate recover can only be run "
                   "when postcopy is paused.");
        return;
    }

    /* If there's an existing transport, release it */
    migration_incoming_transport_cleanup(mis);

    /*
     * Note that this call will never start a real migration; it will
     * only re-setup the migration stream and poke existing migration
     * to continue using that newly established channel.
     */
    qemu_start_incoming_migration(uri, false, NULL, errp);
}

void qmp_migrate_pause(Error **errp)
{
    MigrationState *ms = migrate_get_current();
    MigrationIncomingState *mis = migration_incoming_get_current();
    int ret = 0;

    if (migration_postcopy_is_alive(ms->state)) {
        /* Source side, during postcopy */
        Error *error = NULL;

        /* Tell the core migration that we're pausing */
        error_setg(&error, "Postcopy migration is paused by the user");
        migrate_set_error(ms, error);
        error_free(error);

        qemu_mutex_lock(&ms->qemu_file_lock);
        if (ms->to_dst_file) {
            ret = qemu_file_shutdown(ms->to_dst_file);
        }
        qemu_mutex_unlock(&ms->qemu_file_lock);
        if (ret) {
            error_setg(errp, "Failed to pause source migration");
        }

        /*
         * Kick the migration thread out of any waiting windows (on behalf
         * of the rp thread).
         */
        migration_rp_kick(ms);

        return;
    }

    if (migration_postcopy_is_alive(mis->state)) {
        ret = qemu_file_shutdown(mis->from_src_file);
        if (ret) {
            error_setg(errp, "Failed to pause destination migration");
        }
        return;
    }

    error_setg(errp, "migrate-pause is currently only supported "
               "during postcopy-active or postcopy-recover state");
}

bool migration_is_blocked(Error **errp)
{
    GSList *blockers = migration_blockers[migrate_mode()];

    if (qemu_savevm_state_blocked(errp)) {
        return true;
    }

    if (blockers) {
        error_propagate(errp, error_copy(blockers->data));
        return true;
    }

    return false;
}

/* Returns true if continue to migrate, or false if error detected */
static bool migrate_prepare(MigrationState *s, bool resume, Error **errp)
{
    if (resume) {
        if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
            error_setg(errp, "Cannot resume if there is no "
                       "paused migration");
            return false;
        }

        /*
         * Postcopy recovery won't work well with the release-ram
         * capability, since release-ram drops the page buffer as soon
         * as the page is put into the send buffer. So if a network
         * failure happens, any page buffers that have not yet reached
         * the destination VM but have already been sent from the
         * source VM will be lost forever. Let's refuse to let the
         * client resume such a postcopy migration. Luckily release-ram
         * was designed to only be used when src and destination VMs
         * are on the same host, so it should be fine.
         */
        if (migrate_release_ram()) {
            error_setg(errp, "Postcopy recovery cannot work "
                       "when release-ram capability is set");
            return false;
        }

        /* This is a resume, skip init status */
        return true;
    }

    if (migration_is_running()) {
        error_setg(errp, "There's a migration process in progress");
        return false;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return false;
    }

    if (runstate_check(RUN_STATE_POSTMIGRATE)) {
        error_setg(errp, "Can't migrate the vm that was paused due to "
                   "previous migration");
        return false;
    }

    if (kvm_hwpoisoned_mem()) {
        error_setg(errp, "Can't migrate this vm with hardware poisoned memory, "
                   "please reboot the vm and try again");
        return false;
    }

    if (migration_is_blocked(errp)) {
        return false;
    }

    if (migrate_mapped_ram()) {
        if (migrate_tls()) {
            error_setg(errp, "Cannot use TLS with mapped-ram");
            return false;
        }

        if (migrate_multifd_compression()) {
            error_setg(errp, "Cannot use compression with mapped-ram");
            return false;
        }
    }

    if (migrate_mode_is_cpr(s)) {
        const char *conflict = NULL;

        if (migrate_postcopy()) {
            conflict = "postcopy";
        } else if (migrate_background_snapshot()) {
            conflict = "background snapshot";
        } else if (migrate_colo()) {
            conflict = "COLO";
        }

        if (conflict) {
            error_setg(errp, "Cannot use %s with CPR", conflict);
            return false;
        }
    }

    if (migrate_init(s, errp)) {
        return false;
    }

    return true;
}
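/*
 * Illustrative QMP usage of the command below (host/port are examples):
 *
 *   {"execute": "migrate", "arguments": {"uri": "tcp:192.168.0.10:4444"}}
 *
 * or, with the newer channel syntax instead of a URI:
 *
 *   {"execute": "migrate", "arguments": {"channels": [
 *       {"channel-type": "main",
 *        "addr": {"transport": "socket", "type": "inet",
 *                 "host": "192.168.0.10", "port": "4444"}}]}}
 */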
bool migration_is_blocked(Error **errp)
{
    GSList *blockers = migration_blockers[migrate_mode()];

    if (qemu_savevm_state_blocked(errp)) {
        return true;
    }

    if (blockers) {
        error_propagate(errp, error_copy(blockers->data));
        return true;
    }

    return false;
}

/* Returns true if we should continue the migration, or false if an error
 * was detected */
static bool migrate_prepare(MigrationState *s, bool resume, Error **errp)
{
    if (resume) {
        if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
            error_setg(errp, "Cannot resume if there is no "
                       "paused migration");
            return false;
        }

        /*
         * Postcopy recovery won't work well with the release-ram
         * capability, since release-ram drops the page buffer as soon
         * as the page is put into the send buffer.  So if a network
         * failure happens, any page buffers that have not yet reached
         * the destination VM but have already been sent from the
         * source VM will be lost forever.  Let's refuse the client from
         * resuming such a postcopy migration.  Luckily release-ram was
         * designed to only be used when src and destination VMs are on
         * the same host, so it should be fine.
         */
        if (migrate_release_ram()) {
            error_setg(errp, "Postcopy recovery cannot work "
                       "when release-ram capability is set");
            return false;
        }

        /* This is a resume, skip init status */
        return true;
    }

    if (migration_is_running()) {
        error_setg(errp, "There's a migration process in progress");
        return false;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return false;
    }

    if (runstate_check(RUN_STATE_POSTMIGRATE)) {
        error_setg(errp, "Can't migrate the vm that was paused due to "
                   "previous migration");
        return false;
    }

    if (kvm_hwpoisoned_mem()) {
        error_setg(errp, "Can't migrate this vm with hardware poisoned memory, "
                   "please reboot the vm and try again");
        return false;
    }

    if (migration_is_blocked(errp)) {
        return false;
    }

    if (migrate_mapped_ram()) {
        if (migrate_tls()) {
            error_setg(errp, "Cannot use TLS with mapped-ram");
            return false;
        }

        if (migrate_multifd_compression()) {
            error_setg(errp, "Cannot use compression with mapped-ram");
            return false;
        }
    }

    if (migrate_mode_is_cpr(s)) {
        const char *conflict = NULL;

        if (migrate_postcopy()) {
            conflict = "postcopy";
        } else if (migrate_background_snapshot()) {
            conflict = "background snapshot";
        } else if (migrate_colo()) {
            conflict = "COLO";
        }

        if (conflict) {
            error_setg(errp, "Cannot use %s with CPR", conflict);
            return false;
        }
    }

    if (migrate_init(s, errp)) {
        return false;
    }

    return true;
}

void qmp_migrate(const char *uri, bool has_channels,
                 MigrationChannelList *channels, bool has_detach, bool detach,
                 bool has_resume, bool resume, Error **errp)
{
    bool resume_requested;
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    g_autoptr(MigrationChannel) channel = NULL;
    MigrationAddress *addr = NULL;

    /*
     * Preliminary checks for the uri and channels arguments.
     */
    if (!uri == !channels) {
        error_setg(errp, "need either 'uri' or 'channels' argument");
        return;
    }

    if (channels) {
        /* Verify that the migration channel list has only one entry */
        if (channels->next) {
            error_setg(errp, "Channel list has more than one entry");
            return;
        }
        addr = channels->value->addr;
    }

    if (uri) {
        /* caller uses the old URI syntax */
        if (!migrate_uri_parse(uri, &channel, errp)) {
            return;
        }
        addr = channel->addr;
    }
    /* transport mechanism not suitable for migration? */
    if (!migration_channels_and_transport_compatible(addr, errp)) {
        return;
    }

    resume_requested = has_resume && resume;
    if (!migrate_prepare(s, resume_requested, errp)) {
        /* Error detected, put into errp */
        return;
    }

    if (!resume_requested) {
        if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
            return;
        }
    }

    if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) {
        SocketAddress *saddr = &addr->u.socket;
        if (saddr->type == SOCKET_ADDRESS_TYPE_INET ||
            saddr->type == SOCKET_ADDRESS_TYPE_UNIX ||
            saddr->type == SOCKET_ADDRESS_TYPE_VSOCK) {
            socket_start_outgoing_migration(s, saddr, &local_err);
        } else if (saddr->type == SOCKET_ADDRESS_TYPE_FD) {
            fd_start_outgoing_migration(s, saddr->u.fd.str, &local_err);
        }
#ifdef CONFIG_RDMA
    } else if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) {
        rdma_start_outgoing_migration(s, &addr->u.rdma, &local_err);
#endif
    } else if (addr->transport == MIGRATION_ADDRESS_TYPE_EXEC) {
        exec_start_outgoing_migration(s, addr->u.exec.args, &local_err);
    } else if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) {
        file_start_outgoing_migration(s, &addr->u.file, &local_err);
    } else {
        error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                          MIGRATION_STATUS_FAILED);
    }

    if (local_err) {
        if (!resume_requested) {
            yank_unregister_instance(MIGRATION_YANK_INSTANCE);
        }
        migrate_fd_error(s, local_err);
        error_propagate(errp, local_err);
        return;
    }
}

void qmp_migrate_cancel(Error **errp)
{
    migration_cancel(NULL);
}

void qmp_migrate_continue(MigrationStatus state, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (s->state != state) {
        error_setg(errp, "Migration not in expected state: %s",
                   MigrationStatus_str(s->state));
        return;
    }
    qemu_sem_post(&s->pause_sem);
}

int migration_rp_wait(MigrationState *s)
{
    /* If the migration has failed already, ignore the wait */
    if (migrate_has_error(s)) {
        return -1;
    }

    qemu_sem_wait(&s->rp_state.rp_sem);

    /* After the wait, double check that there's no failure */
    if (migrate_has_error(s)) {
        return -1;
    }

    return 0;
}

void migration_rp_kick(MigrationState *s)
{
    qemu_sem_post(&s->rp_state.rp_sem);
}

static struct rp_cmd_args {
    ssize_t     len; /* -1 = variable */
    const char *name;
} rp_cmd_args[] = {
    [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
    [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
    [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
    [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
    [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
    [MIG_RP_MSG_RECV_BITMAP]    = { .len = -1, .name = "RECV_BITMAP" },
    [MIG_RP_MSG_RESUME_ACK]     = { .len =  4, .name = "RESUME_ACK" },
    [MIG_RP_MSG_SWITCHOVER_ACK] = { .len =  0, .name = "SWITCHOVER_ACK" },
    [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
};
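/*
 * Wire-format sketch implied by the table above and by the reader in
 * source_return_path_thread(): each return-path message is a big-endian
 * 16-bit type, a big-endian 16-bit payload length, then the payload.
 * For example, a PONG carrying sequence number 1 is the 8 bytes:
 *
 *     00 02          type   = MIG_RP_MSG_PONG
 *     00 04          length = 4
 *     00 00 00 01    be32 sequence number
 */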
/*
 * Process a request for pages received on the return path.
 * We're allowed to send more than requested (e.g. to round to our page size)
 * and we don't need to send pages that have already been sent.
 */
static void
migrate_handle_rp_req_pages(MigrationState *ms, const char *rbname,
                            ram_addr_t start, size_t len, Error **errp)
{
    long our_host_ps = qemu_real_host_page_size();

    trace_migrate_handle_rp_req_pages(rbname, start, len);

    /*
     * Since we currently insist on matching page sizes, just sanity check
     * we're being asked for whole host pages.
     */
    if (!QEMU_IS_ALIGNED(start, our_host_ps) ||
        !QEMU_IS_ALIGNED(len, our_host_ps)) {
        error_setg(errp, "MIG_RP_MSG_REQ_PAGES: Misaligned page request, start:"
                   RAM_ADDR_FMT " len: %zd", start, len);
        return;
    }

    ram_save_queue_pages(rbname, start, len, errp);
}

static bool migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name,
                                          Error **errp)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);

    if (!block) {
        error_setg(errp, "MIG_RP_MSG_RECV_BITMAP has invalid block name '%s'",
                   block_name);
        return false;
    }

    /* Fetch the received bitmap and refresh the dirty bitmap */
    return ram_dirty_bitmap_reload(s, block, errp);
}

static bool migrate_handle_rp_resume_ack(MigrationState *s,
                                         uint32_t value, Error **errp)
{
    trace_source_return_path_thread_resume_ack(value);

    if (value != MIGRATION_RESUME_ACK_VALUE) {
        error_setg(errp, "illegal resume_ack value %"PRIu32, value);
        return false;
    }

    /* Now both sides are active. */
    migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_RECOVER,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    /* Notify the send thread that it's time to continue sending pages */
    migration_rp_kick(s);

    return true;
}
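/*
 * How the two handlers above fit into postcopy recovery (a summary of the
 * surrounding code, not new protocol): once the channel is re-established,
 * the source asks the destination about each RAMBlock and the destination
 * answers with one MIG_RP_MSG_RECV_BITMAP per block, letting the source
 * reload what was already received; MIG_RP_MSG_RESUME_ACK then flips the
 * state from postcopy-recover back to postcopy-active and kicks the
 * migration thread to resume sending pages.
 */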
/*
 * Release ms->rp_state.from_dst_file (and postcopy_qemufile_src if
 * it exists) in a safe way.
 */
static void migration_release_dst_files(MigrationState *ms)
{
    QEMUFile *file;

    WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
        /*
         * Reset the from_dst_file pointer first before releasing it, as we
         * can't block within the lock section.
         */
        file = ms->rp_state.from_dst_file;
        ms->rp_state.from_dst_file = NULL;
    }

    /*
     * Do the same to the postcopy fast path socket too, if there is one.
     * No locking needed because this qemufile should only be managed by
     * the return path thread.
     */
    if (ms->postcopy_qemufile_src) {
        migration_ioc_unregister_yank_from_file(ms->postcopy_qemufile_src);
        qemu_file_shutdown(ms->postcopy_qemufile_src);
        qemu_fclose(ms->postcopy_qemufile_src);
        ms->postcopy_qemufile_src = NULL;
    }

    qemu_fclose(file);
}
2318 */ 2319 goto out; 2320 2321 case MIG_RP_MSG_PONG: 2322 tmp32 = ldl_be_p(buf); 2323 trace_source_return_path_thread_pong(tmp32); 2324 qemu_sem_post(&ms->rp_state.rp_pong_acks); 2325 break; 2326 2327 case MIG_RP_MSG_REQ_PAGES: 2328 start = ldq_be_p(buf); 2329 len = ldl_be_p(buf + 8); 2330 migrate_handle_rp_req_pages(ms, NULL, start, len, &err); 2331 if (err) { 2332 goto out; 2333 } 2334 break; 2335 2336 case MIG_RP_MSG_REQ_PAGES_ID: 2337 expected_len = 12 + 1; /* header + termination */ 2338 2339 if (header_len >= expected_len) { 2340 start = ldq_be_p(buf); 2341 len = ldl_be_p(buf + 8); 2342 /* Now we expect an idstr */ 2343 tmp32 = buf[12]; /* Length of the following idstr */ 2344 buf[13 + tmp32] = '\0'; 2345 expected_len += tmp32; 2346 } 2347 if (header_len != expected_len) { 2348 error_setg(&err, "Req_Page_id with length %d expecting %zd", 2349 header_len, expected_len); 2350 goto out; 2351 } 2352 migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len, 2353 &err); 2354 if (err) { 2355 goto out; 2356 } 2357 break; 2358 2359 case MIG_RP_MSG_RECV_BITMAP: 2360 if (header_len < 1) { 2361 error_setg(&err, "MIG_RP_MSG_RECV_BITMAP missing block name"); 2362 goto out; 2363 } 2364 /* Format: len (1B) + idstr (<255B). This ends the idstr. */ 2365 buf[buf[0] + 1] = '\0'; 2366 if (!migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1), &err)) { 2367 goto out; 2368 } 2369 break; 2370 2371 case MIG_RP_MSG_RESUME_ACK: 2372 tmp32 = ldl_be_p(buf); 2373 if (!migrate_handle_rp_resume_ack(ms, tmp32, &err)) { 2374 goto out; 2375 } 2376 break; 2377 2378 case MIG_RP_MSG_SWITCHOVER_ACK: 2379 ms->switchover_acked = true; 2380 trace_source_return_path_thread_switchover_acked(); 2381 break; 2382 2383 default: 2384 break; 2385 } 2386 } 2387 2388 out: 2389 if (err) { 2390 migrate_set_error(ms, err); 2391 error_free(err); 2392 trace_source_return_path_thread_bad_end(); 2393 } 2394 2395 if (ms->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { 2396 /* 2397 * this will be extremely unlikely: that we got yet another network 2398 * issue during recovering of the 1st network failure.. during this 2399 * period the main migration thread can be waiting on rp_sem for 2400 * this thread to sync with the other side. 2401 * 2402 * When this happens, explicitly kick the migration thread out of 2403 * RECOVER stage and back to PAUSED, so the admin can try 2404 * everything again. 2405 */ 2406 migration_rp_kick(ms); 2407 } 2408 2409 trace_source_return_path_thread_end(); 2410 rcu_unregister_thread(); 2411 2412 return NULL; 2413 } 2414 2415 static int open_return_path_on_source(MigrationState *ms) 2416 { 2417 ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file); 2418 if (!ms->rp_state.from_dst_file) { 2419 return -1; 2420 } 2421 2422 trace_open_return_path_on_source(); 2423 2424 qemu_thread_create(&ms->rp_state.rp_thread, "mig/src/rp-thr", 2425 source_return_path_thread, ms, QEMU_THREAD_JOINABLE); 2426 ms->rp_state.rp_thread_created = true; 2427 2428 trace_open_return_path_on_source_continue(); 2429 2430 return 0; 2431 } 2432 2433 /* Return true if error detected, or false otherwise */ 2434 static bool close_return_path_on_source(MigrationState *ms) 2435 { 2436 if (!ms->rp_state.rp_thread_created) { 2437 return false; 2438 } 2439 2440 trace_migration_return_path_end_before(); 2441 2442 /* 2443 * If this is a normal exit then the destination will send a SHUT 2444 * and the rp_thread will exit, however if there's an error we 2445 * need to cause it to exit. 
/* Return true if error detected, or false otherwise */
static bool close_return_path_on_source(MigrationState *ms)
{
    if (!ms->rp_state.rp_thread_created) {
        return false;
    }

    trace_migration_return_path_end_before();

    /*
     * If this is a normal exit then the destination will send a SHUT
     * and the rp_thread will exit; however, if there's an error we
     * need to cause it to exit.  shutdown(2), if we have it, will
     * cause it to unblock if it's stuck waiting for the destination.
     */
    WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
        if (migrate_has_error(ms) && ms->rp_state.from_dst_file) {
            qemu_file_shutdown(ms->rp_state.from_dst_file);
        }
    }

    qemu_thread_join(&ms->rp_state.rp_thread);
    ms->rp_state.rp_thread_created = false;
    migration_release_dst_files(ms);
    trace_migration_return_path_end_after();

    /* The return path persists the error in MigrationState when it quits */
    return migrate_has_error(ms);
}

static inline void
migration_wait_main_channel(MigrationState *ms)
{
    /* Wait until one PONG message received */
    qemu_sem_wait(&ms->rp_state.rp_pong_acks);
}
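/*
 * Overview sketch of the switchover stream emitted by postcopy_start()
 * below, derived from the calls it makes, in order:
 *
 *   1. final data of non-postcopiable iterative devices
 *   2. the discard bitmap telling the destination which received pages
 *      must be dropped because they are dirty again
 *   3. one CMD_PACKAGED blob containing:
 *        postcopy-listen -> remaining device state -> postcopy-run
 *
 * The packaging in step 3 exists so the destination can pull the whole
 * device state off the channel before interpreting it, leaving the channel
 * free to service page requests while the device state is loaded.
 */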
/*
 * Switch from normal iteration to postcopy
 * Returns non-0 on error
 */
static int postcopy_start(MigrationState *ms, Error **errp)
{
    int ret;
    QIOChannelBuffer *bioc;
    QEMUFile *fb;
    uint64_t bandwidth = migrate_max_postcopy_bandwidth();
    bool restart_block = false;
    int cur_state = MIGRATION_STATUS_ACTIVE;

    if (migrate_postcopy_preempt()) {
        migration_wait_main_channel(ms);
        if (postcopy_preempt_establish_channel(ms)) {
            migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED);
            error_setg(errp, "%s: Failed to establish preempt channel",
                       __func__);
            return -1;
        }
    }

    if (!migrate_pause_before_switchover()) {
        migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_POSTCOPY_ACTIVE);
    }

    trace_postcopy_start();
    bql_lock();
    trace_postcopy_start_set_run();

    ret = migration_stop_vm(ms, RUN_STATE_FINISH_MIGRATE);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "%s: Failed to stop the VM", __func__);
        goto fail;
    }

    ret = migration_maybe_pause(ms, &cur_state,
                                MIGRATION_STATUS_POSTCOPY_ACTIVE);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "%s: Failed in migration_maybe_pause()",
                         __func__);
        goto fail;
    }

    ret = bdrv_inactivate_all();
    if (ret < 0) {
        error_setg_errno(errp, -ret, "%s: Failed in bdrv_inactivate_all()",
                         __func__);
        goto fail;
    }
    restart_block = true;

    /*
     * Cause any non-postcopiable, but iterative devices to
     * send out their final data.
     */
    qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false);

    /*
     * In the 'finish-migrate' state, with the io-lock held, everything
     * should be quiet, but we've potentially still got dirty pages and we
     * need to tell the destination to throw any pages it's already received
     * that are dirty.
     */
    if (migrate_postcopy_ram()) {
        ram_postcopy_send_discard_bitmap(ms);
    }

    /*
     * Send the rest of the state - note that things that are doing postcopy
     * will notice we're in POSTCOPY_ACTIVE and not actually
     * wrap their state up here.
     */
    migration_rate_set(bandwidth);
    if (migrate_postcopy_ram()) {
        /* Ping just for debugging, helps line traces up */
        qemu_savevm_send_ping(ms->to_dst_file, 2);
    }

    /*
     * While loading the device state we may trigger page transfer
     * requests and the fd must be free to process those, and thus
     * the destination must read the whole device state off the fd before
     * it starts processing it.  Unfortunately the ad-hoc migration format
     * doesn't allow the destination to know the size to read without fully
     * parsing it through each device's load-state code (especially the open
     * coded devices that use get/put).
     * So we wrap the device state up in a package with a length at the start;
     * to do this we use a qemu_buf to hold the whole of the device state.
     */
    bioc = qio_channel_buffer_new(4096);
    qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
    fb = qemu_file_new_output(QIO_CHANNEL(bioc));
    object_unref(OBJECT(bioc));

    /*
     * Make sure the receiver can get incoming pages before we send the rest
     * of the state
     */
    qemu_savevm_send_postcopy_listen(fb);

    qemu_savevm_state_complete_precopy(fb, false, false);
    if (migrate_postcopy_ram()) {
        qemu_savevm_send_ping(fb, 3);
    }

    qemu_savevm_send_postcopy_run(fb);

    /* <><> end of stuff going into the package */

    /*
     * Last point of recovery; as soon as we send the package the destination
     * can open devices and potentially start running.
     * Let's just check again that we've not got any errors.
     */
    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_setg(errp, "postcopy_start: Migration stream errored (pre package)");
        goto fail_closefb;
    }

    restart_block = false;

    /* Now send that blob */
    if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
        error_setg(errp, "%s: Failed to send packaged data", __func__);
        goto fail_closefb;
    }
    qemu_fclose(fb);

    /*
     * Send a notify to give a chance for anything that needs to happen
     * at the transition to postcopy and after the device state; in particular
     * spice needs to trigger a transition now.
     */
    migration_call_notifiers(ms, MIG_EVENT_PRECOPY_DONE, NULL);

    migration_downtime_end(ms);

    bql_unlock();

    if (migrate_postcopy_ram()) {
        /*
         * Although this ping is just for debug, it could potentially be
         * used for getting a better measurement of downtime at the source.
         */
        qemu_savevm_send_ping(ms->to_dst_file, 4);
    }

    if (migrate_release_ram()) {
        ram_postcopy_migrated_memory_release(ms);
    }

    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_setg_errno(errp, -ret, "postcopy_start: Migration stream error");
        bql_lock();
        goto fail;
    }
    trace_postcopy_preempt_enabled(migrate_postcopy_preempt());

    return ret;
2641 */ 2642 Error *local_err = NULL; 2643 2644 bdrv_activate_all(&local_err); 2645 if (local_err) { 2646 error_report_err(local_err); 2647 } 2648 } 2649 migration_call_notifiers(ms, MIG_EVENT_PRECOPY_FAILED, NULL); 2650 bql_unlock(); 2651 return -1; 2652 } 2653 2654 /** 2655 * migration_maybe_pause: Pause if required to by 2656 * migrate_pause_before_switchover called with the BQL locked 2657 * Returns: 0 on success 2658 */ 2659 static int migration_maybe_pause(MigrationState *s, 2660 int *current_active_state, 2661 int new_state) 2662 { 2663 if (!migrate_pause_before_switchover()) { 2664 return 0; 2665 } 2666 2667 /* Since leaving this state is not atomic with posting the semaphore 2668 * it's possible that someone could have issued multiple migrate_continue 2669 * and the semaphore is incorrectly positive at this point; 2670 * the docs say it's undefined to reinit a semaphore that's already 2671 * init'd, so use timedwait to eat up any existing posts. 2672 */ 2673 while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) { 2674 /* This block intentionally left blank */ 2675 } 2676 2677 /* 2678 * If the migration is cancelled when it is in the completion phase, 2679 * the migration state is set to MIGRATION_STATUS_CANCELLING. 2680 * So we don't need to wait a semaphore, otherwise we would always 2681 * wait for the 'pause_sem' semaphore. 2682 */ 2683 if (s->state != MIGRATION_STATUS_CANCELLING) { 2684 bql_unlock(); 2685 migrate_set_state(&s->state, *current_active_state, 2686 MIGRATION_STATUS_PRE_SWITCHOVER); 2687 qemu_sem_wait(&s->pause_sem); 2688 migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER, 2689 new_state); 2690 *current_active_state = new_state; 2691 bql_lock(); 2692 } 2693 2694 return s->state == new_state ? 0 : -EINVAL; 2695 } 2696 2697 static int migration_completion_precopy(MigrationState *s, 2698 int *current_active_state) 2699 { 2700 int ret; 2701 2702 bql_lock(); 2703 2704 if (!migrate_mode_is_cpr(s)) { 2705 ret = migration_stop_vm(s, RUN_STATE_FINISH_MIGRATE); 2706 if (ret < 0) { 2707 goto out_unlock; 2708 } 2709 } 2710 2711 ret = migration_maybe_pause(s, current_active_state, 2712 MIGRATION_STATUS_DEVICE); 2713 if (ret < 0) { 2714 goto out_unlock; 2715 } 2716 2717 /* 2718 * Inactivate disks except in COLO, and track that we have done so in order 2719 * to remember to reactivate them if migration fails or is cancelled. 2720 */ 2721 s->block_inactive = !migrate_colo(); 2722 migration_rate_set(RATE_LIMIT_DISABLED); 2723 ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false, 2724 s->block_inactive); 2725 out_unlock: 2726 bql_unlock(); 2727 return ret; 2728 } 2729 2730 static void migration_completion_postcopy(MigrationState *s) 2731 { 2732 trace_migration_completion_postcopy_end(); 2733 2734 bql_lock(); 2735 qemu_savevm_state_complete_postcopy(s->to_dst_file); 2736 bql_unlock(); 2737 2738 /* 2739 * Shutdown the postcopy fast path thread. This is only needed when dest 2740 * QEMU binary is old (7.1/7.2). QEMU 8.0+ doesn't need this. 2741 */ 2742 if (migrate_postcopy_preempt() && s->preempt_pre_7_2) { 2743 postcopy_preempt_shutdown_file(s); 2744 } 2745 2746 trace_migration_completion_postcopy_end_after_complete(); 2747 } 2748 2749 static void migration_completion_failed(MigrationState *s, 2750 int current_active_state) 2751 { 2752 if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE || 2753 s->state == MIGRATION_STATUS_DEVICE)) { 2754 /* 2755 * If not doing postcopy, vm_start() will be called: let's 2756 * regain control on images. 
2757 */ 2758 Error *local_err = NULL; 2759 2760 bql_lock(); 2761 bdrv_activate_all(&local_err); 2762 if (local_err) { 2763 error_report_err(local_err); 2764 } else { 2765 s->block_inactive = false; 2766 } 2767 bql_unlock(); 2768 } 2769 2770 migrate_set_state(&s->state, current_active_state, 2771 MIGRATION_STATUS_FAILED); 2772 } 2773 2774 /** 2775 * migration_completion: Used by migration_thread when there's not much left. 2776 * The caller 'breaks' the loop when this returns. 2777 * 2778 * @s: Current migration state 2779 */ 2780 static void migration_completion(MigrationState *s) 2781 { 2782 int ret = 0; 2783 int current_active_state = s->state; 2784 Error *local_err = NULL; 2785 2786 if (s->state == MIGRATION_STATUS_ACTIVE) { 2787 ret = migration_completion_precopy(s, ¤t_active_state); 2788 } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { 2789 migration_completion_postcopy(s); 2790 } else { 2791 ret = -1; 2792 } 2793 2794 if (ret < 0) { 2795 goto fail; 2796 } 2797 2798 if (close_return_path_on_source(s)) { 2799 goto fail; 2800 } 2801 2802 if (qemu_file_get_error(s->to_dst_file)) { 2803 trace_migration_completion_file_err(); 2804 goto fail; 2805 } 2806 2807 if (migrate_colo() && s->state == MIGRATION_STATUS_ACTIVE) { 2808 /* COLO does not support postcopy */ 2809 migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE, 2810 MIGRATION_STATUS_COLO); 2811 } else { 2812 migration_completion_end(s); 2813 } 2814 2815 return; 2816 2817 fail: 2818 if (qemu_file_get_error_obj(s->to_dst_file, &local_err)) { 2819 migrate_set_error(s, local_err); 2820 error_free(local_err); 2821 } else if (ret) { 2822 error_setg_errno(&local_err, -ret, "Error in migration completion"); 2823 migrate_set_error(s, local_err); 2824 error_free(local_err); 2825 } 2826 2827 migration_completion_failed(s, current_active_state); 2828 } 2829 2830 /** 2831 * bg_migration_completion: Used by bg_migration_thread when after all the 2832 * RAM has been saved. The caller 'breaks' the loop when this returns. 2833 * 2834 * @s: Current migration state 2835 */ 2836 static void bg_migration_completion(MigrationState *s) 2837 { 2838 int current_active_state = s->state; 2839 2840 if (s->state == MIGRATION_STATUS_ACTIVE) { 2841 /* 2842 * By this moment we have RAM content saved into the migration stream. 2843 * The next step is to flush the non-RAM content (device state) 2844 * right after the ram content. The device state has been stored into 2845 * the temporary buffer before RAM saving started. 
2846 */ 2847 qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage); 2848 qemu_fflush(s->to_dst_file); 2849 } else if (s->state == MIGRATION_STATUS_CANCELLING) { 2850 goto fail; 2851 } 2852 2853 if (qemu_file_get_error(s->to_dst_file)) { 2854 trace_migration_completion_file_err(); 2855 goto fail; 2856 } 2857 2858 migration_completion_end(s); 2859 return; 2860 2861 fail: 2862 migrate_set_state(&s->state, current_active_state, 2863 MIGRATION_STATUS_FAILED); 2864 } 2865 2866 typedef enum MigThrError { 2867 /* No error detected */ 2868 MIG_THR_ERR_NONE = 0, 2869 /* Detected error, but resumed successfully */ 2870 MIG_THR_ERR_RECOVERED = 1, 2871 /* Detected fatal error, need to exit */ 2872 MIG_THR_ERR_FATAL = 2, 2873 } MigThrError; 2874 2875 static int postcopy_resume_handshake(MigrationState *s) 2876 { 2877 qemu_savevm_send_postcopy_resume(s->to_dst_file); 2878 2879 while (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { 2880 if (migration_rp_wait(s)) { 2881 return -1; 2882 } 2883 } 2884 2885 if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { 2886 return 0; 2887 } 2888 2889 return -1; 2890 } 2891 2892 /* Return zero if success, or <0 for error */ 2893 static int postcopy_do_resume(MigrationState *s) 2894 { 2895 int ret; 2896 2897 /* 2898 * Call all the resume_prepare() hooks, so that modules can be 2899 * ready for the migration resume. 2900 */ 2901 ret = qemu_savevm_state_resume_prepare(s); 2902 if (ret) { 2903 error_report("%s: resume_prepare() failure detected: %d", 2904 __func__, ret); 2905 return ret; 2906 } 2907 2908 /* 2909 * If preempt is enabled, re-establish the preempt channel. Note that 2910 * we do it after resume prepare to make sure the main channel will be 2911 * created before the preempt channel. E.g. with weak network, the 2912 * dest QEMU may get messed up with the preempt and main channels on 2913 * the order of connection setup. This guarantees the correct order. 2914 */ 2915 ret = postcopy_preempt_establish_channel(s); 2916 if (ret) { 2917 error_report("%s: postcopy_preempt_establish_channel(): %d", 2918 __func__, ret); 2919 return ret; 2920 } 2921 2922 /* 2923 * Last handshake with destination on the resume (destination will 2924 * switch to postcopy-active afterwards) 2925 */ 2926 ret = postcopy_resume_handshake(s); 2927 if (ret) { 2928 error_report("%s: handshake failed: %d", __func__, ret); 2929 return ret; 2930 } 2931 2932 return 0; 2933 } 2934 2935 /* 2936 * We don't return until we are in a safe state to continue current 2937 * postcopy migration. Returns MIG_THR_ERR_RECOVERED if recovered, or 2938 * MIG_THR_ERR_FATAL if unrecovery failure happened. 2939 */ 2940 static MigThrError postcopy_pause(MigrationState *s) 2941 { 2942 assert(s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE); 2943 2944 while (true) { 2945 QEMUFile *file; 2946 2947 /* 2948 * We're already pausing, so ignore any errors on the return 2949 * path and just wait for the thread to finish. It will be 2950 * re-created when we resume. 2951 */ 2952 close_return_path_on_source(s); 2953 2954 /* 2955 * Current channel is possibly broken. Release it. Note that this is 2956 * guaranteed even without lock because to_dst_file should only be 2957 * modified by the migration thread. That also guarantees that the 2958 * unregister of yank is safe too without the lock. It should be safe 2959 * even to be within the qemu_file_lock, but we didn't do that to avoid 2960 * taking more mutex (yank_lock) within qemu_file_lock. TL;DR: we make 2961 * the qemu_file_lock critical section as small as possible. 
2962 */ 2963 assert(s->to_dst_file); 2964 migration_ioc_unregister_yank_from_file(s->to_dst_file); 2965 qemu_mutex_lock(&s->qemu_file_lock); 2966 file = s->to_dst_file; 2967 s->to_dst_file = NULL; 2968 qemu_mutex_unlock(&s->qemu_file_lock); 2969 2970 qemu_file_shutdown(file); 2971 qemu_fclose(file); 2972 2973 migrate_set_state(&s->state, s->state, 2974 MIGRATION_STATUS_POSTCOPY_PAUSED); 2975 2976 error_report("Detected IO failure for postcopy. " 2977 "Migration paused."); 2978 2979 /* 2980 * We wait until things fixed up. Then someone will setup the 2981 * status back for us. 2982 */ 2983 while (s->state == MIGRATION_STATUS_POSTCOPY_PAUSED) { 2984 qemu_sem_wait(&s->postcopy_pause_sem); 2985 } 2986 2987 if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { 2988 /* Woken up by a recover procedure. Give it a shot */ 2989 2990 /* Do the resume logic */ 2991 if (postcopy_do_resume(s) == 0) { 2992 /* Let's continue! */ 2993 trace_postcopy_pause_continued(); 2994 return MIG_THR_ERR_RECOVERED; 2995 } else { 2996 /* 2997 * Something wrong happened during the recovery, let's 2998 * pause again. Pause is always better than throwing 2999 * data away. 3000 */ 3001 continue; 3002 } 3003 } else { 3004 /* This is not right... Time to quit. */ 3005 return MIG_THR_ERR_FATAL; 3006 } 3007 } 3008 } 3009 3010 void migration_file_set_error(int ret, Error *err) 3011 { 3012 MigrationState *s = current_migration; 3013 3014 WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) { 3015 if (s->to_dst_file) { 3016 qemu_file_set_error_obj(s->to_dst_file, ret, err); 3017 } else if (err) { 3018 error_report_err(err); 3019 } 3020 } 3021 } 3022 3023 static MigThrError migration_detect_error(MigrationState *s) 3024 { 3025 int ret; 3026 int state = s->state; 3027 Error *local_error = NULL; 3028 3029 if (state == MIGRATION_STATUS_CANCELLING || 3030 state == MIGRATION_STATUS_CANCELLED) { 3031 /* End the migration, but don't set the state to failed */ 3032 return MIG_THR_ERR_FATAL; 3033 } 3034 3035 /* 3036 * Try to detect any file errors. Note that postcopy_qemufile_src will 3037 * be NULL when postcopy preempt is not enabled. 3038 */ 3039 ret = qemu_file_get_error_obj_any(s->to_dst_file, 3040 s->postcopy_qemufile_src, 3041 &local_error); 3042 if (!ret) { 3043 /* Everything is fine */ 3044 assert(!local_error); 3045 return MIG_THR_ERR_NONE; 3046 } 3047 3048 if (local_error) { 3049 migrate_set_error(s, local_error); 3050 error_free(local_error); 3051 } 3052 3053 if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE && ret) { 3054 /* 3055 * For postcopy, we allow the network to be down for a 3056 * while. After that, it can be continued by a 3057 * recovery phase. 3058 */ 3059 return postcopy_pause(s); 3060 } else { 3061 /* 3062 * For precopy (or postcopy with error outside IO), we fail 3063 * with no time. 3064 */ 3065 migrate_set_state(&s->state, state, MIGRATION_STATUS_FAILED); 3066 trace_migration_thread_file_err(); 3067 3068 /* Time to stop the migration, now. */ 3069 return MIG_THR_ERR_FATAL; 3070 } 3071 } 3072 3073 static void migration_completion_end(MigrationState *s) 3074 { 3075 uint64_t bytes = migration_transferred_bytes(); 3076 int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 3077 int64_t transfer_time; 3078 3079 /* 3080 * Take the BQL here so that query-migrate on the QMP thread sees: 3081 * - atomic update of s->total_time and s->mbps; 3082 * - correct ordering of s->mbps update vs. 
static void migration_completion_end(MigrationState *s)
{
    uint64_t bytes = migration_transferred_bytes();
    int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t transfer_time;

    /*
     * Take the BQL here so that query-migrate on the QMP thread sees:
     * - atomic update of s->total_time and s->mbps;
     * - correct ordering of s->mbps update vs. s->state;
     */
    bql_lock();
    migration_downtime_end(s);
    s->total_time = end_time - s->start_time;
    transfer_time = s->total_time - s->setup_time;
    if (transfer_time) {
        s->mbps = ((double) bytes * 8.0) / transfer_time / 1000;
    }

    migrate_set_state(&s->state, s->state,
                      MIGRATION_STATUS_COMPLETED);
    bql_unlock();
}

static void update_iteration_initial_status(MigrationState *s)
{
    /*
     * Update these three fields at the same time to avoid mismatched info
     * leading to a wrong speed calculation.
     */
    s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    s->iteration_initial_bytes = migration_transferred_bytes();
    s->iteration_initial_pages = ram_get_total_transferred_pages();
}

static void migration_update_counters(MigrationState *s,
                                      int64_t current_time)
{
    uint64_t transferred, transferred_pages, time_spent;
    uint64_t current_bytes; /* bytes transferred since the beginning */
    uint64_t switchover_bw;
    /* Expected bandwidth when switching over to destination QEMU */
    double expected_bw_per_ms;
    double bandwidth;

    if (current_time < s->iteration_start_time + BUFFER_DELAY) {
        return;
    }

    switchover_bw = migrate_avail_switchover_bandwidth();
    current_bytes = migration_transferred_bytes();
    transferred = current_bytes - s->iteration_initial_bytes;
    time_spent = current_time - s->iteration_start_time;
    bandwidth = (double)transferred / time_spent;

    if (switchover_bw) {
        /*
         * If the user specified a switchover bandwidth, let's trust the
         * user so that it can be more accurate than what we estimated.
         */
        expected_bw_per_ms = switchover_bw / 1000;
    } else {
        /* If the user doesn't specify bandwidth, we use the estimated */
        expected_bw_per_ms = bandwidth;
    }

    s->threshold_size = expected_bw_per_ms * migrate_downtime_limit();

    s->mbps = (((double) transferred * 8.0) /
               ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;

    transferred_pages = ram_get_total_transferred_pages() -
                        s->iteration_initial_pages;
    s->pages_per_second = (double) transferred_pages /
                          (((double) time_spent / 1000.0));

    /*
     * If we haven't sent anything, we don't want to
     * recalculate.  10000 is a small enough number for our purposes.
     */
    if (stat64_get(&mig_stats.dirty_pages_rate) &&
        transferred > 10000) {
        s->expected_downtime =
            stat64_get(&mig_stats.dirty_bytes_last_sync) / expected_bw_per_ms;
    }

    migration_rate_reset();

    update_iteration_initial_status(s);

    trace_migrate_transferred(transferred, time_spent,
                              /* Both in unit bytes/ms */
                              bandwidth, switchover_bw / 1000,
                              s->threshold_size);
}

static bool migration_can_switchover(MigrationState *s)
{
    if (!migrate_switchover_ack()) {
        return true;
    }

    /* No reason to wait for switchover ACK if VM is stopped */
    if (!runstate_is_running()) {
        return true;
    }

    return s->switchover_acked;
}

/* Migration thread iteration status */
typedef enum {
    MIG_ITERATE_RESUME, /* Resume current iteration */
    MIG_ITERATE_SKIP,   /* Skip current iteration */
    MIG_ITERATE_BREAK,  /* Break the loop */
} MigIterateState;
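/*
 * Worked example, with illustrative numbers, of the threshold_size logic
 * from migration_update_counters() above: an estimated bandwidth of
 * 1 GB/s gives expected_bw_per_ms ~= 1 MB/ms, so a downtime limit of
 * 300 ms yields threshold_size ~= 300 MB; migration_iteration_run() below
 * only attempts completion once the pending bytes drop under that figure.
 */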
3193 */ 3194 static MigIterateState migration_iteration_run(MigrationState *s) 3195 { 3196 uint64_t must_precopy, can_postcopy, pending_size; 3197 Error *local_err = NULL; 3198 bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE; 3199 bool can_switchover = migration_can_switchover(s); 3200 3201 qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy); 3202 pending_size = must_precopy + can_postcopy; 3203 trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy); 3204 3205 if (pending_size < s->threshold_size) { 3206 qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy); 3207 pending_size = must_precopy + can_postcopy; 3208 trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy); 3209 } 3210 3211 if ((!pending_size || pending_size < s->threshold_size) && can_switchover) { 3212 trace_migration_thread_low_pending(pending_size); 3213 migration_completion(s); 3214 return MIG_ITERATE_BREAK; 3215 } 3216 3217 /* Still a significant amount to transfer */ 3218 if (!in_postcopy && must_precopy <= s->threshold_size && can_switchover && 3219 qatomic_read(&s->start_postcopy)) { 3220 if (postcopy_start(s, &local_err)) { 3221 migrate_set_error(s, local_err); 3222 error_report_err(local_err); 3223 } 3224 return MIG_ITERATE_SKIP; 3225 } 3226 3227 /* Just another iteration step */ 3228 qemu_savevm_state_iterate(s->to_dst_file, in_postcopy); 3229 return MIG_ITERATE_RESUME; 3230 } 3231 3232 static void migration_iteration_finish(MigrationState *s) 3233 { 3234 /* If we enabled cpu throttling for auto-converge, turn it off. */ 3235 cpu_throttle_stop(); 3236 3237 bql_lock(); 3238 switch (s->state) { 3239 case MIGRATION_STATUS_COMPLETED: 3240 runstate_set(RUN_STATE_POSTMIGRATE); 3241 break; 3242 case MIGRATION_STATUS_COLO: 3243 assert(migrate_colo()); 3244 migrate_start_colo_process(s); 3245 s->vm_old_state = RUN_STATE_RUNNING; 3246 /* Fallthrough */ 3247 case MIGRATION_STATUS_FAILED: 3248 case MIGRATION_STATUS_CANCELLED: 3249 case MIGRATION_STATUS_CANCELLING: 3250 if (runstate_is_live(s->vm_old_state)) { 3251 if (!runstate_check(RUN_STATE_SHUTDOWN)) { 3252 vm_start(); 3253 } 3254 } else { 3255 if (runstate_check(RUN_STATE_FINISH_MIGRATE)) { 3256 runstate_set(s->vm_old_state); 3257 } 3258 } 3259 break; 3260 3261 default: 3262 /* Should not reach here, but if so, forgive the VM. */ 3263 error_report("%s: Unknown ending state %d", __func__, s->state); 3264 break; 3265 } 3266 3267 migration_bh_schedule(migrate_fd_cleanup_bh, s); 3268 bql_unlock(); 3269 } 3270 3271 static void bg_migration_iteration_finish(MigrationState *s) 3272 { 3273 /* 3274 * Stop tracking RAM writes - un-protect memory, un-register UFFD 3275 * memory ranges, flush kernel wait queues and wake up threads 3276 * waiting for write fault to be resolved. 3277 */ 3278 ram_write_tracking_stop(); 3279 3280 bql_lock(); 3281 switch (s->state) { 3282 case MIGRATION_STATUS_COMPLETED: 3283 case MIGRATION_STATUS_ACTIVE: 3284 case MIGRATION_STATUS_FAILED: 3285 case MIGRATION_STATUS_CANCELLED: 3286 case MIGRATION_STATUS_CANCELLING: 3287 break; 3288 3289 default: 3290 /* Should not reach here, but if so, forgive the VM. */ 3291 error_report("%s: Unknown ending state %d", __func__, s->state); 3292 break; 3293 } 3294 3295 migration_bh_schedule(migrate_fd_cleanup_bh, s); 3296 bql_unlock(); 3297 } 3298 3299 /* 3300 * Return true if continue to the next iteration directly, false 3301 * otherwise. 
3302 */ 3303 static MigIterateState bg_migration_iteration_run(MigrationState *s) 3304 { 3305 int res; 3306 3307 res = qemu_savevm_state_iterate(s->to_dst_file, false); 3308 if (res > 0) { 3309 bg_migration_completion(s); 3310 return MIG_ITERATE_BREAK; 3311 } 3312 3313 return MIG_ITERATE_RESUME; 3314 } 3315 3316 void migration_make_urgent_request(void) 3317 { 3318 qemu_sem_post(&migrate_get_current()->rate_limit_sem); 3319 } 3320 3321 void migration_consume_urgent_request(void) 3322 { 3323 qemu_sem_wait(&migrate_get_current()->rate_limit_sem); 3324 } 3325 3326 /* Returns true if the rate limiting was broken by an urgent request */ 3327 bool migration_rate_limit(void) 3328 { 3329 int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 3330 MigrationState *s = migrate_get_current(); 3331 3332 bool urgent = false; 3333 migration_update_counters(s, now); 3334 if (migration_rate_exceeded(s->to_dst_file)) { 3335 3336 if (qemu_file_get_error(s->to_dst_file)) { 3337 return false; 3338 } 3339 /* 3340 * Wait for a delay to do rate limiting OR 3341 * something urgent to post the semaphore. 3342 */ 3343 int ms = s->iteration_start_time + BUFFER_DELAY - now; 3344 trace_migration_rate_limit_pre(ms); 3345 if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) { 3346 /* 3347 * We were woken by one or more urgent things but 3348 * the timedwait will have consumed one of them. 3349 * The service routine for the urgent wake will dec 3350 * the semaphore itself for each item it consumes, 3351 * so add this one we just eat back. 3352 */ 3353 qemu_sem_post(&s->rate_limit_sem); 3354 urgent = true; 3355 } 3356 trace_migration_rate_limit_post(urgent); 3357 } 3358 return urgent; 3359 } 3360 3361 /* 3362 * if failover devices are present, wait they are completely 3363 * unplugged 3364 */ 3365 3366 static void qemu_savevm_wait_unplug(MigrationState *s, int old_state, 3367 int new_state) 3368 { 3369 if (qemu_savevm_state_guest_unplug_pending()) { 3370 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_WAIT_UNPLUG); 3371 3372 while (s->state == MIGRATION_STATUS_WAIT_UNPLUG && 3373 qemu_savevm_state_guest_unplug_pending()) { 3374 qemu_sem_timedwait(&s->wait_unplug_sem, 250); 3375 } 3376 if (s->state != MIGRATION_STATUS_WAIT_UNPLUG) { 3377 int timeout = 120; /* 30 seconds */ 3378 /* 3379 * migration has been canceled 3380 * but as we have started an unplug we must wait the end 3381 * to be able to plug back the card 3382 */ 3383 while (timeout-- && qemu_savevm_state_guest_unplug_pending()) { 3384 qemu_sem_timedwait(&s->wait_unplug_sem, 250); 3385 } 3386 if (qemu_savevm_state_guest_unplug_pending() && 3387 !qtest_enabled()) { 3388 warn_report("migration: partially unplugged device on " 3389 "failure"); 3390 } 3391 } 3392 3393 migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG, new_state); 3394 } else { 3395 migrate_set_state(&s->state, old_state, new_state); 3396 } 3397 } 3398 3399 /* 3400 * Master migration thread on the source VM. 3401 * It drives the migration and pumps the data down the outgoing channel. 
3402 */ 3403 static void *migration_thread(void *opaque) 3404 { 3405 MigrationState *s = opaque; 3406 MigrationThread *thread = NULL; 3407 int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST); 3408 MigThrError thr_error; 3409 bool urgent = false; 3410 Error *local_err = NULL; 3411 int ret; 3412 3413 thread = migration_threads_add("live_migration", qemu_get_thread_id()); 3414 3415 rcu_register_thread(); 3416 3417 object_ref(OBJECT(s)); 3418 update_iteration_initial_status(s); 3419 3420 if (!multifd_send_setup()) { 3421 goto out; 3422 } 3423 3424 bql_lock(); 3425 qemu_savevm_state_header(s->to_dst_file); 3426 bql_unlock(); 3427 3428 /* 3429 * If we opened the return path, we need to make sure dst has it 3430 * opened as well. 3431 */ 3432 if (s->rp_state.rp_thread_created) { 3433 /* Now tell the dest that it should open its end so it can reply */ 3434 qemu_savevm_send_open_return_path(s->to_dst_file); 3435 3436 /* And do a ping that will make stuff easier to debug */ 3437 qemu_savevm_send_ping(s->to_dst_file, 1); 3438 } 3439 3440 if (migrate_postcopy()) { 3441 /* 3442 * Tell the destination that we *might* want to do postcopy later; 3443 * if the other end can't do postcopy it should fail now, nice and 3444 * early. 3445 */ 3446 qemu_savevm_send_postcopy_advise(s->to_dst_file); 3447 } 3448 3449 if (migrate_colo()) { 3450 /* Notify migration destination that we enable COLO */ 3451 qemu_savevm_send_colo_enable(s->to_dst_file); 3452 } 3453 3454 bql_lock(); 3455 ret = qemu_savevm_state_setup(s->to_dst_file, &local_err); 3456 bql_unlock(); 3457 3458 qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP, 3459 MIGRATION_STATUS_ACTIVE); 3460 3461 /* 3462 * Handle SETUP failures after waiting for virtio-net-failover 3463 * devices to unplug. This to preserve migration state transitions. 3464 */ 3465 if (ret) { 3466 migrate_set_error(s, local_err); 3467 error_free(local_err); 3468 migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE, 3469 MIGRATION_STATUS_FAILED); 3470 goto out; 3471 } 3472 3473 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; 3474 3475 trace_migration_thread_setup_complete(); 3476 3477 while (migration_is_active()) { 3478 if (urgent || !migration_rate_exceeded(s->to_dst_file)) { 3479 MigIterateState iter_state = migration_iteration_run(s); 3480 if (iter_state == MIG_ITERATE_SKIP) { 3481 continue; 3482 } else if (iter_state == MIG_ITERATE_BREAK) { 3483 break; 3484 } 3485 } 3486 3487 /* 3488 * Try to detect any kind of failures, and see whether we 3489 * should stop the migration now. 3490 */ 3491 thr_error = migration_detect_error(s); 3492 if (thr_error == MIG_THR_ERR_FATAL) { 3493 /* Stop migration */ 3494 break; 3495 } else if (thr_error == MIG_THR_ERR_RECOVERED) { 3496 /* 3497 * Just recovered from a e.g. network failure, reset all 3498 * the local variables. This is important to avoid 3499 * breaking transferred_bytes and bandwidth calculation 3500 */ 3501 update_iteration_initial_status(s); 3502 } 3503 3504 urgent = migration_rate_limit(); 3505 } 3506 3507 out: 3508 trace_migration_thread_after_loop(); 3509 migration_iteration_finish(s); 3510 object_unref(OBJECT(s)); 3511 rcu_unregister_thread(); 3512 migration_threads_remove(thread); 3513 return NULL; 3514 } 3515 3516 static void bg_migration_vm_start_bh(void *opaque) 3517 { 3518 MigrationState *s = opaque; 3519 3520 vm_resume(s->vm_old_state); 3521 migration_downtime_end(s); 3522 } 3523 3524 /** 3525 * Background snapshot thread, based on live migration code. 
/**
 * Background snapshot thread, based on live migration code.
 * This is an alternative implementation of the live migration mechanism,
 * introduced specifically to support background snapshots.
 *
 * It takes advantage of the userfault_fd write protection mechanism
 * introduced in the v5.7 kernel.  Compared to existing dirty page logging
 * migration, much less stream traffic is produced, resulting in smaller
 * snapshot images, simply because no page duplicates can get into the
 * stream.
 *
 * Another key point is that the generated vmstate stream reflects the
 * machine state 'frozen' at the beginning of snapshot creation, whereas
 * the dirty page logging mechanism effectively results in the saved
 * snapshot being the state of the VM at the end of the process.
 */
static void *bg_migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    int64_t setup_start;
    MigThrError thr_error;
    QEMUFile *fb;
    bool early_fail = true;
    Error *local_err = NULL;
    int ret;

    rcu_register_thread();
    object_ref(OBJECT(s));

    migration_rate_set(RATE_LIMIT_DISABLED);

    setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    /*
     * We want to save the vmstate for the moment when migration has been
     * initiated, but also we want to save RAM content while the VM is
     * running.  The RAM content should appear first in the vmstate.  So, we
     * first stash the non-RAM part of the vmstate to the temporary buffer,
     * then write the RAM part of the vmstate to the migration stream
     * with vCPUs running and, finally, write the stashed non-RAM part of
     * the vmstate from the buffer to the migration stream.
     */
    s->bioc = qio_channel_buffer_new(512 * 1024);
    qio_channel_set_name(QIO_CHANNEL(s->bioc), "vmstate-buffer");
    fb = qemu_file_new_output(QIO_CHANNEL(s->bioc));
    object_unref(OBJECT(s->bioc));

    update_iteration_initial_status(s);

    /*
     * Prepare for tracking memory writes with UFFD-WP - populate
     * RAM pages before protecting.
     */
#ifdef __linux__
    ram_write_tracking_prepare();
#endif

    bql_lock();
    qemu_savevm_state_header(s->to_dst_file);
    ret = qemu_savevm_state_setup(s->to_dst_file, &local_err);
    bql_unlock();

    qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
                            MIGRATION_STATUS_ACTIVE);

    /*
     * Handle SETUP failures after waiting for virtio-net-failover
     * devices to unplug.  This is to preserve migration state transitions.
     */
    if (ret) {
        migrate_set_error(s, local_err);
        error_free(local_err);
        migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        goto fail_setup;
    }

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;

    trace_migration_thread_setup_complete();

    bql_lock();

    if (migration_stop_vm(s, RUN_STATE_PAUSED)) {
        goto fail;
    }
    /*
     * Put vCPUs in sync with shadow context structures, then
     * save their state to the channel-buffer along with devices.
     */
    cpu_synchronize_all_states();
    if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) {
        goto fail;
    }
    /*
     * Since we are going to get non-iterable state data directly
     * from s->bioc->data, an explicit flush is needed here.
     */
3619 */ 3620 qemu_fflush(fb); 3621 3622 /* Now initialize UFFD context and start tracking RAM writes */ 3623 if (ram_write_tracking_start()) { 3624 goto fail; 3625 } 3626 early_fail = false; 3627 3628 /* 3629 * Start VM from BH handler to avoid write-fault lock here. 3630 * UFFD-WP protection for the whole RAM is already enabled so 3631 * calling VM state change notifiers from vm_start() would initiate 3632 * writes to virtio VQs memory which is in write-protected region. 3633 */ 3634 migration_bh_schedule(bg_migration_vm_start_bh, s); 3635 bql_unlock(); 3636 3637 while (migration_is_active()) { 3638 MigIterateState iter_state = bg_migration_iteration_run(s); 3639 if (iter_state == MIG_ITERATE_SKIP) { 3640 continue; 3641 } else if (iter_state == MIG_ITERATE_BREAK) { 3642 break; 3643 } 3644 3645 /* 3646 * Try to detect any kind of failures, and see whether we 3647 * should stop the migration now. 3648 */ 3649 thr_error = migration_detect_error(s); 3650 if (thr_error == MIG_THR_ERR_FATAL) { 3651 /* Stop migration */ 3652 break; 3653 } 3654 3655 migration_update_counters(s, qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); 3656 } 3657 3658 trace_migration_thread_after_loop(); 3659 3660 fail: 3661 if (early_fail) { 3662 migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE, 3663 MIGRATION_STATUS_FAILED); 3664 bql_unlock(); 3665 } 3666 3667 fail_setup: 3668 bg_migration_iteration_finish(s); 3669 3670 qemu_fclose(fb); 3671 object_unref(OBJECT(s)); 3672 rcu_unregister_thread(); 3673 3674 return NULL; 3675 } 3676 3677 void migrate_fd_connect(MigrationState *s, Error *error_in) 3678 { 3679 Error *local_err = NULL; 3680 uint64_t rate_limit; 3681 bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED; 3682 int ret; 3683 3684 /* 3685 * If there's a previous error, free it and prepare for another one. 3686 * Meanwhile if migration completes successfully, there won't have an error 3687 * dumped when calling migrate_fd_cleanup(). 3688 */ 3689 migrate_error_free(s); 3690 3691 s->expected_downtime = migrate_downtime_limit(); 3692 if (error_in) { 3693 migrate_fd_error(s, error_in); 3694 if (resume) { 3695 /* 3696 * Don't do cleanup for resume if channel is invalid, but only dump 3697 * the error. We wait for another channel connect from the user. 3698 * The error_report still gives HMP user a hint on what failed. 3699 * It's normally done in migrate_fd_cleanup(), but call it here 3700 * explicitly. 3701 */ 3702 error_report_err(error_copy(s->error)); 3703 } else { 3704 migrate_fd_cleanup(s); 3705 } 3706 return; 3707 } 3708 3709 if (resume) { 3710 /* This is a resumed migration */ 3711 rate_limit = migrate_max_postcopy_bandwidth(); 3712 } else { 3713 /* This is a fresh new migration */ 3714 rate_limit = migrate_max_bandwidth(); 3715 3716 /* Notify before starting migration thread */ 3717 if (migration_call_notifiers(s, MIG_EVENT_PRECOPY_SETUP, &local_err)) { 3718 goto fail; 3719 } 3720 } 3721 3722 migration_rate_set(rate_limit); 3723 qemu_file_set_blocking(s->to_dst_file, true); 3724 3725 /* 3726 * Open the return path. For postcopy, it is used exclusively. For 3727 * precopy, only if user specified "return-path" capability would 3728 * QEMU uses the return path. 3729 */ 3730 if (migrate_postcopy_ram() || migrate_return_path()) { 3731 if (open_return_path_on_source(s)) { 3732 error_setg(&local_err, "Unable to open return-path for postcopy"); 3733 goto fail; 3734 } 3735 } 3736 3737 /* 3738 * This needs to be done before resuming a postcopy. 
void migrate_fd_connect(MigrationState *s, Error *error_in)
{
    Error *local_err = NULL;
    uint64_t rate_limit;
    bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;
    int ret;

    /*
     * If there's a previous error, free it and prepare for another one.
     * Meanwhile if migration completes successfully, there won't be an
     * error dumped when calling migrate_fd_cleanup().
     */
    migrate_error_free(s);

    s->expected_downtime = migrate_downtime_limit();
    if (error_in) {
        migrate_fd_error(s, error_in);
        if (resume) {
            /*
             * Don't do cleanup for resume if channel is invalid, but only dump
             * the error.  We wait for another channel connect from the user.
             * The error_report still gives HMP user a hint on what failed.
             * It's normally done in migrate_fd_cleanup(), but call it here
             * explicitly.
             */
            error_report_err(error_copy(s->error));
        } else {
            migrate_fd_cleanup(s);
        }
        return;
    }

    if (resume) {
        /* This is a resumed migration */
        rate_limit = migrate_max_postcopy_bandwidth();
    } else {
        /* This is a fresh new migration */
        rate_limit = migrate_max_bandwidth();

        /* Notify before starting migration thread */
        if (migration_call_notifiers(s, MIG_EVENT_PRECOPY_SETUP, &local_err)) {
            goto fail;
        }
    }

    migration_rate_set(rate_limit);
    qemu_file_set_blocking(s->to_dst_file, true);

    /*
     * Open the return path.  For postcopy, it is used exclusively.  For
     * precopy, QEMU uses the return path only if the user specified the
     * "return-path" capability.
     */
    if (migrate_postcopy_ram() || migrate_return_path()) {
        if (open_return_path_on_source(s)) {
            error_setg(&local_err, "Unable to open return-path for postcopy");
            goto fail;
        }
    }

    /*
     * This needs to be done before resuming a postcopy.  Note: for newer
     * QEMUs we will delay the channel creation until postcopy_start(), to
     * avoid disorder of channel creations.
     */
    if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
        postcopy_preempt_setup(s);
    }

    if (resume) {
        /* Wake up the main migration thread to do the recovery */
        migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
                          MIGRATION_STATUS_POSTCOPY_RECOVER);
        qemu_sem_post(&s->postcopy_pause_sem);
        return;
    }

    if (migrate_mode_is_cpr(s)) {
        ret = migration_stop_vm(s, RUN_STATE_FINISH_MIGRATE);
        if (ret < 0) {
            error_setg(&local_err, "migration_stop_vm failed, error %d", -ret);
            goto fail;
        }
    }

    if (migrate_background_snapshot()) {
        qemu_thread_create(&s->thread, "mig/snapshot",
                           bg_migration_thread, s, QEMU_THREAD_JOINABLE);
    } else {
        qemu_thread_create(&s->thread, "mig/src/main",
                           migration_thread, s, QEMU_THREAD_JOINABLE);
    }
    s->migration_thread_running = true;
    return;

fail:
    migrate_set_error(s, local_err);
    migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
    error_report_err(local_err);
    migrate_fd_cleanup(s);
}

static void migration_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->user_creatable = false;
    device_class_set_props(dc, migration_properties);
}

static void migration_instance_finalize(Object *obj)
{
    MigrationState *ms = MIGRATION_OBJ(obj);

    qemu_mutex_destroy(&ms->error_mutex);
    qemu_mutex_destroy(&ms->qemu_file_lock);
    qemu_sem_destroy(&ms->wait_unplug_sem);
    qemu_sem_destroy(&ms->rate_limit_sem);
    qemu_sem_destroy(&ms->pause_sem);
    qemu_sem_destroy(&ms->postcopy_pause_sem);
    qemu_sem_destroy(&ms->rp_state.rp_sem);
    qemu_sem_destroy(&ms->rp_state.rp_pong_acks);
    qemu_sem_destroy(&ms->postcopy_qemufile_src_sem);
    error_free(ms->error);
}

static void migration_instance_init(Object *obj)
{
    MigrationState *ms = MIGRATION_OBJ(obj);

    ms->state = MIGRATION_STATUS_NONE;
    ms->mbps = -1;
    ms->pages_per_second = -1;
    qemu_sem_init(&ms->pause_sem, 0);
    qemu_mutex_init(&ms->error_mutex);

    migrate_params_init(&ms->parameters);

    qemu_sem_init(&ms->postcopy_pause_sem, 0);
    qemu_sem_init(&ms->rp_state.rp_sem, 0);
    qemu_sem_init(&ms->rp_state.rp_pong_acks, 0);
    qemu_sem_init(&ms->rate_limit_sem, 0);
    qemu_sem_init(&ms->wait_unplug_sem, 0);
    qemu_sem_init(&ms->postcopy_qemufile_src_sem, 0);
    qemu_mutex_init(&ms->qemu_file_lock);
}

/*
 * Return true if the check passes, false otherwise.  The error will be
 * put inside errp if provided.
 */
static bool migration_object_check(MigrationState *ms, Error **errp)
{
    /* Assuming all off */
    bool old_caps[MIGRATION_CAPABILITY__MAX] = { 0 };

    if (!migrate_params_check(&ms->parameters, errp)) {
        return false;
    }

    return migrate_caps_check(old_caps, ms->capabilities, errp);
}
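/*
 * Because TYPE_MIGRATION below derives from TYPE_DEVICE, its properties
 * can be set from the command line with -global, e.g. (the property name
 * is one of those defined in migration_properties in options.c):
 *
 *     qemu-system-x86_64 -global migration.send-configuration=off
 */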
static const TypeInfo migration_type = {
    .name = TYPE_MIGRATION,
    /*
     * NOTE: TYPE_MIGRATION is not really a device, as the object is
     * not created using qdev_new(), it is not attached to the qdev
     * device tree, and it is never realized.
     *
     * TODO: Make this TYPE_OBJECT once QOM provides something like
     * TYPE_DEVICE's "-global" properties.
     */
    .parent = TYPE_DEVICE,
    .class_init = migration_class_init,
    .class_size = sizeof(MigrationClass),
    .instance_size = sizeof(MigrationState),
    .instance_init = migration_instance_init,
    .instance_finalize = migration_instance_finalize,
};

static void register_migration_types(void)
{
    type_register_static(&migration_type);
}

type_init(register_migration_types);