/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/blocker.h"
#include "exec.h"
#include "fd.h"
#include "file.h"
#include "socket.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpu-throttle.h"
#include "rdma.h"
#include "ram.h"
#include "ram-compress.h"
#include "migration/global_state.h"
#include "migration/misc.h"
#include "migration.h"
#include "migration-stats.h"
#include "savevm.h"
#include "qemu-file.h"
#include "channel.h"
#include "migration/vmstate.h"
#include "block/block.h"
#include "qapi/error.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-migration.h"
#include "qapi/qapi-visit-sockets.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qnull.h"
#include "qemu/rcu.h"
#include "block.h"
#include "postcopy-ram.h"
#include "qemu/thread.h"
#include "trace.h"
#include "exec/target_page.h"
#include "io/channel-buffer.h"
#include "io/channel-tls.h"
#include "migration/colo.h"
#include "hw/boards.h"
#include "monitor/monitor.h"
#include "net/announce.h"
#include "qemu/queue.h"
#include "multifd.h"
#include "threadinfo.h"
#include "qemu/yank.h"
#include "sysemu/cpus.h"
#include "yank_functions.h"
#include "sysemu/qtest.h"
#include "options.h"
#include "sysemu/dirtylimit.h"
#include "qemu/sockets.h"
#include "sysemu/kvm.h"

#define NOTIFIER_ELEM_INIT(array, elem)    \
    [elem] = NOTIFIER_WITH_RETURN_LIST_INITIALIZER((array)[elem])

#define INMIGRATE_DEFAULT_EXIT_ON_ERROR true

static NotifierWithReturnList migration_state_notifiers[] = {
    NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_NORMAL),
    NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_CPR_REBOOT),
};

/* Messages sent on the return path from destination to source */
enum mig_rp_message_type {
    MIG_RP_MSG_INVALID = 0,  /* Must be 0 */
    MIG_RP_MSG_SHUT,         /* sibling will not send any more RP messages */
    MIG_RP_MSG_PONG,         /* Response to a PING; data (seq: be32) */

    MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */
    MIG_RP_MSG_REQ_PAGES,    /* data (start: be64, len: be32) */
    MIG_RP_MSG_RECV_BITMAP,  /* send recved_bitmap back to source */
    MIG_RP_MSG_RESUME_ACK,   /* tell source that we are ready to resume */
    MIG_RP_MSG_SWITCHOVER_ACK, /* Tell source it's OK to do switchover */

    MIG_RP_MSG_MAX
};

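/*
 * Illustrative note (derived from migrate_send_rp_message() below, not
 * upstream documentation): every return-path message is framed as
 *
 *     be16 message_type   (one of enum mig_rp_message_type)
 *     be16 len            (payload length in bytes)
 *     len bytes of payload
 *
 * so, for example, a PONG carrying sequence number 1 occupies 8 bytes on
 * the wire: 00 02 | 00 04 | 00 00 00 01.
 */
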
/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */

static MigrationState *current_migration;
static MigrationIncomingState *current_incoming;

static GSList *migration_blockers[MIG_MODE__MAX];

static bool migration_object_check(MigrationState *ms, Error **errp);
static int migration_maybe_pause(MigrationState *s,
                                 int *current_active_state,
                                 int new_state);
static void migrate_fd_cancel(MigrationState *s);
static bool close_return_path_on_source(MigrationState *s);
static void migration_completion_end(MigrationState *s);

static void migration_downtime_start(MigrationState *s)
{
    trace_vmstate_downtime_checkpoint("src-downtime-start");
    s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
}

static void migration_downtime_end(MigrationState *s)
{
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /*
     * If downtime is already set, postcopy must have set it earlier, and
     * that value is the real downtime already.
     */
    if (!s->downtime) {
        s->downtime = now - s->downtime_start;
    }

    trace_vmstate_downtime_checkpoint("src-downtime-end");
}

static bool migration_needs_multiple_sockets(void)
{
    return migrate_multifd() || migrate_postcopy_preempt();
}

static bool transport_supports_multi_channels(MigrationAddress *addr)
{
    if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) {
        SocketAddress *saddr = &addr->u.socket;

        return (saddr->type == SOCKET_ADDRESS_TYPE_INET ||
                saddr->type == SOCKET_ADDRESS_TYPE_UNIX ||
                saddr->type == SOCKET_ADDRESS_TYPE_VSOCK);
    } else if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) {
        return migrate_mapped_ram();
    } else {
        return false;
    }
}

static bool migration_needs_seekable_channel(void)
{
    return migrate_mapped_ram();
}

static bool transport_supports_seeking(MigrationAddress *addr)
{
    if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) {
        return true;
    }

    return false;
}

static bool
migration_channels_and_transport_compatible(MigrationAddress *addr,
                                            Error **errp)
{
    if (migration_needs_seekable_channel() &&
        !transport_supports_seeking(addr)) {
        error_setg(errp, "Migration requires seekable transport (e.g. file)");
        return false;
    }

    if (migration_needs_multiple_sockets() &&
        !transport_supports_multi_channels(addr)) {
        error_setg(errp, "Migration requires multi-channel URIs (e.g. tcp)");
        return false;
    }

    return true;
}

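/*
 * A summary of the checks above, for reference (derived from the code, not
 * upstream documentation): inet, unix and vsock sockets can carry multiple
 * channels (multifd, postcopy preempt); an fd: socket cannot; a file:
 * transport counts as multi-channel only with mapped-ram, and is the only
 * transport that supports the seekable access mapped-ram requires.
 */
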
static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp)
{
    uintptr_t a = (uintptr_t) ap, b = (uintptr_t) bp;

    return (a > b) - (a < b);
}

static int migration_stop_vm(MigrationState *s, RunState state)
{
    int ret;

    migration_downtime_start(s);

    s->vm_old_state = runstate_get();
    global_state_store();

    ret = vm_stop_force_state(state);

    trace_vmstate_downtime_checkpoint("src-vm-stopped");
    trace_migration_completion_vm_stop(ret);

    return ret;
}

void migration_object_init(void)
{
    /* This can only be called once. */
    assert(!current_migration);
    current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION));

    /*
     * Initialize the incoming migration object as well, whether or not
     * we'll actually use it.
     */
    assert(!current_incoming);
    current_incoming = g_new0(MigrationIncomingState, 1);
    current_incoming->state = MIGRATION_STATUS_NONE;
    current_incoming->postcopy_remote_fds =
        g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD));
    qemu_mutex_init(&current_incoming->rp_mutex);
    qemu_mutex_init(&current_incoming->postcopy_prio_thread_mutex);
    qemu_event_init(&current_incoming->main_thread_load_event, false);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_dst, 0);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_fault, 0);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_fast_load, 0);
    qemu_sem_init(&current_incoming->postcopy_qemufile_dst_done, 0);

    qemu_mutex_init(&current_incoming->page_request_mutex);
    qemu_cond_init(&current_incoming->page_request_cond);
    current_incoming->page_requested = g_tree_new(page_request_addr_cmp);

    current_incoming->exit_on_error = INMIGRATE_DEFAULT_EXIT_ON_ERROR;

    migration_object_check(current_migration, &error_fatal);

    blk_mig_init();
    ram_mig_init();
    dirty_bitmap_mig_init();
}

typedef struct {
    QEMUBH *bh;
    QEMUBHFunc *cb;
    void *opaque;
} MigrationBH;

static void migration_bh_dispatch_bh(void *opaque)
{
    MigrationState *s = migrate_get_current();
    MigrationBH *migbh = opaque;

    /* cleanup this BH */
    qemu_bh_delete(migbh->bh);
    migbh->bh = NULL;

    /* dispatch the other one */
    migbh->cb(migbh->opaque);
    object_unref(OBJECT(s));

    g_free(migbh);
}

void migration_bh_schedule(QEMUBHFunc *cb, void *opaque)
{
    MigrationState *s = migrate_get_current();
    MigrationBH *migbh = g_new0(MigrationBH, 1);
    QEMUBH *bh = qemu_bh_new(migration_bh_dispatch_bh, migbh);

    /* Store these to dispatch when the BH runs */
    migbh->bh = bh;
    migbh->cb = cb;
    migbh->opaque = opaque;

    /*
     * Ref the state for bh, because it may be called when
     * there're already no other refs
     */
    object_ref(OBJECT(s));
    qemu_bh_schedule(bh);
}

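/*
 * A minimal usage sketch (illustrative only; "my_cleanup" is a hypothetical
 * callback, not part of this file). The wrapper keeps the MigrationState
 * object alive until the bottom half has run:
 *
 *     static void my_cleanup(void *opaque)
 *     {
 *         MigrationIncomingState *mis = opaque;
 *         ...                 // runs later in the main loop context
 *     }
 *
 *     migration_bh_schedule(my_cleanup, mis);
 */
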
void migration_cancel(const Error *error)
{
    if (error) {
        migrate_set_error(current_migration, error);
    }
    if (migrate_dirty_limit()) {
        qmp_cancel_vcpu_dirty_limit(false, -1, NULL);
    }
    migrate_fd_cancel(current_migration);
}

void migration_shutdown(void)
{
    /*
     * When the QEMU main thread exits, the COLO thread may be waiting on a
     * semaphore, so wake it up before shutting migration down.
     */
    colo_shutdown();
    /*
     * Cancel the current migration - that will (eventually)
     * stop the migration using this structure
     */
    migration_cancel(NULL);
    object_unref(OBJECT(current_migration));

    /*
     * Cancel outgoing migration of dirty bitmaps. It should
     * at least unref used block nodes.
     */
    dirty_bitmap_mig_cancel_outgoing();

    /*
     * Cancel incoming migration of dirty bitmaps. Dirty bitmaps are
     * non-critical data, and their loss is never considered serious.
     */
    dirty_bitmap_mig_cancel_incoming();
}

/* For outgoing */
MigrationState *migrate_get_current(void)
{
    /* This can only be called after the object has been created. */
    assert(current_migration);
    return current_migration;
}

MigrationIncomingState *migration_incoming_get_current(void)
{
    assert(current_incoming);
    return current_incoming;
}

void migration_incoming_transport_cleanup(MigrationIncomingState *mis)
{
    if (mis->socket_address_list) {
        qapi_free_SocketAddressList(mis->socket_address_list);
        mis->socket_address_list = NULL;
    }

    if (mis->transport_cleanup) {
        mis->transport_cleanup(mis->transport_data);
        mis->transport_data = mis->transport_cleanup = NULL;
    }
}

void migration_incoming_state_destroy(void)
{
    struct MigrationIncomingState *mis = migration_incoming_get_current();

    multifd_recv_cleanup();
    compress_threads_load_cleanup();

    if (mis->to_src_file) {
        /* Tell source that we are done */
        migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
        qemu_fclose(mis->to_src_file);
        mis->to_src_file = NULL;
    }

    if (mis->from_src_file) {
        migration_ioc_unregister_yank_from_file(mis->from_src_file);
        qemu_fclose(mis->from_src_file);
        mis->from_src_file = NULL;
    }
    if (mis->postcopy_remote_fds) {
        g_array_free(mis->postcopy_remote_fds, TRUE);
        mis->postcopy_remote_fds = NULL;
    }

    migration_incoming_transport_cleanup(mis);
    qemu_event_reset(&mis->main_thread_load_event);

    if (mis->page_requested) {
        g_tree_destroy(mis->page_requested);
        mis->page_requested = NULL;
    }

    if (mis->postcopy_qemufile_dst) {
        migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst);
        qemu_fclose(mis->postcopy_qemufile_dst);
        mis->postcopy_qemufile_dst = NULL;
    }

    yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_events()) {
        qapi_event_send_migration(new_state);
    }
}

/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
static int migrate_send_rp_message(MigrationIncomingState *mis,
                                   enum mig_rp_message_type message_type,
                                   uint16_t len, void *data)
{
    int ret = 0;

    trace_migrate_send_rp_message((int)message_type, len);
    QEMU_LOCK_GUARD(&mis->rp_mutex);

    /*
     * It's possible that the file handle got lost due to network
     * failures.
     */
    if (!mis->to_src_file) {
        ret = -EIO;
        return ret;
    }

    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    return qemu_fflush(mis->to_src_file);
}

/* Request one page from the source VM at the given start address.
 *   rb: the RAMBlock to request the page in
 *   start: address offset within the RB
 *   len: length in bytes required - must be a multiple of pagesize
 */
int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
                                      RAMBlock *rb, ram_addr_t start)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
    size_t msglen = 12; /* start + len */
    size_t len = qemu_ram_pagesize(rb);
    enum mig_rp_message_type msg_type;
    const char *rbname;
    int rbname_len;

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    /*
     * We track the last RAMBlock that we requested a page for.  Note that we
     * don't need locking because this function will only be called within
     * the postcopy ram fault thread.
     */
    if (rb != mis->last_rb) {
        mis->last_rb = rb;

        rbname = qemu_ram_get_idstr(rb);
        rbname_len = strlen(rbname);

        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        msg_type = MIG_RP_MSG_REQ_PAGES_ID;
    } else {
        msg_type = MIG_RP_MSG_REQ_PAGES;
    }

    return migrate_send_rp_message(mis, msg_type, msglen, bufc);
}

int migrate_send_rp_req_pages(MigrationIncomingState *mis,
                              RAMBlock *rb, ram_addr_t start, uint64_t haddr)
{
    void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
    bool received = false;

    WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
        received = ramblock_recv_bitmap_test_byte_offset(rb, start);
        if (!received && !g_tree_lookup(mis->page_requested, aligned)) {
            /*
             * The page has not been received, and it's not yet in the page
             * request list.  Queue it.  Set the value of element to 1, so that
             * things like g_tree_lookup() will return TRUE (1) when found.
             */
            g_tree_insert(mis->page_requested, aligned, (gpointer)1);
            qatomic_inc(&mis->page_requested_count);
            trace_postcopy_page_req_add(aligned, mis->page_requested_count);
        }
    }

    /*
     * If the page is there, skip sending the message.  We don't even need the
     * lock because as long as the page arrived, it'll be there forever.
     */
    if (received) {
        return 0;
    }

    return migrate_send_rp_message_req_pages(mis, rb, start);
}

static bool migration_colo_enabled;
bool migration_incoming_colo_enabled(void)
{
    return migration_colo_enabled;
}

void migration_incoming_disable_colo(void)
{
    ram_block_discard_disable(false);
    migration_colo_enabled = false;
}

int migration_incoming_enable_colo(void)
{
#ifndef CONFIG_REPLICATION
    error_report("ENABLE_COLO command came in migration stream, but the COLO "
                 "module is not built in");
    return -ENOTSUP;
#endif

    if (!migrate_colo()) {
        error_report("ENABLE_COLO command came in migration stream, but the "
                     "colo capability is not set");
        return -EINVAL;
    }

    if (ram_block_discard_disable(true)) {
        error_report("COLO: cannot disable RAM discard");
        return -EBUSY;
    }
    migration_colo_enabled = true;
    return 0;
}

void migrate_add_address(SocketAddress *address)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    QAPI_LIST_PREPEND(mis->socket_address_list,
                      QAPI_CLONE(SocketAddress, address));
}

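/*
 * Convert a classic migration URI into a MigrationChannel.  Summarising the
 * branches below for reference: "exec:" wraps the command in a shell,
 * "rdma:" parses an inet address, "tcp:"/"unix:"/"vsock:"/"fd:" become
 * socket addresses, and "file:" takes a filename with an optional offset.
 * Anything else is rejected.
 */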
strlen("rdma:"), errp)) { 567 qapi_free_InetSocketAddress(isock); 568 return false; 569 } 570 addr->transport = MIGRATION_ADDRESS_TYPE_RDMA; 571 } else if (strstart(uri, "tcp:", NULL) || 572 strstart(uri, "unix:", NULL) || 573 strstart(uri, "vsock:", NULL) || 574 strstart(uri, "fd:", NULL)) { 575 addr->transport = MIGRATION_ADDRESS_TYPE_SOCKET; 576 SocketAddress *saddr = socket_parse(uri, errp); 577 if (!saddr) { 578 return false; 579 } 580 addr->u.socket.type = saddr->type; 581 addr->u.socket.u = saddr->u; 582 /* Don't free the objects inside; their ownership moved to "addr" */ 583 g_free(saddr); 584 } else if (strstart(uri, "file:", NULL)) { 585 addr->transport = MIGRATION_ADDRESS_TYPE_FILE; 586 addr->u.file.filename = g_strdup(uri + strlen("file:")); 587 if (file_parse_offset(addr->u.file.filename, &addr->u.file.offset, 588 errp)) { 589 return false; 590 } 591 } else { 592 error_setg(errp, "unknown migration protocol: %s", uri); 593 return false; 594 } 595 596 val->channel_type = MIGRATION_CHANNEL_TYPE_MAIN; 597 val->addr = g_steal_pointer(&addr); 598 *channel = g_steal_pointer(&val); 599 return true; 600 } 601 602 static void qemu_start_incoming_migration(const char *uri, bool has_channels, 603 MigrationChannelList *channels, 604 Error **errp) 605 { 606 g_autoptr(MigrationChannel) channel = NULL; 607 MigrationAddress *addr = NULL; 608 MigrationIncomingState *mis = migration_incoming_get_current(); 609 610 /* 611 * Having preliminary checks for uri and channel 612 */ 613 if (!uri == !channels) { 614 error_setg(errp, "need either 'uri' or 'channels' argument"); 615 return; 616 } 617 618 if (channels) { 619 /* To verify that Migrate channel list has only item */ 620 if (channels->next) { 621 error_setg(errp, "Channel list has more than one entries"); 622 return; 623 } 624 addr = channels->value->addr; 625 } 626 627 if (uri) { 628 /* caller uses the old URI syntax */ 629 if (!migrate_uri_parse(uri, &channel, errp)) { 630 return; 631 } 632 addr = channel->addr; 633 } 634 635 /* transport mechanism not suitable for migration? 
static void qemu_start_incoming_migration(const char *uri, bool has_channels,
                                          MigrationChannelList *channels,
                                          Error **errp)
{
    g_autoptr(MigrationChannel) channel = NULL;
    MigrationAddress *addr = NULL;
    MigrationIncomingState *mis = migration_incoming_get_current();

    /*
     * Preliminary checks on the 'uri' and 'channels' arguments: exactly one
     * of the two must be provided.
     */
    if (!uri == !channels) {
        error_setg(errp, "need either 'uri' or 'channels' argument");
        return;
    }

    if (channels) {
        /* Verify that the channel list has exactly one entry */
        if (channels->next) {
            error_setg(errp, "Channel list has more than one entry");
            return;
        }
        addr = channels->value->addr;
    }

    if (uri) {
        /* caller uses the old URI syntax */
        if (!migrate_uri_parse(uri, &channel, errp)) {
            return;
        }
        addr = channel->addr;
    }

    /* transport mechanism not suitable for migration? */
    if (!migration_channels_and_transport_compatible(addr, errp)) {
        return;
    }

    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_SETUP);

    if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) {
        SocketAddress *saddr = &addr->u.socket;
        if (saddr->type == SOCKET_ADDRESS_TYPE_INET ||
            saddr->type == SOCKET_ADDRESS_TYPE_UNIX ||
            saddr->type == SOCKET_ADDRESS_TYPE_VSOCK) {
            socket_start_incoming_migration(saddr, errp);
        } else if (saddr->type == SOCKET_ADDRESS_TYPE_FD) {
            fd_start_incoming_migration(saddr->u.fd.str, errp);
        }
#ifdef CONFIG_RDMA
    } else if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) {
        if (migrate_compress()) {
            error_setg(errp, "RDMA and compression can't be used together");
            return;
        }
        if (migrate_xbzrle()) {
            error_setg(errp, "RDMA and XBZRLE can't be used together");
            return;
        }
        if (migrate_multifd()) {
            error_setg(errp, "RDMA and multifd can't be used together");
            return;
        }
        rdma_start_incoming_migration(&addr->u.rdma, errp);
#endif
    } else if (addr->transport == MIGRATION_ADDRESS_TYPE_EXEC) {
        exec_start_incoming_migration(addr->u.exec.args, errp);
    } else if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) {
        file_start_incoming_migration(&addr->u.file, errp);
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}

static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    trace_vmstate_downtime_checkpoint("dst-precopy-bh-enter");

    /* If capability late_block_activate is set:
     * Only fire up the block code now if we're going to restart the
     * VM, else 'cont' will do it.
     * This causes file locking to happen; so we don't want it to happen
     * unless we really are starting the VM.
     */
    if (!migrate_late_block_activate() ||
         (autostart && (!global_state_received() ||
            runstate_is_live(global_state_get_runstate())))) {
        /* Make sure all file formats throw away their mutable metadata.
         * If we get an error here, just don't restart the VM yet. */
        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
            local_err = NULL;
            autostart = false;
        }
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self(&mis->announce_timer, migrate_announce_params());

    trace_vmstate_downtime_checkpoint("dst-precopy-bh-announced");

    multifd_recv_shutdown();

    dirty_bitmap_mig_before_vm_start();

    if (!global_state_received() ||
        runstate_is_live(global_state_get_runstate())) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else if (migration_incoming_colo_enabled()) {
        migration_incoming_disable_colo();
        vm_start();
    } else {
        runstate_set(global_state_get_runstate());
    }
    trace_vmstate_downtime_checkpoint("dst-precopy-bh-vm-started");
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    migration_incoming_state_destroy();
}

static void coroutine_fn
process_incoming_migration_co(void *opaque)
{
    MigrationState *s = migrate_get_current();
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyState ps;
    int ret;
    Error *local_err = NULL;

    assert(mis->from_src_file);

    if (compress_threads_load_setup(mis->from_src_file)) {
        error_setg(&local_err, "Failed to setup decompress threads");
        goto fail;
    }

    mis->largest_page_size = qemu_ram_pagesize_largest();
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_ACTIVE);

    mis->loadvm_co = qemu_coroutine_self();
    ret = qemu_loadvm_state(mis->from_src_file);
    mis->loadvm_co = NULL;

    trace_vmstate_downtime_checkpoint("dst-precopy-loadvm-completed");

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    if (ret < 0) {
        error_setg(&local_err, "load of migration failed: %s", strerror(-ret));
        goto fail;
    }

    if (colo_incoming_co() < 0) {
        error_setg(&local_err, "colo incoming failed");
        goto fail;
    }

    migration_bh_schedule(process_incoming_migration_bh, mis);
    return;
fail:
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    migrate_set_error(s, local_err);
    error_free(local_err);

    migration_incoming_state_destroy();

    if (mis->exit_on_error) {
        WITH_QEMU_LOCK_GUARD(&s->error_mutex) {
            error_report_err(s->error);
            s->error = NULL;
        }

        exit(EXIT_FAILURE);
    }
}

/**
 * migration_incoming_setup: Setup incoming migration
 * @f: file for main migration channel
 */
static void migration_incoming_setup(QEMUFile *f)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!mis->from_src_file) {
        mis->from_src_file = f;
    }
    qemu_file_set_blocking(f, false);
}

void migration_incoming_process(void)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, NULL);
    qemu_coroutine_enter(co);
}

/* Returns true if recovered from a paused migration, otherwise false */
static bool postcopy_try_recover(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
        /* Resumed from a paused postcopy migration */

        /* This should be set already in migration_incoming_setup() */
        assert(mis->from_src_file);
        /* Postcopy has a standalone thread to do the VM load */
        qemu_file_set_blocking(mis->from_src_file, true);

        /* Re-configure the return path */
        mis->to_src_file = qemu_file_get_return_path(mis->from_src_file);

        migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
                          MIGRATION_STATUS_POSTCOPY_RECOVER);

        /*
         * Here, we only wake up the main loading thread (while the rest of
         * the threads keep waiting), so that we can receive commands from
         * the source and answer them if needed.  The remaining threads are
         * woken up once we are sure the source is ready to reply to page
         * requests.
         */
        qemu_sem_post(&mis->postcopy_pause_sem_dst);
        return true;
    }

    return false;
}

void migration_fd_process_incoming(QEMUFile *f)
{
    migration_incoming_setup(f);
    if (postcopy_try_recover()) {
        return;
    }
    migration_incoming_process();
}

/*
 * Returns true when we want to start a new incoming migration process,
 * false otherwise.
 */
static bool migration_should_start_incoming(bool main_channel)
{
    /* Multifd doesn't start unless all channels are established */
    if (migrate_multifd()) {
        return migration_has_all_channels();
    }

    /* Preempt channel only starts when the main channel is created */
    if (migrate_postcopy_preempt()) {
        return main_channel;
    }

    /*
     * For all other types of migration, we should only reach here when
     * it's the main channel that's being created, and we should always
     * proceed with this channel.
     */
    assert(main_channel);
    return true;
}

void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    Error *local_err = NULL;
    QEMUFile *f;
    bool default_channel = true;
    uint32_t channel_magic = 0;
    int ret = 0;

    if (migrate_multifd() && !migrate_mapped_ram() &&
        !migrate_postcopy_ram() &&
        qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) {
        /*
         * With multiple channels, it is possible that we receive channels
         * out of order on the destination side, causing an incorrect mapping
         * of source channels on the destination.  Check the channel MAGIC to
         * decide the type of channel.  Please note this is best effort: the
         * postcopy preempt channel does not send any magic number, so avoid
         * it for postcopy live migration.  TLS live migration already
         * performs the TLS handshake while initializing the main channel,
         * so with TLS this issue does not arise.
         */
        ret = migration_channel_read_peek(ioc, (void *)&channel_magic,
                                          sizeof(channel_magic), errp);

        if (ret != 0) {
            return;
        }

        default_channel = (channel_magic == cpu_to_be32(QEMU_VM_FILE_MAGIC));
    } else {
        default_channel = !mis->from_src_file;
    }

    if (multifd_recv_setup(errp) != 0) {
        return;
    }

    if (default_channel) {
        f = qemu_file_new_input(ioc);
        migration_incoming_setup(f);
    } else {
        /* Multiple connections */
        assert(migration_needs_multiple_sockets());
        if (migrate_multifd()) {
            multifd_recv_new_channel(ioc, &local_err);
        } else {
            assert(migrate_postcopy_preempt());
            f = qemu_file_new_input(ioc);
            postcopy_preempt_new_channel(mis, f);
        }
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if (migration_should_start_incoming(default_channel)) {
        /* If it's a recovery, we're done */
        if (postcopy_try_recover()) {
            return;
        }
        migration_incoming_process();
    }
}

/**
 * @migration_has_all_channels: We have received all channels that we need
 *
 * Returns true when we have got connections to all the channels that
 * we need for migration.
 */
bool migration_has_all_channels(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!mis->from_src_file) {
        return false;
    }

    if (migrate_multifd()) {
        return multifd_recv_all_channels_created();
    }

    if (migrate_postcopy_preempt()) {
        return mis->postcopy_qemufile_dst != NULL;
    }

    return true;
}

int migrate_send_rp_switchover_ack(MigrationIncomingState *mis)
{
    return migrate_send_rp_message(mis, MIG_RP_MSG_SWITCHOVER_ACK, 0, NULL);
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  A non-zero value
 * indicates an error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
                                 char *block_name)
{
    char buf[512];
    int len;
    int64_t res;

    /*
     * First, we send the header part.  It contains only the len of
     * idstr, and the idstr itself.
     */
    len = strlen(block_name);
    buf[0] = len;
    memcpy(buf + 1, block_name, len);

    if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: MSG_RP_RECV_BITMAP only used for recovery",
                     __func__);
        return;
    }

    migrate_send_rp_message(mis, MIG_RP_MSG_RECV_BITMAP, len + 1, buf);

    /*
     * Next, we dump the received bitmap to the stream.
     *
     * TODO: currently we are safe since we are the only one that is
     * using the to_src_file handle (the fault thread is still paused),
     * so it's OK even without taking the mutex.  However, the best way
     * is to take the lock before sending the message header, and release
     * it after sending the bitmap.
     */
    qemu_mutex_lock(&mis->rp_mutex);
    res = ramblock_recv_bitmap_send(mis->to_src_file, block_name);
    qemu_mutex_unlock(&mis->rp_mutex);

    trace_migrate_send_rp_recv_bitmap(block_name, res);
}

void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf);
}

/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
bool migration_is_setup_or_active(void)
{
    MigrationState *s = current_migration;

    switch (s->state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_COLO:
        return true;

    default:
        return false;
    }
}

bool migration_is_running(void)
{
    MigrationState *s = current_migration;

    switch (s->state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_CANCELLING:
        return true;

    default:
        return false;
    }
}

static bool migrate_show_downtime(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_COMPLETED) || migration_in_postcopy();
}

static void populate_time_info(MigrationInfo *info, MigrationState *s)
{
    info->has_status = true;
    info->has_setup_time = true;
    info->setup_time = s->setup_time;

    if (s->state == MIGRATION_STATUS_COMPLETED) {
        info->has_total_time = true;
        info->total_time = s->total_time;
    } else {
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                           s->start_time;
    }

    if (migrate_show_downtime(s)) {
        info->has_downtime = true;
        info->downtime = s->downtime;
    } else {
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
    }
}

static void populate_ram_info(MigrationInfo *info, MigrationState *s)
{
    size_t page_size = qemu_target_page_size();

    info->ram = g_malloc0(sizeof(*info->ram));
    info->ram->transferred = migration_transferred_bytes();
    info->ram->total = ram_bytes_total();
    info->ram->duplicate = stat64_get(&mig_stats.zero_pages);
    info->ram->normal = stat64_get(&mig_stats.normal_pages);
    info->ram->normal_bytes = info->ram->normal * page_size;
    info->ram->mbps = s->mbps;
    info->ram->dirty_sync_count =
        stat64_get(&mig_stats.dirty_sync_count);
    info->ram->dirty_sync_missed_zero_copy =
        stat64_get(&mig_stats.dirty_sync_missed_zero_copy);
    info->ram->postcopy_requests =
        stat64_get(&mig_stats.postcopy_requests);
    info->ram->page_size = page_size;
    info->ram->multifd_bytes = stat64_get(&mig_stats.multifd_bytes);
    info->ram->pages_per_second = s->pages_per_second;
    info->ram->precopy_bytes = stat64_get(&mig_stats.precopy_bytes);
    info->ram->downtime_bytes = stat64_get(&mig_stats.downtime_bytes);
    info->ram->postcopy_bytes = stat64_get(&mig_stats.postcopy_bytes);

    if (migrate_xbzrle()) {
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_counters.bytes;
        info->xbzrle_cache->pages = xbzrle_counters.pages;
        info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss;
        info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate;
        info->xbzrle_cache->encoding_rate = xbzrle_counters.encoding_rate;
        info->xbzrle_cache->overflow = xbzrle_counters.overflow;
    }

    populate_compress(info);

    if (cpu_throttle_active()) {
        info->has_cpu_throttle_percentage = true;
        info->cpu_throttle_percentage = cpu_throttle_get_percentage();
    }

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        info->ram->remaining = ram_bytes_remaining();
        info->ram->dirty_pages_rate =
            stat64_get(&mig_stats.dirty_pages_rate);
    }

    if (migrate_dirty_limit() && dirtylimit_in_service()) {
        info->has_dirty_limit_throttle_time_per_round = true;
        info->dirty_limit_throttle_time_per_round =
            dirtylimit_throttle_time_per_round();

        info->has_dirty_limit_ring_full_time = true;
        info->dirty_limit_ring_full_time = dirtylimit_ring_full_time();
    }
}

static void populate_disk_info(MigrationInfo *info)
{
    if (blk_mig_active()) {
        info->disk = g_malloc0(sizeof(*info->disk));
        info->disk->transferred = blk_mig_bytes_transferred();
        info->disk->remaining = blk_mig_bytes_remaining();
        info->disk->total = blk_mig_bytes_total();
    }
}

static void fill_source_migration_info(MigrationInfo *info)
{
    MigrationState *s = migrate_get_current();
    int state = qatomic_read(&s->state);
    GSList *cur_blocker = migration_blockers[migrate_mode()];

    info->blocked_reasons = NULL;

    /*
     * There are two kinds of reasons a migration might be blocked:
     * a) devices marked in VMState as non-migratable, and
     * b) explicit migration blockers.
     * We need to add both of them here.
     */
    qemu_savevm_non_migratable_list(&info->blocked_reasons);

    while (cur_blocker) {
        QAPI_LIST_PREPEND(info->blocked_reasons,
                          g_strdup(error_get_pretty(cur_blocker->data)));
        cur_blocker = g_slist_next(cur_blocker);
    }
    info->has_blocked_reasons = info->blocked_reasons != NULL;

    switch (state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has ever happened */
        /* do not overwrite destination migration status */
        return;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        /* TODO add some postcopy stats */
        populate_time_info(info, s);
        populate_ram_info(info, s);
        populate_disk_info(info);
        migration_populate_vfio_info(info);
        break;
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        /* TODO: display COLO specific information (checkpoint info etc.) */
        break;
    case MIGRATION_STATUS_COMPLETED:
        populate_time_info(info, s);
        populate_ram_info(info, s);
        migration_populate_vfio_info(info);
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_WAIT_UNPLUG:
        info->has_status = true;
        break;
    }
    info->status = state;

    QEMU_LOCK_GUARD(&s->error_mutex);
    if (s->error) {
        info->error_desc = g_strdup(error_get_pretty(s->error));
    }
}

static void fill_destination_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->socket_address_list) {
        info->has_socket_address = true;
        info->socket_address =
            QAPI_CLONE(SocketAddressList, mis->socket_address_list);
    }

    switch (mis->state) {
    case MIGRATION_STATUS_NONE:
        return;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_FAILED:
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_COMPLETED:
        info->has_status = true;
        fill_destination_postcopy_migration_info(info);
        break;
    }
    info->status = mis->state;

    if (!info->error_desc) {
        MigrationState *s = migrate_get_current();
        QEMU_LOCK_GUARD(&s->error_mutex);

        if (s->error) {
            info->error_desc = g_strdup(error_get_pretty(s->error));
        }
    }
}

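/*
 * For orientation only (an abridged, hypothetical example, not captured
 * output): a query-migrate issued mid-precopy might return something like
 *
 *     { "status": "active",
 *       "total-time": 12000, "setup-time": 14, "expected-downtime": 300,
 *       "ram": { "transferred": 123456789, "remaining": 1048576000,
 *                "total": 4295000064, "mbps": 941.2, ... } }
 *
 * with the fields filled in by the two helpers above.
 */
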
MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));

    fill_destination_migration_info(info);
    fill_source_migration_info(info);

    return info;
}

void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                         " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                         " started");
        return;
    }
    /*
     * We don't error if migration has finished since that would be racy
     * with issuing this command.
     */
    qatomic_set(&s->start_postcopy, true);
}

/* shared migration helpers */

void migrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < MIGRATION_STATUS__MAX);
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        trace_migrate_set_state(MigrationStatus_str(new_state));
        migrate_generate_event(new_state);
    }
}

static void migrate_fd_cleanup(MigrationState *s)
{
    MigrationEventType type;

    g_free(s->hostname);
    s->hostname = NULL;
    json_writer_free(s->vmdesc);
    s->vmdesc = NULL;

    qemu_savevm_state_cleanup();

    close_return_path_on_source(s);

    if (s->to_dst_file) {
        QEMUFile *tmp;

        trace_migrate_fd_cleanup();
        bql_unlock();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        bql_lock();

        multifd_send_shutdown();
        qemu_mutex_lock(&s->qemu_file_lock);
        tmp = s->to_dst_file;
        s->to_dst_file = NULL;
        qemu_mutex_unlock(&s->qemu_file_lock);
        /*
         * Close the file handle without the lock to make sure the
         * critical section won't block for long.
         */
        migration_ioc_unregister_yank_from_file(tmp);
        qemu_fclose(tmp);
    }

    assert(!migration_is_active());

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    if (s->error) {
        /* It is used on info migrate.  We can't free it */
        error_report_err(error_copy(s->error));
    }
    type = migration_has_failed(s) ? MIG_EVENT_PRECOPY_FAILED :
                                     MIG_EVENT_PRECOPY_DONE;
    migration_call_notifiers(s, type, NULL);
    block_cleanup_parameters();
    yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}

static void migrate_fd_cleanup_bh(void *opaque)
{
    migrate_fd_cleanup(opaque);
}

void migrate_set_error(MigrationState *s, const Error *error)
{
    QEMU_LOCK_GUARD(&s->error_mutex);

    trace_migrate_error(error_get_pretty(error));

    if (!s->error) {
        s->error = error_copy(error);
    }
}

bool migrate_has_error(MigrationState *s)
{
    /* The lock is not helpful here, but still follow the rule */
    QEMU_LOCK_GUARD(&s->error_mutex);
    return qatomic_read(&s->error);
}

static void migrate_error_free(MigrationState *s)
{
    QEMU_LOCK_GUARD(&s->error_mutex);
    if (s->error) {
        error_free(s->error);
        s->error = NULL;
    }
}

static void migrate_fd_error(MigrationState *s, const Error *error)
{
    assert(s->to_dst_file == NULL);
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_FAILED);
    migrate_set_error(s, error);
}

static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;

    trace_migrate_fd_cancel();

    WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
        if (s->rp_state.from_dst_file) {
            /* shutdown the rp socket, causing the rp thread to shut down */
            qemu_file_shutdown(s->rp_state.from_dst_file);
        }
    }

    do {
        old_state = s->state;
        if (!migration_is_running()) {
            break;
        }
        /* If the migration is paused, kick it out of the pause */
        if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) {
            qemu_sem_post(&s->pause_sem);
        }
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to time out;
     * if we've got shutdown(2) available then we can force it to quit.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING) {
        WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
            if (s->to_dst_file) {
                qemu_file_shutdown(s->to_dst_file);
            }
        }
    }
    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
        Error *local_err = NULL;

        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
    }
}

void migration_add_notifier_mode(NotifierWithReturn *notify,
                                 MigrationNotifyFunc func, MigMode mode)
{
    notify->notify = (NotifierWithReturnFunc)func;
    notifier_with_return_list_add(&migration_state_notifiers[mode], notify);
}

void migration_add_notifier(NotifierWithReturn *notify,
                            MigrationNotifyFunc func)
{
    migration_add_notifier_mode(notify, func, MIG_MODE_NORMAL);
}

void migration_remove_notifier(NotifierWithReturn *notify)
{
    if (notify->notify) {
        notifier_with_return_remove(notify);
        notify->notify = NULL;
    }
}

int migration_call_notifiers(MigrationState *s, MigrationEventType type,
                             Error **errp)
{
    MigMode mode = s->parameters.mode;
    MigrationEvent e;
    int ret;

    e.type = type;
    ret = notifier_with_return_list_notify(&migration_state_notifiers[mode],
                                           &e, errp);
    assert(!ret || type == MIG_EVENT_PRECOPY_SETUP);
    return ret;
}

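/*
 * A registration sketch (illustrative; assumes the MigrationNotifyFunc
 * signature from migration/misc.h, and "my_notifier"/"my_notify" are
 * hypothetical names):
 *
 *     static int my_notify(NotifierWithReturn *notify,
 *                          MigrationEvent *e, Error **errp)
 *     {
 *         if (e->type == MIG_EVENT_PRECOPY_SETUP) {
 *             // returning nonzero (with errp set) blocks the migration
 *         }
 *         return 0;
 *     }
 *
 *     static NotifierWithReturn my_notifier;
 *     migration_add_notifier(&my_notifier, my_notify);
 *
 * As the assertion above enforces, only MIG_EVENT_PRECOPY_SETUP may fail.
 */
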
bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}

bool migration_in_postcopy(void)
{
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        return true;
    default:
        return false;
    }
}

bool migration_postcopy_is_alive(int state)
{
    switch (state) {
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        return true;
    default:
        return false;
    }
}

bool migration_in_incoming_postcopy(void)
{
    PostcopyState ps = postcopy_state_get();

    return ps >= POSTCOPY_INCOMING_DISCARD && ps < POSTCOPY_INCOMING_END;
}

bool migration_incoming_postcopy_advised(void)
{
    PostcopyState ps = postcopy_state_get();

    return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
}

bool migration_in_bg_snapshot(void)
{
    return migrate_background_snapshot() &&
           migration_is_setup_or_active();
}

bool migration_is_idle(void)
{
    MigrationState *s = current_migration;

    if (!s) {
        return true;
    }

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_COMPLETED:
    case MIGRATION_STATUS_FAILED:
        return true;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_COLO:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
        return false;
    case MIGRATION_STATUS__MAX:
        g_assert_not_reached();
    }

    return false;
}

bool migration_is_active(void)
{
    MigrationState *s = current_migration;

    return (s->state == MIGRATION_STATUS_ACTIVE ||
            s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

bool migration_is_device(void)
{
    MigrationState *s = current_migration;

    return s->state == MIGRATION_STATUS_DEVICE;
}

bool migration_thread_is_self(void)
{
    MigrationState *s = current_migration;

    return qemu_thread_is_self(&s->thread);
}

bool migrate_mode_is_cpr(MigrationState *s)
{
    return s->parameters.mode == MIG_MODE_CPR_REBOOT;
}

int migrate_init(MigrationState *s, Error **errp)
{
    int ret;

    ret = qemu_savevm_state_prepare(errp);
    if (ret) {
        return ret;
    }

    /*
     * Reinitialise all migration state, except
     * parameters/capabilities that the user set, and
     * locks.
     */
    s->to_dst_file = NULL;
    s->state = MIGRATION_STATUS_NONE;
    s->rp_state.from_dst_file = NULL;
    s->mbps = 0.0;
    s->pages_per_second = 0.0;
    s->downtime = 0;
    s->expected_downtime = 0;
    s->setup_time = 0;
    s->start_postcopy = false;
    s->migration_thread_running = false;
    error_free(s->error);
    s->error = NULL;
    s->vmdesc = NULL;

    migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    s->total_time = 0;
    s->vm_old_state = -1;
    s->iteration_initial_bytes = 0;
    s->threshold_size = 0;
    s->switchover_acked = false;
    s->rdma_migration = false;
    /*
     * set mig_stats memory to zero for a new migration
     */
    memset(&mig_stats, 0, sizeof(mig_stats));
    migration_reset_vfio_bytes_transferred();

    return 0;
}

static bool is_busy(Error **reasonp, Error **errp)
{
    ERRP_GUARD();

    /* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. */
    if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) {
        error_propagate_prepend(errp, *reasonp,
                                "disallowing migration blocker "
                                "(migration/snapshot in progress) for: ");
        *reasonp = NULL;
        return true;
    }
    return false;
}

static bool is_only_migratable(Error **reasonp, Error **errp, int modes)
{
    ERRP_GUARD();

    if (only_migratable && (modes & BIT(MIG_MODE_NORMAL))) {
        error_propagate_prepend(errp, *reasonp,
                                "disallowing migration blocker "
                                "(--only-migratable) for: ");
        *reasonp = NULL;
        return true;
    }
    return false;
}

static int get_modes(MigMode mode, va_list ap)
{
    int modes = 0;

    while (mode != -1 && mode != MIG_MODE_ALL) {
        assert(mode >= MIG_MODE_NORMAL && mode < MIG_MODE__MAX);
        modes |= BIT(mode);
        mode = va_arg(ap, MigMode);
    }
    if (mode == MIG_MODE_ALL) {
        modes = BIT(MIG_MODE__MAX) - 1;
    }
    return modes;
}

static int add_blockers(Error **reasonp, Error **errp, int modes)
{
    for (MigMode mode = 0; mode < MIG_MODE__MAX; mode++) {
        if (modes & BIT(mode)) {
            migration_blockers[mode] = g_slist_prepend(migration_blockers[mode],
                                                       *reasonp);
        }
    }
    return 0;
}

int migrate_add_blocker(Error **reasonp, Error **errp)
{
    return migrate_add_blocker_modes(reasonp, errp, MIG_MODE_ALL);
}

int migrate_add_blocker_normal(Error **reasonp, Error **errp)
{
    return migrate_add_blocker_modes(reasonp, errp, MIG_MODE_NORMAL, -1);
}

int migrate_add_blocker_modes(Error **reasonp, Error **errp, MigMode mode, ...)
{
    int modes;
    va_list ap;

    va_start(ap, mode);
    modes = get_modes(mode, ap);
    va_end(ap);

    if (is_only_migratable(reasonp, errp, modes)) {
        return -EACCES;
    } else if (is_busy(reasonp, errp)) {
        return -EBUSY;
    }
    return add_blockers(reasonp, errp, modes);
}

int migrate_add_blocker_internal(Error **reasonp, Error **errp)
{
    int modes = BIT(MIG_MODE__MAX) - 1;

    if (is_busy(reasonp, errp)) {
        return -EBUSY;
    }
    return add_blockers(reasonp, errp, modes);
}

void migrate_del_blocker(Error **reasonp)
{
    if (*reasonp) {
        for (MigMode mode = 0; mode < MIG_MODE__MAX; mode++) {
            migration_blockers[mode] = g_slist_remove(migration_blockers[mode],
                                                      *reasonp);
        }
        error_free(*reasonp);
        *reasonp = NULL;
    }
}

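/*
 * A typical device-model usage sketch (illustrative; "blocker" is local to
 * the caller, and error handling is abbreviated):
 *
 *     Error *blocker = NULL;
 *
 *     error_setg(&blocker, "mydev: live migration not supported");
 *     if (migrate_add_blocker(&blocker, errp) < 0) {
 *         return;               // blocker was consumed, errp is set
 *     }
 *     ...
 *     migrate_del_blocker(&blocker);   // e.g. on unrealize
 */
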
void qmp_migrate_incoming(const char *uri, bool has_channels,
                          MigrationChannelList *channels,
                          bool has_exit_on_error, bool exit_on_error,
                          Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
        return;
    }
    if (!runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "'-incoming' was not specified on the command line");
        return;
    }

    if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
        return;
    }

    mis->exit_on_error =
        has_exit_on_error ? exit_on_error : INMIGRATE_DEFAULT_EXIT_ON_ERROR;

    qemu_start_incoming_migration(uri, has_channels, channels, &local_err);

    if (local_err) {
        yank_unregister_instance(MIGRATION_YANK_INSTANCE);
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}

void qmp_migrate_recover(const char *uri, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    /*
     * Don't even bother to use ERRP_GUARD() as it _must_ always be set by
     * callers (no one should ignore a recover failure); if there is, it's a
     * programming error.
     */
    assert(errp);

    if (mis->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
        error_setg(errp, "Migrate recover can only be run "
                         "when postcopy is paused.");
        return;
    }

    /* If there's an existing transport, release it */
    migration_incoming_transport_cleanup(mis);

    /*
     * Note that this call will never start a real migration; it will
     * only re-setup the migration stream and poke the existing migration
     * to continue using that newly established channel.
     */
    qemu_start_incoming_migration(uri, false, NULL, errp);
}

void qmp_migrate_pause(Error **errp)
{
    MigrationState *ms = migrate_get_current();
    MigrationIncomingState *mis = migration_incoming_get_current();
    int ret = 0;

    if (migration_postcopy_is_alive(ms->state)) {
        /* Source side, during postcopy */
        Error *error = NULL;

        /* Tell the core migration that we're pausing */
        error_setg(&error, "Postcopy migration is paused by the user");
        migrate_set_error(ms, error);
        error_free(error);

        qemu_mutex_lock(&ms->qemu_file_lock);
        if (ms->to_dst_file) {
            ret = qemu_file_shutdown(ms->to_dst_file);
        }
        qemu_mutex_unlock(&ms->qemu_file_lock);
        if (ret) {
            error_setg(errp, "Failed to pause source migration");
        }

        /*
         * Kick the migration thread out of any waiting windows (on behalf
         * of the rp thread).
         */
        migration_rp_kick(ms);

        return;
    }

    if (migration_postcopy_is_alive(mis->state)) {
        ret = qemu_file_shutdown(mis->from_src_file);
        if (ret) {
            error_setg(errp, "Failed to pause destination migration");
        }
        return;
    }

    error_setg(errp, "migrate-pause is currently only supported "
                     "during postcopy-active or postcopy-recover state");
}

bool migration_is_blocked(Error **errp)
{
    GSList *blockers = migration_blockers[migrate_mode()];

    if (qemu_savevm_state_blocked(errp)) {
        return true;
    }

    if (blockers) {
        error_propagate(errp, error_copy(blockers->data));
        return true;
    }

    return false;
}

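/*
 * For reference, an end-to-end postcopy recovery flow in QMP (illustrative
 * values, not captured output): after a network failure, or an explicit
 * migrate-pause on the source, both sides sit in postcopy-paused; then
 *
 *     (dst) migrate-recover  { "uri": "tcp:0:5556" }
 *     (src) migrate          { "uri": "tcp:dst-host:5556", "resume": true }
 *
 * The resumed "migrate" takes the resume path through migrate_prepare()
 * below rather than starting a fresh migration.
 */
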
So if a
1957 * network failure happens, any page buffers that have already been
1958 * sent from the source VM but have not yet reached the destination
1959 * will be lost forever. Refuse to let the client resume such a
1960 * postcopy migration.
1961 * Luckily release-ram was designed to only be used when source
1962 * and destination VMs are on the same host, so it should be
1963 * fine.
1964 */
1965 if (migrate_release_ram()) {
1966 error_setg(errp, "Postcopy recovery cannot work "
1967 "when release-ram capability is set");
1968 return false;
1969 }
1970
1971 /* This is a resume, skip init status */
1972 return true;
1973 }
1974
1975 if (migration_is_running()) {
1976 error_setg(errp, "There's a migration process in progress");
1977 return false;
1978 }
1979
1980 if (runstate_check(RUN_STATE_INMIGRATE)) {
1981 error_setg(errp, "Guest is waiting for an incoming migration");
1982 return false;
1983 }
1984
1985 if (runstate_check(RUN_STATE_POSTMIGRATE)) {
1986 error_setg(errp, "Can't migrate the vm that was paused due to "
1987 "previous migration");
1988 return false;
1989 }
1990
1991 if (kvm_hwpoisoned_mem()) {
1992 error_setg(errp, "Can't migrate this vm with hardware poisoned memory, "
1993 "please reboot the vm and try again");
1994 return false;
1995 }
1996
1997 if (migration_is_blocked(errp)) {
1998 return false;
1999 }
2000
2001 if (migrate_mapped_ram()) {
2002 if (migrate_tls()) {
2003 error_setg(errp, "Cannot use TLS with mapped-ram");
2004 return false;
2005 }
2006
2007 if (migrate_multifd_compression()) {
2008 error_setg(errp, "Cannot use compression with mapped-ram");
2009 return false;
2010 }
2011 }
2012
2013 if (migrate_mode_is_cpr(s)) {
2014 const char *conflict = NULL;
2015
2016 if (migrate_postcopy()) {
2017 conflict = "postcopy";
2018 } else if (migrate_background_snapshot()) {
2019 conflict = "background snapshot";
2020 } else if (migrate_colo()) {
2021 conflict = "COLO";
2022 }
2023
2024 if (conflict) {
2025 error_setg(errp, "Cannot use %s with CPR", conflict);
2026 return false;
2027 }
2028 }
2029
2030 if (blk) {
2031 if (migrate_colo()) {
2032 error_setg(errp, "No disk migration is required in COLO mode");
2033 return false;
2034 }
2035 if (migrate_block()) {
2036 error_setg(errp, "Command options are incompatible with "
2037 "current migration capabilities");
2038 return false;
2039 }
2040 if (!migrate_cap_set(MIGRATION_CAPABILITY_BLOCK, true, errp)) {
2041 return false;
2042 }
2043 s->must_remove_block_options = true;
2044 }
2045
2046 if (migrate_init(s, errp)) {
2047 return false;
2048 }
2049
2050 return true;
2051 }
2052
2053 void qmp_migrate(const char *uri, bool has_channels,
2054 MigrationChannelList *channels, bool has_blk, bool blk,
2055 bool has_detach, bool detach, bool has_resume, bool resume,
2056 Error **errp)
2057 {
2058 bool resume_requested;
2059 Error *local_err = NULL;
2060 MigrationState *s = migrate_get_current();
2061 g_autoptr(MigrationChannel) channel = NULL;
2062 MigrationAddress *addr = NULL;
2063
2064 /*
2065 * Preliminary checks for the 'uri' and 'channels' arguments
2066 */
2067 if (!uri == !channels) {
2068 error_setg(errp, "need either 'uri' or 'channels' argument");
2069 return;
2070 }
2071
2072 if (channels) {
2073 /* Verify that the migration channel list has exactly one item */
2074 if (channels->next) {
2075 error_setg(errp, "Channel list has more than one entry");
2076 return;
2077 }
2078 addr = channels->value->addr;
2079 }
2080
2081 if (uri) {
2082 /* caller uses the old URI syntax */
2083 if (!migrate_uri_parse(uri, &channel,
errp)) {
2084 return;
2085 }
2086 addr = channel->addr;
2087 }
2088
2089 /* transport mechanism not suitable for migration? */
2090 if (!migration_channels_and_transport_compatible(addr, errp)) {
2091 return;
2092 }
2093
2094 resume_requested = has_resume && resume;
2095 if (!migrate_prepare(s, has_blk && blk, resume_requested, errp)) {
2096 /* Error detected, put into errp */
2097 return;
2098 }
2099
2100 if (!resume_requested) {
2101 if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
2102 return;
2103 }
2104 }
2105
2106 if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) {
2107 SocketAddress *saddr = &addr->u.socket;
2108 if (saddr->type == SOCKET_ADDRESS_TYPE_INET ||
2109 saddr->type == SOCKET_ADDRESS_TYPE_UNIX ||
2110 saddr->type == SOCKET_ADDRESS_TYPE_VSOCK) {
2111 socket_start_outgoing_migration(s, saddr, &local_err);
2112 } else if (saddr->type == SOCKET_ADDRESS_TYPE_FD) {
2113 fd_start_outgoing_migration(s, saddr->u.fd.str, &local_err);
2114 }
2115 #ifdef CONFIG_RDMA
2116 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) {
2117 rdma_start_outgoing_migration(s, &addr->u.rdma, &local_err);
2118 #endif
2119 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_EXEC) {
2120 exec_start_outgoing_migration(s, addr->u.exec.args, &local_err);
2121 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) {
2122 file_start_outgoing_migration(s, &addr->u.file, &local_err);
2123 } else {
2124 error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE, "uri",
2125 "a valid migration protocol");
2126 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
2127 MIGRATION_STATUS_FAILED);
2128 block_cleanup_parameters();
2129 }
2130
2131 if (local_err) {
2132 if (!resume_requested) {
2133 yank_unregister_instance(MIGRATION_YANK_INSTANCE);
2134 }
2135 migrate_fd_error(s, local_err);
2136 error_propagate(errp, local_err);
2137 return;
2138 }
2139 }
2140
2141 void qmp_migrate_cancel(Error **errp)
2142 {
2143 migration_cancel(NULL);
2144 }
2145
2146 void qmp_migrate_continue(MigrationStatus state, Error **errp)
2147 {
2148 MigrationState *s = migrate_get_current();
2149 if (s->state != state) {
2150 error_setg(errp, "Migration not in expected state: %s",
2151 MigrationStatus_str(s->state));
2152 return;
2153 }
2154 qemu_sem_post(&s->pause_sem);
2155 }
2156
2157 int migration_rp_wait(MigrationState *s)
2158 {
2159 /* If the migration has already failed, ignore the wait */
2160 if (migrate_has_error(s)) {
2161 return -1;
2162 }
2163
2164 qemu_sem_wait(&s->rp_state.rp_sem);
2165
2166 /* After the wait, double check that there's no failure */
2167 if (migrate_has_error(s)) {
2168 return -1;
2169 }
2170
2171 return 0;
2172 }
2173
2174 void migration_rp_kick(MigrationState *s)
2175 {
2176 qemu_sem_post(&s->rp_state.rp_sem);
2177 }
2178
2179 static struct rp_cmd_args {
2180 ssize_t len; /* -1 = variable */
2181 const char *name;
2182 } rp_cmd_args[] = {
2183 [MIG_RP_MSG_INVALID] = { .len = -1, .name = "INVALID" },
2184 [MIG_RP_MSG_SHUT] = { .len = 4, .name = "SHUT" },
2185 [MIG_RP_MSG_PONG] = { .len = 4, .name = "PONG" },
2186 [MIG_RP_MSG_REQ_PAGES] = { .len = 12, .name = "REQ_PAGES" },
2187 [MIG_RP_MSG_REQ_PAGES_ID] = { .len = -1, .name = "REQ_PAGES_ID" },
2188 [MIG_RP_MSG_RECV_BITMAP] = { .len = -1, .name = "RECV_BITMAP" },
2189 [MIG_RP_MSG_RESUME_ACK] = { .len = 4, .name = "RESUME_ACK" },
2190 [MIG_RP_MSG_SWITCHOVER_ACK] = { .len = 0, .name = "SWITCHOVER_ACK" },
2191 [MIG_RP_MSG_MAX] = { .len = -1, .name = "MAX" },
2192 };
2193
2194 /*
2195 * Process a request for pages received on the return path.
2196 * We're allowed to send more than requested (e.g. to round to our page size)
2197 * and we don't need to send pages that have already been sent.
2198 */
2199 static void
2200 migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
2201 ram_addr_t start, size_t len, Error **errp)
2202 {
2203 long our_host_ps = qemu_real_host_page_size();
2204
2205 trace_migrate_handle_rp_req_pages(rbname, start, len);
2206
2207 /*
2208 * Since we currently insist on matching page sizes, just sanity check
2209 * we're being asked for whole host pages.
2210 */
2211 if (!QEMU_IS_ALIGNED(start, our_host_ps) ||
2212 !QEMU_IS_ALIGNED(len, our_host_ps)) {
2213 error_setg(errp, "MIG_RP_MSG_REQ_PAGES: Misaligned page request, start:"
2214 RAM_ADDR_FMT " len: %zd", start, len);
2215 return;
2216 }
2217
2218 ram_save_queue_pages(rbname, start, len, errp);
2219 }
2220
2221 static bool migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name,
2222 Error **errp)
2223 {
2224 RAMBlock *block = qemu_ram_block_by_name(block_name);
2225
2226 if (!block) {
2227 error_setg(errp, "MIG_RP_MSG_RECV_BITMAP has invalid block name '%s'",
2228 block_name);
2229 return false;
2230 }
2231
2232 /* Fetch the received bitmap and refresh the dirty bitmap */
2233 return ram_dirty_bitmap_reload(s, block, errp);
2234 }
2235
2236 static bool migrate_handle_rp_resume_ack(MigrationState *s,
2237 uint32_t value, Error **errp)
2238 {
2239 trace_source_return_path_thread_resume_ack(value);
2240
2241 if (value != MIGRATION_RESUME_ACK_VALUE) {
2242 error_setg(errp, "illegal resume_ack value %"PRIu32, value);
2243 return false;
2244 }
2245
2246 /* Now both sides are active. */
2247 migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_RECOVER,
2248 MIGRATION_STATUS_POSTCOPY_ACTIVE);
2249
2250 /* Notify the send thread that it's time to continue sending pages */
2251 migration_rp_kick(s);
2252
2253 return true;
2254 }
2255
2256 /*
2257 * Release ms->rp_state.from_dst_file (and postcopy_qemufile_src if
2258 * it exists) in a safe way.
2259 */
2260 static void migration_release_dst_files(MigrationState *ms)
2261 {
2262 QEMUFile *file;
2263
2264 WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
2265 /*
2266 * Reset the from_dst_file pointer first before releasing it, as we
2267 * can't block within the lock section
2268 */
2269 file = ms->rp_state.from_dst_file;
2270 ms->rp_state.from_dst_file = NULL;
2271 }
2272
2273 /*
2274 * Do the same for the postcopy fast path socket, if there is one. No
2275 * locking is needed because this qemufile should only be managed by
2276 * the return path thread.
2277 */
2278 if (ms->postcopy_qemufile_src) {
2279 migration_ioc_unregister_yank_from_file(ms->postcopy_qemufile_src);
2280 qemu_file_shutdown(ms->postcopy_qemufile_src);
2281 qemu_fclose(ms->postcopy_qemufile_src);
2282 ms->postcopy_qemufile_src = NULL;
2283 }
2284
2285 qemu_fclose(file);
2286 }
2287
2288 /*
2289 * Handles messages sent on the return path towards the source VM
2290 *
2291 */
2292 static void *source_return_path_thread(void *opaque)
2293 {
2294 MigrationState *ms = opaque;
2295 QEMUFile *rp = ms->rp_state.from_dst_file;
2296 uint16_t header_len, header_type;
2297 uint8_t buf[512];
2298 uint32_t tmp32, sibling_error;
2299 ram_addr_t start = 0; /* =0 to silence warning */
2300 size_t len = 0, expected_len;
2301 Error *err = NULL;
2302 int res;
2303
2304 trace_source_return_path_thread_entry();
2305 rcu_register_thread();
2306
2307 while (migration_is_setup_or_active()) {
2308 trace_source_return_path_thread_loop_top();
2309
2310 header_type = qemu_get_be16(rp);
2311 header_len = qemu_get_be16(rp);
2312
2313 if (qemu_file_get_error(rp)) {
2314 qemu_file_get_error_obj(rp, &err);
2315 goto out;
2316 }
2317
2318 if (header_type >= MIG_RP_MSG_MAX ||
2319 header_type == MIG_RP_MSG_INVALID) {
2320 error_setg(&err, "Received invalid message 0x%04x length 0x%04x",
2321 header_type, header_len);
2322 goto out;
2323 }
2324
2325 if ((rp_cmd_args[header_type].len != -1 &&
2326 header_len != rp_cmd_args[header_type].len) ||
2327 header_len > sizeof(buf)) {
2328 error_setg(&err, "Received '%s' message (0x%04x) with "
2329 "incorrect length %d expecting %zu",
2330 rp_cmd_args[header_type].name, header_type, header_len,
2331 (size_t)rp_cmd_args[header_type].len);
2332 goto out;
2333 }
2334
2335 /* We know we've got a valid header by this point */
2336 res = qemu_get_buffer(rp, buf, header_len);
2337 if (res != header_len) {
2338 error_setg(&err, "Failed reading data for message 0x%04x"
2339 " read %d expected %d",
2340 header_type, res, header_len);
2341 goto out;
2342 }
2343
2344 /* OK, we have the message and the data */
2345 switch (header_type) {
2346 case MIG_RP_MSG_SHUT:
2347 sibling_error = ldl_be_p(buf);
2348 trace_source_return_path_thread_shut(sibling_error);
2349 if (sibling_error) {
2350 error_setg(&err, "Sibling indicated error %d", sibling_error);
2351 }
2352 /*
2353 * We'll let the main thread deal with closing the RP;
2354 * we could do a shutdown(2) on it, but we're the only user
2355 * anyway, so there's nothing gained.
2356 */ 2357 goto out; 2358 2359 case MIG_RP_MSG_PONG: 2360 tmp32 = ldl_be_p(buf); 2361 trace_source_return_path_thread_pong(tmp32); 2362 qemu_sem_post(&ms->rp_state.rp_pong_acks); 2363 break; 2364 2365 case MIG_RP_MSG_REQ_PAGES: 2366 start = ldq_be_p(buf); 2367 len = ldl_be_p(buf + 8); 2368 migrate_handle_rp_req_pages(ms, NULL, start, len, &err); 2369 if (err) { 2370 goto out; 2371 } 2372 break; 2373 2374 case MIG_RP_MSG_REQ_PAGES_ID: 2375 expected_len = 12 + 1; /* header + termination */ 2376 2377 if (header_len >= expected_len) { 2378 start = ldq_be_p(buf); 2379 len = ldl_be_p(buf + 8); 2380 /* Now we expect an idstr */ 2381 tmp32 = buf[12]; /* Length of the following idstr */ 2382 buf[13 + tmp32] = '\0'; 2383 expected_len += tmp32; 2384 } 2385 if (header_len != expected_len) { 2386 error_setg(&err, "Req_Page_id with length %d expecting %zd", 2387 header_len, expected_len); 2388 goto out; 2389 } 2390 migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len, 2391 &err); 2392 if (err) { 2393 goto out; 2394 } 2395 break; 2396 2397 case MIG_RP_MSG_RECV_BITMAP: 2398 if (header_len < 1) { 2399 error_setg(&err, "MIG_RP_MSG_RECV_BITMAP missing block name"); 2400 goto out; 2401 } 2402 /* Format: len (1B) + idstr (<255B). This ends the idstr. */ 2403 buf[buf[0] + 1] = '\0'; 2404 if (!migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1), &err)) { 2405 goto out; 2406 } 2407 break; 2408 2409 case MIG_RP_MSG_RESUME_ACK: 2410 tmp32 = ldl_be_p(buf); 2411 if (!migrate_handle_rp_resume_ack(ms, tmp32, &err)) { 2412 goto out; 2413 } 2414 break; 2415 2416 case MIG_RP_MSG_SWITCHOVER_ACK: 2417 ms->switchover_acked = true; 2418 trace_source_return_path_thread_switchover_acked(); 2419 break; 2420 2421 default: 2422 break; 2423 } 2424 } 2425 2426 out: 2427 if (err) { 2428 migrate_set_error(ms, err); 2429 error_free(err); 2430 trace_source_return_path_thread_bad_end(); 2431 } 2432 2433 if (ms->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { 2434 /* 2435 * this will be extremely unlikely: that we got yet another network 2436 * issue during recovering of the 1st network failure.. during this 2437 * period the main migration thread can be waiting on rp_sem for 2438 * this thread to sync with the other side. 2439 * 2440 * When this happens, explicitly kick the migration thread out of 2441 * RECOVER stage and back to PAUSED, so the admin can try 2442 * everything again. 2443 */ 2444 migration_rp_kick(ms); 2445 } 2446 2447 trace_source_return_path_thread_end(); 2448 rcu_unregister_thread(); 2449 2450 return NULL; 2451 } 2452 2453 static int open_return_path_on_source(MigrationState *ms) 2454 { 2455 ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file); 2456 if (!ms->rp_state.from_dst_file) { 2457 return -1; 2458 } 2459 2460 trace_open_return_path_on_source(); 2461 2462 qemu_thread_create(&ms->rp_state.rp_thread, "return path", 2463 source_return_path_thread, ms, QEMU_THREAD_JOINABLE); 2464 ms->rp_state.rp_thread_created = true; 2465 2466 trace_open_return_path_on_source_continue(); 2467 2468 return 0; 2469 } 2470 2471 /* Return true if error detected, or false otherwise */ 2472 static bool close_return_path_on_source(MigrationState *ms) 2473 { 2474 if (!ms->rp_state.rp_thread_created) { 2475 return false; 2476 } 2477 2478 trace_migration_return_path_end_before(); 2479 2480 /* 2481 * If this is a normal exit then the destination will send a SHUT 2482 * and the rp_thread will exit, however if there's an error we 2483 * need to cause it to exit. 
shutdown(2), if we have it, will 2484 * cause it to unblock if it's stuck waiting for the destination. 2485 */ 2486 WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) { 2487 if (migrate_has_error(ms) && ms->rp_state.from_dst_file) { 2488 qemu_file_shutdown(ms->rp_state.from_dst_file); 2489 } 2490 } 2491 2492 qemu_thread_join(&ms->rp_state.rp_thread); 2493 ms->rp_state.rp_thread_created = false; 2494 migration_release_dst_files(ms); 2495 trace_migration_return_path_end_after(); 2496 2497 /* Return path will persist the error in MigrationState when quit */ 2498 return migrate_has_error(ms); 2499 } 2500 2501 static inline void 2502 migration_wait_main_channel(MigrationState *ms) 2503 { 2504 /* Wait until one PONG message received */ 2505 qemu_sem_wait(&ms->rp_state.rp_pong_acks); 2506 } 2507 2508 /* 2509 * Switch from normal iteration to postcopy 2510 * Returns non-0 on error 2511 */ 2512 static int postcopy_start(MigrationState *ms, Error **errp) 2513 { 2514 int ret; 2515 QIOChannelBuffer *bioc; 2516 QEMUFile *fb; 2517 uint64_t bandwidth = migrate_max_postcopy_bandwidth(); 2518 bool restart_block = false; 2519 int cur_state = MIGRATION_STATUS_ACTIVE; 2520 2521 if (migrate_postcopy_preempt()) { 2522 migration_wait_main_channel(ms); 2523 if (postcopy_preempt_establish_channel(ms)) { 2524 migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED); 2525 error_setg(errp, "%s: Failed to establish preempt channel", 2526 __func__); 2527 return -1; 2528 } 2529 } 2530 2531 if (!migrate_pause_before_switchover()) { 2532 migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE, 2533 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2534 } 2535 2536 trace_postcopy_start(); 2537 bql_lock(); 2538 trace_postcopy_start_set_run(); 2539 2540 ret = migration_stop_vm(ms, RUN_STATE_FINISH_MIGRATE); 2541 if (ret < 0) { 2542 error_setg_errno(errp, -ret, "%s: Failed to stop the VM", __func__); 2543 goto fail; 2544 } 2545 2546 ret = migration_maybe_pause(ms, &cur_state, 2547 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2548 if (ret < 0) { 2549 error_setg_errno(errp, -ret, "%s: Failed in migration_maybe_pause()", 2550 __func__); 2551 goto fail; 2552 } 2553 2554 ret = bdrv_inactivate_all(); 2555 if (ret < 0) { 2556 error_setg_errno(errp, -ret, "%s: Failed in bdrv_inactivate_all()", 2557 __func__); 2558 goto fail; 2559 } 2560 restart_block = true; 2561 2562 /* 2563 * Cause any non-postcopiable, but iterative devices to 2564 * send out their final data. 2565 */ 2566 qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false); 2567 2568 /* 2569 * in Finish migrate and with the io-lock held everything should 2570 * be quiet, but we've potentially still got dirty pages and we 2571 * need to tell the destination to throw any pages it's already received 2572 * that are dirty 2573 */ 2574 if (migrate_postcopy_ram()) { 2575 ram_postcopy_send_discard_bitmap(ms); 2576 } 2577 2578 /* 2579 * send rest of state - note things that are doing postcopy 2580 * will notice we're in POSTCOPY_ACTIVE and not actually 2581 * wrap their state up here 2582 */ 2583 migration_rate_set(bandwidth); 2584 if (migrate_postcopy_ram()) { 2585 /* Ping just for debugging, helps line traces up */ 2586 qemu_savevm_send_ping(ms->to_dst_file, 2); 2587 } 2588 2589 /* 2590 * While loading the device state we may trigger page transfer 2591 * requests and the fd must be free to process those, and thus 2592 * the destination must read the whole device state off the fd before 2593 * it starts processing it. 
Unfortunately the ad-hoc migration format
2594 * doesn't allow the destination to know the size to read without fully
2595 * parsing it through each device's load-state code (especially the open
2596 * coded devices that use get/put).
2597 * So we wrap the device state up in a package with a length at the start;
2598 * to do this we use a qemu_buf to hold the whole of the device state.
2599 */
2600 bioc = qio_channel_buffer_new(4096);
2601 qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
2602 fb = qemu_file_new_output(QIO_CHANNEL(bioc));
2603 object_unref(OBJECT(bioc));
2604
2605 /*
2606 * Make sure the receiver can get incoming pages before we send the rest
2607 * of the state
2608 */
2609 qemu_savevm_send_postcopy_listen(fb);
2610
2611 qemu_savevm_state_complete_precopy(fb, false, false);
2612 if (migrate_postcopy_ram()) {
2613 qemu_savevm_send_ping(fb, 3);
2614 }
2615
2616 qemu_savevm_send_postcopy_run(fb);
2617
2618 /* <><> end of stuff going into the package */
2619
2620 /* Last point of recovery; as soon as we send the package the destination
2621 * can open devices and potentially start running.
2622 * Let's just check again that we haven't got any errors.
2623 */
2624 ret = qemu_file_get_error(ms->to_dst_file);
2625 if (ret) {
2626 error_setg(errp, "postcopy_start: Migration stream errored (pre package)");
2627 goto fail_closefb;
2628 }
2629
2630 restart_block = false;
2631
2632 /* Now send that blob */
2633 if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
2634 error_setg(errp, "%s: Failed to send packaged data", __func__);
2635 goto fail_closefb;
2636 }
2637 qemu_fclose(fb);
2638
2639 /* Send a notification to give a chance to anything that needs to act
2640 * at the transition to postcopy and after the device state; in
2641 * particular SPICE needs to trigger a transition now
2642 */
2643 migration_call_notifiers(ms, MIG_EVENT_PRECOPY_DONE, NULL);
2644
2645 migration_downtime_end(ms);
2646
2647 bql_unlock();
2648
2649 if (migrate_postcopy_ram()) {
2650 /*
2651 * Although this ping is just for debug, it could potentially be
2652 * used for getting a better measurement of downtime at the source.
2653 */
2654 qemu_savevm_send_ping(ms->to_dst_file, 4);
2655 }
2656
2657 if (migrate_release_ram()) {
2658 ram_postcopy_migrated_memory_release(ms);
2659 }
2660
2661 ret = qemu_file_get_error(ms->to_dst_file);
2662 if (ret) {
2663 error_setg_errno(errp, -ret, "postcopy_start: Migration stream error");
2664 bql_lock();
2665 goto fail;
2666 }
2667 trace_postcopy_preempt_enabled(migrate_postcopy_preempt());
2668
2669 return ret;
2670
2671 fail_closefb:
2672 qemu_fclose(fb);
2673 fail:
2674 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
2675 MIGRATION_STATUS_FAILED);
2676 if (restart_block) {
2677 /* A failure happened early enough that we know the destination hasn't
2678 * accessed block devices, so we're safe to recover.
2679 */
2680 Error *local_err = NULL;
2681
2682 bdrv_activate_all(&local_err);
2683 if (local_err) {
2684 error_report_err(local_err);
2685 }
2686 }
2687 migration_call_notifiers(ms, MIG_EVENT_PRECOPY_FAILED, NULL);
2688 bql_unlock();
2689 return -1;
2690 }
2691
2692 /**
2693 * migration_maybe_pause: Pause if required by
2694 * migrate_pause_before_switchover(); called with the BQL held.
2695 * Returns: 0 on success
2696 */
2697 static int migration_maybe_pause(MigrationState *s,
2698 int *current_active_state,
2699 int new_state)
2700 {
2701 if (!migrate_pause_before_switchover()) {
2702 return 0;
2703 }
2704
2705 /* Since leaving this state is not atomic with posting the semaphore
2706 * it's possible that someone could have issued multiple migrate_continue
2707 * and the semaphore is incorrectly positive at this point;
2708 * the docs say it's undefined to reinit a semaphore that's already
2709 * init'd, so use timedwait to eat up any existing posts.
2710 */
2711 while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) {
2712 /* This block intentionally left blank */
2713 }
2714
2715 /*
2716 * If the migration is cancelled when it is in the completion phase,
2717 * the migration state is set to MIGRATION_STATUS_CANCELLING.
2718 * In that case, don't wait on the semaphore; otherwise we would
2719 * block forever waiting for 'pause_sem'.
2720 */
2721 if (s->state != MIGRATION_STATUS_CANCELLING) {
2722 bql_unlock();
2723 migrate_set_state(&s->state, *current_active_state,
2724 MIGRATION_STATUS_PRE_SWITCHOVER);
2725 qemu_sem_wait(&s->pause_sem);
2726 migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
2727 new_state);
2728 *current_active_state = new_state;
2729 bql_lock();
2730 }
2731
2732 return s->state == new_state ? 0 : -EINVAL;
2733 }
2734
2735 static int migration_completion_precopy(MigrationState *s,
2736 int *current_active_state)
2737 {
2738 int ret;
2739
2740 bql_lock();
2741
2742 if (!migrate_mode_is_cpr(s)) {
2743 ret = migration_stop_vm(s, RUN_STATE_FINISH_MIGRATE);
2744 if (ret < 0) {
2745 goto out_unlock;
2746 }
2747 }
2748
2749 ret = migration_maybe_pause(s, current_active_state,
2750 MIGRATION_STATUS_DEVICE);
2751 if (ret < 0) {
2752 goto out_unlock;
2753 }
2754
2755 /*
2756 * Inactivate disks except in COLO, and track that we have done so in order
2757 * to remember to reactivate them if migration fails or is cancelled.
2758 */
2759 s->block_inactive = !migrate_colo();
2760 migration_rate_set(RATE_LIMIT_DISABLED);
2761 ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
2762 s->block_inactive);
2763 out_unlock:
2764 bql_unlock();
2765 return ret;
2766 }
2767
2768 static void migration_completion_postcopy(MigrationState *s)
2769 {
2770 trace_migration_completion_postcopy_end();
2771
2772 bql_lock();
2773 qemu_savevm_state_complete_postcopy(s->to_dst_file);
2774 bql_unlock();
2775
2776 /*
2777 * Shut down the postcopy fast path thread. This is only needed when
2778 * the dest QEMU binary is old (7.1/7.2). QEMU 8.0+ doesn't need this.
2779 */
2780 if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
2781 postcopy_preempt_shutdown_file(s);
2782 }
2783
2784 trace_migration_completion_postcopy_end_after_complete();
2785 }
2786
2787 static void migration_completion_failed(MigrationState *s,
2788 int current_active_state)
2789 {
2790 if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE ||
2791 s->state == MIGRATION_STATUS_DEVICE)) {
2792 /*
2793 * If not doing postcopy, vm_start() will be called: let's
2794 * regain control on images.
2795 */
2796 Error *local_err = NULL;
2797
2798 bql_lock();
2799 bdrv_activate_all(&local_err);
2800 if (local_err) {
2801 error_report_err(local_err);
2802 } else {
2803 s->block_inactive = false;
2804 }
2805 bql_unlock();
2806 }
2807
2808 migrate_set_state(&s->state, current_active_state,
2809 MIGRATION_STATUS_FAILED);
2810 }
2811
2812 /**
2813 * migration_completion: Used by migration_thread when there's not much left.
2814 * The caller 'breaks' the loop when this returns.
2815 *
2816 * @s: Current migration state
2817 */
2818 static void migration_completion(MigrationState *s)
2819 {
2820 int ret = 0;
2821 int current_active_state = s->state;
2822 Error *local_err = NULL;
2823
2824 if (s->state == MIGRATION_STATUS_ACTIVE) {
2825 ret = migration_completion_precopy(s, &current_active_state);
2826 } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
2827 migration_completion_postcopy(s);
2828 } else {
2829 ret = -1;
2830 }
2831
2832 if (ret < 0) {
2833 goto fail;
2834 }
2835
2836 if (close_return_path_on_source(s)) {
2837 goto fail;
2838 }
2839
2840 if (qemu_file_get_error(s->to_dst_file)) {
2841 trace_migration_completion_file_err();
2842 goto fail;
2843 }
2844
2845 if (migrate_colo() && s->state == MIGRATION_STATUS_ACTIVE) {
2846 /* COLO does not support postcopy */
2847 migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
2848 MIGRATION_STATUS_COLO);
2849 } else {
2850 migration_completion_end(s);
2851 }
2852
2853 return;
2854
2855 fail:
2856 if (qemu_file_get_error_obj(s->to_dst_file, &local_err)) {
2857 migrate_set_error(s, local_err);
2858 error_free(local_err);
2859 } else if (ret) {
2860 error_setg_errno(&local_err, -ret, "Error in migration completion");
2861 migrate_set_error(s, local_err);
2862 error_free(local_err);
2863 }
2864
2865 migration_completion_failed(s, current_active_state);
2866 }
2867
2868 /**
2869 * bg_migration_completion: Used by bg_migration_thread after all the
2870 * RAM has been saved. The caller 'breaks' the loop when this returns.
2871 *
2872 * @s: Current migration state
2873 */
2874 static void bg_migration_completion(MigrationState *s)
2875 {
2876 int current_active_state = s->state;
2877
2878 if (s->state == MIGRATION_STATUS_ACTIVE) {
2879 /*
2880 * By this moment we have the RAM content saved into the migration stream.
2881 * The next step is to flush the non-RAM content (device state)
2882 * right after the RAM content. The device state has been stored into
2883 * the temporary buffer before RAM saving started.
2884 */
2885 qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage);
2886 qemu_fflush(s->to_dst_file);
2887 } else if (s->state == MIGRATION_STATUS_CANCELLING) {
2888 goto fail;
2889 }
2890
2891 if (qemu_file_get_error(s->to_dst_file)) {
2892 trace_migration_completion_file_err();
2893 goto fail;
2894 }
2895
2896 migration_completion_end(s);
2897 return;
2898
2899 fail:
2900 migrate_set_state(&s->state, current_active_state,
2901 MIGRATION_STATUS_FAILED);
2902 }
2903
2904 typedef enum MigThrError {
2905 /* No error detected */
2906 MIG_THR_ERR_NONE = 0,
2907 /* Detected error, but resumed successfully */
2908 MIG_THR_ERR_RECOVERED = 1,
2909 /* Detected fatal error, need to exit */
2910 MIG_THR_ERR_FATAL = 2,
2911 } MigThrError;
2912
2913 static int postcopy_resume_handshake(MigrationState *s)
2914 {
2915 qemu_savevm_send_postcopy_resume(s->to_dst_file);
2916
2917 while (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
2918 if (migration_rp_wait(s)) {
2919 return -1;
2920 }
2921 }
2922
2923 if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
2924 return 0;
2925 }
2926
2927 return -1;
2928 }
2929
2930 /* Return zero if success, or <0 for error */
2931 static int postcopy_do_resume(MigrationState *s)
2932 {
2933 int ret;
2934
2935 /*
2936 * Call all the resume_prepare() hooks, so that modules can be
2937 * ready for the migration resume.
2938 */
2939 ret = qemu_savevm_state_resume_prepare(s);
2940 if (ret) {
2941 error_report("%s: resume_prepare() failure detected: %d",
2942 __func__, ret);
2943 return ret;
2944 }
2945
2946 /*
2947 * If preempt is enabled, re-establish the preempt channel. Note that
2948 * we do it after resume_prepare() to make sure the main channel is
2949 * created before the preempt channel. E.g. with a weak network, the
2950 * dest QEMU could otherwise see the preempt and main channels connect
2951 * in the wrong order. Doing it here guarantees the correct order.
2952 */
2953 ret = postcopy_preempt_establish_channel(s);
2954 if (ret) {
2955 error_report("%s: postcopy_preempt_establish_channel(): %d",
2956 __func__, ret);
2957 return ret;
2958 }
2959
2960 /*
2961 * Last handshake with destination on the resume (destination will
2962 * switch to postcopy-active afterwards)
2963 */
2964 ret = postcopy_resume_handshake(s);
2965 if (ret) {
2966 error_report("%s: handshake failed: %d", __func__, ret);
2967 return ret;
2968 }
2969
2970 return 0;
2971 }
2972
2973 /*
2974 * We don't return until we are in a safe state to continue the current
2975 * postcopy migration. Returns MIG_THR_ERR_RECOVERED if recovered, or
2976 * MIG_THR_ERR_FATAL if an unrecoverable failure happened.
2977 */
2978 static MigThrError postcopy_pause(MigrationState *s)
2979 {
2980 assert(s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
2981
2982 while (true) {
2983 QEMUFile *file;
2984
2985 /*
2986 * We're already pausing, so ignore any errors on the return
2987 * path and just wait for the thread to finish. It will be
2988 * re-created when we resume.
2989 */
2990 close_return_path_on_source(s);
2991
2992 /*
2993 * Current channel is possibly broken. Release it. Note that this is
2994 * guaranteed even without the lock, because to_dst_file should only
2995 * be modified by the migration thread. That also guarantees that the
2996 * unregister of yank is safe without the lock. It would even be safe
2997 * inside the qemu_file_lock, but we don't do that to avoid taking
2998 * another mutex (yank_lock) within qemu_file_lock. TL;DR: we keep
2999 * the qemu_file_lock critical section as small as possible.
3000 */
3001 assert(s->to_dst_file);
3002 migration_ioc_unregister_yank_from_file(s->to_dst_file);
3003 qemu_mutex_lock(&s->qemu_file_lock);
3004 file = s->to_dst_file;
3005 s->to_dst_file = NULL;
3006 qemu_mutex_unlock(&s->qemu_file_lock);
3007
3008 qemu_file_shutdown(file);
3009 qemu_fclose(file);
3010
3011 migrate_set_state(&s->state, s->state,
3012 MIGRATION_STATUS_POSTCOPY_PAUSED);
3013
3014 error_report("Detected IO failure for postcopy. "
3015 "Migration paused.");
3016
3017 /*
3018 * We wait until things are fixed up. Then someone will set the
3019 * status back for us.
3020 */
3021 while (s->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
3022 qemu_sem_wait(&s->postcopy_pause_sem);
3023 }
3024
3025 if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
3026 /* Woken up by a recover procedure. Give it a shot */
3027
3028 /* Do the resume logic */
3029 if (postcopy_do_resume(s) == 0) {
3030 /* Let's continue! */
3031 trace_postcopy_pause_continued();
3032 return MIG_THR_ERR_RECOVERED;
3033 } else {
3034 /*
3035 * Something went wrong during recovery, so pause
3036 * again. Pausing is always better than throwing
3037 * data away.
3038 */
3039 continue;
3040 }
3041 } else {
3042 /* This is not right... Time to quit. */
3043 return MIG_THR_ERR_FATAL;
3044 }
3045 }
3046 }
3047
3048 void migration_file_set_error(int err)
3049 {
3050 MigrationState *s = current_migration;
3051
3052 WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
3053 if (s->to_dst_file) {
3054 qemu_file_set_error(s->to_dst_file, err);
3055 }
3056 }
3057 }
3058
3059 static MigThrError migration_detect_error(MigrationState *s)
3060 {
3061 int ret;
3062 int state = s->state;
3063 Error *local_error = NULL;
3064
3065 if (state == MIGRATION_STATUS_CANCELLING ||
3066 state == MIGRATION_STATUS_CANCELLED) {
3067 /* End the migration, but don't set the state to failed */
3068 return MIG_THR_ERR_FATAL;
3069 }
3070
3071 /*
3072 * Try to detect any file errors. Note that postcopy_qemufile_src will
3073 * be NULL when postcopy preempt is not enabled.
3074 */
3075 ret = qemu_file_get_error_obj_any(s->to_dst_file,
3076 s->postcopy_qemufile_src,
3077 &local_error);
3078 if (!ret) {
3079 /* Everything is fine */
3080 assert(!local_error);
3081 return MIG_THR_ERR_NONE;
3082 }
3083
3084 if (local_error) {
3085 migrate_set_error(s, local_error);
3086 error_free(local_error);
3087 }
3088
3089 if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE && ret) {
3090 /*
3091 * For postcopy, we allow the network to be down for a
3092 * while. After that, it can be continued by a
3093 * recovery phase.
3094 */
3095 return postcopy_pause(s);
3096 } else {
3097 /*
3098 * For precopy (or postcopy with an error outside IO), we fail
3099 * immediately.
3100 */
3101 migrate_set_state(&s->state, state, MIGRATION_STATUS_FAILED);
3102 trace_migration_thread_file_err();
3103
3104 /* Time to stop the migration, now. */
3105 return MIG_THR_ERR_FATAL;
3106 }
3107 }
3108
3109 static void migration_completion_end(MigrationState *s)
3110 {
3111 uint64_t bytes = migration_transferred_bytes();
3112 int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3113 int64_t transfer_time;
3114
3115 /*
3116 * Take the BQL here so that query-migrate on the QMP thread sees:
3117 * - atomic update of s->total_time and s->mbps;
3118 * - correct ordering of s->mbps update vs.
s->state;
3119 */
3120 bql_lock();
3121 migration_downtime_end(s);
3122 s->total_time = end_time - s->start_time;
3123 transfer_time = s->total_time - s->setup_time;
3124 if (transfer_time) {
3125 s->mbps = ((double) bytes * 8.0) / transfer_time / 1000;
3126 }
3127
3128 migrate_set_state(&s->state, s->state,
3129 MIGRATION_STATUS_COMPLETED);
3130 bql_unlock();
3131 }
3132
3133 static void update_iteration_initial_status(MigrationState *s)
3134 {
3135 /*
3136 * Update these three fields together, to avoid mismatched values
3137 * leading to a wrong speed calculation.
3138 */
3139 s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3140 s->iteration_initial_bytes = migration_transferred_bytes();
3141 s->iteration_initial_pages = ram_get_total_transferred_pages();
3142 }
3143
3144 static void migration_update_counters(MigrationState *s,
3145 int64_t current_time)
3146 {
3147 uint64_t transferred, transferred_pages, time_spent;
3148 uint64_t current_bytes; /* bytes transferred since the beginning */
3149 uint64_t switchover_bw;
3150 /* Expected bandwidth when switching over to destination QEMU */
3151 double expected_bw_per_ms;
3152 double bandwidth;
3153
3154 if (current_time < s->iteration_start_time + BUFFER_DELAY) {
3155 return;
3156 }
3157
3158 switchover_bw = migrate_avail_switchover_bandwidth();
3159 current_bytes = migration_transferred_bytes();
3160 transferred = current_bytes - s->iteration_initial_bytes;
3161 time_spent = current_time - s->iteration_start_time;
3162 bandwidth = (double)transferred / time_spent;
3163
3164 if (switchover_bw) {
3165 /*
3166 * If the user specified a switchover bandwidth, trust the user:
3167 * it can be more accurate than our estimate.
3168 */
3169 expected_bw_per_ms = switchover_bw / 1000;
3170 } else {
3171 /* If the user didn't specify a bandwidth, use the estimated one */
3172 expected_bw_per_ms = bandwidth;
3173 }
3174
3175 s->threshold_size = expected_bw_per_ms * migrate_downtime_limit();
3176
3177 s->mbps = (((double) transferred * 8.0) /
3178 ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
3179
3180 transferred_pages = ram_get_total_transferred_pages() -
3181 s->iteration_initial_pages;
3182 s->pages_per_second = (double) transferred_pages /
3183 (((double) time_spent / 1000.0));
3184
3185 /*
3186 * If we haven't sent anything, we don't want to
3187 * recalculate; 10000 bytes is a small enough number for our purposes
3188 */
3189 if (stat64_get(&mig_stats.dirty_pages_rate) &&
3190 transferred > 10000) {
3191 s->expected_downtime =
3192 stat64_get(&mig_stats.dirty_bytes_last_sync) / expected_bw_per_ms;
3193 }
3194
3195 migration_rate_reset();
3196
3197 update_iteration_initial_status(s);
3198
3199 trace_migrate_transferred(transferred, time_spent,
3200 /* Both in unit bytes/ms */
3201 bandwidth, switchover_bw / 1000,
3202 s->threshold_size);
3203 }
3204
3205 static bool migration_can_switchover(MigrationState *s)
3206 {
3207 if (!migrate_switchover_ack()) {
3208 return true;
3209 }
3210
3211 /* No reason to wait for switchover ACK if VM is stopped */
3212 if (!runstate_is_running()) {
3213 return true;
3214 }
3215
3216 return s->switchover_acked;
3217 }
3218
3219 /* Migration thread iteration status */
3220 typedef enum {
3221 MIG_ITERATE_RESUME, /* Resume current iteration */
3222 MIG_ITERATE_SKIP, /* Skip current iteration */
3223 MIG_ITERATE_BREAK, /* Break the loop */
3224 } MigIterateState;
3225
3226 /*
3227 * Return MIG_ITERATE_RESUME if the caller should continue with the
3228 * next iteration directly, another MigIterateState otherwise.
3229 */
3230 static MigIterateState migration_iteration_run(MigrationState *s)
3231 {
3232 uint64_t must_precopy, can_postcopy, pending_size;
3233 Error *local_err = NULL;
3234 bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE;
3235 bool can_switchover = migration_can_switchover(s);
3236
3237 qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy);
3238 pending_size = must_precopy + can_postcopy;
3239 trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy);
3240
3241 if (pending_size < s->threshold_size) {
3242 qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy);
3243 pending_size = must_precopy + can_postcopy;
3244 trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy);
3245 }
3246
3247 if ((!pending_size || pending_size < s->threshold_size) && can_switchover) {
3248 trace_migration_thread_low_pending(pending_size);
3249 migration_completion(s);
3250 return MIG_ITERATE_BREAK;
3251 }
3252
3253 /* Still a significant amount to transfer */
3254 if (!in_postcopy && must_precopy <= s->threshold_size && can_switchover &&
3255 qatomic_read(&s->start_postcopy)) {
3256 if (postcopy_start(s, &local_err)) {
3257 migrate_set_error(s, local_err);
3258 error_report_err(local_err);
3259 }
3260 return MIG_ITERATE_SKIP;
3261 }
3262
3263 /* Just another iteration step */
3264 qemu_savevm_state_iterate(s->to_dst_file, in_postcopy);
3265 return MIG_ITERATE_RESUME;
3266 }
3267
3268 static void migration_iteration_finish(MigrationState *s)
3269 {
3270 /* If we enabled cpu throttling for auto-converge, turn it off. */
3271 cpu_throttle_stop();
3272
3273 bql_lock();
3274 switch (s->state) {
3275 case MIGRATION_STATUS_COMPLETED:
3276 runstate_set(RUN_STATE_POSTMIGRATE);
3277 break;
3278 case MIGRATION_STATUS_COLO:
3279 assert(migrate_colo());
3280 migrate_start_colo_process(s);
3281 s->vm_old_state = RUN_STATE_RUNNING;
3282 /* Fallthrough */
3283 case MIGRATION_STATUS_FAILED:
3284 case MIGRATION_STATUS_CANCELLED:
3285 case MIGRATION_STATUS_CANCELLING:
3286 if (runstate_is_live(s->vm_old_state)) {
3287 if (!runstate_check(RUN_STATE_SHUTDOWN)) {
3288 vm_start();
3289 }
3290 } else {
3291 if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
3292 runstate_set(s->vm_old_state);
3293 }
3294 }
3295 break;
3296
3297 default:
3298 /* Should not reach here, but if so, forgive the VM. */
3299 error_report("%s: Unknown ending state %d", __func__, s->state);
3300 break;
3301 }
3302
3303 migration_bh_schedule(migrate_fd_cleanup_bh, s);
3304 bql_unlock();
3305 }
3306
3307 static void bg_migration_iteration_finish(MigrationState *s)
3308 {
3309 /*
3310 * Stop tracking RAM writes - un-protect memory, un-register UFFD
3311 * memory ranges, flush kernel wait queues and wake up threads
3312 * waiting for write fault to be resolved.
3313 */
3314 ram_write_tracking_stop();
3315
3316 bql_lock();
3317 switch (s->state) {
3318 case MIGRATION_STATUS_COMPLETED:
3319 case MIGRATION_STATUS_ACTIVE:
3320 case MIGRATION_STATUS_FAILED:
3321 case MIGRATION_STATUS_CANCELLED:
3322 case MIGRATION_STATUS_CANCELLING:
3323 break;
3324
3325 default:
3326 /* Should not reach here, but if so, forgive the VM. */
3327 error_report("%s: Unknown ending state %d", __func__, s->state);
3328 break;
3329 }
3330
3331 migration_bh_schedule(migrate_fd_cleanup_bh, s);
3332 bql_unlock();
3333 }
3334
3335 /*
3336 * Return MIG_ITERATE_RESUME if the caller should continue with the
3337 * next iteration directly, another MigIterateState otherwise.
3338 */
3339 static MigIterateState bg_migration_iteration_run(MigrationState *s)
3340 {
3341 int res;
3342
3343 res = qemu_savevm_state_iterate(s->to_dst_file, false);
3344 if (res > 0) {
3345 bg_migration_completion(s);
3346 return MIG_ITERATE_BREAK;
3347 }
3348
3349 return MIG_ITERATE_RESUME;
3350 }
3351
3352 void migration_make_urgent_request(void)
3353 {
3354 qemu_sem_post(&migrate_get_current()->rate_limit_sem);
3355 }
3356
3357 void migration_consume_urgent_request(void)
3358 {
3359 qemu_sem_wait(&migrate_get_current()->rate_limit_sem);
3360 }
3361
3362 /* Returns true if the rate limiting was broken by an urgent request */
3363 bool migration_rate_limit(void)
3364 {
3365 int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3366 MigrationState *s = migrate_get_current();
3367
3368 bool urgent = false;
3369 migration_update_counters(s, now);
3370 if (migration_rate_exceeded(s->to_dst_file)) {
3371
3372 if (qemu_file_get_error(s->to_dst_file)) {
3373 return false;
3374 }
3375 /*
3376 * Wait either for the rate-limiting delay to pass or for
3377 * something urgent to post the semaphore.
3378 */
3379 int ms = s->iteration_start_time + BUFFER_DELAY - now;
3380 trace_migration_rate_limit_pre(ms);
3381 if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) {
3382 /*
3383 * We were woken by one or more urgent things, but
3384 * the timedwait will have consumed one of them.
3385 * The service routine for the urgent wake decrements
3386 * the semaphore itself for each item it consumes,
3387 * so post back the one we just ate.
3388 */
3389 qemu_sem_post(&s->rate_limit_sem);
3390 urgent = true;
3391 }
3392 trace_migration_rate_limit_post(urgent);
3393 }
3394 return urgent;
3395 }
3396
3397 /*
3398 * If failover devices are present, wait until they are completely
3399 * unplugged
3400 */
3401
3402 static void qemu_savevm_wait_unplug(MigrationState *s, int old_state,
3403 int new_state)
3404 {
3405 if (qemu_savevm_state_guest_unplug_pending()) {
3406 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_WAIT_UNPLUG);
3407
3408 while (s->state == MIGRATION_STATUS_WAIT_UNPLUG &&
3409 qemu_savevm_state_guest_unplug_pending()) {
3410 qemu_sem_timedwait(&s->wait_unplug_sem, 250);
3411 }
3412 if (s->state != MIGRATION_STATUS_WAIT_UNPLUG) {
3413 int timeout = 120; /* 30 seconds */
3414 /*
3415 * Migration has been cancelled,
3416 * but as we have started an unplug we must wait for it to
3417 * finish to be able to plug the card back in.
3418 */
3419 while (timeout-- && qemu_savevm_state_guest_unplug_pending()) {
3420 qemu_sem_timedwait(&s->wait_unplug_sem, 250);
3421 }
3422 if (qemu_savevm_state_guest_unplug_pending() &&
3423 !qtest_enabled()) {
3424 warn_report("migration: partially unplugged device on "
3425 "failure");
3426 }
3427 }
3428
3429 migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG, new_state);
3430 } else {
3431 migrate_set_state(&s->state, old_state, new_state);
3432 }
3433 }
3434
3435 /*
3436 * Master migration thread on the source VM.
3437 * It drives the migration and pumps the data down the outgoing channel.
3438 */ 3439 static void *migration_thread(void *opaque) 3440 { 3441 MigrationState *s = opaque; 3442 MigrationThread *thread = NULL; 3443 int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST); 3444 MigThrError thr_error; 3445 bool urgent = false; 3446 Error *local_err = NULL; 3447 int ret; 3448 3449 thread = migration_threads_add("live_migration", qemu_get_thread_id()); 3450 3451 rcu_register_thread(); 3452 3453 object_ref(OBJECT(s)); 3454 update_iteration_initial_status(s); 3455 3456 if (!multifd_send_setup()) { 3457 goto out; 3458 } 3459 3460 bql_lock(); 3461 qemu_savevm_state_header(s->to_dst_file); 3462 bql_unlock(); 3463 3464 /* 3465 * If we opened the return path, we need to make sure dst has it 3466 * opened as well. 3467 */ 3468 if (s->rp_state.rp_thread_created) { 3469 /* Now tell the dest that it should open its end so it can reply */ 3470 qemu_savevm_send_open_return_path(s->to_dst_file); 3471 3472 /* And do a ping that will make stuff easier to debug */ 3473 qemu_savevm_send_ping(s->to_dst_file, 1); 3474 } 3475 3476 if (migrate_postcopy()) { 3477 /* 3478 * Tell the destination that we *might* want to do postcopy later; 3479 * if the other end can't do postcopy it should fail now, nice and 3480 * early. 3481 */ 3482 qemu_savevm_send_postcopy_advise(s->to_dst_file); 3483 } 3484 3485 if (migrate_colo()) { 3486 /* Notify migration destination that we enable COLO */ 3487 qemu_savevm_send_colo_enable(s->to_dst_file); 3488 } 3489 3490 bql_lock(); 3491 ret = qemu_savevm_state_setup(s->to_dst_file, &local_err); 3492 bql_unlock(); 3493 3494 qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP, 3495 MIGRATION_STATUS_ACTIVE); 3496 3497 /* 3498 * Handle SETUP failures after waiting for virtio-net-failover 3499 * devices to unplug. This to preserve migration state transitions. 3500 */ 3501 if (ret) { 3502 migrate_set_error(s, local_err); 3503 error_free(local_err); 3504 migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE, 3505 MIGRATION_STATUS_FAILED); 3506 goto out; 3507 } 3508 3509 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; 3510 3511 trace_migration_thread_setup_complete(); 3512 3513 while (migration_is_active()) { 3514 if (urgent || !migration_rate_exceeded(s->to_dst_file)) { 3515 MigIterateState iter_state = migration_iteration_run(s); 3516 if (iter_state == MIG_ITERATE_SKIP) { 3517 continue; 3518 } else if (iter_state == MIG_ITERATE_BREAK) { 3519 break; 3520 } 3521 } 3522 3523 /* 3524 * Try to detect any kind of failures, and see whether we 3525 * should stop the migration now. 3526 */ 3527 thr_error = migration_detect_error(s); 3528 if (thr_error == MIG_THR_ERR_FATAL) { 3529 /* Stop migration */ 3530 break; 3531 } else if (thr_error == MIG_THR_ERR_RECOVERED) { 3532 /* 3533 * Just recovered from a e.g. network failure, reset all 3534 * the local variables. This is important to avoid 3535 * breaking transferred_bytes and bandwidth calculation 3536 */ 3537 update_iteration_initial_status(s); 3538 } 3539 3540 urgent = migration_rate_limit(); 3541 } 3542 3543 out: 3544 trace_migration_thread_after_loop(); 3545 migration_iteration_finish(s); 3546 object_unref(OBJECT(s)); 3547 rcu_unregister_thread(); 3548 migration_threads_remove(thread); 3549 return NULL; 3550 } 3551 3552 static void bg_migration_vm_start_bh(void *opaque) 3553 { 3554 MigrationState *s = opaque; 3555 3556 vm_resume(s->vm_old_state); 3557 migration_downtime_end(s); 3558 } 3559 3560 /** 3561 * Background snapshot thread, based on live migration code. 
This is an alternative implementation of the live migration mechanism,
3563 * introduced specifically to support background snapshots.
3564 *
3565 * It takes advantage of the userfault_fd write-protection mechanism
3566 * introduced in the v5.7 kernel. Compared to existing dirty page logging
3567 * migration it produces much less stream traffic, resulting in smaller
3568 * snapshot images, simply because no duplicate pages can get into the stream.
3569 *
3570 * Another key point is that the generated vmstate stream reflects the
3571 * machine state 'frozen' at the beginning of snapshot creation, whereas
3572 * with dirty page logging the saved snapshot effectively is the state of
3573 * the VM at the end of the process.
3574 */
3575 static void *bg_migration_thread(void *opaque)
3576 {
3577 MigrationState *s = opaque;
3578 int64_t setup_start;
3579 MigThrError thr_error;
3580 QEMUFile *fb;
3581 bool early_fail = true;
3582 Error *local_err = NULL;
3583 int ret;
3584
3585 rcu_register_thread();
3586 object_ref(OBJECT(s));
3587
3588 migration_rate_set(RATE_LIMIT_DISABLED);
3589
3590 setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
3591 /*
3592 * We want to save the vmstate for the moment when migration was
3593 * initiated, but we also want to save the RAM content while the VM is
3594 * running. The RAM content should appear first in the vmstate. So, we
3595 * first stash the non-RAM part of the vmstate to the temporary buffer,
3596 * then write the RAM part of the vmstate to the migration stream
3597 * with the vCPUs running and, finally, write the stashed non-RAM part
3598 * of the vmstate from the buffer to the migration stream.
3599 */
3600 s->bioc = qio_channel_buffer_new(512 * 1024);
3601 qio_channel_set_name(QIO_CHANNEL(s->bioc), "vmstate-buffer");
3602 fb = qemu_file_new_output(QIO_CHANNEL(s->bioc));
3603 object_unref(OBJECT(s->bioc));
3604
3605 update_iteration_initial_status(s);
3606
3607 /*
3608 * Prepare for tracking memory writes with UFFD-WP - populate
3609 * RAM pages before protecting.
3610 */
3611 #ifdef __linux__
3612 ram_write_tracking_prepare();
3613 #endif
3614
3615 bql_lock();
3616 qemu_savevm_state_header(s->to_dst_file);
3617 ret = qemu_savevm_state_setup(s->to_dst_file, &local_err);
3618 bql_unlock();
3619
3620 qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
3621 MIGRATION_STATUS_ACTIVE);
3622
3623 /*
3624 * Handle SETUP failures after waiting for virtio-net-failover
3625 * devices to unplug. This preserves the migration state transitions.
3626 */
3627 if (ret) {
3628 migrate_set_error(s, local_err);
3629 error_free(local_err);
3630 migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
3631 MIGRATION_STATUS_FAILED);
3632 goto fail_setup;
3633 }
3634
3635 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
3636
3637 trace_migration_thread_setup_complete();
3638
3639 bql_lock();
3640
3641 if (migration_stop_vm(s, RUN_STATE_PAUSED)) {
3642 goto fail;
3643 }
3644 /*
3645 * Put vCPUs in sync with shadow context structures, then
3646 * save their state to channel-buffer along with devices.
3647 */
3648 cpu_synchronize_all_states();
3649 if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) {
3650 goto fail;
3651 }
3652 /*
3653 * Since we are going to get non-iterable state data directly
3654 * from s->bioc->data, an explicit flush is needed here.
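 * (The resulting stream layout is then, roughly:
 *   | savevm header | RAM sections, written live | buffered device state |
 * with the last part copied out of s->bioc by bg_migration_completion().)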
3655 */ 3656 qemu_fflush(fb); 3657 3658 /* Now initialize UFFD context and start tracking RAM writes */ 3659 if (ram_write_tracking_start()) { 3660 goto fail; 3661 } 3662 early_fail = false; 3663 3664 /* 3665 * Start VM from BH handler to avoid write-fault lock here. 3666 * UFFD-WP protection for the whole RAM is already enabled so 3667 * calling VM state change notifiers from vm_start() would initiate 3668 * writes to virtio VQs memory which is in write-protected region. 3669 */ 3670 migration_bh_schedule(bg_migration_vm_start_bh, s); 3671 bql_unlock(); 3672 3673 while (migration_is_active()) { 3674 MigIterateState iter_state = bg_migration_iteration_run(s); 3675 if (iter_state == MIG_ITERATE_SKIP) { 3676 continue; 3677 } else if (iter_state == MIG_ITERATE_BREAK) { 3678 break; 3679 } 3680 3681 /* 3682 * Try to detect any kind of failures, and see whether we 3683 * should stop the migration now. 3684 */ 3685 thr_error = migration_detect_error(s); 3686 if (thr_error == MIG_THR_ERR_FATAL) { 3687 /* Stop migration */ 3688 break; 3689 } 3690 3691 migration_update_counters(s, qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); 3692 } 3693 3694 trace_migration_thread_after_loop(); 3695 3696 fail: 3697 if (early_fail) { 3698 migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE, 3699 MIGRATION_STATUS_FAILED); 3700 bql_unlock(); 3701 } 3702 3703 fail_setup: 3704 bg_migration_iteration_finish(s); 3705 3706 qemu_fclose(fb); 3707 object_unref(OBJECT(s)); 3708 rcu_unregister_thread(); 3709 3710 return NULL; 3711 } 3712 3713 void migrate_fd_connect(MigrationState *s, Error *error_in) 3714 { 3715 Error *local_err = NULL; 3716 uint64_t rate_limit; 3717 bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED; 3718 int ret; 3719 3720 /* 3721 * If there's a previous error, free it and prepare for another one. 3722 * Meanwhile if migration completes successfully, there won't have an error 3723 * dumped when calling migrate_fd_cleanup(). 3724 */ 3725 migrate_error_free(s); 3726 3727 s->expected_downtime = migrate_downtime_limit(); 3728 if (error_in) { 3729 migrate_fd_error(s, error_in); 3730 if (resume) { 3731 /* 3732 * Don't do cleanup for resume if channel is invalid, but only dump 3733 * the error. We wait for another channel connect from the user. 3734 * The error_report still gives HMP user a hint on what failed. 3735 * It's normally done in migrate_fd_cleanup(), but call it here 3736 * explicitly. 3737 */ 3738 error_report_err(error_copy(s->error)); 3739 } else { 3740 migrate_fd_cleanup(s); 3741 } 3742 return; 3743 } 3744 3745 if (resume) { 3746 /* This is a resumed migration */ 3747 rate_limit = migrate_max_postcopy_bandwidth(); 3748 } else { 3749 /* This is a fresh new migration */ 3750 rate_limit = migrate_max_bandwidth(); 3751 3752 /* Notify before starting migration thread */ 3753 if (migration_call_notifiers(s, MIG_EVENT_PRECOPY_SETUP, &local_err)) { 3754 goto fail; 3755 } 3756 } 3757 3758 migration_rate_set(rate_limit); 3759 qemu_file_set_blocking(s->to_dst_file, true); 3760 3761 /* 3762 * Open the return path. For postcopy, it is used exclusively. For 3763 * precopy, only if user specified "return-path" capability would 3764 * QEMU uses the return path. 3765 */ 3766 if (migrate_postcopy_ram() || migrate_return_path()) { 3767 if (open_return_path_on_source(s)) { 3768 error_setg(&local_err, "Unable to open return-path for postcopy"); 3769 goto fail; 3770 } 3771 } 3772 3773 /* 3774 * This needs to be done before resuming a postcopy. 
Note: for newer 3775 * QEMUs we will delay the channel creation until postcopy_start(), to 3776 * avoid disorder of channel creations. 3777 */ 3778 if (migrate_postcopy_preempt() && s->preempt_pre_7_2) { 3779 postcopy_preempt_setup(s); 3780 } 3781 3782 if (resume) { 3783 /* Wakeup the main migration thread to do the recovery */ 3784 migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED, 3785 MIGRATION_STATUS_POSTCOPY_RECOVER); 3786 qemu_sem_post(&s->postcopy_pause_sem); 3787 return; 3788 } 3789 3790 if (migrate_mode_is_cpr(s)) { 3791 ret = migration_stop_vm(s, RUN_STATE_FINISH_MIGRATE); 3792 if (ret < 0) { 3793 error_setg(&local_err, "migration_stop_vm failed, error %d", -ret); 3794 goto fail; 3795 } 3796 } 3797 3798 if (migrate_background_snapshot()) { 3799 qemu_thread_create(&s->thread, "bg_snapshot", 3800 bg_migration_thread, s, QEMU_THREAD_JOINABLE); 3801 } else { 3802 qemu_thread_create(&s->thread, "live_migration", 3803 migration_thread, s, QEMU_THREAD_JOINABLE); 3804 } 3805 s->migration_thread_running = true; 3806 return; 3807 3808 fail: 3809 migrate_set_error(s, local_err); 3810 migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED); 3811 error_report_err(local_err); 3812 migrate_fd_cleanup(s); 3813 } 3814 3815 static void migration_class_init(ObjectClass *klass, void *data) 3816 { 3817 DeviceClass *dc = DEVICE_CLASS(klass); 3818 3819 dc->user_creatable = false; 3820 device_class_set_props(dc, migration_properties); 3821 } 3822 3823 static void migration_instance_finalize(Object *obj) 3824 { 3825 MigrationState *ms = MIGRATION_OBJ(obj); 3826 3827 qemu_mutex_destroy(&ms->error_mutex); 3828 qemu_mutex_destroy(&ms->qemu_file_lock); 3829 qemu_sem_destroy(&ms->wait_unplug_sem); 3830 qemu_sem_destroy(&ms->rate_limit_sem); 3831 qemu_sem_destroy(&ms->pause_sem); 3832 qemu_sem_destroy(&ms->postcopy_pause_sem); 3833 qemu_sem_destroy(&ms->rp_state.rp_sem); 3834 qemu_sem_destroy(&ms->rp_state.rp_pong_acks); 3835 qemu_sem_destroy(&ms->postcopy_qemufile_src_sem); 3836 error_free(ms->error); 3837 } 3838 3839 static void migration_instance_init(Object *obj) 3840 { 3841 MigrationState *ms = MIGRATION_OBJ(obj); 3842 3843 ms->state = MIGRATION_STATUS_NONE; 3844 ms->mbps = -1; 3845 ms->pages_per_second = -1; 3846 qemu_sem_init(&ms->pause_sem, 0); 3847 qemu_mutex_init(&ms->error_mutex); 3848 3849 migrate_params_init(&ms->parameters); 3850 3851 qemu_sem_init(&ms->postcopy_pause_sem, 0); 3852 qemu_sem_init(&ms->rp_state.rp_sem, 0); 3853 qemu_sem_init(&ms->rp_state.rp_pong_acks, 0); 3854 qemu_sem_init(&ms->rate_limit_sem, 0); 3855 qemu_sem_init(&ms->wait_unplug_sem, 0); 3856 qemu_sem_init(&ms->postcopy_qemufile_src_sem, 0); 3857 qemu_mutex_init(&ms->qemu_file_lock); 3858 } 3859 3860 /* 3861 * Return true if check pass, false otherwise. Error will be put 3862 * inside errp if provided. 3863 */ 3864 static bool migration_object_check(MigrationState *ms, Error **errp) 3865 { 3866 /* Assuming all off */ 3867 bool old_caps[MIGRATION_CAPABILITY__MAX] = { 0 }; 3868 3869 if (!migrate_params_check(&ms->parameters, errp)) { 3870 return false; 3871 } 3872 3873 return migrate_caps_check(old_caps, ms->capabilities, errp); 3874 } 3875 3876 static const TypeInfo migration_type = { 3877 .name = TYPE_MIGRATION, 3878 /* 3879 * NOTE: TYPE_MIGRATION is not really a device, as the object is 3880 * not created using qdev_new(), it is not attached to the qdev 3881 * device tree, and it is never realized. 
3882 * 3883 * TODO: Make this TYPE_OBJECT once QOM provides something like 3884 * TYPE_DEVICE's "-global" properties. 3885 */ 3886 .parent = TYPE_DEVICE, 3887 .class_init = migration_class_init, 3888 .class_size = sizeof(MigrationClass), 3889 .instance_size = sizeof(MigrationState), 3890 .instance_init = migration_instance_init, 3891 .instance_finalize = migration_instance_finalize, 3892 }; 3893 3894 static void register_migration_types(void) 3895 { 3896 type_register_static(&migration_type); 3897 } 3898 3899 type_init(register_migration_types); 3900