1 /* 2 * QEMU live migration 3 * 4 * Copyright IBM, Corp. 2008 5 * 6 * Authors: 7 * Anthony Liguori <aliguori@us.ibm.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 * Contributions after 2012-01-13 are licensed under the terms of the 13 * GNU GPL, version 2 or (at your option) any later version. 14 */ 15 16 #include "qemu/osdep.h" 17 #include "qemu/cutils.h" 18 #include "qemu/error-report.h" 19 #include "qemu/main-loop.h" 20 #include "migration/blocker.h" 21 #include "exec.h" 22 #include "fd.h" 23 #include "file.h" 24 #include "socket.h" 25 #include "sysemu/runstate.h" 26 #include "sysemu/sysemu.h" 27 #include "sysemu/cpu-throttle.h" 28 #include "rdma.h" 29 #include "ram.h" 30 #include "ram-compress.h" 31 #include "migration/global_state.h" 32 #include "migration/misc.h" 33 #include "migration.h" 34 #include "migration-stats.h" 35 #include "savevm.h" 36 #include "qemu-file.h" 37 #include "channel.h" 38 #include "migration/vmstate.h" 39 #include "block/block.h" 40 #include "qapi/error.h" 41 #include "qapi/clone-visitor.h" 42 #include "qapi/qapi-visit-migration.h" 43 #include "qapi/qapi-visit-sockets.h" 44 #include "qapi/qapi-commands-migration.h" 45 #include "qapi/qapi-events-migration.h" 46 #include "qapi/qmp/qerror.h" 47 #include "qapi/qmp/qnull.h" 48 #include "qemu/rcu.h" 49 #include "block.h" 50 #include "postcopy-ram.h" 51 #include "qemu/thread.h" 52 #include "trace.h" 53 #include "exec/target_page.h" 54 #include "io/channel-buffer.h" 55 #include "io/channel-tls.h" 56 #include "migration/colo.h" 57 #include "hw/boards.h" 58 #include "monitor/monitor.h" 59 #include "net/announce.h" 60 #include "qemu/queue.h" 61 #include "multifd.h" 62 #include "threadinfo.h" 63 #include "qemu/yank.h" 64 #include "sysemu/cpus.h" 65 #include "yank_functions.h" 66 #include "sysemu/qtest.h" 67 #include "options.h" 68 #include "sysemu/dirtylimit.h" 69 #include "qemu/sockets.h" 70 #include "sysemu/kvm.h" 71 72 #define NOTIFIER_ELEM_INIT(array, elem) \ 73 [elem] = NOTIFIER_WITH_RETURN_LIST_INITIALIZER((array)[elem]) 74 75 static NotifierWithReturnList migration_state_notifiers[] = { 76 NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_NORMAL), 77 NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_CPR_REBOOT), 78 }; 79 80 /* Messages sent on the return path from destination to source */ 81 enum mig_rp_message_type { 82 MIG_RP_MSG_INVALID = 0, /* Must be 0 */ 83 MIG_RP_MSG_SHUT, /* sibling will not send any more RP messages */ 84 MIG_RP_MSG_PONG, /* Response to a PING; data (seq: be32 ) */ 85 86 MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */ 87 MIG_RP_MSG_REQ_PAGES, /* data (start: be64, len: be32) */ 88 MIG_RP_MSG_RECV_BITMAP, /* send recved_bitmap back to source */ 89 MIG_RP_MSG_RESUME_ACK, /* tell source that we are ready to resume */ 90 MIG_RP_MSG_SWITCHOVER_ACK, /* Tell source it's OK to do switchover */ 91 92 MIG_RP_MSG_MAX 93 }; 94 95 /* When we add fault tolerance, we could have several 96 migrations at once. For now we don't need to add 97 dynamic creation of migration */ 98 99 static MigrationState *current_migration; 100 static MigrationIncomingState *current_incoming; 101 102 static GSList *migration_blockers[MIG_MODE__MAX]; 103 104 static bool migration_object_check(MigrationState *ms, Error **errp); 105 static int migration_maybe_pause(MigrationState *s, 106 int *current_active_state, 107 int new_state); 108 static void migrate_fd_cancel(MigrationState *s); 109 static bool close_return_path_on_source(MigrationState *s); 110 111 static void migration_downtime_start(MigrationState *s) 112 { 113 trace_vmstate_downtime_checkpoint("src-downtime-start"); 114 s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 115 } 116 117 static void migration_downtime_end(MigrationState *s) 118 { 119 int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 120 121 /* 122 * If downtime already set, should mean that postcopy already set it, 123 * then that should be the real downtime already. 124 */ 125 if (!s->downtime) { 126 s->downtime = now - s->downtime_start; 127 } 128 129 trace_vmstate_downtime_checkpoint("src-downtime-end"); 130 } 131 132 static bool migration_needs_multiple_sockets(void) 133 { 134 return migrate_multifd() || migrate_postcopy_preempt(); 135 } 136 137 static bool transport_supports_multi_channels(MigrationAddress *addr) 138 { 139 if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) { 140 SocketAddress *saddr = &addr->u.socket; 141 142 return saddr->type == SOCKET_ADDRESS_TYPE_INET || 143 saddr->type == SOCKET_ADDRESS_TYPE_UNIX || 144 saddr->type == SOCKET_ADDRESS_TYPE_VSOCK; 145 } 146 147 return false; 148 } 149 150 static bool 151 migration_channels_and_transport_compatible(MigrationAddress *addr, 152 Error **errp) 153 { 154 if (migration_needs_multiple_sockets() && 155 !transport_supports_multi_channels(addr)) { 156 error_setg(errp, "Migration requires multi-channel URIs (e.g. tcp)"); 157 return false; 158 } 159 160 return true; 161 } 162 163 static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp) 164 { 165 uintptr_t a = (uintptr_t) ap, b = (uintptr_t) bp; 166 167 return (a > b) - (a < b); 168 } 169 170 static int migration_stop_vm(MigrationState *s, RunState state) 171 { 172 int ret; 173 174 migration_downtime_start(s); 175 176 s->vm_old_state = runstate_get(); 177 global_state_store(); 178 179 ret = vm_stop_force_state(state); 180 181 trace_vmstate_downtime_checkpoint("src-vm-stopped"); 182 trace_migration_completion_vm_stop(ret); 183 184 return ret; 185 } 186 187 void migration_object_init(void) 188 { 189 /* This can only be called once. */ 190 assert(!current_migration); 191 current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION)); 192 193 /* 194 * Init the migrate incoming object as well no matter whether 195 * we'll use it or not. 196 */ 197 assert(!current_incoming); 198 current_incoming = g_new0(MigrationIncomingState, 1); 199 current_incoming->state = MIGRATION_STATUS_NONE; 200 current_incoming->postcopy_remote_fds = 201 g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD)); 202 qemu_mutex_init(¤t_incoming->rp_mutex); 203 qemu_mutex_init(¤t_incoming->postcopy_prio_thread_mutex); 204 qemu_event_init(¤t_incoming->main_thread_load_event, false); 205 qemu_sem_init(¤t_incoming->postcopy_pause_sem_dst, 0); 206 qemu_sem_init(¤t_incoming->postcopy_pause_sem_fault, 0); 207 qemu_sem_init(¤t_incoming->postcopy_pause_sem_fast_load, 0); 208 qemu_sem_init(¤t_incoming->postcopy_qemufile_dst_done, 0); 209 210 qemu_mutex_init(¤t_incoming->page_request_mutex); 211 qemu_cond_init(¤t_incoming->page_request_cond); 212 current_incoming->page_requested = g_tree_new(page_request_addr_cmp); 213 214 migration_object_check(current_migration, &error_fatal); 215 216 blk_mig_init(); 217 ram_mig_init(); 218 dirty_bitmap_mig_init(); 219 } 220 221 typedef struct { 222 QEMUBH *bh; 223 QEMUBHFunc *cb; 224 void *opaque; 225 } MigrationBH; 226 227 static void migration_bh_dispatch_bh(void *opaque) 228 { 229 MigrationState *s = migrate_get_current(); 230 MigrationBH *migbh = opaque; 231 232 /* cleanup this BH */ 233 qemu_bh_delete(migbh->bh); 234 migbh->bh = NULL; 235 236 /* dispatch the other one */ 237 migbh->cb(migbh->opaque); 238 object_unref(OBJECT(s)); 239 240 g_free(migbh); 241 } 242 243 void migration_bh_schedule(QEMUBHFunc *cb, void *opaque) 244 { 245 MigrationState *s = migrate_get_current(); 246 MigrationBH *migbh = g_new0(MigrationBH, 1); 247 QEMUBH *bh = qemu_bh_new(migration_bh_dispatch_bh, migbh); 248 249 /* Store these to dispatch when the BH runs */ 250 migbh->bh = bh; 251 migbh->cb = cb; 252 migbh->opaque = opaque; 253 254 /* 255 * Ref the state for bh, because it may be called when 256 * there're already no other refs 257 */ 258 object_ref(OBJECT(s)); 259 qemu_bh_schedule(bh); 260 } 261 262 void migration_cancel(const Error *error) 263 { 264 if (error) { 265 migrate_set_error(current_migration, error); 266 } 267 if (migrate_dirty_limit()) { 268 qmp_cancel_vcpu_dirty_limit(false, -1, NULL); 269 } 270 migrate_fd_cancel(current_migration); 271 } 272 273 void migration_shutdown(void) 274 { 275 /* 276 * When the QEMU main thread exit, the COLO thread 277 * may wait a semaphore. So, we should wakeup the 278 * COLO thread before migration shutdown. 279 */ 280 colo_shutdown(); 281 /* 282 * Cancel the current migration - that will (eventually) 283 * stop the migration using this structure 284 */ 285 migration_cancel(NULL); 286 object_unref(OBJECT(current_migration)); 287 288 /* 289 * Cancel outgoing migration of dirty bitmaps. It should 290 * at least unref used block nodes. 291 */ 292 dirty_bitmap_mig_cancel_outgoing(); 293 294 /* 295 * Cancel incoming migration of dirty bitmaps. Dirty bitmaps 296 * are non-critical data, and their loss never considered as 297 * something serious. 298 */ 299 dirty_bitmap_mig_cancel_incoming(); 300 } 301 302 /* For outgoing */ 303 MigrationState *migrate_get_current(void) 304 { 305 /* This can only be called after the object created. */ 306 assert(current_migration); 307 return current_migration; 308 } 309 310 MigrationIncomingState *migration_incoming_get_current(void) 311 { 312 assert(current_incoming); 313 return current_incoming; 314 } 315 316 void migration_incoming_transport_cleanup(MigrationIncomingState *mis) 317 { 318 if (mis->socket_address_list) { 319 qapi_free_SocketAddressList(mis->socket_address_list); 320 mis->socket_address_list = NULL; 321 } 322 323 if (mis->transport_cleanup) { 324 mis->transport_cleanup(mis->transport_data); 325 mis->transport_data = mis->transport_cleanup = NULL; 326 } 327 } 328 329 void migration_incoming_state_destroy(void) 330 { 331 struct MigrationIncomingState *mis = migration_incoming_get_current(); 332 333 multifd_recv_cleanup(); 334 compress_threads_load_cleanup(); 335 336 if (mis->to_src_file) { 337 /* Tell source that we are done */ 338 migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0); 339 qemu_fclose(mis->to_src_file); 340 mis->to_src_file = NULL; 341 } 342 343 if (mis->from_src_file) { 344 migration_ioc_unregister_yank_from_file(mis->from_src_file); 345 qemu_fclose(mis->from_src_file); 346 mis->from_src_file = NULL; 347 } 348 if (mis->postcopy_remote_fds) { 349 g_array_free(mis->postcopy_remote_fds, TRUE); 350 mis->postcopy_remote_fds = NULL; 351 } 352 353 migration_incoming_transport_cleanup(mis); 354 qemu_event_reset(&mis->main_thread_load_event); 355 356 if (mis->page_requested) { 357 g_tree_destroy(mis->page_requested); 358 mis->page_requested = NULL; 359 } 360 361 if (mis->postcopy_qemufile_dst) { 362 migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst); 363 qemu_fclose(mis->postcopy_qemufile_dst); 364 mis->postcopy_qemufile_dst = NULL; 365 } 366 367 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 368 } 369 370 static void migrate_generate_event(int new_state) 371 { 372 if (migrate_events()) { 373 qapi_event_send_migration(new_state); 374 } 375 } 376 377 /* 378 * Send a message on the return channel back to the source 379 * of the migration. 380 */ 381 static int migrate_send_rp_message(MigrationIncomingState *mis, 382 enum mig_rp_message_type message_type, 383 uint16_t len, void *data) 384 { 385 int ret = 0; 386 387 trace_migrate_send_rp_message((int)message_type, len); 388 QEMU_LOCK_GUARD(&mis->rp_mutex); 389 390 /* 391 * It's possible that the file handle got lost due to network 392 * failures. 393 */ 394 if (!mis->to_src_file) { 395 ret = -EIO; 396 return ret; 397 } 398 399 qemu_put_be16(mis->to_src_file, (unsigned int)message_type); 400 qemu_put_be16(mis->to_src_file, len); 401 qemu_put_buffer(mis->to_src_file, data, len); 402 return qemu_fflush(mis->to_src_file); 403 } 404 405 /* Request one page from the source VM at the given start address. 406 * rb: the RAMBlock to request the page in 407 * Start: Address offset within the RB 408 * Len: Length in bytes required - must be a multiple of pagesize 409 */ 410 int migrate_send_rp_message_req_pages(MigrationIncomingState *mis, 411 RAMBlock *rb, ram_addr_t start) 412 { 413 uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */ 414 size_t msglen = 12; /* start + len */ 415 size_t len = qemu_ram_pagesize(rb); 416 enum mig_rp_message_type msg_type; 417 const char *rbname; 418 int rbname_len; 419 420 *(uint64_t *)bufc = cpu_to_be64((uint64_t)start); 421 *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len); 422 423 /* 424 * We maintain the last ramblock that we requested for page. Note that we 425 * don't need locking because this function will only be called within the 426 * postcopy ram fault thread. 427 */ 428 if (rb != mis->last_rb) { 429 mis->last_rb = rb; 430 431 rbname = qemu_ram_get_idstr(rb); 432 rbname_len = strlen(rbname); 433 434 assert(rbname_len < 256); 435 436 bufc[msglen++] = rbname_len; 437 memcpy(bufc + msglen, rbname, rbname_len); 438 msglen += rbname_len; 439 msg_type = MIG_RP_MSG_REQ_PAGES_ID; 440 } else { 441 msg_type = MIG_RP_MSG_REQ_PAGES; 442 } 443 444 return migrate_send_rp_message(mis, msg_type, msglen, bufc); 445 } 446 447 int migrate_send_rp_req_pages(MigrationIncomingState *mis, 448 RAMBlock *rb, ram_addr_t start, uint64_t haddr) 449 { 450 void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb)); 451 bool received = false; 452 453 WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) { 454 received = ramblock_recv_bitmap_test_byte_offset(rb, start); 455 if (!received && !g_tree_lookup(mis->page_requested, aligned)) { 456 /* 457 * The page has not been received, and it's not yet in the page 458 * request list. Queue it. Set the value of element to 1, so that 459 * things like g_tree_lookup() will return TRUE (1) when found. 460 */ 461 g_tree_insert(mis->page_requested, aligned, (gpointer)1); 462 qatomic_inc(&mis->page_requested_count); 463 trace_postcopy_page_req_add(aligned, mis->page_requested_count); 464 } 465 } 466 467 /* 468 * If the page is there, skip sending the message. We don't even need the 469 * lock because as long as the page arrived, it'll be there forever. 470 */ 471 if (received) { 472 return 0; 473 } 474 475 return migrate_send_rp_message_req_pages(mis, rb, start); 476 } 477 478 static bool migration_colo_enabled; 479 bool migration_incoming_colo_enabled(void) 480 { 481 return migration_colo_enabled; 482 } 483 484 void migration_incoming_disable_colo(void) 485 { 486 ram_block_discard_disable(false); 487 migration_colo_enabled = false; 488 } 489 490 int migration_incoming_enable_colo(void) 491 { 492 #ifndef CONFIG_REPLICATION 493 error_report("ENABLE_COLO command come in migration stream, but COLO " 494 "module is not built in"); 495 return -ENOTSUP; 496 #endif 497 498 if (!migrate_colo()) { 499 error_report("ENABLE_COLO command come in migration stream, but c-colo " 500 "capability is not set"); 501 return -EINVAL; 502 } 503 504 if (ram_block_discard_disable(true)) { 505 error_report("COLO: cannot disable RAM discard"); 506 return -EBUSY; 507 } 508 migration_colo_enabled = true; 509 return 0; 510 } 511 512 void migrate_add_address(SocketAddress *address) 513 { 514 MigrationIncomingState *mis = migration_incoming_get_current(); 515 516 QAPI_LIST_PREPEND(mis->socket_address_list, 517 QAPI_CLONE(SocketAddress, address)); 518 } 519 520 bool migrate_uri_parse(const char *uri, MigrationChannel **channel, 521 Error **errp) 522 { 523 g_autoptr(MigrationChannel) val = g_new0(MigrationChannel, 1); 524 g_autoptr(MigrationAddress) addr = g_new0(MigrationAddress, 1); 525 InetSocketAddress *isock = &addr->u.rdma; 526 strList **tail = &addr->u.exec.args; 527 528 if (strstart(uri, "exec:", NULL)) { 529 addr->transport = MIGRATION_ADDRESS_TYPE_EXEC; 530 #ifdef WIN32 531 QAPI_LIST_APPEND(tail, g_strdup(exec_get_cmd_path())); 532 QAPI_LIST_APPEND(tail, g_strdup("/c")); 533 #else 534 QAPI_LIST_APPEND(tail, g_strdup("/bin/sh")); 535 QAPI_LIST_APPEND(tail, g_strdup("-c")); 536 #endif 537 QAPI_LIST_APPEND(tail, g_strdup(uri + strlen("exec:"))); 538 } else if (strstart(uri, "rdma:", NULL)) { 539 if (inet_parse(isock, uri + strlen("rdma:"), errp)) { 540 qapi_free_InetSocketAddress(isock); 541 return false; 542 } 543 addr->transport = MIGRATION_ADDRESS_TYPE_RDMA; 544 } else if (strstart(uri, "tcp:", NULL) || 545 strstart(uri, "unix:", NULL) || 546 strstart(uri, "vsock:", NULL) || 547 strstart(uri, "fd:", NULL)) { 548 addr->transport = MIGRATION_ADDRESS_TYPE_SOCKET; 549 SocketAddress *saddr = socket_parse(uri, errp); 550 if (!saddr) { 551 return false; 552 } 553 addr->u.socket.type = saddr->type; 554 addr->u.socket.u = saddr->u; 555 /* Don't free the objects inside; their ownership moved to "addr" */ 556 g_free(saddr); 557 } else if (strstart(uri, "file:", NULL)) { 558 addr->transport = MIGRATION_ADDRESS_TYPE_FILE; 559 addr->u.file.filename = g_strdup(uri + strlen("file:")); 560 if (file_parse_offset(addr->u.file.filename, &addr->u.file.offset, 561 errp)) { 562 return false; 563 } 564 } else { 565 error_setg(errp, "unknown migration protocol: %s", uri); 566 return false; 567 } 568 569 val->channel_type = MIGRATION_CHANNEL_TYPE_MAIN; 570 val->addr = g_steal_pointer(&addr); 571 *channel = g_steal_pointer(&val); 572 return true; 573 } 574 575 static void qemu_start_incoming_migration(const char *uri, bool has_channels, 576 MigrationChannelList *channels, 577 Error **errp) 578 { 579 g_autoptr(MigrationChannel) channel = NULL; 580 MigrationAddress *addr = NULL; 581 MigrationIncomingState *mis = migration_incoming_get_current(); 582 583 /* 584 * Having preliminary checks for uri and channel 585 */ 586 if (!uri == !channels) { 587 error_setg(errp, "need either 'uri' or 'channels' argument"); 588 return; 589 } 590 591 if (channels) { 592 /* To verify that Migrate channel list has only item */ 593 if (channels->next) { 594 error_setg(errp, "Channel list has more than one entries"); 595 return; 596 } 597 addr = channels->value->addr; 598 } 599 600 if (uri) { 601 /* caller uses the old URI syntax */ 602 if (!migrate_uri_parse(uri, &channel, errp)) { 603 return; 604 } 605 addr = channel->addr; 606 } 607 608 /* transport mechanism not suitable for migration? */ 609 if (!migration_channels_and_transport_compatible(addr, errp)) { 610 return; 611 } 612 613 migrate_set_state(&mis->state, MIGRATION_STATUS_NONE, 614 MIGRATION_STATUS_SETUP); 615 616 if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) { 617 SocketAddress *saddr = &addr->u.socket; 618 if (saddr->type == SOCKET_ADDRESS_TYPE_INET || 619 saddr->type == SOCKET_ADDRESS_TYPE_UNIX || 620 saddr->type == SOCKET_ADDRESS_TYPE_VSOCK) { 621 socket_start_incoming_migration(saddr, errp); 622 } else if (saddr->type == SOCKET_ADDRESS_TYPE_FD) { 623 fd_start_incoming_migration(saddr->u.fd.str, errp); 624 } 625 #ifdef CONFIG_RDMA 626 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) { 627 if (migrate_compress()) { 628 error_setg(errp, "RDMA and compression can't be used together"); 629 return; 630 } 631 if (migrate_xbzrle()) { 632 error_setg(errp, "RDMA and XBZRLE can't be used together"); 633 return; 634 } 635 if (migrate_multifd()) { 636 error_setg(errp, "RDMA and multifd can't be used together"); 637 return; 638 } 639 rdma_start_incoming_migration(&addr->u.rdma, errp); 640 #endif 641 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_EXEC) { 642 exec_start_incoming_migration(addr->u.exec.args, errp); 643 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) { 644 file_start_incoming_migration(&addr->u.file, errp); 645 } else { 646 error_setg(errp, "unknown migration protocol: %s", uri); 647 } 648 } 649 650 static void process_incoming_migration_bh(void *opaque) 651 { 652 Error *local_err = NULL; 653 MigrationIncomingState *mis = opaque; 654 655 trace_vmstate_downtime_checkpoint("dst-precopy-bh-enter"); 656 657 /* If capability late_block_activate is set: 658 * Only fire up the block code now if we're going to restart the 659 * VM, else 'cont' will do it. 660 * This causes file locking to happen; so we don't want it to happen 661 * unless we really are starting the VM. 662 */ 663 if (!migrate_late_block_activate() || 664 (autostart && (!global_state_received() || 665 runstate_is_live(global_state_get_runstate())))) { 666 /* Make sure all file formats throw away their mutable metadata. 667 * If we get an error here, just don't restart the VM yet. */ 668 bdrv_activate_all(&local_err); 669 if (local_err) { 670 error_report_err(local_err); 671 local_err = NULL; 672 autostart = false; 673 } 674 } 675 676 /* 677 * This must happen after all error conditions are dealt with and 678 * we're sure the VM is going to be running on this host. 679 */ 680 qemu_announce_self(&mis->announce_timer, migrate_announce_params()); 681 682 trace_vmstate_downtime_checkpoint("dst-precopy-bh-announced"); 683 684 multifd_recv_shutdown(); 685 686 dirty_bitmap_mig_before_vm_start(); 687 688 if (!global_state_received() || 689 runstate_is_live(global_state_get_runstate())) { 690 if (autostart) { 691 vm_start(); 692 } else { 693 runstate_set(RUN_STATE_PAUSED); 694 } 695 } else if (migration_incoming_colo_enabled()) { 696 migration_incoming_disable_colo(); 697 vm_start(); 698 } else { 699 runstate_set(global_state_get_runstate()); 700 } 701 trace_vmstate_downtime_checkpoint("dst-precopy-bh-vm-started"); 702 /* 703 * This must happen after any state changes since as soon as an external 704 * observer sees this event they might start to prod at the VM assuming 705 * it's ready to use. 706 */ 707 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, 708 MIGRATION_STATUS_COMPLETED); 709 migration_incoming_state_destroy(); 710 } 711 712 static void coroutine_fn 713 process_incoming_migration_co(void *opaque) 714 { 715 MigrationIncomingState *mis = migration_incoming_get_current(); 716 PostcopyState ps; 717 int ret; 718 719 assert(mis->from_src_file); 720 721 if (compress_threads_load_setup(mis->from_src_file)) { 722 error_report("Failed to setup decompress threads"); 723 goto fail; 724 } 725 726 mis->largest_page_size = qemu_ram_pagesize_largest(); 727 postcopy_state_set(POSTCOPY_INCOMING_NONE); 728 migrate_set_state(&mis->state, MIGRATION_STATUS_SETUP, 729 MIGRATION_STATUS_ACTIVE); 730 731 mis->loadvm_co = qemu_coroutine_self(); 732 ret = qemu_loadvm_state(mis->from_src_file); 733 mis->loadvm_co = NULL; 734 735 trace_vmstate_downtime_checkpoint("dst-precopy-loadvm-completed"); 736 737 ps = postcopy_state_get(); 738 trace_process_incoming_migration_co_end(ret, ps); 739 if (ps != POSTCOPY_INCOMING_NONE) { 740 if (ps == POSTCOPY_INCOMING_ADVISE) { 741 /* 742 * Where a migration had postcopy enabled (and thus went to advise) 743 * but managed to complete within the precopy period, we can use 744 * the normal exit. 745 */ 746 postcopy_ram_incoming_cleanup(mis); 747 } else if (ret >= 0) { 748 /* 749 * Postcopy was started, cleanup should happen at the end of the 750 * postcopy thread. 751 */ 752 trace_process_incoming_migration_co_postcopy_end_main(); 753 return; 754 } 755 /* Else if something went wrong then just fall out of the normal exit */ 756 } 757 758 if (ret < 0) { 759 MigrationState *s = migrate_get_current(); 760 761 if (migrate_has_error(s)) { 762 WITH_QEMU_LOCK_GUARD(&s->error_mutex) { 763 error_report_err(s->error); 764 } 765 } 766 error_report("load of migration failed: %s", strerror(-ret)); 767 goto fail; 768 } 769 770 if (colo_incoming_co() < 0) { 771 goto fail; 772 } 773 774 migration_bh_schedule(process_incoming_migration_bh, mis); 775 return; 776 fail: 777 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, 778 MIGRATION_STATUS_FAILED); 779 qemu_fclose(mis->from_src_file); 780 781 multifd_recv_cleanup(); 782 compress_threads_load_cleanup(); 783 784 exit(EXIT_FAILURE); 785 } 786 787 /** 788 * migration_incoming_setup: Setup incoming migration 789 * @f: file for main migration channel 790 */ 791 static void migration_incoming_setup(QEMUFile *f) 792 { 793 MigrationIncomingState *mis = migration_incoming_get_current(); 794 795 if (!mis->from_src_file) { 796 mis->from_src_file = f; 797 } 798 qemu_file_set_blocking(f, false); 799 } 800 801 void migration_incoming_process(void) 802 { 803 Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, NULL); 804 qemu_coroutine_enter(co); 805 } 806 807 /* Returns true if recovered from a paused migration, otherwise false */ 808 static bool postcopy_try_recover(void) 809 { 810 MigrationIncomingState *mis = migration_incoming_get_current(); 811 812 if (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) { 813 /* Resumed from a paused postcopy migration */ 814 815 /* This should be set already in migration_incoming_setup() */ 816 assert(mis->from_src_file); 817 /* Postcopy has standalone thread to do vm load */ 818 qemu_file_set_blocking(mis->from_src_file, true); 819 820 /* Re-configure the return path */ 821 mis->to_src_file = qemu_file_get_return_path(mis->from_src_file); 822 823 migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_PAUSED, 824 MIGRATION_STATUS_POSTCOPY_RECOVER); 825 826 /* 827 * Here, we only wake up the main loading thread (while the 828 * rest threads will still be waiting), so that we can receive 829 * commands from source now, and answer it if needed. The 830 * rest threads will be woken up afterwards until we are sure 831 * that source is ready to reply to page requests. 832 */ 833 qemu_sem_post(&mis->postcopy_pause_sem_dst); 834 return true; 835 } 836 837 return false; 838 } 839 840 void migration_fd_process_incoming(QEMUFile *f) 841 { 842 migration_incoming_setup(f); 843 if (postcopy_try_recover()) { 844 return; 845 } 846 migration_incoming_process(); 847 } 848 849 /* 850 * Returns true when we want to start a new incoming migration process, 851 * false otherwise. 852 */ 853 static bool migration_should_start_incoming(bool main_channel) 854 { 855 /* Multifd doesn't start unless all channels are established */ 856 if (migrate_multifd()) { 857 return migration_has_all_channels(); 858 } 859 860 /* Preempt channel only starts when the main channel is created */ 861 if (migrate_postcopy_preempt()) { 862 return main_channel; 863 } 864 865 /* 866 * For all the rest types of migration, we should only reach here when 867 * it's the main channel that's being created, and we should always 868 * proceed with this channel. 869 */ 870 assert(main_channel); 871 return true; 872 } 873 874 void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) 875 { 876 MigrationIncomingState *mis = migration_incoming_get_current(); 877 Error *local_err = NULL; 878 QEMUFile *f; 879 bool default_channel = true; 880 uint32_t channel_magic = 0; 881 int ret = 0; 882 883 if (migrate_multifd() && !migrate_postcopy_ram() && 884 qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) { 885 /* 886 * With multiple channels, it is possible that we receive channels 887 * out of order on destination side, causing incorrect mapping of 888 * source channels on destination side. Check channel MAGIC to 889 * decide type of channel. Please note this is best effort, postcopy 890 * preempt channel does not send any magic number so avoid it for 891 * postcopy live migration. Also tls live migration already does 892 * tls handshake while initializing main channel so with tls this 893 * issue is not possible. 894 */ 895 ret = migration_channel_read_peek(ioc, (void *)&channel_magic, 896 sizeof(channel_magic), errp); 897 898 if (ret != 0) { 899 return; 900 } 901 902 default_channel = (channel_magic == cpu_to_be32(QEMU_VM_FILE_MAGIC)); 903 } else { 904 default_channel = !mis->from_src_file; 905 } 906 907 if (multifd_recv_setup(errp) != 0) { 908 return; 909 } 910 911 if (default_channel) { 912 f = qemu_file_new_input(ioc); 913 migration_incoming_setup(f); 914 } else { 915 /* Multiple connections */ 916 assert(migration_needs_multiple_sockets()); 917 if (migrate_multifd()) { 918 multifd_recv_new_channel(ioc, &local_err); 919 } else { 920 assert(migrate_postcopy_preempt()); 921 f = qemu_file_new_input(ioc); 922 postcopy_preempt_new_channel(mis, f); 923 } 924 if (local_err) { 925 error_propagate(errp, local_err); 926 return; 927 } 928 } 929 930 if (migration_should_start_incoming(default_channel)) { 931 /* If it's a recovery, we're done */ 932 if (postcopy_try_recover()) { 933 return; 934 } 935 migration_incoming_process(); 936 } 937 } 938 939 /** 940 * @migration_has_all_channels: We have received all channels that we need 941 * 942 * Returns true when we have got connections to all the channels that 943 * we need for migration. 944 */ 945 bool migration_has_all_channels(void) 946 { 947 MigrationIncomingState *mis = migration_incoming_get_current(); 948 949 if (!mis->from_src_file) { 950 return false; 951 } 952 953 if (migrate_multifd()) { 954 return multifd_recv_all_channels_created(); 955 } 956 957 if (migrate_postcopy_preempt()) { 958 return mis->postcopy_qemufile_dst != NULL; 959 } 960 961 return true; 962 } 963 964 int migrate_send_rp_switchover_ack(MigrationIncomingState *mis) 965 { 966 return migrate_send_rp_message(mis, MIG_RP_MSG_SWITCHOVER_ACK, 0, NULL); 967 } 968 969 /* 970 * Send a 'SHUT' message on the return channel with the given value 971 * to indicate that we've finished with the RP. Non-0 value indicates 972 * error. 973 */ 974 void migrate_send_rp_shut(MigrationIncomingState *mis, 975 uint32_t value) 976 { 977 uint32_t buf; 978 979 buf = cpu_to_be32(value); 980 migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf); 981 } 982 983 /* 984 * Send a 'PONG' message on the return channel with the given value 985 * (normally in response to a 'PING') 986 */ 987 void migrate_send_rp_pong(MigrationIncomingState *mis, 988 uint32_t value) 989 { 990 uint32_t buf; 991 992 buf = cpu_to_be32(value); 993 migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf); 994 } 995 996 void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis, 997 char *block_name) 998 { 999 char buf[512]; 1000 int len; 1001 int64_t res; 1002 1003 /* 1004 * First, we send the header part. It contains only the len of 1005 * idstr, and the idstr itself. 1006 */ 1007 len = strlen(block_name); 1008 buf[0] = len; 1009 memcpy(buf + 1, block_name, len); 1010 1011 if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) { 1012 error_report("%s: MSG_RP_RECV_BITMAP only used for recovery", 1013 __func__); 1014 return; 1015 } 1016 1017 migrate_send_rp_message(mis, MIG_RP_MSG_RECV_BITMAP, len + 1, buf); 1018 1019 /* 1020 * Next, we dump the received bitmap to the stream. 1021 * 1022 * TODO: currently we are safe since we are the only one that is 1023 * using the to_src_file handle (fault thread is still paused), 1024 * and it's ok even not taking the mutex. However the best way is 1025 * to take the lock before sending the message header, and release 1026 * the lock after sending the bitmap. 1027 */ 1028 qemu_mutex_lock(&mis->rp_mutex); 1029 res = ramblock_recv_bitmap_send(mis->to_src_file, block_name); 1030 qemu_mutex_unlock(&mis->rp_mutex); 1031 1032 trace_migrate_send_rp_recv_bitmap(block_name, res); 1033 } 1034 1035 void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value) 1036 { 1037 uint32_t buf; 1038 1039 buf = cpu_to_be32(value); 1040 migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf); 1041 } 1042 1043 /* 1044 * Return true if we're already in the middle of a migration 1045 * (i.e. any of the active or setup states) 1046 */ 1047 bool migration_is_setup_or_active(int state) 1048 { 1049 switch (state) { 1050 case MIGRATION_STATUS_ACTIVE: 1051 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1052 case MIGRATION_STATUS_POSTCOPY_PAUSED: 1053 case MIGRATION_STATUS_POSTCOPY_RECOVER: 1054 case MIGRATION_STATUS_SETUP: 1055 case MIGRATION_STATUS_PRE_SWITCHOVER: 1056 case MIGRATION_STATUS_DEVICE: 1057 case MIGRATION_STATUS_WAIT_UNPLUG: 1058 case MIGRATION_STATUS_COLO: 1059 return true; 1060 1061 default: 1062 return false; 1063 1064 } 1065 } 1066 1067 bool migration_is_running(int state) 1068 { 1069 switch (state) { 1070 case MIGRATION_STATUS_ACTIVE: 1071 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1072 case MIGRATION_STATUS_POSTCOPY_PAUSED: 1073 case MIGRATION_STATUS_POSTCOPY_RECOVER: 1074 case MIGRATION_STATUS_SETUP: 1075 case MIGRATION_STATUS_PRE_SWITCHOVER: 1076 case MIGRATION_STATUS_DEVICE: 1077 case MIGRATION_STATUS_WAIT_UNPLUG: 1078 case MIGRATION_STATUS_CANCELLING: 1079 return true; 1080 1081 default: 1082 return false; 1083 1084 } 1085 } 1086 1087 static bool migrate_show_downtime(MigrationState *s) 1088 { 1089 return (s->state == MIGRATION_STATUS_COMPLETED) || migration_in_postcopy(); 1090 } 1091 1092 static void populate_time_info(MigrationInfo *info, MigrationState *s) 1093 { 1094 info->has_status = true; 1095 info->has_setup_time = true; 1096 info->setup_time = s->setup_time; 1097 1098 if (s->state == MIGRATION_STATUS_COMPLETED) { 1099 info->has_total_time = true; 1100 info->total_time = s->total_time; 1101 } else { 1102 info->has_total_time = true; 1103 info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - 1104 s->start_time; 1105 } 1106 1107 if (migrate_show_downtime(s)) { 1108 info->has_downtime = true; 1109 info->downtime = s->downtime; 1110 } else { 1111 info->has_expected_downtime = true; 1112 info->expected_downtime = s->expected_downtime; 1113 } 1114 } 1115 1116 static void populate_ram_info(MigrationInfo *info, MigrationState *s) 1117 { 1118 size_t page_size = qemu_target_page_size(); 1119 1120 info->ram = g_malloc0(sizeof(*info->ram)); 1121 info->ram->transferred = migration_transferred_bytes(); 1122 info->ram->total = ram_bytes_total(); 1123 info->ram->duplicate = stat64_get(&mig_stats.zero_pages); 1124 /* legacy value. It is not used anymore */ 1125 info->ram->skipped = 0; 1126 info->ram->normal = stat64_get(&mig_stats.normal_pages); 1127 info->ram->normal_bytes = info->ram->normal * page_size; 1128 info->ram->mbps = s->mbps; 1129 info->ram->dirty_sync_count = 1130 stat64_get(&mig_stats.dirty_sync_count); 1131 info->ram->dirty_sync_missed_zero_copy = 1132 stat64_get(&mig_stats.dirty_sync_missed_zero_copy); 1133 info->ram->postcopy_requests = 1134 stat64_get(&mig_stats.postcopy_requests); 1135 info->ram->page_size = page_size; 1136 info->ram->multifd_bytes = stat64_get(&mig_stats.multifd_bytes); 1137 info->ram->pages_per_second = s->pages_per_second; 1138 info->ram->precopy_bytes = stat64_get(&mig_stats.precopy_bytes); 1139 info->ram->downtime_bytes = stat64_get(&mig_stats.downtime_bytes); 1140 info->ram->postcopy_bytes = stat64_get(&mig_stats.postcopy_bytes); 1141 1142 if (migrate_xbzrle()) { 1143 info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache)); 1144 info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size(); 1145 info->xbzrle_cache->bytes = xbzrle_counters.bytes; 1146 info->xbzrle_cache->pages = xbzrle_counters.pages; 1147 info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss; 1148 info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate; 1149 info->xbzrle_cache->encoding_rate = xbzrle_counters.encoding_rate; 1150 info->xbzrle_cache->overflow = xbzrle_counters.overflow; 1151 } 1152 1153 populate_compress(info); 1154 1155 if (cpu_throttle_active()) { 1156 info->has_cpu_throttle_percentage = true; 1157 info->cpu_throttle_percentage = cpu_throttle_get_percentage(); 1158 } 1159 1160 if (s->state != MIGRATION_STATUS_COMPLETED) { 1161 info->ram->remaining = ram_bytes_remaining(); 1162 info->ram->dirty_pages_rate = 1163 stat64_get(&mig_stats.dirty_pages_rate); 1164 } 1165 1166 if (migrate_dirty_limit() && dirtylimit_in_service()) { 1167 info->has_dirty_limit_throttle_time_per_round = true; 1168 info->dirty_limit_throttle_time_per_round = 1169 dirtylimit_throttle_time_per_round(); 1170 1171 info->has_dirty_limit_ring_full_time = true; 1172 info->dirty_limit_ring_full_time = dirtylimit_ring_full_time(); 1173 } 1174 } 1175 1176 static void populate_disk_info(MigrationInfo *info) 1177 { 1178 if (blk_mig_active()) { 1179 info->disk = g_malloc0(sizeof(*info->disk)); 1180 info->disk->transferred = blk_mig_bytes_transferred(); 1181 info->disk->remaining = blk_mig_bytes_remaining(); 1182 info->disk->total = blk_mig_bytes_total(); 1183 } 1184 } 1185 1186 static void fill_source_migration_info(MigrationInfo *info) 1187 { 1188 MigrationState *s = migrate_get_current(); 1189 int state = qatomic_read(&s->state); 1190 GSList *cur_blocker = migration_blockers[migrate_mode()]; 1191 1192 info->blocked_reasons = NULL; 1193 1194 /* 1195 * There are two types of reasons a migration might be blocked; 1196 * a) devices marked in VMState as non-migratable, and 1197 * b) Explicit migration blockers 1198 * We need to add both of them here. 1199 */ 1200 qemu_savevm_non_migratable_list(&info->blocked_reasons); 1201 1202 while (cur_blocker) { 1203 QAPI_LIST_PREPEND(info->blocked_reasons, 1204 g_strdup(error_get_pretty(cur_blocker->data))); 1205 cur_blocker = g_slist_next(cur_blocker); 1206 } 1207 info->has_blocked_reasons = info->blocked_reasons != NULL; 1208 1209 switch (state) { 1210 case MIGRATION_STATUS_NONE: 1211 /* no migration has happened ever */ 1212 /* do not overwrite destination migration status */ 1213 return; 1214 case MIGRATION_STATUS_SETUP: 1215 info->has_status = true; 1216 info->has_total_time = false; 1217 break; 1218 case MIGRATION_STATUS_ACTIVE: 1219 case MIGRATION_STATUS_CANCELLING: 1220 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1221 case MIGRATION_STATUS_PRE_SWITCHOVER: 1222 case MIGRATION_STATUS_DEVICE: 1223 case MIGRATION_STATUS_POSTCOPY_PAUSED: 1224 case MIGRATION_STATUS_POSTCOPY_RECOVER: 1225 /* TODO add some postcopy stats */ 1226 populate_time_info(info, s); 1227 populate_ram_info(info, s); 1228 populate_disk_info(info); 1229 migration_populate_vfio_info(info); 1230 break; 1231 case MIGRATION_STATUS_COLO: 1232 info->has_status = true; 1233 /* TODO: display COLO specific information (checkpoint info etc.) */ 1234 break; 1235 case MIGRATION_STATUS_COMPLETED: 1236 populate_time_info(info, s); 1237 populate_ram_info(info, s); 1238 migration_populate_vfio_info(info); 1239 break; 1240 case MIGRATION_STATUS_FAILED: 1241 info->has_status = true; 1242 break; 1243 case MIGRATION_STATUS_CANCELLED: 1244 info->has_status = true; 1245 break; 1246 case MIGRATION_STATUS_WAIT_UNPLUG: 1247 info->has_status = true; 1248 break; 1249 } 1250 info->status = state; 1251 1252 QEMU_LOCK_GUARD(&s->error_mutex); 1253 if (s->error) { 1254 info->error_desc = g_strdup(error_get_pretty(s->error)); 1255 } 1256 } 1257 1258 static void fill_destination_migration_info(MigrationInfo *info) 1259 { 1260 MigrationIncomingState *mis = migration_incoming_get_current(); 1261 1262 if (mis->socket_address_list) { 1263 info->has_socket_address = true; 1264 info->socket_address = 1265 QAPI_CLONE(SocketAddressList, mis->socket_address_list); 1266 } 1267 1268 switch (mis->state) { 1269 case MIGRATION_STATUS_NONE: 1270 return; 1271 case MIGRATION_STATUS_SETUP: 1272 case MIGRATION_STATUS_CANCELLING: 1273 case MIGRATION_STATUS_CANCELLED: 1274 case MIGRATION_STATUS_ACTIVE: 1275 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1276 case MIGRATION_STATUS_POSTCOPY_PAUSED: 1277 case MIGRATION_STATUS_POSTCOPY_RECOVER: 1278 case MIGRATION_STATUS_FAILED: 1279 case MIGRATION_STATUS_COLO: 1280 info->has_status = true; 1281 break; 1282 case MIGRATION_STATUS_COMPLETED: 1283 info->has_status = true; 1284 fill_destination_postcopy_migration_info(info); 1285 break; 1286 } 1287 info->status = mis->state; 1288 } 1289 1290 MigrationInfo *qmp_query_migrate(Error **errp) 1291 { 1292 MigrationInfo *info = g_malloc0(sizeof(*info)); 1293 1294 fill_destination_migration_info(info); 1295 fill_source_migration_info(info); 1296 1297 return info; 1298 } 1299 1300 void qmp_migrate_start_postcopy(Error **errp) 1301 { 1302 MigrationState *s = migrate_get_current(); 1303 1304 if (!migrate_postcopy()) { 1305 error_setg(errp, "Enable postcopy with migrate_set_capability before" 1306 " the start of migration"); 1307 return; 1308 } 1309 1310 if (s->state == MIGRATION_STATUS_NONE) { 1311 error_setg(errp, "Postcopy must be started after migration has been" 1312 " started"); 1313 return; 1314 } 1315 /* 1316 * we don't error if migration has finished since that would be racy 1317 * with issuing this command. 1318 */ 1319 qatomic_set(&s->start_postcopy, true); 1320 } 1321 1322 /* shared migration helpers */ 1323 1324 void migrate_set_state(int *state, int old_state, int new_state) 1325 { 1326 assert(new_state < MIGRATION_STATUS__MAX); 1327 if (qatomic_cmpxchg(state, old_state, new_state) == old_state) { 1328 trace_migrate_set_state(MigrationStatus_str(new_state)); 1329 migrate_generate_event(new_state); 1330 } 1331 } 1332 1333 static void migrate_fd_cleanup(MigrationState *s) 1334 { 1335 MigrationEventType type; 1336 1337 g_free(s->hostname); 1338 s->hostname = NULL; 1339 json_writer_free(s->vmdesc); 1340 s->vmdesc = NULL; 1341 1342 qemu_savevm_state_cleanup(); 1343 1344 if (s->to_dst_file) { 1345 QEMUFile *tmp; 1346 1347 trace_migrate_fd_cleanup(); 1348 bql_unlock(); 1349 if (s->migration_thread_running) { 1350 qemu_thread_join(&s->thread); 1351 s->migration_thread_running = false; 1352 } 1353 bql_lock(); 1354 1355 multifd_send_shutdown(); 1356 qemu_mutex_lock(&s->qemu_file_lock); 1357 tmp = s->to_dst_file; 1358 s->to_dst_file = NULL; 1359 qemu_mutex_unlock(&s->qemu_file_lock); 1360 /* 1361 * Close the file handle without the lock to make sure the 1362 * critical section won't block for long. 1363 */ 1364 migration_ioc_unregister_yank_from_file(tmp); 1365 qemu_fclose(tmp); 1366 } 1367 1368 /* 1369 * We already cleaned up to_dst_file, so errors from the return 1370 * path might be due to that, ignore them. 1371 */ 1372 close_return_path_on_source(s); 1373 1374 assert(!migration_is_active(s)); 1375 1376 if (s->state == MIGRATION_STATUS_CANCELLING) { 1377 migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING, 1378 MIGRATION_STATUS_CANCELLED); 1379 } 1380 1381 if (s->error) { 1382 /* It is used on info migrate. We can't free it */ 1383 error_report_err(error_copy(s->error)); 1384 } 1385 type = migration_has_failed(s) ? MIG_EVENT_PRECOPY_FAILED : 1386 MIG_EVENT_PRECOPY_DONE; 1387 migration_call_notifiers(s, type, NULL); 1388 block_cleanup_parameters(); 1389 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 1390 } 1391 1392 static void migrate_fd_cleanup_bh(void *opaque) 1393 { 1394 migrate_fd_cleanup(opaque); 1395 } 1396 1397 void migrate_set_error(MigrationState *s, const Error *error) 1398 { 1399 QEMU_LOCK_GUARD(&s->error_mutex); 1400 if (!s->error) { 1401 s->error = error_copy(error); 1402 } 1403 } 1404 1405 bool migrate_has_error(MigrationState *s) 1406 { 1407 /* The lock is not helpful here, but still follow the rule */ 1408 QEMU_LOCK_GUARD(&s->error_mutex); 1409 return qatomic_read(&s->error); 1410 } 1411 1412 static void migrate_error_free(MigrationState *s) 1413 { 1414 QEMU_LOCK_GUARD(&s->error_mutex); 1415 if (s->error) { 1416 error_free(s->error); 1417 s->error = NULL; 1418 } 1419 } 1420 1421 static void migrate_fd_error(MigrationState *s, const Error *error) 1422 { 1423 trace_migrate_fd_error(error_get_pretty(error)); 1424 assert(s->to_dst_file == NULL); 1425 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, 1426 MIGRATION_STATUS_FAILED); 1427 migrate_set_error(s, error); 1428 } 1429 1430 static void migrate_fd_cancel(MigrationState *s) 1431 { 1432 int old_state ; 1433 1434 trace_migrate_fd_cancel(); 1435 1436 WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) { 1437 if (s->rp_state.from_dst_file) { 1438 /* shutdown the rp socket, so causing the rp thread to shutdown */ 1439 qemu_file_shutdown(s->rp_state.from_dst_file); 1440 } 1441 } 1442 1443 do { 1444 old_state = s->state; 1445 if (!migration_is_running(old_state)) { 1446 break; 1447 } 1448 /* If the migration is paused, kick it out of the pause */ 1449 if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) { 1450 qemu_sem_post(&s->pause_sem); 1451 } 1452 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING); 1453 } while (s->state != MIGRATION_STATUS_CANCELLING); 1454 1455 /* 1456 * If we're unlucky the migration code might be stuck somewhere in a 1457 * send/write while the network has failed and is waiting to timeout; 1458 * if we've got shutdown(2) available then we can force it to quit. 1459 */ 1460 if (s->state == MIGRATION_STATUS_CANCELLING) { 1461 WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) { 1462 if (s->to_dst_file) { 1463 qemu_file_shutdown(s->to_dst_file); 1464 } 1465 } 1466 } 1467 if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) { 1468 Error *local_err = NULL; 1469 1470 bdrv_activate_all(&local_err); 1471 if (local_err) { 1472 error_report_err(local_err); 1473 } else { 1474 s->block_inactive = false; 1475 } 1476 } 1477 } 1478 1479 void migration_add_notifier_mode(NotifierWithReturn *notify, 1480 MigrationNotifyFunc func, MigMode mode) 1481 { 1482 notify->notify = (NotifierWithReturnFunc)func; 1483 notifier_with_return_list_add(&migration_state_notifiers[mode], notify); 1484 } 1485 1486 void migration_add_notifier(NotifierWithReturn *notify, 1487 MigrationNotifyFunc func) 1488 { 1489 migration_add_notifier_mode(notify, func, MIG_MODE_NORMAL); 1490 } 1491 1492 void migration_remove_notifier(NotifierWithReturn *notify) 1493 { 1494 if (notify->notify) { 1495 notifier_with_return_remove(notify); 1496 notify->notify = NULL; 1497 } 1498 } 1499 1500 int migration_call_notifiers(MigrationState *s, MigrationEventType type, 1501 Error **errp) 1502 { 1503 MigMode mode = s->parameters.mode; 1504 MigrationEvent e; 1505 int ret; 1506 1507 e.type = type; 1508 ret = notifier_with_return_list_notify(&migration_state_notifiers[mode], 1509 &e, errp); 1510 assert(!ret || type == MIG_EVENT_PRECOPY_SETUP); 1511 return ret; 1512 } 1513 1514 bool migration_in_setup(MigrationState *s) 1515 { 1516 return s->state == MIGRATION_STATUS_SETUP; 1517 } 1518 1519 bool migration_has_finished(MigrationState *s) 1520 { 1521 return s->state == MIGRATION_STATUS_COMPLETED; 1522 } 1523 1524 bool migration_has_failed(MigrationState *s) 1525 { 1526 return (s->state == MIGRATION_STATUS_CANCELLED || 1527 s->state == MIGRATION_STATUS_FAILED); 1528 } 1529 1530 bool migration_in_postcopy(void) 1531 { 1532 MigrationState *s = migrate_get_current(); 1533 1534 switch (s->state) { 1535 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1536 case MIGRATION_STATUS_POSTCOPY_PAUSED: 1537 case MIGRATION_STATUS_POSTCOPY_RECOVER: 1538 return true; 1539 default: 1540 return false; 1541 } 1542 } 1543 1544 bool migration_postcopy_is_alive(int state) 1545 { 1546 switch (state) { 1547 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1548 case MIGRATION_STATUS_POSTCOPY_RECOVER: 1549 return true; 1550 default: 1551 return false; 1552 } 1553 } 1554 1555 bool migration_in_incoming_postcopy(void) 1556 { 1557 PostcopyState ps = postcopy_state_get(); 1558 1559 return ps >= POSTCOPY_INCOMING_DISCARD && ps < POSTCOPY_INCOMING_END; 1560 } 1561 1562 bool migration_incoming_postcopy_advised(void) 1563 { 1564 PostcopyState ps = postcopy_state_get(); 1565 1566 return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END; 1567 } 1568 1569 bool migration_in_bg_snapshot(void) 1570 { 1571 MigrationState *s = migrate_get_current(); 1572 1573 return migrate_background_snapshot() && 1574 migration_is_setup_or_active(s->state); 1575 } 1576 1577 bool migration_is_idle(void) 1578 { 1579 MigrationState *s = current_migration; 1580 1581 if (!s) { 1582 return true; 1583 } 1584 1585 switch (s->state) { 1586 case MIGRATION_STATUS_NONE: 1587 case MIGRATION_STATUS_CANCELLED: 1588 case MIGRATION_STATUS_COMPLETED: 1589 case MIGRATION_STATUS_FAILED: 1590 return true; 1591 case MIGRATION_STATUS_SETUP: 1592 case MIGRATION_STATUS_CANCELLING: 1593 case MIGRATION_STATUS_ACTIVE: 1594 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1595 case MIGRATION_STATUS_COLO: 1596 case MIGRATION_STATUS_PRE_SWITCHOVER: 1597 case MIGRATION_STATUS_DEVICE: 1598 case MIGRATION_STATUS_WAIT_UNPLUG: 1599 return false; 1600 case MIGRATION_STATUS__MAX: 1601 g_assert_not_reached(); 1602 } 1603 1604 return false; 1605 } 1606 1607 bool migration_is_active(MigrationState *s) 1608 { 1609 return (s->state == MIGRATION_STATUS_ACTIVE || 1610 s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE); 1611 } 1612 1613 bool migrate_mode_is_cpr(MigrationState *s) 1614 { 1615 return s->parameters.mode == MIG_MODE_CPR_REBOOT; 1616 } 1617 1618 int migrate_init(MigrationState *s, Error **errp) 1619 { 1620 int ret; 1621 1622 ret = qemu_savevm_state_prepare(errp); 1623 if (ret) { 1624 return ret; 1625 } 1626 1627 /* 1628 * Reinitialise all migration state, except 1629 * parameters/capabilities that the user set, and 1630 * locks. 1631 */ 1632 s->to_dst_file = NULL; 1633 s->state = MIGRATION_STATUS_NONE; 1634 s->rp_state.from_dst_file = NULL; 1635 s->mbps = 0.0; 1636 s->pages_per_second = 0.0; 1637 s->downtime = 0; 1638 s->expected_downtime = 0; 1639 s->setup_time = 0; 1640 s->start_postcopy = false; 1641 s->migration_thread_running = false; 1642 error_free(s->error); 1643 s->error = NULL; 1644 s->vmdesc = NULL; 1645 1646 migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP); 1647 1648 s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 1649 s->total_time = 0; 1650 s->vm_old_state = -1; 1651 s->iteration_initial_bytes = 0; 1652 s->threshold_size = 0; 1653 s->switchover_acked = false; 1654 s->rdma_migration = false; 1655 /* 1656 * set mig_stats memory to zero for a new migration 1657 */ 1658 memset(&mig_stats, 0, sizeof(mig_stats)); 1659 migration_reset_vfio_bytes_transferred(); 1660 1661 return 0; 1662 } 1663 1664 static bool is_busy(Error **reasonp, Error **errp) 1665 { 1666 ERRP_GUARD(); 1667 1668 /* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. */ 1669 if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) { 1670 error_propagate_prepend(errp, *reasonp, 1671 "disallowing migration blocker " 1672 "(migration/snapshot in progress) for: "); 1673 *reasonp = NULL; 1674 return true; 1675 } 1676 return false; 1677 } 1678 1679 static bool is_only_migratable(Error **reasonp, Error **errp, int modes) 1680 { 1681 ERRP_GUARD(); 1682 1683 if (only_migratable && (modes & BIT(MIG_MODE_NORMAL))) { 1684 error_propagate_prepend(errp, *reasonp, 1685 "disallowing migration blocker " 1686 "(--only-migratable) for: "); 1687 *reasonp = NULL; 1688 return true; 1689 } 1690 return false; 1691 } 1692 1693 static int get_modes(MigMode mode, va_list ap) 1694 { 1695 int modes = 0; 1696 1697 while (mode != -1 && mode != MIG_MODE_ALL) { 1698 assert(mode >= MIG_MODE_NORMAL && mode < MIG_MODE__MAX); 1699 modes |= BIT(mode); 1700 mode = va_arg(ap, MigMode); 1701 } 1702 if (mode == MIG_MODE_ALL) { 1703 modes = BIT(MIG_MODE__MAX) - 1; 1704 } 1705 return modes; 1706 } 1707 1708 static int add_blockers(Error **reasonp, Error **errp, int modes) 1709 { 1710 for (MigMode mode = 0; mode < MIG_MODE__MAX; mode++) { 1711 if (modes & BIT(mode)) { 1712 migration_blockers[mode] = g_slist_prepend(migration_blockers[mode], 1713 *reasonp); 1714 } 1715 } 1716 return 0; 1717 } 1718 1719 int migrate_add_blocker(Error **reasonp, Error **errp) 1720 { 1721 return migrate_add_blocker_modes(reasonp, errp, MIG_MODE_ALL); 1722 } 1723 1724 int migrate_add_blocker_normal(Error **reasonp, Error **errp) 1725 { 1726 return migrate_add_blocker_modes(reasonp, errp, MIG_MODE_NORMAL, -1); 1727 } 1728 1729 int migrate_add_blocker_modes(Error **reasonp, Error **errp, MigMode mode, ...) 1730 { 1731 int modes; 1732 va_list ap; 1733 1734 va_start(ap, mode); 1735 modes = get_modes(mode, ap); 1736 va_end(ap); 1737 1738 if (is_only_migratable(reasonp, errp, modes)) { 1739 return -EACCES; 1740 } else if (is_busy(reasonp, errp)) { 1741 return -EBUSY; 1742 } 1743 return add_blockers(reasonp, errp, modes); 1744 } 1745 1746 int migrate_add_blocker_internal(Error **reasonp, Error **errp) 1747 { 1748 int modes = BIT(MIG_MODE__MAX) - 1; 1749 1750 if (is_busy(reasonp, errp)) { 1751 return -EBUSY; 1752 } 1753 return add_blockers(reasonp, errp, modes); 1754 } 1755 1756 void migrate_del_blocker(Error **reasonp) 1757 { 1758 if (*reasonp) { 1759 for (MigMode mode = 0; mode < MIG_MODE__MAX; mode++) { 1760 migration_blockers[mode] = g_slist_remove(migration_blockers[mode], 1761 *reasonp); 1762 } 1763 error_free(*reasonp); 1764 *reasonp = NULL; 1765 } 1766 } 1767 1768 void qmp_migrate_incoming(const char *uri, bool has_channels, 1769 MigrationChannelList *channels, Error **errp) 1770 { 1771 Error *local_err = NULL; 1772 static bool once = true; 1773 1774 if (!once) { 1775 error_setg(errp, "The incoming migration has already been started"); 1776 return; 1777 } 1778 if (!runstate_check(RUN_STATE_INMIGRATE)) { 1779 error_setg(errp, "'-incoming' was not specified on the command line"); 1780 return; 1781 } 1782 1783 if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) { 1784 return; 1785 } 1786 1787 qemu_start_incoming_migration(uri, has_channels, channels, &local_err); 1788 1789 if (local_err) { 1790 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 1791 error_propagate(errp, local_err); 1792 return; 1793 } 1794 1795 once = false; 1796 } 1797 1798 void qmp_migrate_recover(const char *uri, Error **errp) 1799 { 1800 MigrationIncomingState *mis = migration_incoming_get_current(); 1801 1802 /* 1803 * Don't even bother to use ERRP_GUARD() as it _must_ always be set by 1804 * callers (no one should ignore a recover failure); if there is, it's a 1805 * programming error. 1806 */ 1807 assert(errp); 1808 1809 if (mis->state != MIGRATION_STATUS_POSTCOPY_PAUSED) { 1810 error_setg(errp, "Migrate recover can only be run " 1811 "when postcopy is paused."); 1812 return; 1813 } 1814 1815 /* If there's an existing transport, release it */ 1816 migration_incoming_transport_cleanup(mis); 1817 1818 /* 1819 * Note that this call will never start a real migration; it will 1820 * only re-setup the migration stream and poke existing migration 1821 * to continue using that newly established channel. 1822 */ 1823 qemu_start_incoming_migration(uri, false, NULL, errp); 1824 } 1825 1826 void qmp_migrate_pause(Error **errp) 1827 { 1828 MigrationState *ms = migrate_get_current(); 1829 MigrationIncomingState *mis = migration_incoming_get_current(); 1830 int ret = 0; 1831 1832 if (migration_postcopy_is_alive(ms->state)) { 1833 /* Source side, during postcopy */ 1834 Error *error = NULL; 1835 1836 /* Tell the core migration that we're pausing */ 1837 error_setg(&error, "Postcopy migration is paused by the user"); 1838 migrate_set_error(ms, error); 1839 error_free(error); 1840 1841 qemu_mutex_lock(&ms->qemu_file_lock); 1842 if (ms->to_dst_file) { 1843 ret = qemu_file_shutdown(ms->to_dst_file); 1844 } 1845 qemu_mutex_unlock(&ms->qemu_file_lock); 1846 if (ret) { 1847 error_setg(errp, "Failed to pause source migration"); 1848 } 1849 1850 /* 1851 * Kick the migration thread out of any waiting windows (on behalf 1852 * of the rp thread). 1853 */ 1854 migration_rp_kick(ms); 1855 1856 return; 1857 } 1858 1859 if (migration_postcopy_is_alive(mis->state)) { 1860 ret = qemu_file_shutdown(mis->from_src_file); 1861 if (ret) { 1862 error_setg(errp, "Failed to pause destination migration"); 1863 } 1864 return; 1865 } 1866 1867 error_setg(errp, "migrate-pause is currently only supported " 1868 "during postcopy-active or postcopy-recover state"); 1869 } 1870 1871 bool migration_is_blocked(Error **errp) 1872 { 1873 GSList *blockers = migration_blockers[migrate_mode()]; 1874 1875 if (qemu_savevm_state_blocked(errp)) { 1876 return true; 1877 } 1878 1879 if (blockers) { 1880 error_propagate(errp, error_copy(blockers->data)); 1881 return true; 1882 } 1883 1884 return false; 1885 } 1886 1887 /* Returns true if continue to migrate, or false if error detected */ 1888 static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc, 1889 bool resume, Error **errp) 1890 { 1891 if (blk_inc) { 1892 warn_report("parameter 'inc' is deprecated;" 1893 " use blockdev-mirror with NBD instead"); 1894 } 1895 1896 if (blk) { 1897 warn_report("parameter 'blk' is deprecated;" 1898 " use blockdev-mirror with NBD instead"); 1899 } 1900 1901 if (resume) { 1902 if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) { 1903 error_setg(errp, "Cannot resume if there is no " 1904 "paused migration"); 1905 return false; 1906 } 1907 1908 /* 1909 * Postcopy recovery won't work well with release-ram 1910 * capability since release-ram will drop the page buffer as 1911 * long as the page is put into the send buffer. So if there 1912 * is a network failure happened, any page buffers that have 1913 * not yet reached the destination VM but have already been 1914 * sent from the source VM will be lost forever. Let's refuse 1915 * the client from resuming such a postcopy migration. 1916 * Luckily release-ram was designed to only be used when src 1917 * and destination VMs are on the same host, so it should be 1918 * fine. 1919 */ 1920 if (migrate_release_ram()) { 1921 error_setg(errp, "Postcopy recovery cannot work " 1922 "when release-ram capability is set"); 1923 return false; 1924 } 1925 1926 /* This is a resume, skip init status */ 1927 return true; 1928 } 1929 1930 if (migration_is_running(s->state)) { 1931 error_setg(errp, QERR_MIGRATION_ACTIVE); 1932 return false; 1933 } 1934 1935 if (runstate_check(RUN_STATE_INMIGRATE)) { 1936 error_setg(errp, "Guest is waiting for an incoming migration"); 1937 return false; 1938 } 1939 1940 if (runstate_check(RUN_STATE_POSTMIGRATE)) { 1941 error_setg(errp, "Can't migrate the vm that was paused due to " 1942 "previous migration"); 1943 return false; 1944 } 1945 1946 if (kvm_hwpoisoned_mem()) { 1947 error_setg(errp, "Can't migrate this vm with hardware poisoned memory, " 1948 "please reboot the vm and try again"); 1949 return false; 1950 } 1951 1952 if (migration_is_blocked(errp)) { 1953 return false; 1954 } 1955 1956 if (migrate_mode_is_cpr(s)) { 1957 const char *conflict = NULL; 1958 1959 if (migrate_postcopy()) { 1960 conflict = "postcopy"; 1961 } else if (migrate_background_snapshot()) { 1962 conflict = "background snapshot"; 1963 } else if (migrate_colo()) { 1964 conflict = "COLO"; 1965 } 1966 1967 if (conflict) { 1968 error_setg(errp, "Cannot use %s with CPR", conflict); 1969 return false; 1970 } 1971 } 1972 1973 if (blk || blk_inc) { 1974 if (migrate_colo()) { 1975 error_setg(errp, "No disk migration is required in COLO mode"); 1976 return false; 1977 } 1978 if (migrate_block() || migrate_block_incremental()) { 1979 error_setg(errp, "Command options are incompatible with " 1980 "current migration capabilities"); 1981 return false; 1982 } 1983 if (!migrate_cap_set(MIGRATION_CAPABILITY_BLOCK, true, errp)) { 1984 return false; 1985 } 1986 s->must_remove_block_options = true; 1987 } 1988 1989 if (blk_inc) { 1990 migrate_set_block_incremental(true); 1991 } 1992 1993 if (migrate_init(s, errp)) { 1994 return false; 1995 } 1996 1997 return true; 1998 } 1999 2000 void qmp_migrate(const char *uri, bool has_channels, 2001 MigrationChannelList *channels, bool has_blk, bool blk, 2002 bool has_inc, bool inc, bool has_detach, bool detach, 2003 bool has_resume, bool resume, Error **errp) 2004 { 2005 bool resume_requested; 2006 Error *local_err = NULL; 2007 MigrationState *s = migrate_get_current(); 2008 g_autoptr(MigrationChannel) channel = NULL; 2009 MigrationAddress *addr = NULL; 2010 2011 /* 2012 * Having preliminary checks for uri and channel 2013 */ 2014 if (!uri == !channels) { 2015 error_setg(errp, "need either 'uri' or 'channels' argument"); 2016 return; 2017 } 2018 2019 if (channels) { 2020 /* To verify that Migrate channel list has only item */ 2021 if (channels->next) { 2022 error_setg(errp, "Channel list has more than one entries"); 2023 return; 2024 } 2025 addr = channels->value->addr; 2026 } 2027 2028 if (uri) { 2029 /* caller uses the old URI syntax */ 2030 if (!migrate_uri_parse(uri, &channel, errp)) { 2031 return; 2032 } 2033 addr = channel->addr; 2034 } 2035 2036 /* transport mechanism not suitable for migration? */ 2037 if (!migration_channels_and_transport_compatible(addr, errp)) { 2038 return; 2039 } 2040 2041 resume_requested = has_resume && resume; 2042 if (!migrate_prepare(s, has_blk && blk, has_inc && inc, 2043 resume_requested, errp)) { 2044 /* Error detected, put into errp */ 2045 return; 2046 } 2047 2048 if (!resume_requested) { 2049 if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) { 2050 return; 2051 } 2052 } 2053 2054 if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) { 2055 SocketAddress *saddr = &addr->u.socket; 2056 if (saddr->type == SOCKET_ADDRESS_TYPE_INET || 2057 saddr->type == SOCKET_ADDRESS_TYPE_UNIX || 2058 saddr->type == SOCKET_ADDRESS_TYPE_VSOCK) { 2059 socket_start_outgoing_migration(s, saddr, &local_err); 2060 } else if (saddr->type == SOCKET_ADDRESS_TYPE_FD) { 2061 fd_start_outgoing_migration(s, saddr->u.fd.str, &local_err); 2062 } 2063 #ifdef CONFIG_RDMA 2064 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) { 2065 rdma_start_outgoing_migration(s, &addr->u.rdma, &local_err); 2066 #endif 2067 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_EXEC) { 2068 exec_start_outgoing_migration(s, addr->u.exec.args, &local_err); 2069 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) { 2070 file_start_outgoing_migration(s, &addr->u.file, &local_err); 2071 } else { 2072 error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE, "uri", 2073 "a valid migration protocol"); 2074 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, 2075 MIGRATION_STATUS_FAILED); 2076 block_cleanup_parameters(); 2077 } 2078 2079 if (local_err) { 2080 if (!resume_requested) { 2081 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 2082 } 2083 migrate_fd_error(s, local_err); 2084 error_propagate(errp, local_err); 2085 return; 2086 } 2087 } 2088 2089 void qmp_migrate_cancel(Error **errp) 2090 { 2091 migration_cancel(NULL); 2092 } 2093 2094 void qmp_migrate_continue(MigrationStatus state, Error **errp) 2095 { 2096 MigrationState *s = migrate_get_current(); 2097 if (s->state != state) { 2098 error_setg(errp, "Migration not in expected state: %s", 2099 MigrationStatus_str(s->state)); 2100 return; 2101 } 2102 qemu_sem_post(&s->pause_sem); 2103 } 2104 2105 int migration_rp_wait(MigrationState *s) 2106 { 2107 /* If migration has failure already, ignore the wait */ 2108 if (migrate_has_error(s)) { 2109 return -1; 2110 } 2111 2112 qemu_sem_wait(&s->rp_state.rp_sem); 2113 2114 /* After wait, double check that there's no failure */ 2115 if (migrate_has_error(s)) { 2116 return -1; 2117 } 2118 2119 return 0; 2120 } 2121 2122 void migration_rp_kick(MigrationState *s) 2123 { 2124 qemu_sem_post(&s->rp_state.rp_sem); 2125 } 2126 2127 static struct rp_cmd_args { 2128 ssize_t len; /* -1 = variable */ 2129 const char *name; 2130 } rp_cmd_args[] = { 2131 [MIG_RP_MSG_INVALID] = { .len = -1, .name = "INVALID" }, 2132 [MIG_RP_MSG_SHUT] = { .len = 4, .name = "SHUT" }, 2133 [MIG_RP_MSG_PONG] = { .len = 4, .name = "PONG" }, 2134 [MIG_RP_MSG_REQ_PAGES] = { .len = 12, .name = "REQ_PAGES" }, 2135 [MIG_RP_MSG_REQ_PAGES_ID] = { .len = -1, .name = "REQ_PAGES_ID" }, 2136 [MIG_RP_MSG_RECV_BITMAP] = { .len = -1, .name = "RECV_BITMAP" }, 2137 [MIG_RP_MSG_RESUME_ACK] = { .len = 4, .name = "RESUME_ACK" }, 2138 [MIG_RP_MSG_SWITCHOVER_ACK] = { .len = 0, .name = "SWITCHOVER_ACK" }, 2139 [MIG_RP_MSG_MAX] = { .len = -1, .name = "MAX" }, 2140 }; 2141 2142 /* 2143 * Process a request for pages received on the return path, 2144 * We're allowed to send more than requested (e.g. to round to our page size) 2145 * and we don't need to send pages that have already been sent. 2146 */ 2147 static void 2148 migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname, 2149 ram_addr_t start, size_t len, Error **errp) 2150 { 2151 long our_host_ps = qemu_real_host_page_size(); 2152 2153 trace_migrate_handle_rp_req_pages(rbname, start, len); 2154 2155 /* 2156 * Since we currently insist on matching page sizes, just sanity check 2157 * we're being asked for whole host pages. 2158 */ 2159 if (!QEMU_IS_ALIGNED(start, our_host_ps) || 2160 !QEMU_IS_ALIGNED(len, our_host_ps)) { 2161 error_setg(errp, "MIG_RP_MSG_REQ_PAGES: Misaligned page request, start:" 2162 RAM_ADDR_FMT " len: %zd", start, len); 2163 return; 2164 } 2165 2166 ram_save_queue_pages(rbname, start, len, errp); 2167 } 2168 2169 static bool migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name, 2170 Error **errp) 2171 { 2172 RAMBlock *block = qemu_ram_block_by_name(block_name); 2173 2174 if (!block) { 2175 error_setg(errp, "MIG_RP_MSG_RECV_BITMAP has invalid block name '%s'", 2176 block_name); 2177 return false; 2178 } 2179 2180 /* Fetch the received bitmap and refresh the dirty bitmap */ 2181 return ram_dirty_bitmap_reload(s, block, errp); 2182 } 2183 2184 static bool migrate_handle_rp_resume_ack(MigrationState *s, 2185 uint32_t value, Error **errp) 2186 { 2187 trace_source_return_path_thread_resume_ack(value); 2188 2189 if (value != MIGRATION_RESUME_ACK_VALUE) { 2190 error_setg(errp, "illegal resume_ack value %"PRIu32, value); 2191 return false; 2192 } 2193 2194 /* Now both sides are active. */ 2195 migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_RECOVER, 2196 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2197 2198 /* Notify send thread that time to continue send pages */ 2199 migration_rp_kick(s); 2200 2201 return true; 2202 } 2203 2204 /* 2205 * Release ms->rp_state.from_dst_file (and postcopy_qemufile_src if 2206 * existed) in a safe way. 2207 */ 2208 static void migration_release_dst_files(MigrationState *ms) 2209 { 2210 QEMUFile *file; 2211 2212 WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) { 2213 /* 2214 * Reset the from_dst_file pointer first before releasing it, as we 2215 * can't block within lock section 2216 */ 2217 file = ms->rp_state.from_dst_file; 2218 ms->rp_state.from_dst_file = NULL; 2219 } 2220 2221 /* 2222 * Do the same to postcopy fast path socket too if there is. No 2223 * locking needed because this qemufile should only be managed by 2224 * return path thread. 2225 */ 2226 if (ms->postcopy_qemufile_src) { 2227 migration_ioc_unregister_yank_from_file(ms->postcopy_qemufile_src); 2228 qemu_file_shutdown(ms->postcopy_qemufile_src); 2229 qemu_fclose(ms->postcopy_qemufile_src); 2230 ms->postcopy_qemufile_src = NULL; 2231 } 2232 2233 qemu_fclose(file); 2234 } 2235 2236 /* 2237 * Handles messages sent on the return path towards the source VM 2238 * 2239 */ 2240 static void *source_return_path_thread(void *opaque) 2241 { 2242 MigrationState *ms = opaque; 2243 QEMUFile *rp = ms->rp_state.from_dst_file; 2244 uint16_t header_len, header_type; 2245 uint8_t buf[512]; 2246 uint32_t tmp32, sibling_error; 2247 ram_addr_t start = 0; /* =0 to silence warning */ 2248 size_t len = 0, expected_len; 2249 Error *err = NULL; 2250 int res; 2251 2252 trace_source_return_path_thread_entry(); 2253 rcu_register_thread(); 2254 2255 while (migration_is_setup_or_active(ms->state)) { 2256 trace_source_return_path_thread_loop_top(); 2257 2258 header_type = qemu_get_be16(rp); 2259 header_len = qemu_get_be16(rp); 2260 2261 if (qemu_file_get_error(rp)) { 2262 qemu_file_get_error_obj(rp, &err); 2263 goto out; 2264 } 2265 2266 if (header_type >= MIG_RP_MSG_MAX || 2267 header_type == MIG_RP_MSG_INVALID) { 2268 error_setg(&err, "Received invalid message 0x%04x length 0x%04x", 2269 header_type, header_len); 2270 goto out; 2271 } 2272 2273 if ((rp_cmd_args[header_type].len != -1 && 2274 header_len != rp_cmd_args[header_type].len) || 2275 header_len > sizeof(buf)) { 2276 error_setg(&err, "Received '%s' message (0x%04x) with" 2277 "incorrect length %d expecting %zu", 2278 rp_cmd_args[header_type].name, header_type, header_len, 2279 (size_t)rp_cmd_args[header_type].len); 2280 goto out; 2281 } 2282 2283 /* We know we've got a valid header by this point */ 2284 res = qemu_get_buffer(rp, buf, header_len); 2285 if (res != header_len) { 2286 error_setg(&err, "Failed reading data for message 0x%04x" 2287 " read %d expected %d", 2288 header_type, res, header_len); 2289 goto out; 2290 } 2291 2292 /* OK, we have the message and the data */ 2293 switch (header_type) { 2294 case MIG_RP_MSG_SHUT: 2295 sibling_error = ldl_be_p(buf); 2296 trace_source_return_path_thread_shut(sibling_error); 2297 if (sibling_error) { 2298 error_setg(&err, "Sibling indicated error %d", sibling_error); 2299 } 2300 /* 2301 * We'll let the main thread deal with closing the RP 2302 * we could do a shutdown(2) on it, but we're the only user 2303 * anyway, so there's nothing gained. 2304 */ 2305 goto out; 2306 2307 case MIG_RP_MSG_PONG: 2308 tmp32 = ldl_be_p(buf); 2309 trace_source_return_path_thread_pong(tmp32); 2310 qemu_sem_post(&ms->rp_state.rp_pong_acks); 2311 break; 2312 2313 case MIG_RP_MSG_REQ_PAGES: 2314 start = ldq_be_p(buf); 2315 len = ldl_be_p(buf + 8); 2316 migrate_handle_rp_req_pages(ms, NULL, start, len, &err); 2317 if (err) { 2318 goto out; 2319 } 2320 break; 2321 2322 case MIG_RP_MSG_REQ_PAGES_ID: 2323 expected_len = 12 + 1; /* header + termination */ 2324 2325 if (header_len >= expected_len) { 2326 start = ldq_be_p(buf); 2327 len = ldl_be_p(buf + 8); 2328 /* Now we expect an idstr */ 2329 tmp32 = buf[12]; /* Length of the following idstr */ 2330 buf[13 + tmp32] = '\0'; 2331 expected_len += tmp32; 2332 } 2333 if (header_len != expected_len) { 2334 error_setg(&err, "Req_Page_id with length %d expecting %zd", 2335 header_len, expected_len); 2336 goto out; 2337 } 2338 migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len, 2339 &err); 2340 if (err) { 2341 goto out; 2342 } 2343 break; 2344 2345 case MIG_RP_MSG_RECV_BITMAP: 2346 if (header_len < 1) { 2347 error_setg(&err, "MIG_RP_MSG_RECV_BITMAP missing block name"); 2348 goto out; 2349 } 2350 /* Format: len (1B) + idstr (<255B). This ends the idstr. */ 2351 buf[buf[0] + 1] = '\0'; 2352 if (!migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1), &err)) { 2353 goto out; 2354 } 2355 break; 2356 2357 case MIG_RP_MSG_RESUME_ACK: 2358 tmp32 = ldl_be_p(buf); 2359 if (!migrate_handle_rp_resume_ack(ms, tmp32, &err)) { 2360 goto out; 2361 } 2362 break; 2363 2364 case MIG_RP_MSG_SWITCHOVER_ACK: 2365 ms->switchover_acked = true; 2366 trace_source_return_path_thread_switchover_acked(); 2367 break; 2368 2369 default: 2370 break; 2371 } 2372 } 2373 2374 out: 2375 if (err) { 2376 migrate_set_error(ms, err); 2377 error_free(err); 2378 trace_source_return_path_thread_bad_end(); 2379 } 2380 2381 if (ms->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { 2382 /* 2383 * this will be extremely unlikely: that we got yet another network 2384 * issue during recovering of the 1st network failure.. during this 2385 * period the main migration thread can be waiting on rp_sem for 2386 * this thread to sync with the other side. 2387 * 2388 * When this happens, explicitly kick the migration thread out of 2389 * RECOVER stage and back to PAUSED, so the admin can try 2390 * everything again. 2391 */ 2392 migration_rp_kick(ms); 2393 } 2394 2395 trace_source_return_path_thread_end(); 2396 rcu_unregister_thread(); 2397 2398 return NULL; 2399 } 2400 2401 static int open_return_path_on_source(MigrationState *ms) 2402 { 2403 ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file); 2404 if (!ms->rp_state.from_dst_file) { 2405 return -1; 2406 } 2407 2408 trace_open_return_path_on_source(); 2409 2410 qemu_thread_create(&ms->rp_state.rp_thread, "return path", 2411 source_return_path_thread, ms, QEMU_THREAD_JOINABLE); 2412 ms->rp_state.rp_thread_created = true; 2413 2414 trace_open_return_path_on_source_continue(); 2415 2416 return 0; 2417 } 2418 2419 /* Return true if error detected, or false otherwise */ 2420 static bool close_return_path_on_source(MigrationState *ms) 2421 { 2422 if (!ms->rp_state.rp_thread_created) { 2423 return false; 2424 } 2425 2426 trace_migration_return_path_end_before(); 2427 2428 /* 2429 * If this is a normal exit then the destination will send a SHUT 2430 * and the rp_thread will exit, however if there's an error we 2431 * need to cause it to exit. shutdown(2), if we have it, will 2432 * cause it to unblock if it's stuck waiting for the destination. 2433 */ 2434 WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) { 2435 if (ms->to_dst_file && ms->rp_state.from_dst_file && 2436 qemu_file_get_error(ms->to_dst_file)) { 2437 qemu_file_shutdown(ms->rp_state.from_dst_file); 2438 } 2439 } 2440 2441 qemu_thread_join(&ms->rp_state.rp_thread); 2442 ms->rp_state.rp_thread_created = false; 2443 migration_release_dst_files(ms); 2444 trace_migration_return_path_end_after(); 2445 2446 /* Return path will persist the error in MigrationState when quit */ 2447 return migrate_has_error(ms); 2448 } 2449 2450 static inline void 2451 migration_wait_main_channel(MigrationState *ms) 2452 { 2453 /* Wait until one PONG message received */ 2454 qemu_sem_wait(&ms->rp_state.rp_pong_acks); 2455 } 2456 2457 /* 2458 * Switch from normal iteration to postcopy 2459 * Returns non-0 on error 2460 */ 2461 static int postcopy_start(MigrationState *ms, Error **errp) 2462 { 2463 int ret; 2464 QIOChannelBuffer *bioc; 2465 QEMUFile *fb; 2466 uint64_t bandwidth = migrate_max_postcopy_bandwidth(); 2467 bool restart_block = false; 2468 int cur_state = MIGRATION_STATUS_ACTIVE; 2469 2470 if (migrate_postcopy_preempt()) { 2471 migration_wait_main_channel(ms); 2472 if (postcopy_preempt_establish_channel(ms)) { 2473 migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED); 2474 return -1; 2475 } 2476 } 2477 2478 if (!migrate_pause_before_switchover()) { 2479 migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE, 2480 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2481 } 2482 2483 trace_postcopy_start(); 2484 bql_lock(); 2485 trace_postcopy_start_set_run(); 2486 2487 ret = migration_stop_vm(ms, RUN_STATE_FINISH_MIGRATE); 2488 if (ret < 0) { 2489 goto fail; 2490 } 2491 2492 ret = migration_maybe_pause(ms, &cur_state, 2493 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2494 if (ret < 0) { 2495 goto fail; 2496 } 2497 2498 ret = bdrv_inactivate_all(); 2499 if (ret < 0) { 2500 goto fail; 2501 } 2502 restart_block = true; 2503 2504 /* 2505 * Cause any non-postcopiable, but iterative devices to 2506 * send out their final data. 2507 */ 2508 qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false); 2509 2510 /* 2511 * in Finish migrate and with the io-lock held everything should 2512 * be quiet, but we've potentially still got dirty pages and we 2513 * need to tell the destination to throw any pages it's already received 2514 * that are dirty 2515 */ 2516 if (migrate_postcopy_ram()) { 2517 ram_postcopy_send_discard_bitmap(ms); 2518 } 2519 2520 /* 2521 * send rest of state - note things that are doing postcopy 2522 * will notice we're in POSTCOPY_ACTIVE and not actually 2523 * wrap their state up here 2524 */ 2525 migration_rate_set(bandwidth); 2526 if (migrate_postcopy_ram()) { 2527 /* Ping just for debugging, helps line traces up */ 2528 qemu_savevm_send_ping(ms->to_dst_file, 2); 2529 } 2530 2531 /* 2532 * While loading the device state we may trigger page transfer 2533 * requests and the fd must be free to process those, and thus 2534 * the destination must read the whole device state off the fd before 2535 * it starts processing it. Unfortunately the ad-hoc migration format 2536 * doesn't allow the destination to know the size to read without fully 2537 * parsing it through each devices load-state code (especially the open 2538 * coded devices that use get/put). 2539 * So we wrap the device state up in a package with a length at the start; 2540 * to do this we use a qemu_buf to hold the whole of the device state. 2541 */ 2542 bioc = qio_channel_buffer_new(4096); 2543 qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer"); 2544 fb = qemu_file_new_output(QIO_CHANNEL(bioc)); 2545 object_unref(OBJECT(bioc)); 2546 2547 /* 2548 * Make sure the receiver can get incoming pages before we send the rest 2549 * of the state 2550 */ 2551 qemu_savevm_send_postcopy_listen(fb); 2552 2553 qemu_savevm_state_complete_precopy(fb, false, false); 2554 if (migrate_postcopy_ram()) { 2555 qemu_savevm_send_ping(fb, 3); 2556 } 2557 2558 qemu_savevm_send_postcopy_run(fb); 2559 2560 /* <><> end of stuff going into the package */ 2561 2562 /* Last point of recovery; as soon as we send the package the destination 2563 * can open devices and potentially start running. 2564 * Lets just check again we've not got any errors. 2565 */ 2566 ret = qemu_file_get_error(ms->to_dst_file); 2567 if (ret) { 2568 error_setg(errp, "postcopy_start: Migration stream errored (pre package)"); 2569 goto fail_closefb; 2570 } 2571 2572 restart_block = false; 2573 2574 /* Now send that blob */ 2575 if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) { 2576 goto fail_closefb; 2577 } 2578 qemu_fclose(fb); 2579 2580 /* Send a notify to give a chance for anything that needs to happen 2581 * at the transition to postcopy and after the device state; in particular 2582 * spice needs to trigger a transition now 2583 */ 2584 migration_call_notifiers(ms, MIG_EVENT_PRECOPY_DONE, NULL); 2585 2586 migration_downtime_end(ms); 2587 2588 bql_unlock(); 2589 2590 if (migrate_postcopy_ram()) { 2591 /* 2592 * Although this ping is just for debug, it could potentially be 2593 * used for getting a better measurement of downtime at the source. 2594 */ 2595 qemu_savevm_send_ping(ms->to_dst_file, 4); 2596 } 2597 2598 if (migrate_release_ram()) { 2599 ram_postcopy_migrated_memory_release(ms); 2600 } 2601 2602 ret = qemu_file_get_error(ms->to_dst_file); 2603 if (ret) { 2604 error_setg_errno(errp, -ret, "postcopy_start: Migration stream error"); 2605 bql_lock(); 2606 goto fail; 2607 } 2608 trace_postcopy_preempt_enabled(migrate_postcopy_preempt()); 2609 2610 return ret; 2611 2612 fail_closefb: 2613 qemu_fclose(fb); 2614 fail: 2615 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 2616 MIGRATION_STATUS_FAILED); 2617 if (restart_block) { 2618 /* A failure happened early enough that we know the destination hasn't 2619 * accessed block devices, so we're safe to recover. 2620 */ 2621 Error *local_err = NULL; 2622 2623 bdrv_activate_all(&local_err); 2624 if (local_err) { 2625 error_report_err(local_err); 2626 } 2627 } 2628 migration_call_notifiers(ms, MIG_EVENT_PRECOPY_FAILED, NULL); 2629 bql_unlock(); 2630 return -1; 2631 } 2632 2633 /** 2634 * migration_maybe_pause: Pause if required to by 2635 * migrate_pause_before_switchover called with the BQL locked 2636 * Returns: 0 on success 2637 */ 2638 static int migration_maybe_pause(MigrationState *s, 2639 int *current_active_state, 2640 int new_state) 2641 { 2642 if (!migrate_pause_before_switchover()) { 2643 return 0; 2644 } 2645 2646 /* Since leaving this state is not atomic with posting the semaphore 2647 * it's possible that someone could have issued multiple migrate_continue 2648 * and the semaphore is incorrectly positive at this point; 2649 * the docs say it's undefined to reinit a semaphore that's already 2650 * init'd, so use timedwait to eat up any existing posts. 2651 */ 2652 while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) { 2653 /* This block intentionally left blank */ 2654 } 2655 2656 /* 2657 * If the migration is cancelled when it is in the completion phase, 2658 * the migration state is set to MIGRATION_STATUS_CANCELLING. 2659 * So we don't need to wait a semaphore, otherwise we would always 2660 * wait for the 'pause_sem' semaphore. 2661 */ 2662 if (s->state != MIGRATION_STATUS_CANCELLING) { 2663 bql_unlock(); 2664 migrate_set_state(&s->state, *current_active_state, 2665 MIGRATION_STATUS_PRE_SWITCHOVER); 2666 qemu_sem_wait(&s->pause_sem); 2667 migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER, 2668 new_state); 2669 *current_active_state = new_state; 2670 bql_lock(); 2671 } 2672 2673 return s->state == new_state ? 0 : -EINVAL; 2674 } 2675 2676 static int migration_completion_precopy(MigrationState *s, 2677 int *current_active_state) 2678 { 2679 int ret; 2680 2681 bql_lock(); 2682 2683 if (!migrate_mode_is_cpr(s)) { 2684 ret = migration_stop_vm(s, RUN_STATE_FINISH_MIGRATE); 2685 if (ret < 0) { 2686 goto out_unlock; 2687 } 2688 } 2689 2690 ret = migration_maybe_pause(s, current_active_state, 2691 MIGRATION_STATUS_DEVICE); 2692 if (ret < 0) { 2693 goto out_unlock; 2694 } 2695 2696 /* 2697 * Inactivate disks except in COLO, and track that we have done so in order 2698 * to remember to reactivate them if migration fails or is cancelled. 2699 */ 2700 s->block_inactive = !migrate_colo(); 2701 migration_rate_set(RATE_LIMIT_DISABLED); 2702 ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false, 2703 s->block_inactive); 2704 out_unlock: 2705 bql_unlock(); 2706 return ret; 2707 } 2708 2709 static void migration_completion_postcopy(MigrationState *s) 2710 { 2711 trace_migration_completion_postcopy_end(); 2712 2713 bql_lock(); 2714 qemu_savevm_state_complete_postcopy(s->to_dst_file); 2715 bql_unlock(); 2716 2717 /* 2718 * Shutdown the postcopy fast path thread. This is only needed when dest 2719 * QEMU binary is old (7.1/7.2). QEMU 8.0+ doesn't need this. 2720 */ 2721 if (migrate_postcopy_preempt() && s->preempt_pre_7_2) { 2722 postcopy_preempt_shutdown_file(s); 2723 } 2724 2725 trace_migration_completion_postcopy_end_after_complete(); 2726 } 2727 2728 static void migration_completion_failed(MigrationState *s, 2729 int current_active_state) 2730 { 2731 if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE || 2732 s->state == MIGRATION_STATUS_DEVICE)) { 2733 /* 2734 * If not doing postcopy, vm_start() will be called: let's 2735 * regain control on images. 2736 */ 2737 Error *local_err = NULL; 2738 2739 bql_lock(); 2740 bdrv_activate_all(&local_err); 2741 if (local_err) { 2742 error_report_err(local_err); 2743 } else { 2744 s->block_inactive = false; 2745 } 2746 bql_unlock(); 2747 } 2748 2749 migrate_set_state(&s->state, current_active_state, 2750 MIGRATION_STATUS_FAILED); 2751 } 2752 2753 /** 2754 * migration_completion: Used by migration_thread when there's not much left. 2755 * The caller 'breaks' the loop when this returns. 2756 * 2757 * @s: Current migration state 2758 */ 2759 static void migration_completion(MigrationState *s) 2760 { 2761 int ret = 0; 2762 int current_active_state = s->state; 2763 2764 if (s->state == MIGRATION_STATUS_ACTIVE) { 2765 ret = migration_completion_precopy(s, ¤t_active_state); 2766 } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { 2767 migration_completion_postcopy(s); 2768 } else { 2769 ret = -1; 2770 } 2771 2772 if (ret < 0) { 2773 goto fail; 2774 } 2775 2776 if (close_return_path_on_source(s)) { 2777 goto fail; 2778 } 2779 2780 if (qemu_file_get_error(s->to_dst_file)) { 2781 trace_migration_completion_file_err(); 2782 goto fail; 2783 } 2784 2785 if (migrate_colo() && s->state == MIGRATION_STATUS_ACTIVE) { 2786 /* COLO does not support postcopy */ 2787 migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE, 2788 MIGRATION_STATUS_COLO); 2789 } else { 2790 migrate_set_state(&s->state, current_active_state, 2791 MIGRATION_STATUS_COMPLETED); 2792 } 2793 2794 return; 2795 2796 fail: 2797 migration_completion_failed(s, current_active_state); 2798 } 2799 2800 /** 2801 * bg_migration_completion: Used by bg_migration_thread when after all the 2802 * RAM has been saved. The caller 'breaks' the loop when this returns. 2803 * 2804 * @s: Current migration state 2805 */ 2806 static void bg_migration_completion(MigrationState *s) 2807 { 2808 int current_active_state = s->state; 2809 2810 if (s->state == MIGRATION_STATUS_ACTIVE) { 2811 /* 2812 * By this moment we have RAM content saved into the migration stream. 2813 * The next step is to flush the non-RAM content (device state) 2814 * right after the ram content. The device state has been stored into 2815 * the temporary buffer before RAM saving started. 2816 */ 2817 qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage); 2818 qemu_fflush(s->to_dst_file); 2819 } else if (s->state == MIGRATION_STATUS_CANCELLING) { 2820 goto fail; 2821 } 2822 2823 if (qemu_file_get_error(s->to_dst_file)) { 2824 trace_migration_completion_file_err(); 2825 goto fail; 2826 } 2827 2828 migrate_set_state(&s->state, current_active_state, 2829 MIGRATION_STATUS_COMPLETED); 2830 return; 2831 2832 fail: 2833 migrate_set_state(&s->state, current_active_state, 2834 MIGRATION_STATUS_FAILED); 2835 } 2836 2837 typedef enum MigThrError { 2838 /* No error detected */ 2839 MIG_THR_ERR_NONE = 0, 2840 /* Detected error, but resumed successfully */ 2841 MIG_THR_ERR_RECOVERED = 1, 2842 /* Detected fatal error, need to exit */ 2843 MIG_THR_ERR_FATAL = 2, 2844 } MigThrError; 2845 2846 static int postcopy_resume_handshake(MigrationState *s) 2847 { 2848 qemu_savevm_send_postcopy_resume(s->to_dst_file); 2849 2850 while (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { 2851 if (migration_rp_wait(s)) { 2852 return -1; 2853 } 2854 } 2855 2856 if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { 2857 return 0; 2858 } 2859 2860 return -1; 2861 } 2862 2863 /* Return zero if success, or <0 for error */ 2864 static int postcopy_do_resume(MigrationState *s) 2865 { 2866 int ret; 2867 2868 /* 2869 * Call all the resume_prepare() hooks, so that modules can be 2870 * ready for the migration resume. 2871 */ 2872 ret = qemu_savevm_state_resume_prepare(s); 2873 if (ret) { 2874 error_report("%s: resume_prepare() failure detected: %d", 2875 __func__, ret); 2876 return ret; 2877 } 2878 2879 /* 2880 * If preempt is enabled, re-establish the preempt channel. Note that 2881 * we do it after resume prepare to make sure the main channel will be 2882 * created before the preempt channel. E.g. with weak network, the 2883 * dest QEMU may get messed up with the preempt and main channels on 2884 * the order of connection setup. This guarantees the correct order. 2885 */ 2886 ret = postcopy_preempt_establish_channel(s); 2887 if (ret) { 2888 error_report("%s: postcopy_preempt_establish_channel(): %d", 2889 __func__, ret); 2890 return ret; 2891 } 2892 2893 /* 2894 * Last handshake with destination on the resume (destination will 2895 * switch to postcopy-active afterwards) 2896 */ 2897 ret = postcopy_resume_handshake(s); 2898 if (ret) { 2899 error_report("%s: handshake failed: %d", __func__, ret); 2900 return ret; 2901 } 2902 2903 return 0; 2904 } 2905 2906 /* 2907 * We don't return until we are in a safe state to continue current 2908 * postcopy migration. Returns MIG_THR_ERR_RECOVERED if recovered, or 2909 * MIG_THR_ERR_FATAL if unrecovery failure happened. 2910 */ 2911 static MigThrError postcopy_pause(MigrationState *s) 2912 { 2913 assert(s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE); 2914 2915 while (true) { 2916 QEMUFile *file; 2917 2918 /* 2919 * Current channel is possibly broken. Release it. Note that this is 2920 * guaranteed even without lock because to_dst_file should only be 2921 * modified by the migration thread. That also guarantees that the 2922 * unregister of yank is safe too without the lock. It should be safe 2923 * even to be within the qemu_file_lock, but we didn't do that to avoid 2924 * taking more mutex (yank_lock) within qemu_file_lock. TL;DR: we make 2925 * the qemu_file_lock critical section as small as possible. 2926 */ 2927 assert(s->to_dst_file); 2928 migration_ioc_unregister_yank_from_file(s->to_dst_file); 2929 qemu_mutex_lock(&s->qemu_file_lock); 2930 file = s->to_dst_file; 2931 s->to_dst_file = NULL; 2932 qemu_mutex_unlock(&s->qemu_file_lock); 2933 2934 qemu_file_shutdown(file); 2935 qemu_fclose(file); 2936 2937 /* 2938 * We're already pausing, so ignore any errors on the return 2939 * path and just wait for the thread to finish. It will be 2940 * re-created when we resume. 2941 */ 2942 close_return_path_on_source(s); 2943 2944 migrate_set_state(&s->state, s->state, 2945 MIGRATION_STATUS_POSTCOPY_PAUSED); 2946 2947 error_report("Detected IO failure for postcopy. " 2948 "Migration paused."); 2949 2950 /* 2951 * We wait until things fixed up. Then someone will setup the 2952 * status back for us. 2953 */ 2954 while (s->state == MIGRATION_STATUS_POSTCOPY_PAUSED) { 2955 qemu_sem_wait(&s->postcopy_pause_sem); 2956 } 2957 2958 if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { 2959 /* Woken up by a recover procedure. Give it a shot */ 2960 2961 /* Do the resume logic */ 2962 if (postcopy_do_resume(s) == 0) { 2963 /* Let's continue! */ 2964 trace_postcopy_pause_continued(); 2965 return MIG_THR_ERR_RECOVERED; 2966 } else { 2967 /* 2968 * Something wrong happened during the recovery, let's 2969 * pause again. Pause is always better than throwing 2970 * data away. 2971 */ 2972 continue; 2973 } 2974 } else { 2975 /* This is not right... Time to quit. */ 2976 return MIG_THR_ERR_FATAL; 2977 } 2978 } 2979 } 2980 2981 static MigThrError migration_detect_error(MigrationState *s) 2982 { 2983 int ret; 2984 int state = s->state; 2985 Error *local_error = NULL; 2986 2987 if (state == MIGRATION_STATUS_CANCELLING || 2988 state == MIGRATION_STATUS_CANCELLED) { 2989 /* End the migration, but don't set the state to failed */ 2990 return MIG_THR_ERR_FATAL; 2991 } 2992 2993 /* 2994 * Try to detect any file errors. Note that postcopy_qemufile_src will 2995 * be NULL when postcopy preempt is not enabled. 2996 */ 2997 ret = qemu_file_get_error_obj_any(s->to_dst_file, 2998 s->postcopy_qemufile_src, 2999 &local_error); 3000 if (!ret) { 3001 /* Everything is fine */ 3002 assert(!local_error); 3003 return MIG_THR_ERR_NONE; 3004 } 3005 3006 if (local_error) { 3007 migrate_set_error(s, local_error); 3008 error_free(local_error); 3009 } 3010 3011 if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE && ret) { 3012 /* 3013 * For postcopy, we allow the network to be down for a 3014 * while. After that, it can be continued by a 3015 * recovery phase. 3016 */ 3017 return postcopy_pause(s); 3018 } else { 3019 /* 3020 * For precopy (or postcopy with error outside IO), we fail 3021 * with no time. 3022 */ 3023 migrate_set_state(&s->state, state, MIGRATION_STATUS_FAILED); 3024 trace_migration_thread_file_err(); 3025 3026 /* Time to stop the migration, now. */ 3027 return MIG_THR_ERR_FATAL; 3028 } 3029 } 3030 3031 static void migration_calculate_complete(MigrationState *s) 3032 { 3033 uint64_t bytes = migration_transferred_bytes(); 3034 int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 3035 int64_t transfer_time; 3036 3037 migration_downtime_end(s); 3038 s->total_time = end_time - s->start_time; 3039 transfer_time = s->total_time - s->setup_time; 3040 if (transfer_time) { 3041 s->mbps = ((double) bytes * 8.0) / transfer_time / 1000; 3042 } 3043 } 3044 3045 static void update_iteration_initial_status(MigrationState *s) 3046 { 3047 /* 3048 * Update these three fields at the same time to avoid mismatch info lead 3049 * wrong speed calculation. 3050 */ 3051 s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 3052 s->iteration_initial_bytes = migration_transferred_bytes(); 3053 s->iteration_initial_pages = ram_get_total_transferred_pages(); 3054 } 3055 3056 static void migration_update_counters(MigrationState *s, 3057 int64_t current_time) 3058 { 3059 uint64_t transferred, transferred_pages, time_spent; 3060 uint64_t current_bytes; /* bytes transferred since the beginning */ 3061 uint64_t switchover_bw; 3062 /* Expected bandwidth when switching over to destination QEMU */ 3063 double expected_bw_per_ms; 3064 double bandwidth; 3065 3066 if (current_time < s->iteration_start_time + BUFFER_DELAY) { 3067 return; 3068 } 3069 3070 switchover_bw = migrate_avail_switchover_bandwidth(); 3071 current_bytes = migration_transferred_bytes(); 3072 transferred = current_bytes - s->iteration_initial_bytes; 3073 time_spent = current_time - s->iteration_start_time; 3074 bandwidth = (double)transferred / time_spent; 3075 3076 if (switchover_bw) { 3077 /* 3078 * If the user specified a switchover bandwidth, let's trust the 3079 * user so that can be more accurate than what we estimated. 3080 */ 3081 expected_bw_per_ms = switchover_bw / 1000; 3082 } else { 3083 /* If the user doesn't specify bandwidth, we use the estimated */ 3084 expected_bw_per_ms = bandwidth; 3085 } 3086 3087 s->threshold_size = expected_bw_per_ms * migrate_downtime_limit(); 3088 3089 s->mbps = (((double) transferred * 8.0) / 3090 ((double) time_spent / 1000.0)) / 1000.0 / 1000.0; 3091 3092 transferred_pages = ram_get_total_transferred_pages() - 3093 s->iteration_initial_pages; 3094 s->pages_per_second = (double) transferred_pages / 3095 (((double) time_spent / 1000.0)); 3096 3097 /* 3098 * if we haven't sent anything, we don't want to 3099 * recalculate. 10000 is a small enough number for our purposes 3100 */ 3101 if (stat64_get(&mig_stats.dirty_pages_rate) && 3102 transferred > 10000) { 3103 s->expected_downtime = 3104 stat64_get(&mig_stats.dirty_bytes_last_sync) / expected_bw_per_ms; 3105 } 3106 3107 migration_rate_reset(); 3108 3109 update_iteration_initial_status(s); 3110 3111 trace_migrate_transferred(transferred, time_spent, 3112 /* Both in unit bytes/ms */ 3113 bandwidth, switchover_bw / 1000, 3114 s->threshold_size); 3115 } 3116 3117 static bool migration_can_switchover(MigrationState *s) 3118 { 3119 if (!migrate_switchover_ack()) { 3120 return true; 3121 } 3122 3123 /* No reason to wait for switchover ACK if VM is stopped */ 3124 if (!runstate_is_running()) { 3125 return true; 3126 } 3127 3128 return s->switchover_acked; 3129 } 3130 3131 /* Migration thread iteration status */ 3132 typedef enum { 3133 MIG_ITERATE_RESUME, /* Resume current iteration */ 3134 MIG_ITERATE_SKIP, /* Skip current iteration */ 3135 MIG_ITERATE_BREAK, /* Break the loop */ 3136 } MigIterateState; 3137 3138 /* 3139 * Return true if continue to the next iteration directly, false 3140 * otherwise. 3141 */ 3142 static MigIterateState migration_iteration_run(MigrationState *s) 3143 { 3144 uint64_t must_precopy, can_postcopy; 3145 Error *local_err = NULL; 3146 bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE; 3147 bool can_switchover = migration_can_switchover(s); 3148 3149 qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy); 3150 uint64_t pending_size = must_precopy + can_postcopy; 3151 3152 trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy); 3153 3154 if (must_precopy <= s->threshold_size) { 3155 qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy); 3156 pending_size = must_precopy + can_postcopy; 3157 trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy); 3158 } 3159 3160 if ((!pending_size || pending_size < s->threshold_size) && can_switchover) { 3161 trace_migration_thread_low_pending(pending_size); 3162 migration_completion(s); 3163 return MIG_ITERATE_BREAK; 3164 } 3165 3166 /* Still a significant amount to transfer */ 3167 if (!in_postcopy && must_precopy <= s->threshold_size && can_switchover && 3168 qatomic_read(&s->start_postcopy)) { 3169 if (postcopy_start(s, &local_err)) { 3170 migrate_set_error(s, local_err); 3171 error_report_err(local_err); 3172 } 3173 return MIG_ITERATE_SKIP; 3174 } 3175 3176 /* Just another iteration step */ 3177 qemu_savevm_state_iterate(s->to_dst_file, in_postcopy); 3178 return MIG_ITERATE_RESUME; 3179 } 3180 3181 static void migration_iteration_finish(MigrationState *s) 3182 { 3183 /* If we enabled cpu throttling for auto-converge, turn it off. */ 3184 cpu_throttle_stop(); 3185 3186 bql_lock(); 3187 switch (s->state) { 3188 case MIGRATION_STATUS_COMPLETED: 3189 migration_calculate_complete(s); 3190 runstate_set(RUN_STATE_POSTMIGRATE); 3191 break; 3192 case MIGRATION_STATUS_COLO: 3193 assert(migrate_colo()); 3194 migrate_start_colo_process(s); 3195 s->vm_old_state = RUN_STATE_RUNNING; 3196 /* Fallthrough */ 3197 case MIGRATION_STATUS_FAILED: 3198 case MIGRATION_STATUS_CANCELLED: 3199 case MIGRATION_STATUS_CANCELLING: 3200 if (runstate_is_live(s->vm_old_state)) { 3201 if (!runstate_check(RUN_STATE_SHUTDOWN)) { 3202 vm_start(); 3203 } 3204 } else { 3205 if (runstate_check(RUN_STATE_FINISH_MIGRATE)) { 3206 runstate_set(s->vm_old_state); 3207 } 3208 } 3209 break; 3210 3211 default: 3212 /* Should not reach here, but if so, forgive the VM. */ 3213 error_report("%s: Unknown ending state %d", __func__, s->state); 3214 break; 3215 } 3216 3217 migration_bh_schedule(migrate_fd_cleanup_bh, s); 3218 bql_unlock(); 3219 } 3220 3221 static void bg_migration_iteration_finish(MigrationState *s) 3222 { 3223 /* 3224 * Stop tracking RAM writes - un-protect memory, un-register UFFD 3225 * memory ranges, flush kernel wait queues and wake up threads 3226 * waiting for write fault to be resolved. 3227 */ 3228 ram_write_tracking_stop(); 3229 3230 bql_lock(); 3231 switch (s->state) { 3232 case MIGRATION_STATUS_COMPLETED: 3233 migration_calculate_complete(s); 3234 break; 3235 3236 case MIGRATION_STATUS_ACTIVE: 3237 case MIGRATION_STATUS_FAILED: 3238 case MIGRATION_STATUS_CANCELLED: 3239 case MIGRATION_STATUS_CANCELLING: 3240 break; 3241 3242 default: 3243 /* Should not reach here, but if so, forgive the VM. */ 3244 error_report("%s: Unknown ending state %d", __func__, s->state); 3245 break; 3246 } 3247 3248 migration_bh_schedule(migrate_fd_cleanup_bh, s); 3249 bql_unlock(); 3250 } 3251 3252 /* 3253 * Return true if continue to the next iteration directly, false 3254 * otherwise. 3255 */ 3256 static MigIterateState bg_migration_iteration_run(MigrationState *s) 3257 { 3258 int res; 3259 3260 res = qemu_savevm_state_iterate(s->to_dst_file, false); 3261 if (res > 0) { 3262 bg_migration_completion(s); 3263 return MIG_ITERATE_BREAK; 3264 } 3265 3266 return MIG_ITERATE_RESUME; 3267 } 3268 3269 void migration_make_urgent_request(void) 3270 { 3271 qemu_sem_post(&migrate_get_current()->rate_limit_sem); 3272 } 3273 3274 void migration_consume_urgent_request(void) 3275 { 3276 qemu_sem_wait(&migrate_get_current()->rate_limit_sem); 3277 } 3278 3279 /* Returns true if the rate limiting was broken by an urgent request */ 3280 bool migration_rate_limit(void) 3281 { 3282 int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 3283 MigrationState *s = migrate_get_current(); 3284 3285 bool urgent = false; 3286 migration_update_counters(s, now); 3287 if (migration_rate_exceeded(s->to_dst_file)) { 3288 3289 if (qemu_file_get_error(s->to_dst_file)) { 3290 return false; 3291 } 3292 /* 3293 * Wait for a delay to do rate limiting OR 3294 * something urgent to post the semaphore. 3295 */ 3296 int ms = s->iteration_start_time + BUFFER_DELAY - now; 3297 trace_migration_rate_limit_pre(ms); 3298 if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) { 3299 /* 3300 * We were woken by one or more urgent things but 3301 * the timedwait will have consumed one of them. 3302 * The service routine for the urgent wake will dec 3303 * the semaphore itself for each item it consumes, 3304 * so add this one we just eat back. 3305 */ 3306 qemu_sem_post(&s->rate_limit_sem); 3307 urgent = true; 3308 } 3309 trace_migration_rate_limit_post(urgent); 3310 } 3311 return urgent; 3312 } 3313 3314 /* 3315 * if failover devices are present, wait they are completely 3316 * unplugged 3317 */ 3318 3319 static void qemu_savevm_wait_unplug(MigrationState *s, int old_state, 3320 int new_state) 3321 { 3322 if (qemu_savevm_state_guest_unplug_pending()) { 3323 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_WAIT_UNPLUG); 3324 3325 while (s->state == MIGRATION_STATUS_WAIT_UNPLUG && 3326 qemu_savevm_state_guest_unplug_pending()) { 3327 qemu_sem_timedwait(&s->wait_unplug_sem, 250); 3328 } 3329 if (s->state != MIGRATION_STATUS_WAIT_UNPLUG) { 3330 int timeout = 120; /* 30 seconds */ 3331 /* 3332 * migration has been canceled 3333 * but as we have started an unplug we must wait the end 3334 * to be able to plug back the card 3335 */ 3336 while (timeout-- && qemu_savevm_state_guest_unplug_pending()) { 3337 qemu_sem_timedwait(&s->wait_unplug_sem, 250); 3338 } 3339 if (qemu_savevm_state_guest_unplug_pending() && 3340 !qtest_enabled()) { 3341 warn_report("migration: partially unplugged device on " 3342 "failure"); 3343 } 3344 } 3345 3346 migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG, new_state); 3347 } else { 3348 migrate_set_state(&s->state, old_state, new_state); 3349 } 3350 } 3351 3352 /* 3353 * Master migration thread on the source VM. 3354 * It drives the migration and pumps the data down the outgoing channel. 3355 */ 3356 static void *migration_thread(void *opaque) 3357 { 3358 MigrationState *s = opaque; 3359 MigrationThread *thread = NULL; 3360 int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST); 3361 MigThrError thr_error; 3362 bool urgent = false; 3363 3364 thread = migration_threads_add("live_migration", qemu_get_thread_id()); 3365 3366 rcu_register_thread(); 3367 3368 object_ref(OBJECT(s)); 3369 update_iteration_initial_status(s); 3370 3371 if (!multifd_send_setup()) { 3372 goto out; 3373 } 3374 3375 bql_lock(); 3376 qemu_savevm_state_header(s->to_dst_file); 3377 bql_unlock(); 3378 3379 /* 3380 * If we opened the return path, we need to make sure dst has it 3381 * opened as well. 3382 */ 3383 if (s->rp_state.rp_thread_created) { 3384 /* Now tell the dest that it should open its end so it can reply */ 3385 qemu_savevm_send_open_return_path(s->to_dst_file); 3386 3387 /* And do a ping that will make stuff easier to debug */ 3388 qemu_savevm_send_ping(s->to_dst_file, 1); 3389 } 3390 3391 if (migrate_postcopy()) { 3392 /* 3393 * Tell the destination that we *might* want to do postcopy later; 3394 * if the other end can't do postcopy it should fail now, nice and 3395 * early. 3396 */ 3397 qemu_savevm_send_postcopy_advise(s->to_dst_file); 3398 } 3399 3400 if (migrate_colo()) { 3401 /* Notify migration destination that we enable COLO */ 3402 qemu_savevm_send_colo_enable(s->to_dst_file); 3403 } 3404 3405 bql_lock(); 3406 qemu_savevm_state_setup(s->to_dst_file); 3407 bql_unlock(); 3408 3409 qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP, 3410 MIGRATION_STATUS_ACTIVE); 3411 3412 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; 3413 3414 trace_migration_thread_setup_complete(); 3415 3416 while (migration_is_active(s)) { 3417 if (urgent || !migration_rate_exceeded(s->to_dst_file)) { 3418 MigIterateState iter_state = migration_iteration_run(s); 3419 if (iter_state == MIG_ITERATE_SKIP) { 3420 continue; 3421 } else if (iter_state == MIG_ITERATE_BREAK) { 3422 break; 3423 } 3424 } 3425 3426 /* 3427 * Try to detect any kind of failures, and see whether we 3428 * should stop the migration now. 3429 */ 3430 thr_error = migration_detect_error(s); 3431 if (thr_error == MIG_THR_ERR_FATAL) { 3432 /* Stop migration */ 3433 break; 3434 } else if (thr_error == MIG_THR_ERR_RECOVERED) { 3435 /* 3436 * Just recovered from a e.g. network failure, reset all 3437 * the local variables. This is important to avoid 3438 * breaking transferred_bytes and bandwidth calculation 3439 */ 3440 update_iteration_initial_status(s); 3441 } 3442 3443 urgent = migration_rate_limit(); 3444 } 3445 3446 out: 3447 trace_migration_thread_after_loop(); 3448 migration_iteration_finish(s); 3449 object_unref(OBJECT(s)); 3450 rcu_unregister_thread(); 3451 migration_threads_remove(thread); 3452 return NULL; 3453 } 3454 3455 static void bg_migration_vm_start_bh(void *opaque) 3456 { 3457 MigrationState *s = opaque; 3458 3459 vm_resume(s->vm_old_state); 3460 migration_downtime_end(s); 3461 } 3462 3463 /** 3464 * Background snapshot thread, based on live migration code. 3465 * This is an alternative implementation of live migration mechanism 3466 * introduced specifically to support background snapshots. 3467 * 3468 * It takes advantage of userfault_fd write protection mechanism introduced 3469 * in v5.7 kernel. Compared to existing dirty page logging migration much 3470 * lesser stream traffic is produced resulting in smaller snapshot images, 3471 * simply cause of no page duplicates can get into the stream. 3472 * 3473 * Another key point is that generated vmstate stream reflects machine state 3474 * 'frozen' at the beginning of snapshot creation compared to dirty page logging 3475 * mechanism, which effectively results in that saved snapshot is the state of VM 3476 * at the end of the process. 3477 */ 3478 static void *bg_migration_thread(void *opaque) 3479 { 3480 MigrationState *s = opaque; 3481 int64_t setup_start; 3482 MigThrError thr_error; 3483 QEMUFile *fb; 3484 bool early_fail = true; 3485 3486 rcu_register_thread(); 3487 object_ref(OBJECT(s)); 3488 3489 migration_rate_set(RATE_LIMIT_DISABLED); 3490 3491 setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST); 3492 /* 3493 * We want to save vmstate for the moment when migration has been 3494 * initiated but also we want to save RAM content while VM is running. 3495 * The RAM content should appear first in the vmstate. So, we first 3496 * stash the non-RAM part of the vmstate to the temporary buffer, 3497 * then write RAM part of the vmstate to the migration stream 3498 * with vCPUs running and, finally, write stashed non-RAM part of 3499 * the vmstate from the buffer to the migration stream. 3500 */ 3501 s->bioc = qio_channel_buffer_new(512 * 1024); 3502 qio_channel_set_name(QIO_CHANNEL(s->bioc), "vmstate-buffer"); 3503 fb = qemu_file_new_output(QIO_CHANNEL(s->bioc)); 3504 object_unref(OBJECT(s->bioc)); 3505 3506 update_iteration_initial_status(s); 3507 3508 /* 3509 * Prepare for tracking memory writes with UFFD-WP - populate 3510 * RAM pages before protecting. 3511 */ 3512 #ifdef __linux__ 3513 ram_write_tracking_prepare(); 3514 #endif 3515 3516 bql_lock(); 3517 qemu_savevm_state_header(s->to_dst_file); 3518 qemu_savevm_state_setup(s->to_dst_file); 3519 bql_unlock(); 3520 3521 qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP, 3522 MIGRATION_STATUS_ACTIVE); 3523 3524 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; 3525 3526 trace_migration_thread_setup_complete(); 3527 3528 bql_lock(); 3529 3530 if (migration_stop_vm(s, RUN_STATE_PAUSED)) { 3531 goto fail; 3532 } 3533 /* 3534 * Put vCPUs in sync with shadow context structures, then 3535 * save their state to channel-buffer along with devices. 3536 */ 3537 cpu_synchronize_all_states(); 3538 if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) { 3539 goto fail; 3540 } 3541 /* 3542 * Since we are going to get non-iterable state data directly 3543 * from s->bioc->data, explicit flush is needed here. 3544 */ 3545 qemu_fflush(fb); 3546 3547 /* Now initialize UFFD context and start tracking RAM writes */ 3548 if (ram_write_tracking_start()) { 3549 goto fail; 3550 } 3551 early_fail = false; 3552 3553 /* 3554 * Start VM from BH handler to avoid write-fault lock here. 3555 * UFFD-WP protection for the whole RAM is already enabled so 3556 * calling VM state change notifiers from vm_start() would initiate 3557 * writes to virtio VQs memory which is in write-protected region. 3558 */ 3559 migration_bh_schedule(bg_migration_vm_start_bh, s); 3560 bql_unlock(); 3561 3562 while (migration_is_active(s)) { 3563 MigIterateState iter_state = bg_migration_iteration_run(s); 3564 if (iter_state == MIG_ITERATE_SKIP) { 3565 continue; 3566 } else if (iter_state == MIG_ITERATE_BREAK) { 3567 break; 3568 } 3569 3570 /* 3571 * Try to detect any kind of failures, and see whether we 3572 * should stop the migration now. 3573 */ 3574 thr_error = migration_detect_error(s); 3575 if (thr_error == MIG_THR_ERR_FATAL) { 3576 /* Stop migration */ 3577 break; 3578 } 3579 3580 migration_update_counters(s, qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); 3581 } 3582 3583 trace_migration_thread_after_loop(); 3584 3585 fail: 3586 if (early_fail) { 3587 migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE, 3588 MIGRATION_STATUS_FAILED); 3589 bql_unlock(); 3590 } 3591 3592 bg_migration_iteration_finish(s); 3593 3594 qemu_fclose(fb); 3595 object_unref(OBJECT(s)); 3596 rcu_unregister_thread(); 3597 3598 return NULL; 3599 } 3600 3601 void migrate_fd_connect(MigrationState *s, Error *error_in) 3602 { 3603 Error *local_err = NULL; 3604 uint64_t rate_limit; 3605 bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED; 3606 int ret; 3607 3608 /* 3609 * If there's a previous error, free it and prepare for another one. 3610 * Meanwhile if migration completes successfully, there won't have an error 3611 * dumped when calling migrate_fd_cleanup(). 3612 */ 3613 migrate_error_free(s); 3614 3615 s->expected_downtime = migrate_downtime_limit(); 3616 if (error_in) { 3617 migrate_fd_error(s, error_in); 3618 if (resume) { 3619 /* 3620 * Don't do cleanup for resume if channel is invalid, but only dump 3621 * the error. We wait for another channel connect from the user. 3622 * The error_report still gives HMP user a hint on what failed. 3623 * It's normally done in migrate_fd_cleanup(), but call it here 3624 * explicitly. 3625 */ 3626 error_report_err(error_copy(s->error)); 3627 } else { 3628 migrate_fd_cleanup(s); 3629 } 3630 return; 3631 } 3632 3633 if (resume) { 3634 /* This is a resumed migration */ 3635 rate_limit = migrate_max_postcopy_bandwidth(); 3636 } else { 3637 /* This is a fresh new migration */ 3638 rate_limit = migrate_max_bandwidth(); 3639 3640 /* Notify before starting migration thread */ 3641 if (migration_call_notifiers(s, MIG_EVENT_PRECOPY_SETUP, &local_err)) { 3642 goto fail; 3643 } 3644 } 3645 3646 migration_rate_set(rate_limit); 3647 qemu_file_set_blocking(s->to_dst_file, true); 3648 3649 /* 3650 * Open the return path. For postcopy, it is used exclusively. For 3651 * precopy, only if user specified "return-path" capability would 3652 * QEMU uses the return path. 3653 */ 3654 if (migrate_postcopy_ram() || migrate_return_path()) { 3655 if (open_return_path_on_source(s)) { 3656 error_setg(&local_err, "Unable to open return-path for postcopy"); 3657 goto fail; 3658 } 3659 } 3660 3661 /* 3662 * This needs to be done before resuming a postcopy. Note: for newer 3663 * QEMUs we will delay the channel creation until postcopy_start(), to 3664 * avoid disorder of channel creations. 3665 */ 3666 if (migrate_postcopy_preempt() && s->preempt_pre_7_2) { 3667 postcopy_preempt_setup(s); 3668 } 3669 3670 if (resume) { 3671 /* Wakeup the main migration thread to do the recovery */ 3672 migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED, 3673 MIGRATION_STATUS_POSTCOPY_RECOVER); 3674 qemu_sem_post(&s->postcopy_pause_sem); 3675 return; 3676 } 3677 3678 if (migrate_mode_is_cpr(s)) { 3679 ret = migration_stop_vm(s, RUN_STATE_FINISH_MIGRATE); 3680 if (ret < 0) { 3681 error_setg(&local_err, "migration_stop_vm failed, error %d", -ret); 3682 goto fail; 3683 } 3684 } 3685 3686 if (migrate_background_snapshot()) { 3687 qemu_thread_create(&s->thread, "bg_snapshot", 3688 bg_migration_thread, s, QEMU_THREAD_JOINABLE); 3689 } else { 3690 qemu_thread_create(&s->thread, "live_migration", 3691 migration_thread, s, QEMU_THREAD_JOINABLE); 3692 } 3693 s->migration_thread_running = true; 3694 return; 3695 3696 fail: 3697 migrate_set_error(s, local_err); 3698 migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED); 3699 error_report_err(local_err); 3700 migrate_fd_cleanup(s); 3701 } 3702 3703 static void migration_class_init(ObjectClass *klass, void *data) 3704 { 3705 DeviceClass *dc = DEVICE_CLASS(klass); 3706 3707 dc->user_creatable = false; 3708 device_class_set_props(dc, migration_properties); 3709 } 3710 3711 static void migration_instance_finalize(Object *obj) 3712 { 3713 MigrationState *ms = MIGRATION_OBJ(obj); 3714 3715 qemu_mutex_destroy(&ms->error_mutex); 3716 qemu_mutex_destroy(&ms->qemu_file_lock); 3717 qemu_sem_destroy(&ms->wait_unplug_sem); 3718 qemu_sem_destroy(&ms->rate_limit_sem); 3719 qemu_sem_destroy(&ms->pause_sem); 3720 qemu_sem_destroy(&ms->postcopy_pause_sem); 3721 qemu_sem_destroy(&ms->rp_state.rp_sem); 3722 qemu_sem_destroy(&ms->rp_state.rp_pong_acks); 3723 qemu_sem_destroy(&ms->postcopy_qemufile_src_sem); 3724 error_free(ms->error); 3725 } 3726 3727 static void migration_instance_init(Object *obj) 3728 { 3729 MigrationState *ms = MIGRATION_OBJ(obj); 3730 3731 ms->state = MIGRATION_STATUS_NONE; 3732 ms->mbps = -1; 3733 ms->pages_per_second = -1; 3734 qemu_sem_init(&ms->pause_sem, 0); 3735 qemu_mutex_init(&ms->error_mutex); 3736 3737 migrate_params_init(&ms->parameters); 3738 3739 qemu_sem_init(&ms->postcopy_pause_sem, 0); 3740 qemu_sem_init(&ms->rp_state.rp_sem, 0); 3741 qemu_sem_init(&ms->rp_state.rp_pong_acks, 0); 3742 qemu_sem_init(&ms->rate_limit_sem, 0); 3743 qemu_sem_init(&ms->wait_unplug_sem, 0); 3744 qemu_sem_init(&ms->postcopy_qemufile_src_sem, 0); 3745 qemu_mutex_init(&ms->qemu_file_lock); 3746 } 3747 3748 /* 3749 * Return true if check pass, false otherwise. Error will be put 3750 * inside errp if provided. 3751 */ 3752 static bool migration_object_check(MigrationState *ms, Error **errp) 3753 { 3754 /* Assuming all off */ 3755 bool old_caps[MIGRATION_CAPABILITY__MAX] = { 0 }; 3756 3757 if (!migrate_params_check(&ms->parameters, errp)) { 3758 return false; 3759 } 3760 3761 return migrate_caps_check(old_caps, ms->capabilities, errp); 3762 } 3763 3764 static const TypeInfo migration_type = { 3765 .name = TYPE_MIGRATION, 3766 /* 3767 * NOTE: TYPE_MIGRATION is not really a device, as the object is 3768 * not created using qdev_new(), it is not attached to the qdev 3769 * device tree, and it is never realized. 3770 * 3771 * TODO: Make this TYPE_OBJECT once QOM provides something like 3772 * TYPE_DEVICE's "-global" properties. 3773 */ 3774 .parent = TYPE_DEVICE, 3775 .class_init = migration_class_init, 3776 .class_size = sizeof(MigrationClass), 3777 .instance_size = sizeof(MigrationState), 3778 .instance_init = migration_instance_init, 3779 .instance_finalize = migration_instance_finalize, 3780 }; 3781 3782 static void register_migration_types(void) 3783 { 3784 type_register_static(&migration_type); 3785 } 3786 3787 type_init(register_migration_types); 3788