1 /* 2 * QEMU live migration 3 * 4 * Copyright IBM, Corp. 2008 5 * 6 * Authors: 7 * Anthony Liguori <aliguori@us.ibm.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 * Contributions after 2012-01-13 are licensed under the terms of the 13 * GNU GPL, version 2 or (at your option) any later version. 14 */ 15 16 #include "qemu/osdep.h" 17 #include "qemu/cutils.h" 18 #include "qemu/error-report.h" 19 #include "qemu/main-loop.h" 20 #include "migration/blocker.h" 21 #include "exec.h" 22 #include "fd.h" 23 #include "file.h" 24 #include "socket.h" 25 #include "sysemu/runstate.h" 26 #include "sysemu/sysemu.h" 27 #include "sysemu/cpu-throttle.h" 28 #include "rdma.h" 29 #include "ram.h" 30 #include "ram-compress.h" 31 #include "migration/global_state.h" 32 #include "migration/misc.h" 33 #include "migration.h" 34 #include "migration-stats.h" 35 #include "savevm.h" 36 #include "qemu-file.h" 37 #include "channel.h" 38 #include "migration/vmstate.h" 39 #include "block/block.h" 40 #include "qapi/error.h" 41 #include "qapi/clone-visitor.h" 42 #include "qapi/qapi-visit-migration.h" 43 #include "qapi/qapi-visit-sockets.h" 44 #include "qapi/qapi-commands-migration.h" 45 #include "qapi/qapi-events-migration.h" 46 #include "qapi/qmp/qerror.h" 47 #include "qapi/qmp/qnull.h" 48 #include "qemu/rcu.h" 49 #include "block.h" 50 #include "postcopy-ram.h" 51 #include "qemu/thread.h" 52 #include "trace.h" 53 #include "exec/target_page.h" 54 #include "io/channel-buffer.h" 55 #include "io/channel-tls.h" 56 #include "migration/colo.h" 57 #include "hw/boards.h" 58 #include "monitor/monitor.h" 59 #include "net/announce.h" 60 #include "qemu/queue.h" 61 #include "multifd.h" 62 #include "threadinfo.h" 63 #include "qemu/yank.h" 64 #include "sysemu/cpus.h" 65 #include "yank_functions.h" 66 #include "sysemu/qtest.h" 67 #include "options.h" 68 #include "sysemu/dirtylimit.h" 69 #include "qemu/sockets.h" 70 #include "sysemu/kvm.h" 71 72 #define NOTIFIER_ELEM_INIT(array, elem) \ 73 [elem] = NOTIFIER_WITH_RETURN_LIST_INITIALIZER((array)[elem]) 74 75 static NotifierWithReturnList migration_state_notifiers[] = { 76 NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_NORMAL), 77 NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_CPR_REBOOT), 78 }; 79 80 /* Messages sent on the return path from destination to source */ 81 enum mig_rp_message_type { 82 MIG_RP_MSG_INVALID = 0, /* Must be 0 */ 83 MIG_RP_MSG_SHUT, /* sibling will not send any more RP messages */ 84 MIG_RP_MSG_PONG, /* Response to a PING; data (seq: be32 ) */ 85 86 MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */ 87 MIG_RP_MSG_REQ_PAGES, /* data (start: be64, len: be32) */ 88 MIG_RP_MSG_RECV_BITMAP, /* send recved_bitmap back to source */ 89 MIG_RP_MSG_RESUME_ACK, /* tell source that we are ready to resume */ 90 MIG_RP_MSG_SWITCHOVER_ACK, /* Tell source it's OK to do switchover */ 91 92 MIG_RP_MSG_MAX 93 }; 94 95 /* When we add fault tolerance, we could have several 96 migrations at once. 
For now we don't need to add 97 dynamic creation of migration */ 98 99 static MigrationState *current_migration; 100 static MigrationIncomingState *current_incoming; 101 102 static GSList *migration_blockers[MIG_MODE__MAX]; 103 104 static bool migration_object_check(MigrationState *ms, Error **errp); 105 static int migration_maybe_pause(MigrationState *s, 106 int *current_active_state, 107 int new_state); 108 static void migrate_fd_cancel(MigrationState *s); 109 static bool close_return_path_on_source(MigrationState *s); 110 static void migration_completion_end(MigrationState *s); 111 112 static void migration_downtime_start(MigrationState *s) 113 { 114 trace_vmstate_downtime_checkpoint("src-downtime-start"); 115 s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 116 } 117 118 static void migration_downtime_end(MigrationState *s) 119 { 120 int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 121 122 /* 123 * If downtime already set, should mean that postcopy already set it, 124 * then that should be the real downtime already. 125 */ 126 if (!s->downtime) { 127 s->downtime = now - s->downtime_start; 128 } 129 130 trace_vmstate_downtime_checkpoint("src-downtime-end"); 131 } 132 133 static bool migration_needs_multiple_sockets(void) 134 { 135 return migrate_multifd() || migrate_postcopy_preempt(); 136 } 137 138 static bool transport_supports_multi_channels(MigrationAddress *addr) 139 { 140 if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) { 141 SocketAddress *saddr = &addr->u.socket; 142 143 return saddr->type == SOCKET_ADDRESS_TYPE_INET || 144 saddr->type == SOCKET_ADDRESS_TYPE_UNIX || 145 saddr->type == SOCKET_ADDRESS_TYPE_VSOCK; 146 } 147 148 return false; 149 } 150 151 static bool 152 migration_channels_and_transport_compatible(MigrationAddress *addr, 153 Error **errp) 154 { 155 if (migration_needs_multiple_sockets() && 156 !transport_supports_multi_channels(addr)) { 157 error_setg(errp, "Migration requires multi-channel URIs (e.g. tcp)"); 158 return false; 159 } 160 161 return true; 162 } 163 164 static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp) 165 { 166 uintptr_t a = (uintptr_t) ap, b = (uintptr_t) bp; 167 168 return (a > b) - (a < b); 169 } 170 171 static int migration_stop_vm(MigrationState *s, RunState state) 172 { 173 int ret; 174 175 migration_downtime_start(s); 176 177 s->vm_old_state = runstate_get(); 178 global_state_store(); 179 180 ret = vm_stop_force_state(state); 181 182 trace_vmstate_downtime_checkpoint("src-vm-stopped"); 183 trace_migration_completion_vm_stop(ret); 184 185 return ret; 186 } 187 188 void migration_object_init(void) 189 { 190 /* This can only be called once. */ 191 assert(!current_migration); 192 current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION)); 193 194 /* 195 * Init the migrate incoming object as well no matter whether 196 * we'll use it or not. 
197 */
198 assert(!current_incoming);
199 current_incoming = g_new0(MigrationIncomingState, 1);
200 current_incoming->state = MIGRATION_STATUS_NONE;
201 current_incoming->postcopy_remote_fds =
202 g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD));
203 qemu_mutex_init(&current_incoming->rp_mutex);
204 qemu_mutex_init(&current_incoming->postcopy_prio_thread_mutex);
205 qemu_event_init(&current_incoming->main_thread_load_event, false);
206 qemu_sem_init(&current_incoming->postcopy_pause_sem_dst, 0);
207 qemu_sem_init(&current_incoming->postcopy_pause_sem_fault, 0);
208 qemu_sem_init(&current_incoming->postcopy_pause_sem_fast_load, 0);
209 qemu_sem_init(&current_incoming->postcopy_qemufile_dst_done, 0);
210
211 qemu_mutex_init(&current_incoming->page_request_mutex);
212 qemu_cond_init(&current_incoming->page_request_cond);
213 current_incoming->page_requested = g_tree_new(page_request_addr_cmp);
214
215 migration_object_check(current_migration, &error_fatal);
216
217 blk_mig_init();
218 ram_mig_init();
219 dirty_bitmap_mig_init();
220 }
221
222 typedef struct {
223 QEMUBH *bh;
224 QEMUBHFunc *cb;
225 void *opaque;
226 } MigrationBH;
227
228 static void migration_bh_dispatch_bh(void *opaque)
229 {
230 MigrationState *s = migrate_get_current();
231 MigrationBH *migbh = opaque;
232
233 /* clean up this BH */
234 qemu_bh_delete(migbh->bh);
235 migbh->bh = NULL;
236
237 /* dispatch the other one */
238 migbh->cb(migbh->opaque);
239 object_unref(OBJECT(s));
240
241 g_free(migbh);
242 }
243
244 void migration_bh_schedule(QEMUBHFunc *cb, void *opaque)
245 {
246 MigrationState *s = migrate_get_current();
247 MigrationBH *migbh = g_new0(MigrationBH, 1);
248 QEMUBH *bh = qemu_bh_new(migration_bh_dispatch_bh, migbh);
249
250 /* Store these to dispatch when the BH runs */
251 migbh->bh = bh;
252 migbh->cb = cb;
253 migbh->opaque = opaque;
254
255 /*
256 * Take a reference on the state for the BH, because it may run when
257 * there are no other references left.
258 */
259 object_ref(OBJECT(s));
260 qemu_bh_schedule(bh);
261 }
262
263 void migration_cancel(const Error *error)
264 {
265 if (error) {
266 migrate_set_error(current_migration, error);
267 }
268 if (migrate_dirty_limit()) {
269 qmp_cancel_vcpu_dirty_limit(false, -1, NULL);
270 }
271 migrate_fd_cancel(current_migration);
272 }
273
274 void migration_shutdown(void)
275 {
276 /*
277 * When the QEMU main thread exits, the COLO thread may be
278 * waiting on a semaphore, so wake up the COLO thread before
279 * shutting migration down.
280 */
281 colo_shutdown();
282 /*
283 * Cancel the current migration - that will (eventually)
284 * stop the migration using this structure
285 */
286 migration_cancel(NULL);
287 object_unref(OBJECT(current_migration));
288
289 /*
290 * Cancel outgoing migration of dirty bitmaps. It should
291 * at least unref used block nodes.
292 */
293 dirty_bitmap_mig_cancel_outgoing();
294
295 /*
296 * Cancel incoming migration of dirty bitmaps. Dirty bitmaps
297 * are non-critical data, and their loss is never considered
298 * serious.
299 */
300 dirty_bitmap_mig_cancel_incoming();
301 }
302
303 /* For outgoing */
304 MigrationState *migrate_get_current(void)
305 {
306 /* This can only be called after the object has been created.
*/ 307 assert(current_migration); 308 return current_migration; 309 } 310 311 MigrationIncomingState *migration_incoming_get_current(void) 312 { 313 assert(current_incoming); 314 return current_incoming; 315 } 316 317 void migration_incoming_transport_cleanup(MigrationIncomingState *mis) 318 { 319 if (mis->socket_address_list) { 320 qapi_free_SocketAddressList(mis->socket_address_list); 321 mis->socket_address_list = NULL; 322 } 323 324 if (mis->transport_cleanup) { 325 mis->transport_cleanup(mis->transport_data); 326 mis->transport_data = mis->transport_cleanup = NULL; 327 } 328 } 329 330 void migration_incoming_state_destroy(void) 331 { 332 struct MigrationIncomingState *mis = migration_incoming_get_current(); 333 334 multifd_recv_cleanup(); 335 compress_threads_load_cleanup(); 336 337 if (mis->to_src_file) { 338 /* Tell source that we are done */ 339 migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0); 340 qemu_fclose(mis->to_src_file); 341 mis->to_src_file = NULL; 342 } 343 344 if (mis->from_src_file) { 345 migration_ioc_unregister_yank_from_file(mis->from_src_file); 346 qemu_fclose(mis->from_src_file); 347 mis->from_src_file = NULL; 348 } 349 if (mis->postcopy_remote_fds) { 350 g_array_free(mis->postcopy_remote_fds, TRUE); 351 mis->postcopy_remote_fds = NULL; 352 } 353 354 migration_incoming_transport_cleanup(mis); 355 qemu_event_reset(&mis->main_thread_load_event); 356 357 if (mis->page_requested) { 358 g_tree_destroy(mis->page_requested); 359 mis->page_requested = NULL; 360 } 361 362 if (mis->postcopy_qemufile_dst) { 363 migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst); 364 qemu_fclose(mis->postcopy_qemufile_dst); 365 mis->postcopy_qemufile_dst = NULL; 366 } 367 368 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 369 } 370 371 static void migrate_generate_event(int new_state) 372 { 373 if (migrate_events()) { 374 qapi_event_send_migration(new_state); 375 } 376 } 377 378 /* 379 * Send a message on the return channel back to the source 380 * of the migration. 381 */ 382 static int migrate_send_rp_message(MigrationIncomingState *mis, 383 enum mig_rp_message_type message_type, 384 uint16_t len, void *data) 385 { 386 int ret = 0; 387 388 trace_migrate_send_rp_message((int)message_type, len); 389 QEMU_LOCK_GUARD(&mis->rp_mutex); 390 391 /* 392 * It's possible that the file handle got lost due to network 393 * failures. 394 */ 395 if (!mis->to_src_file) { 396 ret = -EIO; 397 return ret; 398 } 399 400 qemu_put_be16(mis->to_src_file, (unsigned int)message_type); 401 qemu_put_be16(mis->to_src_file, len); 402 qemu_put_buffer(mis->to_src_file, data, len); 403 return qemu_fflush(mis->to_src_file); 404 } 405 406 /* Request one page from the source VM at the given start address. 407 * rb: the RAMBlock to request the page in 408 * Start: Address offset within the RB 409 * Len: Length in bytes required - must be a multiple of pagesize 410 */ 411 int migrate_send_rp_message_req_pages(MigrationIncomingState *mis, 412 RAMBlock *rb, ram_addr_t start) 413 { 414 uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */ 415 size_t msglen = 12; /* start + len */ 416 size_t len = qemu_ram_pagesize(rb); 417 enum mig_rp_message_type msg_type; 418 const char *rbname; 419 int rbname_len; 420 421 *(uint64_t *)bufc = cpu_to_be64((uint64_t)start); 422 *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len); 423 424 /* 425 * We maintain the last ramblock that we requested for page. 
Note that we 426 * don't need locking because this function will only be called within the 427 * postcopy ram fault thread. 428 */ 429 if (rb != mis->last_rb) { 430 mis->last_rb = rb; 431 432 rbname = qemu_ram_get_idstr(rb); 433 rbname_len = strlen(rbname); 434 435 assert(rbname_len < 256); 436 437 bufc[msglen++] = rbname_len; 438 memcpy(bufc + msglen, rbname, rbname_len); 439 msglen += rbname_len; 440 msg_type = MIG_RP_MSG_REQ_PAGES_ID; 441 } else { 442 msg_type = MIG_RP_MSG_REQ_PAGES; 443 } 444 445 return migrate_send_rp_message(mis, msg_type, msglen, bufc); 446 } 447 448 int migrate_send_rp_req_pages(MigrationIncomingState *mis, 449 RAMBlock *rb, ram_addr_t start, uint64_t haddr) 450 { 451 void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb)); 452 bool received = false; 453 454 WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) { 455 received = ramblock_recv_bitmap_test_byte_offset(rb, start); 456 if (!received && !g_tree_lookup(mis->page_requested, aligned)) { 457 /* 458 * The page has not been received, and it's not yet in the page 459 * request list. Queue it. Set the value of element to 1, so that 460 * things like g_tree_lookup() will return TRUE (1) when found. 461 */ 462 g_tree_insert(mis->page_requested, aligned, (gpointer)1); 463 qatomic_inc(&mis->page_requested_count); 464 trace_postcopy_page_req_add(aligned, mis->page_requested_count); 465 } 466 } 467 468 /* 469 * If the page is there, skip sending the message. We don't even need the 470 * lock because as long as the page arrived, it'll be there forever. 471 */ 472 if (received) { 473 return 0; 474 } 475 476 return migrate_send_rp_message_req_pages(mis, rb, start); 477 } 478 479 static bool migration_colo_enabled; 480 bool migration_incoming_colo_enabled(void) 481 { 482 return migration_colo_enabled; 483 } 484 485 void migration_incoming_disable_colo(void) 486 { 487 ram_block_discard_disable(false); 488 migration_colo_enabled = false; 489 } 490 491 int migration_incoming_enable_colo(void) 492 { 493 #ifndef CONFIG_REPLICATION 494 error_report("ENABLE_COLO command come in migration stream, but COLO " 495 "module is not built in"); 496 return -ENOTSUP; 497 #endif 498 499 if (!migrate_colo()) { 500 error_report("ENABLE_COLO command come in migration stream, but c-colo " 501 "capability is not set"); 502 return -EINVAL; 503 } 504 505 if (ram_block_discard_disable(true)) { 506 error_report("COLO: cannot disable RAM discard"); 507 return -EBUSY; 508 } 509 migration_colo_enabled = true; 510 return 0; 511 } 512 513 void migrate_add_address(SocketAddress *address) 514 { 515 MigrationIncomingState *mis = migration_incoming_get_current(); 516 517 QAPI_LIST_PREPEND(mis->socket_address_list, 518 QAPI_CLONE(SocketAddress, address)); 519 } 520 521 bool migrate_uri_parse(const char *uri, MigrationChannel **channel, 522 Error **errp) 523 { 524 g_autoptr(MigrationChannel) val = g_new0(MigrationChannel, 1); 525 g_autoptr(MigrationAddress) addr = g_new0(MigrationAddress, 1); 526 InetSocketAddress *isock = &addr->u.rdma; 527 strList **tail = &addr->u.exec.args; 528 529 if (strstart(uri, "exec:", NULL)) { 530 addr->transport = MIGRATION_ADDRESS_TYPE_EXEC; 531 #ifdef WIN32 532 QAPI_LIST_APPEND(tail, g_strdup(exec_get_cmd_path())); 533 QAPI_LIST_APPEND(tail, g_strdup("/c")); 534 #else 535 QAPI_LIST_APPEND(tail, g_strdup("/bin/sh")); 536 QAPI_LIST_APPEND(tail, g_strdup("-c")); 537 #endif 538 QAPI_LIST_APPEND(tail, g_strdup(uri + strlen("exec:"))); 539 } else if (strstart(uri, "rdma:", NULL)) { 540 if (inet_parse(isock, uri + 
strlen("rdma:"), errp)) { 541 qapi_free_InetSocketAddress(isock); 542 return false; 543 } 544 addr->transport = MIGRATION_ADDRESS_TYPE_RDMA; 545 } else if (strstart(uri, "tcp:", NULL) || 546 strstart(uri, "unix:", NULL) || 547 strstart(uri, "vsock:", NULL) || 548 strstart(uri, "fd:", NULL)) { 549 addr->transport = MIGRATION_ADDRESS_TYPE_SOCKET; 550 SocketAddress *saddr = socket_parse(uri, errp); 551 if (!saddr) { 552 return false; 553 } 554 addr->u.socket.type = saddr->type; 555 addr->u.socket.u = saddr->u; 556 /* Don't free the objects inside; their ownership moved to "addr" */ 557 g_free(saddr); 558 } else if (strstart(uri, "file:", NULL)) { 559 addr->transport = MIGRATION_ADDRESS_TYPE_FILE; 560 addr->u.file.filename = g_strdup(uri + strlen("file:")); 561 if (file_parse_offset(addr->u.file.filename, &addr->u.file.offset, 562 errp)) { 563 return false; 564 } 565 } else { 566 error_setg(errp, "unknown migration protocol: %s", uri); 567 return false; 568 } 569 570 val->channel_type = MIGRATION_CHANNEL_TYPE_MAIN; 571 val->addr = g_steal_pointer(&addr); 572 *channel = g_steal_pointer(&val); 573 return true; 574 } 575 576 static void qemu_start_incoming_migration(const char *uri, bool has_channels, 577 MigrationChannelList *channels, 578 Error **errp) 579 { 580 g_autoptr(MigrationChannel) channel = NULL; 581 MigrationAddress *addr = NULL; 582 MigrationIncomingState *mis = migration_incoming_get_current(); 583 584 /* 585 * Having preliminary checks for uri and channel 586 */ 587 if (!uri == !channels) { 588 error_setg(errp, "need either 'uri' or 'channels' argument"); 589 return; 590 } 591 592 if (channels) { 593 /* To verify that Migrate channel list has only item */ 594 if (channels->next) { 595 error_setg(errp, "Channel list has more than one entries"); 596 return; 597 } 598 addr = channels->value->addr; 599 } 600 601 if (uri) { 602 /* caller uses the old URI syntax */ 603 if (!migrate_uri_parse(uri, &channel, errp)) { 604 return; 605 } 606 addr = channel->addr; 607 } 608 609 /* transport mechanism not suitable for migration? 
*/ 610 if (!migration_channels_and_transport_compatible(addr, errp)) { 611 return; 612 } 613 614 migrate_set_state(&mis->state, MIGRATION_STATUS_NONE, 615 MIGRATION_STATUS_SETUP); 616 617 if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) { 618 SocketAddress *saddr = &addr->u.socket; 619 if (saddr->type == SOCKET_ADDRESS_TYPE_INET || 620 saddr->type == SOCKET_ADDRESS_TYPE_UNIX || 621 saddr->type == SOCKET_ADDRESS_TYPE_VSOCK) { 622 socket_start_incoming_migration(saddr, errp); 623 } else if (saddr->type == SOCKET_ADDRESS_TYPE_FD) { 624 fd_start_incoming_migration(saddr->u.fd.str, errp); 625 } 626 #ifdef CONFIG_RDMA 627 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) { 628 if (migrate_compress()) { 629 error_setg(errp, "RDMA and compression can't be used together"); 630 return; 631 } 632 if (migrate_xbzrle()) { 633 error_setg(errp, "RDMA and XBZRLE can't be used together"); 634 return; 635 } 636 if (migrate_multifd()) { 637 error_setg(errp, "RDMA and multifd can't be used together"); 638 return; 639 } 640 rdma_start_incoming_migration(&addr->u.rdma, errp); 641 #endif 642 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_EXEC) { 643 exec_start_incoming_migration(addr->u.exec.args, errp); 644 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) { 645 file_start_incoming_migration(&addr->u.file, errp); 646 } else { 647 error_setg(errp, "unknown migration protocol: %s", uri); 648 } 649 } 650 651 static void process_incoming_migration_bh(void *opaque) 652 { 653 Error *local_err = NULL; 654 MigrationIncomingState *mis = opaque; 655 656 trace_vmstate_downtime_checkpoint("dst-precopy-bh-enter"); 657 658 /* If capability late_block_activate is set: 659 * Only fire up the block code now if we're going to restart the 660 * VM, else 'cont' will do it. 661 * This causes file locking to happen; so we don't want it to happen 662 * unless we really are starting the VM. 663 */ 664 if (!migrate_late_block_activate() || 665 (autostart && (!global_state_received() || 666 runstate_is_live(global_state_get_runstate())))) { 667 /* Make sure all file formats throw away their mutable metadata. 668 * If we get an error here, just don't restart the VM yet. */ 669 bdrv_activate_all(&local_err); 670 if (local_err) { 671 error_report_err(local_err); 672 local_err = NULL; 673 autostart = false; 674 } 675 } 676 677 /* 678 * This must happen after all error conditions are dealt with and 679 * we're sure the VM is going to be running on this host. 680 */ 681 qemu_announce_self(&mis->announce_timer, migrate_announce_params()); 682 683 trace_vmstate_downtime_checkpoint("dst-precopy-bh-announced"); 684 685 multifd_recv_shutdown(); 686 687 dirty_bitmap_mig_before_vm_start(); 688 689 if (!global_state_received() || 690 runstate_is_live(global_state_get_runstate())) { 691 if (autostart) { 692 vm_start(); 693 } else { 694 runstate_set(RUN_STATE_PAUSED); 695 } 696 } else if (migration_incoming_colo_enabled()) { 697 migration_incoming_disable_colo(); 698 vm_start(); 699 } else { 700 runstate_set(global_state_get_runstate()); 701 } 702 trace_vmstate_downtime_checkpoint("dst-precopy-bh-vm-started"); 703 /* 704 * This must happen after any state changes since as soon as an external 705 * observer sees this event they might start to prod at the VM assuming 706 * it's ready to use. 
707 */ 708 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, 709 MIGRATION_STATUS_COMPLETED); 710 migration_incoming_state_destroy(); 711 } 712 713 static void coroutine_fn 714 process_incoming_migration_co(void *opaque) 715 { 716 MigrationIncomingState *mis = migration_incoming_get_current(); 717 PostcopyState ps; 718 int ret; 719 720 assert(mis->from_src_file); 721 722 if (compress_threads_load_setup(mis->from_src_file)) { 723 error_report("Failed to setup decompress threads"); 724 goto fail; 725 } 726 727 mis->largest_page_size = qemu_ram_pagesize_largest(); 728 postcopy_state_set(POSTCOPY_INCOMING_NONE); 729 migrate_set_state(&mis->state, MIGRATION_STATUS_SETUP, 730 MIGRATION_STATUS_ACTIVE); 731 732 mis->loadvm_co = qemu_coroutine_self(); 733 ret = qemu_loadvm_state(mis->from_src_file); 734 mis->loadvm_co = NULL; 735 736 trace_vmstate_downtime_checkpoint("dst-precopy-loadvm-completed"); 737 738 ps = postcopy_state_get(); 739 trace_process_incoming_migration_co_end(ret, ps); 740 if (ps != POSTCOPY_INCOMING_NONE) { 741 if (ps == POSTCOPY_INCOMING_ADVISE) { 742 /* 743 * Where a migration had postcopy enabled (and thus went to advise) 744 * but managed to complete within the precopy period, we can use 745 * the normal exit. 746 */ 747 postcopy_ram_incoming_cleanup(mis); 748 } else if (ret >= 0) { 749 /* 750 * Postcopy was started, cleanup should happen at the end of the 751 * postcopy thread. 752 */ 753 trace_process_incoming_migration_co_postcopy_end_main(); 754 return; 755 } 756 /* Else if something went wrong then just fall out of the normal exit */ 757 } 758 759 if (ret < 0) { 760 MigrationState *s = migrate_get_current(); 761 762 if (migrate_has_error(s)) { 763 WITH_QEMU_LOCK_GUARD(&s->error_mutex) { 764 error_report_err(s->error); 765 } 766 } 767 error_report("load of migration failed: %s", strerror(-ret)); 768 goto fail; 769 } 770 771 if (colo_incoming_co() < 0) { 772 goto fail; 773 } 774 775 migration_bh_schedule(process_incoming_migration_bh, mis); 776 return; 777 fail: 778 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, 779 MIGRATION_STATUS_FAILED); 780 qemu_fclose(mis->from_src_file); 781 782 multifd_recv_cleanup(); 783 compress_threads_load_cleanup(); 784 785 exit(EXIT_FAILURE); 786 } 787 788 /** 789 * migration_incoming_setup: Setup incoming migration 790 * @f: file for main migration channel 791 */ 792 static void migration_incoming_setup(QEMUFile *f) 793 { 794 MigrationIncomingState *mis = migration_incoming_get_current(); 795 796 if (!mis->from_src_file) { 797 mis->from_src_file = f; 798 } 799 qemu_file_set_blocking(f, false); 800 } 801 802 void migration_incoming_process(void) 803 { 804 Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, NULL); 805 qemu_coroutine_enter(co); 806 } 807 808 /* Returns true if recovered from a paused migration, otherwise false */ 809 static bool postcopy_try_recover(void) 810 { 811 MigrationIncomingState *mis = migration_incoming_get_current(); 812 813 if (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) { 814 /* Resumed from a paused postcopy migration */ 815 816 /* This should be set already in migration_incoming_setup() */ 817 assert(mis->from_src_file); 818 /* Postcopy has standalone thread to do vm load */ 819 qemu_file_set_blocking(mis->from_src_file, true); 820 821 /* Re-configure the return path */ 822 mis->to_src_file = qemu_file_get_return_path(mis->from_src_file); 823 824 migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_PAUSED, 825 MIGRATION_STATUS_POSTCOPY_RECOVER); 826 827 /* 828 * Here, we 
only wake up the main loading thread (while the 829 * rest threads will still be waiting), so that we can receive 830 * commands from source now, and answer it if needed. The 831 * rest threads will be woken up afterwards until we are sure 832 * that source is ready to reply to page requests. 833 */ 834 qemu_sem_post(&mis->postcopy_pause_sem_dst); 835 return true; 836 } 837 838 return false; 839 } 840 841 void migration_fd_process_incoming(QEMUFile *f) 842 { 843 migration_incoming_setup(f); 844 if (postcopy_try_recover()) { 845 return; 846 } 847 migration_incoming_process(); 848 } 849 850 /* 851 * Returns true when we want to start a new incoming migration process, 852 * false otherwise. 853 */ 854 static bool migration_should_start_incoming(bool main_channel) 855 { 856 /* Multifd doesn't start unless all channels are established */ 857 if (migrate_multifd()) { 858 return migration_has_all_channels(); 859 } 860 861 /* Preempt channel only starts when the main channel is created */ 862 if (migrate_postcopy_preempt()) { 863 return main_channel; 864 } 865 866 /* 867 * For all the rest types of migration, we should only reach here when 868 * it's the main channel that's being created, and we should always 869 * proceed with this channel. 870 */ 871 assert(main_channel); 872 return true; 873 } 874 875 void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) 876 { 877 MigrationIncomingState *mis = migration_incoming_get_current(); 878 Error *local_err = NULL; 879 QEMUFile *f; 880 bool default_channel = true; 881 uint32_t channel_magic = 0; 882 int ret = 0; 883 884 if (migrate_multifd() && !migrate_postcopy_ram() && 885 qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) { 886 /* 887 * With multiple channels, it is possible that we receive channels 888 * out of order on destination side, causing incorrect mapping of 889 * source channels on destination side. Check channel MAGIC to 890 * decide type of channel. Please note this is best effort, postcopy 891 * preempt channel does not send any magic number so avoid it for 892 * postcopy live migration. Also tls live migration already does 893 * tls handshake while initializing main channel so with tls this 894 * issue is not possible. 895 */ 896 ret = migration_channel_read_peek(ioc, (void *)&channel_magic, 897 sizeof(channel_magic), errp); 898 899 if (ret != 0) { 900 return; 901 } 902 903 default_channel = (channel_magic == cpu_to_be32(QEMU_VM_FILE_MAGIC)); 904 } else { 905 default_channel = !mis->from_src_file; 906 } 907 908 if (multifd_recv_setup(errp) != 0) { 909 return; 910 } 911 912 if (default_channel) { 913 f = qemu_file_new_input(ioc); 914 migration_incoming_setup(f); 915 } else { 916 /* Multiple connections */ 917 assert(migration_needs_multiple_sockets()); 918 if (migrate_multifd()) { 919 multifd_recv_new_channel(ioc, &local_err); 920 } else { 921 assert(migrate_postcopy_preempt()); 922 f = qemu_file_new_input(ioc); 923 postcopy_preempt_new_channel(mis, f); 924 } 925 if (local_err) { 926 error_propagate(errp, local_err); 927 return; 928 } 929 } 930 931 if (migration_should_start_incoming(default_channel)) { 932 /* If it's a recovery, we're done */ 933 if (postcopy_try_recover()) { 934 return; 935 } 936 migration_incoming_process(); 937 } 938 } 939 940 /** 941 * @migration_has_all_channels: We have received all channels that we need 942 * 943 * Returns true when we have got connections to all the channels that 944 * we need for migration. 
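 * With multifd, this additionally requires that every multifd channel has
 * been created; with postcopy preempt, that the preempt channel is present.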
945 */ 946 bool migration_has_all_channels(void) 947 { 948 MigrationIncomingState *mis = migration_incoming_get_current(); 949 950 if (!mis->from_src_file) { 951 return false; 952 } 953 954 if (migrate_multifd()) { 955 return multifd_recv_all_channels_created(); 956 } 957 958 if (migrate_postcopy_preempt()) { 959 return mis->postcopy_qemufile_dst != NULL; 960 } 961 962 return true; 963 } 964 965 int migrate_send_rp_switchover_ack(MigrationIncomingState *mis) 966 { 967 return migrate_send_rp_message(mis, MIG_RP_MSG_SWITCHOVER_ACK, 0, NULL); 968 } 969 970 /* 971 * Send a 'SHUT' message on the return channel with the given value 972 * to indicate that we've finished with the RP. Non-0 value indicates 973 * error. 974 */ 975 void migrate_send_rp_shut(MigrationIncomingState *mis, 976 uint32_t value) 977 { 978 uint32_t buf; 979 980 buf = cpu_to_be32(value); 981 migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf); 982 } 983 984 /* 985 * Send a 'PONG' message on the return channel with the given value 986 * (normally in response to a 'PING') 987 */ 988 void migrate_send_rp_pong(MigrationIncomingState *mis, 989 uint32_t value) 990 { 991 uint32_t buf; 992 993 buf = cpu_to_be32(value); 994 migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf); 995 } 996 997 void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis, 998 char *block_name) 999 { 1000 char buf[512]; 1001 int len; 1002 int64_t res; 1003 1004 /* 1005 * First, we send the header part. It contains only the len of 1006 * idstr, and the idstr itself. 1007 */ 1008 len = strlen(block_name); 1009 buf[0] = len; 1010 memcpy(buf + 1, block_name, len); 1011 1012 if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) { 1013 error_report("%s: MSG_RP_RECV_BITMAP only used for recovery", 1014 __func__); 1015 return; 1016 } 1017 1018 migrate_send_rp_message(mis, MIG_RP_MSG_RECV_BITMAP, len + 1, buf); 1019 1020 /* 1021 * Next, we dump the received bitmap to the stream. 1022 * 1023 * TODO: currently we are safe since we are the only one that is 1024 * using the to_src_file handle (fault thread is still paused), 1025 * and it's ok even not taking the mutex. However the best way is 1026 * to take the lock before sending the message header, and release 1027 * the lock after sending the bitmap. 1028 */ 1029 qemu_mutex_lock(&mis->rp_mutex); 1030 res = ramblock_recv_bitmap_send(mis->to_src_file, block_name); 1031 qemu_mutex_unlock(&mis->rp_mutex); 1032 1033 trace_migrate_send_rp_recv_bitmap(block_name, res); 1034 } 1035 1036 void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value) 1037 { 1038 uint32_t buf; 1039 1040 buf = cpu_to_be32(value); 1041 migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf); 1042 } 1043 1044 /* 1045 * Return true if we're already in the middle of a migration 1046 * (i.e. 
any of the active or setup states) 1047 */ 1048 bool migration_is_setup_or_active(int state) 1049 { 1050 switch (state) { 1051 case MIGRATION_STATUS_ACTIVE: 1052 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1053 case MIGRATION_STATUS_POSTCOPY_PAUSED: 1054 case MIGRATION_STATUS_POSTCOPY_RECOVER: 1055 case MIGRATION_STATUS_SETUP: 1056 case MIGRATION_STATUS_PRE_SWITCHOVER: 1057 case MIGRATION_STATUS_DEVICE: 1058 case MIGRATION_STATUS_WAIT_UNPLUG: 1059 case MIGRATION_STATUS_COLO: 1060 return true; 1061 1062 default: 1063 return false; 1064 1065 } 1066 } 1067 1068 bool migration_is_running(int state) 1069 { 1070 switch (state) { 1071 case MIGRATION_STATUS_ACTIVE: 1072 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1073 case MIGRATION_STATUS_POSTCOPY_PAUSED: 1074 case MIGRATION_STATUS_POSTCOPY_RECOVER: 1075 case MIGRATION_STATUS_SETUP: 1076 case MIGRATION_STATUS_PRE_SWITCHOVER: 1077 case MIGRATION_STATUS_DEVICE: 1078 case MIGRATION_STATUS_WAIT_UNPLUG: 1079 case MIGRATION_STATUS_CANCELLING: 1080 return true; 1081 1082 default: 1083 return false; 1084 1085 } 1086 } 1087 1088 static bool migrate_show_downtime(MigrationState *s) 1089 { 1090 return (s->state == MIGRATION_STATUS_COMPLETED) || migration_in_postcopy(); 1091 } 1092 1093 static void populate_time_info(MigrationInfo *info, MigrationState *s) 1094 { 1095 info->has_status = true; 1096 info->has_setup_time = true; 1097 info->setup_time = s->setup_time; 1098 1099 if (s->state == MIGRATION_STATUS_COMPLETED) { 1100 info->has_total_time = true; 1101 info->total_time = s->total_time; 1102 } else { 1103 info->has_total_time = true; 1104 info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - 1105 s->start_time; 1106 } 1107 1108 if (migrate_show_downtime(s)) { 1109 info->has_downtime = true; 1110 info->downtime = s->downtime; 1111 } else { 1112 info->has_expected_downtime = true; 1113 info->expected_downtime = s->expected_downtime; 1114 } 1115 } 1116 1117 static void populate_ram_info(MigrationInfo *info, MigrationState *s) 1118 { 1119 size_t page_size = qemu_target_page_size(); 1120 1121 info->ram = g_malloc0(sizeof(*info->ram)); 1122 info->ram->transferred = migration_transferred_bytes(); 1123 info->ram->total = ram_bytes_total(); 1124 info->ram->duplicate = stat64_get(&mig_stats.zero_pages); 1125 /* legacy value. 
It is not used anymore */ 1126 info->ram->skipped = 0; 1127 info->ram->normal = stat64_get(&mig_stats.normal_pages); 1128 info->ram->normal_bytes = info->ram->normal * page_size; 1129 info->ram->mbps = s->mbps; 1130 info->ram->dirty_sync_count = 1131 stat64_get(&mig_stats.dirty_sync_count); 1132 info->ram->dirty_sync_missed_zero_copy = 1133 stat64_get(&mig_stats.dirty_sync_missed_zero_copy); 1134 info->ram->postcopy_requests = 1135 stat64_get(&mig_stats.postcopy_requests); 1136 info->ram->page_size = page_size; 1137 info->ram->multifd_bytes = stat64_get(&mig_stats.multifd_bytes); 1138 info->ram->pages_per_second = s->pages_per_second; 1139 info->ram->precopy_bytes = stat64_get(&mig_stats.precopy_bytes); 1140 info->ram->downtime_bytes = stat64_get(&mig_stats.downtime_bytes); 1141 info->ram->postcopy_bytes = stat64_get(&mig_stats.postcopy_bytes); 1142 1143 if (migrate_xbzrle()) { 1144 info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache)); 1145 info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size(); 1146 info->xbzrle_cache->bytes = xbzrle_counters.bytes; 1147 info->xbzrle_cache->pages = xbzrle_counters.pages; 1148 info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss; 1149 info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate; 1150 info->xbzrle_cache->encoding_rate = xbzrle_counters.encoding_rate; 1151 info->xbzrle_cache->overflow = xbzrle_counters.overflow; 1152 } 1153 1154 populate_compress(info); 1155 1156 if (cpu_throttle_active()) { 1157 info->has_cpu_throttle_percentage = true; 1158 info->cpu_throttle_percentage = cpu_throttle_get_percentage(); 1159 } 1160 1161 if (s->state != MIGRATION_STATUS_COMPLETED) { 1162 info->ram->remaining = ram_bytes_remaining(); 1163 info->ram->dirty_pages_rate = 1164 stat64_get(&mig_stats.dirty_pages_rate); 1165 } 1166 1167 if (migrate_dirty_limit() && dirtylimit_in_service()) { 1168 info->has_dirty_limit_throttle_time_per_round = true; 1169 info->dirty_limit_throttle_time_per_round = 1170 dirtylimit_throttle_time_per_round(); 1171 1172 info->has_dirty_limit_ring_full_time = true; 1173 info->dirty_limit_ring_full_time = dirtylimit_ring_full_time(); 1174 } 1175 } 1176 1177 static void populate_disk_info(MigrationInfo *info) 1178 { 1179 if (blk_mig_active()) { 1180 info->disk = g_malloc0(sizeof(*info->disk)); 1181 info->disk->transferred = blk_mig_bytes_transferred(); 1182 info->disk->remaining = blk_mig_bytes_remaining(); 1183 info->disk->total = blk_mig_bytes_total(); 1184 } 1185 } 1186 1187 static void fill_source_migration_info(MigrationInfo *info) 1188 { 1189 MigrationState *s = migrate_get_current(); 1190 int state = qatomic_read(&s->state); 1191 GSList *cur_blocker = migration_blockers[migrate_mode()]; 1192 1193 info->blocked_reasons = NULL; 1194 1195 /* 1196 * There are two types of reasons a migration might be blocked; 1197 * a) devices marked in VMState as non-migratable, and 1198 * b) Explicit migration blockers 1199 * We need to add both of them here. 
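 * The former come from qemu_savevm_non_migratable_list(), the latter from
 * the per-mode migration_blockers[] lists.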
1200 */ 1201 qemu_savevm_non_migratable_list(&info->blocked_reasons); 1202 1203 while (cur_blocker) { 1204 QAPI_LIST_PREPEND(info->blocked_reasons, 1205 g_strdup(error_get_pretty(cur_blocker->data))); 1206 cur_blocker = g_slist_next(cur_blocker); 1207 } 1208 info->has_blocked_reasons = info->blocked_reasons != NULL; 1209 1210 switch (state) { 1211 case MIGRATION_STATUS_NONE: 1212 /* no migration has happened ever */ 1213 /* do not overwrite destination migration status */ 1214 return; 1215 case MIGRATION_STATUS_SETUP: 1216 info->has_status = true; 1217 info->has_total_time = false; 1218 break; 1219 case MIGRATION_STATUS_ACTIVE: 1220 case MIGRATION_STATUS_CANCELLING: 1221 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1222 case MIGRATION_STATUS_PRE_SWITCHOVER: 1223 case MIGRATION_STATUS_DEVICE: 1224 case MIGRATION_STATUS_POSTCOPY_PAUSED: 1225 case MIGRATION_STATUS_POSTCOPY_RECOVER: 1226 /* TODO add some postcopy stats */ 1227 populate_time_info(info, s); 1228 populate_ram_info(info, s); 1229 populate_disk_info(info); 1230 migration_populate_vfio_info(info); 1231 break; 1232 case MIGRATION_STATUS_COLO: 1233 info->has_status = true; 1234 /* TODO: display COLO specific information (checkpoint info etc.) */ 1235 break; 1236 case MIGRATION_STATUS_COMPLETED: 1237 populate_time_info(info, s); 1238 populate_ram_info(info, s); 1239 migration_populate_vfio_info(info); 1240 break; 1241 case MIGRATION_STATUS_FAILED: 1242 info->has_status = true; 1243 break; 1244 case MIGRATION_STATUS_CANCELLED: 1245 info->has_status = true; 1246 break; 1247 case MIGRATION_STATUS_WAIT_UNPLUG: 1248 info->has_status = true; 1249 break; 1250 } 1251 info->status = state; 1252 1253 QEMU_LOCK_GUARD(&s->error_mutex); 1254 if (s->error) { 1255 info->error_desc = g_strdup(error_get_pretty(s->error)); 1256 } 1257 } 1258 1259 static void fill_destination_migration_info(MigrationInfo *info) 1260 { 1261 MigrationIncomingState *mis = migration_incoming_get_current(); 1262 1263 if (mis->socket_address_list) { 1264 info->has_socket_address = true; 1265 info->socket_address = 1266 QAPI_CLONE(SocketAddressList, mis->socket_address_list); 1267 } 1268 1269 switch (mis->state) { 1270 case MIGRATION_STATUS_NONE: 1271 return; 1272 case MIGRATION_STATUS_SETUP: 1273 case MIGRATION_STATUS_CANCELLING: 1274 case MIGRATION_STATUS_CANCELLED: 1275 case MIGRATION_STATUS_ACTIVE: 1276 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1277 case MIGRATION_STATUS_POSTCOPY_PAUSED: 1278 case MIGRATION_STATUS_POSTCOPY_RECOVER: 1279 case MIGRATION_STATUS_FAILED: 1280 case MIGRATION_STATUS_COLO: 1281 info->has_status = true; 1282 break; 1283 case MIGRATION_STATUS_COMPLETED: 1284 info->has_status = true; 1285 fill_destination_postcopy_migration_info(info); 1286 break; 1287 } 1288 info->status = mis->state; 1289 } 1290 1291 MigrationInfo *qmp_query_migrate(Error **errp) 1292 { 1293 MigrationInfo *info = g_malloc0(sizeof(*info)); 1294 1295 fill_destination_migration_info(info); 1296 fill_source_migration_info(info); 1297 1298 return info; 1299 } 1300 1301 void qmp_migrate_start_postcopy(Error **errp) 1302 { 1303 MigrationState *s = migrate_get_current(); 1304 1305 if (!migrate_postcopy()) { 1306 error_setg(errp, "Enable postcopy with migrate_set_capability before" 1307 " the start of migration"); 1308 return; 1309 } 1310 1311 if (s->state == MIGRATION_STATUS_NONE) { 1312 error_setg(errp, "Postcopy must be started after migration has been" 1313 " started"); 1314 return; 1315 } 1316 /* 1317 * we don't error if migration has finished since that would be racy 1318 * with issuing this 
command. 1319 */ 1320 qatomic_set(&s->start_postcopy, true); 1321 } 1322 1323 /* shared migration helpers */ 1324 1325 void migrate_set_state(int *state, int old_state, int new_state) 1326 { 1327 assert(new_state < MIGRATION_STATUS__MAX); 1328 if (qatomic_cmpxchg(state, old_state, new_state) == old_state) { 1329 trace_migrate_set_state(MigrationStatus_str(new_state)); 1330 migrate_generate_event(new_state); 1331 } 1332 } 1333 1334 static void migrate_fd_cleanup(MigrationState *s) 1335 { 1336 MigrationEventType type; 1337 1338 g_free(s->hostname); 1339 s->hostname = NULL; 1340 json_writer_free(s->vmdesc); 1341 s->vmdesc = NULL; 1342 1343 qemu_savevm_state_cleanup(); 1344 1345 close_return_path_on_source(s); 1346 1347 if (s->to_dst_file) { 1348 QEMUFile *tmp; 1349 1350 trace_migrate_fd_cleanup(); 1351 bql_unlock(); 1352 if (s->migration_thread_running) { 1353 qemu_thread_join(&s->thread); 1354 s->migration_thread_running = false; 1355 } 1356 bql_lock(); 1357 1358 multifd_send_shutdown(); 1359 qemu_mutex_lock(&s->qemu_file_lock); 1360 tmp = s->to_dst_file; 1361 s->to_dst_file = NULL; 1362 qemu_mutex_unlock(&s->qemu_file_lock); 1363 /* 1364 * Close the file handle without the lock to make sure the 1365 * critical section won't block for long. 1366 */ 1367 migration_ioc_unregister_yank_from_file(tmp); 1368 qemu_fclose(tmp); 1369 } 1370 1371 assert(!migration_is_active(s)); 1372 1373 if (s->state == MIGRATION_STATUS_CANCELLING) { 1374 migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING, 1375 MIGRATION_STATUS_CANCELLED); 1376 } 1377 1378 if (s->error) { 1379 /* It is used on info migrate. We can't free it */ 1380 error_report_err(error_copy(s->error)); 1381 } 1382 type = migration_has_failed(s) ? MIG_EVENT_PRECOPY_FAILED : 1383 MIG_EVENT_PRECOPY_DONE; 1384 migration_call_notifiers(s, type, NULL); 1385 block_cleanup_parameters(); 1386 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 1387 } 1388 1389 static void migrate_fd_cleanup_bh(void *opaque) 1390 { 1391 migrate_fd_cleanup(opaque); 1392 } 1393 1394 void migrate_set_error(MigrationState *s, const Error *error) 1395 { 1396 QEMU_LOCK_GUARD(&s->error_mutex); 1397 if (!s->error) { 1398 s->error = error_copy(error); 1399 } 1400 } 1401 1402 bool migrate_has_error(MigrationState *s) 1403 { 1404 /* The lock is not helpful here, but still follow the rule */ 1405 QEMU_LOCK_GUARD(&s->error_mutex); 1406 return qatomic_read(&s->error); 1407 } 1408 1409 static void migrate_error_free(MigrationState *s) 1410 { 1411 QEMU_LOCK_GUARD(&s->error_mutex); 1412 if (s->error) { 1413 error_free(s->error); 1414 s->error = NULL; 1415 } 1416 } 1417 1418 static void migrate_fd_error(MigrationState *s, const Error *error) 1419 { 1420 trace_migrate_fd_error(error_get_pretty(error)); 1421 assert(s->to_dst_file == NULL); 1422 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, 1423 MIGRATION_STATUS_FAILED); 1424 migrate_set_error(s, error); 1425 } 1426 1427 static void migrate_fd_cancel(MigrationState *s) 1428 { 1429 int old_state ; 1430 1431 trace_migrate_fd_cancel(); 1432 1433 WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) { 1434 if (s->rp_state.from_dst_file) { 1435 /* shutdown the rp socket, so causing the rp thread to shutdown */ 1436 qemu_file_shutdown(s->rp_state.from_dst_file); 1437 } 1438 } 1439 1440 do { 1441 old_state = s->state; 1442 if (!migration_is_running(old_state)) { 1443 break; 1444 } 1445 /* If the migration is paused, kick it out of the pause */ 1446 if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) { 1447 qemu_sem_post(&s->pause_sem); 1448 } 1449 
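/*
 * Try to flip old_state to CANCELLING atomically; if another thread
 * changed s->state meanwhile, the cmpxchg inside migrate_set_state()
 * is a no-op and the loop retries with the updated state.
 */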
migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING); 1450 } while (s->state != MIGRATION_STATUS_CANCELLING); 1451 1452 /* 1453 * If we're unlucky the migration code might be stuck somewhere in a 1454 * send/write while the network has failed and is waiting to timeout; 1455 * if we've got shutdown(2) available then we can force it to quit. 1456 */ 1457 if (s->state == MIGRATION_STATUS_CANCELLING) { 1458 WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) { 1459 if (s->to_dst_file) { 1460 qemu_file_shutdown(s->to_dst_file); 1461 } 1462 } 1463 } 1464 if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) { 1465 Error *local_err = NULL; 1466 1467 bdrv_activate_all(&local_err); 1468 if (local_err) { 1469 error_report_err(local_err); 1470 } else { 1471 s->block_inactive = false; 1472 } 1473 } 1474 } 1475 1476 void migration_add_notifier_mode(NotifierWithReturn *notify, 1477 MigrationNotifyFunc func, MigMode mode) 1478 { 1479 notify->notify = (NotifierWithReturnFunc)func; 1480 notifier_with_return_list_add(&migration_state_notifiers[mode], notify); 1481 } 1482 1483 void migration_add_notifier(NotifierWithReturn *notify, 1484 MigrationNotifyFunc func) 1485 { 1486 migration_add_notifier_mode(notify, func, MIG_MODE_NORMAL); 1487 } 1488 1489 void migration_remove_notifier(NotifierWithReturn *notify) 1490 { 1491 if (notify->notify) { 1492 notifier_with_return_remove(notify); 1493 notify->notify = NULL; 1494 } 1495 } 1496 1497 int migration_call_notifiers(MigrationState *s, MigrationEventType type, 1498 Error **errp) 1499 { 1500 MigMode mode = s->parameters.mode; 1501 MigrationEvent e; 1502 int ret; 1503 1504 e.type = type; 1505 ret = notifier_with_return_list_notify(&migration_state_notifiers[mode], 1506 &e, errp); 1507 assert(!ret || type == MIG_EVENT_PRECOPY_SETUP); 1508 return ret; 1509 } 1510 1511 bool migration_in_setup(MigrationState *s) 1512 { 1513 return s->state == MIGRATION_STATUS_SETUP; 1514 } 1515 1516 bool migration_has_finished(MigrationState *s) 1517 { 1518 return s->state == MIGRATION_STATUS_COMPLETED; 1519 } 1520 1521 bool migration_has_failed(MigrationState *s) 1522 { 1523 return (s->state == MIGRATION_STATUS_CANCELLED || 1524 s->state == MIGRATION_STATUS_FAILED); 1525 } 1526 1527 bool migration_in_postcopy(void) 1528 { 1529 MigrationState *s = migrate_get_current(); 1530 1531 switch (s->state) { 1532 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1533 case MIGRATION_STATUS_POSTCOPY_PAUSED: 1534 case MIGRATION_STATUS_POSTCOPY_RECOVER: 1535 return true; 1536 default: 1537 return false; 1538 } 1539 } 1540 1541 bool migration_postcopy_is_alive(int state) 1542 { 1543 switch (state) { 1544 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1545 case MIGRATION_STATUS_POSTCOPY_RECOVER: 1546 return true; 1547 default: 1548 return false; 1549 } 1550 } 1551 1552 bool migration_in_incoming_postcopy(void) 1553 { 1554 PostcopyState ps = postcopy_state_get(); 1555 1556 return ps >= POSTCOPY_INCOMING_DISCARD && ps < POSTCOPY_INCOMING_END; 1557 } 1558 1559 bool migration_incoming_postcopy_advised(void) 1560 { 1561 PostcopyState ps = postcopy_state_get(); 1562 1563 return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END; 1564 } 1565 1566 bool migration_in_bg_snapshot(void) 1567 { 1568 MigrationState *s = migrate_get_current(); 1569 1570 return migrate_background_snapshot() && 1571 migration_is_setup_or_active(s->state); 1572 } 1573 1574 bool migration_is_idle(void) 1575 { 1576 MigrationState *s = current_migration; 1577 1578 if (!s) { 1579 return true; 1580 } 1581 1582 switch (s->state) { 
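/* Terminal or never-started states count as idle; anything in flight does not. */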
1583 case MIGRATION_STATUS_NONE: 1584 case MIGRATION_STATUS_CANCELLED: 1585 case MIGRATION_STATUS_COMPLETED: 1586 case MIGRATION_STATUS_FAILED: 1587 return true; 1588 case MIGRATION_STATUS_SETUP: 1589 case MIGRATION_STATUS_CANCELLING: 1590 case MIGRATION_STATUS_ACTIVE: 1591 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1592 case MIGRATION_STATUS_COLO: 1593 case MIGRATION_STATUS_PRE_SWITCHOVER: 1594 case MIGRATION_STATUS_DEVICE: 1595 case MIGRATION_STATUS_WAIT_UNPLUG: 1596 return false; 1597 case MIGRATION_STATUS__MAX: 1598 g_assert_not_reached(); 1599 } 1600 1601 return false; 1602 } 1603 1604 bool migration_is_active(MigrationState *s) 1605 { 1606 return (s->state == MIGRATION_STATUS_ACTIVE || 1607 s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE); 1608 } 1609 1610 bool migrate_mode_is_cpr(MigrationState *s) 1611 { 1612 return s->parameters.mode == MIG_MODE_CPR_REBOOT; 1613 } 1614 1615 int migrate_init(MigrationState *s, Error **errp) 1616 { 1617 int ret; 1618 1619 ret = qemu_savevm_state_prepare(errp); 1620 if (ret) { 1621 return ret; 1622 } 1623 1624 /* 1625 * Reinitialise all migration state, except 1626 * parameters/capabilities that the user set, and 1627 * locks. 1628 */ 1629 s->to_dst_file = NULL; 1630 s->state = MIGRATION_STATUS_NONE; 1631 s->rp_state.from_dst_file = NULL; 1632 s->mbps = 0.0; 1633 s->pages_per_second = 0.0; 1634 s->downtime = 0; 1635 s->expected_downtime = 0; 1636 s->setup_time = 0; 1637 s->start_postcopy = false; 1638 s->migration_thread_running = false; 1639 error_free(s->error); 1640 s->error = NULL; 1641 s->vmdesc = NULL; 1642 1643 migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP); 1644 1645 s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 1646 s->total_time = 0; 1647 s->vm_old_state = -1; 1648 s->iteration_initial_bytes = 0; 1649 s->threshold_size = 0; 1650 s->switchover_acked = false; 1651 s->rdma_migration = false; 1652 /* 1653 * set mig_stats memory to zero for a new migration 1654 */ 1655 memset(&mig_stats, 0, sizeof(mig_stats)); 1656 migration_reset_vfio_bytes_transferred(); 1657 1658 return 0; 1659 } 1660 1661 static bool is_busy(Error **reasonp, Error **errp) 1662 { 1663 ERRP_GUARD(); 1664 1665 /* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. 
*/ 1666 if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) { 1667 error_propagate_prepend(errp, *reasonp, 1668 "disallowing migration blocker " 1669 "(migration/snapshot in progress) for: "); 1670 *reasonp = NULL; 1671 return true; 1672 } 1673 return false; 1674 } 1675 1676 static bool is_only_migratable(Error **reasonp, Error **errp, int modes) 1677 { 1678 ERRP_GUARD(); 1679 1680 if (only_migratable && (modes & BIT(MIG_MODE_NORMAL))) { 1681 error_propagate_prepend(errp, *reasonp, 1682 "disallowing migration blocker " 1683 "(--only-migratable) for: "); 1684 *reasonp = NULL; 1685 return true; 1686 } 1687 return false; 1688 } 1689 1690 static int get_modes(MigMode mode, va_list ap) 1691 { 1692 int modes = 0; 1693 1694 while (mode != -1 && mode != MIG_MODE_ALL) { 1695 assert(mode >= MIG_MODE_NORMAL && mode < MIG_MODE__MAX); 1696 modes |= BIT(mode); 1697 mode = va_arg(ap, MigMode); 1698 } 1699 if (mode == MIG_MODE_ALL) { 1700 modes = BIT(MIG_MODE__MAX) - 1; 1701 } 1702 return modes; 1703 } 1704 1705 static int add_blockers(Error **reasonp, Error **errp, int modes) 1706 { 1707 for (MigMode mode = 0; mode < MIG_MODE__MAX; mode++) { 1708 if (modes & BIT(mode)) { 1709 migration_blockers[mode] = g_slist_prepend(migration_blockers[mode], 1710 *reasonp); 1711 } 1712 } 1713 return 0; 1714 } 1715 1716 int migrate_add_blocker(Error **reasonp, Error **errp) 1717 { 1718 return migrate_add_blocker_modes(reasonp, errp, MIG_MODE_ALL); 1719 } 1720 1721 int migrate_add_blocker_normal(Error **reasonp, Error **errp) 1722 { 1723 return migrate_add_blocker_modes(reasonp, errp, MIG_MODE_NORMAL, -1); 1724 } 1725 1726 int migrate_add_blocker_modes(Error **reasonp, Error **errp, MigMode mode, ...) 1727 { 1728 int modes; 1729 va_list ap; 1730 1731 va_start(ap, mode); 1732 modes = get_modes(mode, ap); 1733 va_end(ap); 1734 1735 if (is_only_migratable(reasonp, errp, modes)) { 1736 return -EACCES; 1737 } else if (is_busy(reasonp, errp)) { 1738 return -EBUSY; 1739 } 1740 return add_blockers(reasonp, errp, modes); 1741 } 1742 1743 int migrate_add_blocker_internal(Error **reasonp, Error **errp) 1744 { 1745 int modes = BIT(MIG_MODE__MAX) - 1; 1746 1747 if (is_busy(reasonp, errp)) { 1748 return -EBUSY; 1749 } 1750 return add_blockers(reasonp, errp, modes); 1751 } 1752 1753 void migrate_del_blocker(Error **reasonp) 1754 { 1755 if (*reasonp) { 1756 for (MigMode mode = 0; mode < MIG_MODE__MAX; mode++) { 1757 migration_blockers[mode] = g_slist_remove(migration_blockers[mode], 1758 *reasonp); 1759 } 1760 error_free(*reasonp); 1761 *reasonp = NULL; 1762 } 1763 } 1764 1765 void qmp_migrate_incoming(const char *uri, bool has_channels, 1766 MigrationChannelList *channels, Error **errp) 1767 { 1768 Error *local_err = NULL; 1769 static bool once = true; 1770 1771 if (!once) { 1772 error_setg(errp, "The incoming migration has already been started"); 1773 return; 1774 } 1775 if (!runstate_check(RUN_STATE_INMIGRATE)) { 1776 error_setg(errp, "'-incoming' was not specified on the command line"); 1777 return; 1778 } 1779 1780 if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) { 1781 return; 1782 } 1783 1784 qemu_start_incoming_migration(uri, has_channels, channels, &local_err); 1785 1786 if (local_err) { 1787 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 1788 error_propagate(errp, local_err); 1789 return; 1790 } 1791 1792 once = false; 1793 } 1794 1795 void qmp_migrate_recover(const char *uri, Error **errp) 1796 { 1797 MigrationIncomingState *mis = migration_incoming_get_current(); 1798 1799 /* 1800 * Don't even 
bother to use ERRP_GUARD() as it _must_ always be set by 1801 * callers (no one should ignore a recover failure); if there is, it's a 1802 * programming error. 1803 */ 1804 assert(errp); 1805 1806 if (mis->state != MIGRATION_STATUS_POSTCOPY_PAUSED) { 1807 error_setg(errp, "Migrate recover can only be run " 1808 "when postcopy is paused."); 1809 return; 1810 } 1811 1812 /* If there's an existing transport, release it */ 1813 migration_incoming_transport_cleanup(mis); 1814 1815 /* 1816 * Note that this call will never start a real migration; it will 1817 * only re-setup the migration stream and poke existing migration 1818 * to continue using that newly established channel. 1819 */ 1820 qemu_start_incoming_migration(uri, false, NULL, errp); 1821 } 1822 1823 void qmp_migrate_pause(Error **errp) 1824 { 1825 MigrationState *ms = migrate_get_current(); 1826 MigrationIncomingState *mis = migration_incoming_get_current(); 1827 int ret = 0; 1828 1829 if (migration_postcopy_is_alive(ms->state)) { 1830 /* Source side, during postcopy */ 1831 Error *error = NULL; 1832 1833 /* Tell the core migration that we're pausing */ 1834 error_setg(&error, "Postcopy migration is paused by the user"); 1835 migrate_set_error(ms, error); 1836 error_free(error); 1837 1838 qemu_mutex_lock(&ms->qemu_file_lock); 1839 if (ms->to_dst_file) { 1840 ret = qemu_file_shutdown(ms->to_dst_file); 1841 } 1842 qemu_mutex_unlock(&ms->qemu_file_lock); 1843 if (ret) { 1844 error_setg(errp, "Failed to pause source migration"); 1845 } 1846 1847 /* 1848 * Kick the migration thread out of any waiting windows (on behalf 1849 * of the rp thread). 1850 */ 1851 migration_rp_kick(ms); 1852 1853 return; 1854 } 1855 1856 if (migration_postcopy_is_alive(mis->state)) { 1857 ret = qemu_file_shutdown(mis->from_src_file); 1858 if (ret) { 1859 error_setg(errp, "Failed to pause destination migration"); 1860 } 1861 return; 1862 } 1863 1864 error_setg(errp, "migrate-pause is currently only supported " 1865 "during postcopy-active or postcopy-recover state"); 1866 } 1867 1868 bool migration_is_blocked(Error **errp) 1869 { 1870 GSList *blockers = migration_blockers[migrate_mode()]; 1871 1872 if (qemu_savevm_state_blocked(errp)) { 1873 return true; 1874 } 1875 1876 if (blockers) { 1877 error_propagate(errp, error_copy(blockers->data)); 1878 return true; 1879 } 1880 1881 return false; 1882 } 1883 1884 /* Returns true if continue to migrate, or false if error detected */ 1885 static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc, 1886 bool resume, Error **errp) 1887 { 1888 if (blk_inc) { 1889 warn_report("parameter 'inc' is deprecated;" 1890 " use blockdev-mirror with NBD instead"); 1891 } 1892 1893 if (blk) { 1894 warn_report("parameter 'blk' is deprecated;" 1895 " use blockdev-mirror with NBD instead"); 1896 } 1897 1898 if (resume) { 1899 if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) { 1900 error_setg(errp, "Cannot resume if there is no " 1901 "paused migration"); 1902 return false; 1903 } 1904 1905 /* 1906 * Postcopy recovery won't work well with release-ram 1907 * capability since release-ram will drop the page buffer as 1908 * long as the page is put into the send buffer. So if there 1909 * is a network failure happened, any page buffers that have 1910 * not yet reached the destination VM but have already been 1911 * sent from the source VM will be lost forever. Let's refuse 1912 * the client from resuming such a postcopy migration. 
1913 * Luckily release-ram was designed to only be used when the source
1914 * and destination VMs are on the same host, so it should be
1915 * fine.
1916 */
1917 if (migrate_release_ram()) {
1918 error_setg(errp, "Postcopy recovery cannot work "
1919 "when release-ram capability is set");
1920 return false;
1921 }
1922
1923 /* This is a resume, skip init status */
1924 return true;
1925 }
1926
1927 if (migration_is_running(s->state)) {
1928 error_setg(errp, QERR_MIGRATION_ACTIVE);
1929 return false;
1930 }
1931
1932 if (runstate_check(RUN_STATE_INMIGRATE)) {
1933 error_setg(errp, "Guest is waiting for an incoming migration");
1934 return false;
1935 }
1936
1937 if (runstate_check(RUN_STATE_POSTMIGRATE)) {
1938 error_setg(errp, "Can't migrate a VM that was paused due to "
1939 "a previous migration");
1940 return false;
1941 }
1942
1943 if (kvm_hwpoisoned_mem()) {
1944 error_setg(errp, "Can't migrate this VM with hardware-poisoned memory; "
1945 "please reboot the VM and try again");
1946 return false;
1947 }
1948
1949 if (migration_is_blocked(errp)) {
1950 return false;
1951 }
1952
1953 if (migrate_mapped_ram()) {
1954 if (migrate_tls()) {
1955 error_setg(errp, "Cannot use TLS with mapped-ram");
1956 return false;
1957 }
1958 }
1959
1960 if (migrate_mode_is_cpr(s)) {
1961 const char *conflict = NULL;
1962
1963 if (migrate_postcopy()) {
1964 conflict = "postcopy";
1965 } else if (migrate_background_snapshot()) {
1966 conflict = "background snapshot";
1967 } else if (migrate_colo()) {
1968 conflict = "COLO";
1969 }
1970
1971 if (conflict) {
1972 error_setg(errp, "Cannot use %s with CPR", conflict);
1973 return false;
1974 }
1975 }
1976
1977 if (blk || blk_inc) {
1978 if (migrate_colo()) {
1979 error_setg(errp, "No disk migration is required in COLO mode");
1980 return false;
1981 }
1982 if (migrate_block() || migrate_block_incremental()) {
1983 error_setg(errp, "Command options are incompatible with "
1984 "current migration capabilities");
1985 return false;
1986 }
1987 if (!migrate_cap_set(MIGRATION_CAPABILITY_BLOCK, true, errp)) {
1988 return false;
1989 }
1990 s->must_remove_block_options = true;
1991 }
1992
1993 if (blk_inc) {
1994 migrate_set_block_incremental(true);
1995 }
1996
1997 if (migrate_init(s, errp)) {
1998 return false;
1999 }
2000
2001 return true;
2002 }
2003
2004 void qmp_migrate(const char *uri, bool has_channels,
2005 MigrationChannelList *channels, bool has_blk, bool blk,
2006 bool has_inc, bool inc, bool has_detach, bool detach,
2007 bool has_resume, bool resume, Error **errp)
2008 {
2009 bool resume_requested;
2010 Error *local_err = NULL;
2011 MigrationState *s = migrate_get_current();
2012 g_autoptr(MigrationChannel) channel = NULL;
2013 MigrationAddress *addr = NULL;
2014
2015 /*
2016 * Do preliminary checks on the uri and channels arguments
2017 */
2018 if (!uri == !channels) {
2019 error_setg(errp, "need either 'uri' or 'channels' argument");
2020 return;
2021 }
2022
2023 if (channels) {
2024 /* Verify that the migrate channel list has only one entry */
2025 if (channels->next) {
2026 error_setg(errp, "Channel list has more than one entry");
2027 return;
2028 }
2029 addr = channels->value->addr;
2030 }
2031
2032 if (uri) {
2033 /* caller uses the old URI syntax */
2034 if (!migrate_uri_parse(uri, &channel, errp)) {
2035 return;
2036 }
2037 addr = channel->addr;
2038 }
2039
2040 /* transport mechanism not suitable for migration?
*/ 2041 if (!migration_channels_and_transport_compatible(addr, errp)) { 2042 return; 2043 } 2044 2045 resume_requested = has_resume && resume; 2046 if (!migrate_prepare(s, has_blk && blk, has_inc && inc, 2047 resume_requested, errp)) { 2048 /* Error detected, put into errp */ 2049 return; 2050 } 2051 2052 if (!resume_requested) { 2053 if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) { 2054 return; 2055 } 2056 } 2057 2058 if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) { 2059 SocketAddress *saddr = &addr->u.socket; 2060 if (saddr->type == SOCKET_ADDRESS_TYPE_INET || 2061 saddr->type == SOCKET_ADDRESS_TYPE_UNIX || 2062 saddr->type == SOCKET_ADDRESS_TYPE_VSOCK) { 2063 socket_start_outgoing_migration(s, saddr, &local_err); 2064 } else if (saddr->type == SOCKET_ADDRESS_TYPE_FD) { 2065 fd_start_outgoing_migration(s, saddr->u.fd.str, &local_err); 2066 } 2067 #ifdef CONFIG_RDMA 2068 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) { 2069 rdma_start_outgoing_migration(s, &addr->u.rdma, &local_err); 2070 #endif 2071 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_EXEC) { 2072 exec_start_outgoing_migration(s, addr->u.exec.args, &local_err); 2073 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) { 2074 file_start_outgoing_migration(s, &addr->u.file, &local_err); 2075 } else { 2076 error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE, "uri", 2077 "a valid migration protocol"); 2078 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, 2079 MIGRATION_STATUS_FAILED); 2080 block_cleanup_parameters(); 2081 } 2082 2083 if (local_err) { 2084 if (!resume_requested) { 2085 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 2086 } 2087 migrate_fd_error(s, local_err); 2088 error_propagate(errp, local_err); 2089 return; 2090 } 2091 } 2092 2093 void qmp_migrate_cancel(Error **errp) 2094 { 2095 migration_cancel(NULL); 2096 } 2097 2098 void qmp_migrate_continue(MigrationStatus state, Error **errp) 2099 { 2100 MigrationState *s = migrate_get_current(); 2101 if (s->state != state) { 2102 error_setg(errp, "Migration not in expected state: %s", 2103 MigrationStatus_str(s->state)); 2104 return; 2105 } 2106 qemu_sem_post(&s->pause_sem); 2107 } 2108 2109 int migration_rp_wait(MigrationState *s) 2110 { 2111 /* If migration has failure already, ignore the wait */ 2112 if (migrate_has_error(s)) { 2113 return -1; 2114 } 2115 2116 qemu_sem_wait(&s->rp_state.rp_sem); 2117 2118 /* After wait, double check that there's no failure */ 2119 if (migrate_has_error(s)) { 2120 return -1; 2121 } 2122 2123 return 0; 2124 } 2125 2126 void migration_rp_kick(MigrationState *s) 2127 { 2128 qemu_sem_post(&s->rp_state.rp_sem); 2129 } 2130 2131 static struct rp_cmd_args { 2132 ssize_t len; /* -1 = variable */ 2133 const char *name; 2134 } rp_cmd_args[] = { 2135 [MIG_RP_MSG_INVALID] = { .len = -1, .name = "INVALID" }, 2136 [MIG_RP_MSG_SHUT] = { .len = 4, .name = "SHUT" }, 2137 [MIG_RP_MSG_PONG] = { .len = 4, .name = "PONG" }, 2138 [MIG_RP_MSG_REQ_PAGES] = { .len = 12, .name = "REQ_PAGES" }, 2139 [MIG_RP_MSG_REQ_PAGES_ID] = { .len = -1, .name = "REQ_PAGES_ID" }, 2140 [MIG_RP_MSG_RECV_BITMAP] = { .len = -1, .name = "RECV_BITMAP" }, 2141 [MIG_RP_MSG_RESUME_ACK] = { .len = 4, .name = "RESUME_ACK" }, 2142 [MIG_RP_MSG_SWITCHOVER_ACK] = { .len = 0, .name = "SWITCHOVER_ACK" }, 2143 [MIG_RP_MSG_MAX] = { .len = -1, .name = "MAX" }, 2144 }; 2145 2146 /* 2147 * Process a request for pages received on the return path, 2148 * We're allowed to send more than requested (e.g. 
to round to our page size) 2149 * and we don't need to send pages that have already been sent. 2150 */ 2151 static void 2152 migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname, 2153 ram_addr_t start, size_t len, Error **errp) 2154 { 2155 long our_host_ps = qemu_real_host_page_size(); 2156 2157 trace_migrate_handle_rp_req_pages(rbname, start, len); 2158 2159 /* 2160 * Since we currently insist on matching page sizes, just sanity check 2161 * we're being asked for whole host pages. 2162 */ 2163 if (!QEMU_IS_ALIGNED(start, our_host_ps) || 2164 !QEMU_IS_ALIGNED(len, our_host_ps)) { 2165 error_setg(errp, "MIG_RP_MSG_REQ_PAGES: Misaligned page request, start:" 2166 RAM_ADDR_FMT " len: %zd", start, len); 2167 return; 2168 } 2169 2170 ram_save_queue_pages(rbname, start, len, errp); 2171 } 2172 2173 static bool migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name, 2174 Error **errp) 2175 { 2176 RAMBlock *block = qemu_ram_block_by_name(block_name); 2177 2178 if (!block) { 2179 error_setg(errp, "MIG_RP_MSG_RECV_BITMAP has invalid block name '%s'", 2180 block_name); 2181 return false; 2182 } 2183 2184 /* Fetch the received bitmap and refresh the dirty bitmap */ 2185 return ram_dirty_bitmap_reload(s, block, errp); 2186 } 2187 2188 static bool migrate_handle_rp_resume_ack(MigrationState *s, 2189 uint32_t value, Error **errp) 2190 { 2191 trace_source_return_path_thread_resume_ack(value); 2192 2193 if (value != MIGRATION_RESUME_ACK_VALUE) { 2194 error_setg(errp, "illegal resume_ack value %"PRIu32, value); 2195 return false; 2196 } 2197 2198 /* Now both sides are active. */ 2199 migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_RECOVER, 2200 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2201 2202 /* Notify send thread that time to continue send pages */ 2203 migration_rp_kick(s); 2204 2205 return true; 2206 } 2207 2208 /* 2209 * Release ms->rp_state.from_dst_file (and postcopy_qemufile_src if 2210 * existed) in a safe way. 2211 */ 2212 static void migration_release_dst_files(MigrationState *ms) 2213 { 2214 QEMUFile *file; 2215 2216 WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) { 2217 /* 2218 * Reset the from_dst_file pointer first before releasing it, as we 2219 * can't block within lock section 2220 */ 2221 file = ms->rp_state.from_dst_file; 2222 ms->rp_state.from_dst_file = NULL; 2223 } 2224 2225 /* 2226 * Do the same to postcopy fast path socket too if there is. No 2227 * locking needed because this qemufile should only be managed by 2228 * return path thread. 
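 *
 * For from_dst_file above, by contrast, the lock is needed, and the shape
 * of that code is worth spelling out: detach the pointer while holding
 * qemu_file_lock, then perform the potentially blocking close with the
 * lock dropped:
 *
 *     QEMUFile *file;
 *     WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
 *         file = ms->rp_state.from_dst_file;   // detach under the lock
 *         ms->rp_state.from_dst_file = NULL;
 *     }
 *     qemu_fclose(file);                       // may flush, hence may block
 *
 * qemu_fclose() can need to flush, which is why it must not run inside
 * the lock section.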
2229 */ 2230 if (ms->postcopy_qemufile_src) { 2231 migration_ioc_unregister_yank_from_file(ms->postcopy_qemufile_src); 2232 qemu_file_shutdown(ms->postcopy_qemufile_src); 2233 qemu_fclose(ms->postcopy_qemufile_src); 2234 ms->postcopy_qemufile_src = NULL; 2235 } 2236 2237 qemu_fclose(file); 2238 } 2239 2240 /* 2241 * Handles messages sent on the return path towards the source VM 2242 * 2243 */ 2244 static void *source_return_path_thread(void *opaque) 2245 { 2246 MigrationState *ms = opaque; 2247 QEMUFile *rp = ms->rp_state.from_dst_file; 2248 uint16_t header_len, header_type; 2249 uint8_t buf[512]; 2250 uint32_t tmp32, sibling_error; 2251 ram_addr_t start = 0; /* =0 to silence warning */ 2252 size_t len = 0, expected_len; 2253 Error *err = NULL; 2254 int res; 2255 2256 trace_source_return_path_thread_entry(); 2257 rcu_register_thread(); 2258 2259 while (migration_is_setup_or_active(ms->state)) { 2260 trace_source_return_path_thread_loop_top(); 2261 2262 header_type = qemu_get_be16(rp); 2263 header_len = qemu_get_be16(rp); 2264 2265 if (qemu_file_get_error(rp)) { 2266 qemu_file_get_error_obj(rp, &err); 2267 goto out; 2268 } 2269 2270 if (header_type >= MIG_RP_MSG_MAX || 2271 header_type == MIG_RP_MSG_INVALID) { 2272 error_setg(&err, "Received invalid message 0x%04x length 0x%04x", 2273 header_type, header_len); 2274 goto out; 2275 } 2276 2277 if ((rp_cmd_args[header_type].len != -1 && 2278 header_len != rp_cmd_args[header_type].len) || 2279 header_len > sizeof(buf)) { 2280 error_setg(&err, "Received '%s' message (0x%04x) with" 2281 "incorrect length %d expecting %zu", 2282 rp_cmd_args[header_type].name, header_type, header_len, 2283 (size_t)rp_cmd_args[header_type].len); 2284 goto out; 2285 } 2286 2287 /* We know we've got a valid header by this point */ 2288 res = qemu_get_buffer(rp, buf, header_len); 2289 if (res != header_len) { 2290 error_setg(&err, "Failed reading data for message 0x%04x" 2291 " read %d expected %d", 2292 header_type, res, header_len); 2293 goto out; 2294 } 2295 2296 /* OK, we have the message and the data */ 2297 switch (header_type) { 2298 case MIG_RP_MSG_SHUT: 2299 sibling_error = ldl_be_p(buf); 2300 trace_source_return_path_thread_shut(sibling_error); 2301 if (sibling_error) { 2302 error_setg(&err, "Sibling indicated error %d", sibling_error); 2303 } 2304 /* 2305 * We'll let the main thread deal with closing the RP 2306 * we could do a shutdown(2) on it, but we're the only user 2307 * anyway, so there's nothing gained. 
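 *
 * For reference, every return-path message handled by this loop is a
 * small big-endian frame: be16 type, be16 payload length, then the
 * payload. A minimal sketch of how the destination could emit e.g. a
 * PONG (purely illustrative, using the bswap store helpers; the real
 * sender lives on the destination side, and ping_seq is a made-up name):
 *
 *     uint8_t frame[2 + 2 + 4];
 *     stw_be_p(&frame[0], MIG_RP_MSG_PONG);  // be16 message type
 *     stw_be_p(&frame[2], 4);                // be16 payload length
 *     stl_be_p(&frame[4], ping_seq);         // be32 payload: PING sequence
 *     // ...then written out on the return-path channel...
 *
 * which is exactly the shape the qemu_get_be16()/qemu_get_buffer() calls
 * above expect, with the length checked against rp_cmd_args[].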
2308 */ 2309 goto out; 2310 2311 case MIG_RP_MSG_PONG: 2312 tmp32 = ldl_be_p(buf); 2313 trace_source_return_path_thread_pong(tmp32); 2314 qemu_sem_post(&ms->rp_state.rp_pong_acks); 2315 break; 2316 2317 case MIG_RP_MSG_REQ_PAGES: 2318 start = ldq_be_p(buf); 2319 len = ldl_be_p(buf + 8); 2320 migrate_handle_rp_req_pages(ms, NULL, start, len, &err); 2321 if (err) { 2322 goto out; 2323 } 2324 break; 2325 2326 case MIG_RP_MSG_REQ_PAGES_ID: 2327 expected_len = 12 + 1; /* header + termination */ 2328 2329 if (header_len >= expected_len) { 2330 start = ldq_be_p(buf); 2331 len = ldl_be_p(buf + 8); 2332 /* Now we expect an idstr */ 2333 tmp32 = buf[12]; /* Length of the following idstr */ 2334 buf[13 + tmp32] = '\0'; 2335 expected_len += tmp32; 2336 } 2337 if (header_len != expected_len) { 2338 error_setg(&err, "Req_Page_id with length %d expecting %zd", 2339 header_len, expected_len); 2340 goto out; 2341 } 2342 migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len, 2343 &err); 2344 if (err) { 2345 goto out; 2346 } 2347 break; 2348 2349 case MIG_RP_MSG_RECV_BITMAP: 2350 if (header_len < 1) { 2351 error_setg(&err, "MIG_RP_MSG_RECV_BITMAP missing block name"); 2352 goto out; 2353 } 2354 /* Format: len (1B) + idstr (<255B). This ends the idstr. */ 2355 buf[buf[0] + 1] = '\0'; 2356 if (!migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1), &err)) { 2357 goto out; 2358 } 2359 break; 2360 2361 case MIG_RP_MSG_RESUME_ACK: 2362 tmp32 = ldl_be_p(buf); 2363 if (!migrate_handle_rp_resume_ack(ms, tmp32, &err)) { 2364 goto out; 2365 } 2366 break; 2367 2368 case MIG_RP_MSG_SWITCHOVER_ACK: 2369 ms->switchover_acked = true; 2370 trace_source_return_path_thread_switchover_acked(); 2371 break; 2372 2373 default: 2374 break; 2375 } 2376 } 2377 2378 out: 2379 if (err) { 2380 migrate_set_error(ms, err); 2381 error_free(err); 2382 trace_source_return_path_thread_bad_end(); 2383 } 2384 2385 if (ms->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { 2386 /* 2387 * this will be extremely unlikely: that we got yet another network 2388 * issue during recovering of the 1st network failure.. during this 2389 * period the main migration thread can be waiting on rp_sem for 2390 * this thread to sync with the other side. 2391 * 2392 * When this happens, explicitly kick the migration thread out of 2393 * RECOVER stage and back to PAUSED, so the admin can try 2394 * everything again. 2395 */ 2396 migration_rp_kick(ms); 2397 } 2398 2399 trace_source_return_path_thread_end(); 2400 rcu_unregister_thread(); 2401 2402 return NULL; 2403 } 2404 2405 static int open_return_path_on_source(MigrationState *ms) 2406 { 2407 ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file); 2408 if (!ms->rp_state.from_dst_file) { 2409 return -1; 2410 } 2411 2412 trace_open_return_path_on_source(); 2413 2414 qemu_thread_create(&ms->rp_state.rp_thread, "return path", 2415 source_return_path_thread, ms, QEMU_THREAD_JOINABLE); 2416 ms->rp_state.rp_thread_created = true; 2417 2418 trace_open_return_path_on_source_continue(); 2419 2420 return 0; 2421 } 2422 2423 /* Return true if error detected, or false otherwise */ 2424 static bool close_return_path_on_source(MigrationState *ms) 2425 { 2426 if (!ms->rp_state.rp_thread_created) { 2427 return false; 2428 } 2429 2430 trace_migration_return_path_end_before(); 2431 2432 /* 2433 * If this is a normal exit then the destination will send a SHUT 2434 * and the rp_thread will exit, however if there's an error we 2435 * need to cause it to exit. 
shutdown(2), if we have it, will 2436 * cause it to unblock if it's stuck waiting for the destination. 2437 */ 2438 WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) { 2439 if (migrate_has_error(ms) && ms->rp_state.from_dst_file) { 2440 qemu_file_shutdown(ms->rp_state.from_dst_file); 2441 } 2442 } 2443 2444 qemu_thread_join(&ms->rp_state.rp_thread); 2445 ms->rp_state.rp_thread_created = false; 2446 migration_release_dst_files(ms); 2447 trace_migration_return_path_end_after(); 2448 2449 /* Return path will persist the error in MigrationState when quit */ 2450 return migrate_has_error(ms); 2451 } 2452 2453 static inline void 2454 migration_wait_main_channel(MigrationState *ms) 2455 { 2456 /* Wait until one PONG message received */ 2457 qemu_sem_wait(&ms->rp_state.rp_pong_acks); 2458 } 2459 2460 /* 2461 * Switch from normal iteration to postcopy 2462 * Returns non-0 on error 2463 */ 2464 static int postcopy_start(MigrationState *ms, Error **errp) 2465 { 2466 int ret; 2467 QIOChannelBuffer *bioc; 2468 QEMUFile *fb; 2469 uint64_t bandwidth = migrate_max_postcopy_bandwidth(); 2470 bool restart_block = false; 2471 int cur_state = MIGRATION_STATUS_ACTIVE; 2472 2473 if (migrate_postcopy_preempt()) { 2474 migration_wait_main_channel(ms); 2475 if (postcopy_preempt_establish_channel(ms)) { 2476 migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED); 2477 return -1; 2478 } 2479 } 2480 2481 if (!migrate_pause_before_switchover()) { 2482 migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE, 2483 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2484 } 2485 2486 trace_postcopy_start(); 2487 bql_lock(); 2488 trace_postcopy_start_set_run(); 2489 2490 ret = migration_stop_vm(ms, RUN_STATE_FINISH_MIGRATE); 2491 if (ret < 0) { 2492 goto fail; 2493 } 2494 2495 ret = migration_maybe_pause(ms, &cur_state, 2496 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2497 if (ret < 0) { 2498 goto fail; 2499 } 2500 2501 ret = bdrv_inactivate_all(); 2502 if (ret < 0) { 2503 goto fail; 2504 } 2505 restart_block = true; 2506 2507 /* 2508 * Cause any non-postcopiable, but iterative devices to 2509 * send out their final data. 2510 */ 2511 qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false); 2512 2513 /* 2514 * in Finish migrate and with the io-lock held everything should 2515 * be quiet, but we've potentially still got dirty pages and we 2516 * need to tell the destination to throw any pages it's already received 2517 * that are dirty 2518 */ 2519 if (migrate_postcopy_ram()) { 2520 ram_postcopy_send_discard_bitmap(ms); 2521 } 2522 2523 /* 2524 * send rest of state - note things that are doing postcopy 2525 * will notice we're in POSTCOPY_ACTIVE and not actually 2526 * wrap their state up here 2527 */ 2528 migration_rate_set(bandwidth); 2529 if (migrate_postcopy_ram()) { 2530 /* Ping just for debugging, helps line traces up */ 2531 qemu_savevm_send_ping(ms->to_dst_file, 2); 2532 } 2533 2534 /* 2535 * While loading the device state we may trigger page transfer 2536 * requests and the fd must be free to process those, and thus 2537 * the destination must read the whole device state off the fd before 2538 * it starts processing it. Unfortunately the ad-hoc migration format 2539 * doesn't allow the destination to know the size to read without fully 2540 * parsing it through each devices load-state code (especially the open 2541 * coded devices that use get/put). 2542 * So we wrap the device state up in a package with a length at the start; 2543 * to do this we use a qemu_buf to hold the whole of the device state. 
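 *
 * Conceptually (a sketch of what the code below together with
 * qemu_savevm_send_packaged() ends up producing, not a literal copy
 * of it):
 *
 *     QIOChannelBuffer *bioc = qio_channel_buffer_new(4096);
 *     QEMUFile *pkg = qemu_file_new_output(QIO_CHANNEL(bioc));
 *     // ...finish off the device state into 'pkg'...
 *     qemu_fflush(pkg);
 *     // then, on the real outgoing stream: a command header, the
 *     // package length, and finally the buffered bytes themselves
 *     qemu_put_be32(ms->to_dst_file, bioc->usage);
 *     qemu_put_buffer(ms->to_dst_file, bioc->data, bioc->usage);
 *
 * so the destination can read the length first, pull in the whole blob,
 * and only then start feeding it to the device load code.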
2544 */ 2545 bioc = qio_channel_buffer_new(4096); 2546 qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer"); 2547 fb = qemu_file_new_output(QIO_CHANNEL(bioc)); 2548 object_unref(OBJECT(bioc)); 2549 2550 /* 2551 * Make sure the receiver can get incoming pages before we send the rest 2552 * of the state 2553 */ 2554 qemu_savevm_send_postcopy_listen(fb); 2555 2556 qemu_savevm_state_complete_precopy(fb, false, false); 2557 if (migrate_postcopy_ram()) { 2558 qemu_savevm_send_ping(fb, 3); 2559 } 2560 2561 qemu_savevm_send_postcopy_run(fb); 2562 2563 /* <><> end of stuff going into the package */ 2564 2565 /* Last point of recovery; as soon as we send the package the destination 2566 * can open devices and potentially start running. 2567 * Lets just check again we've not got any errors. 2568 */ 2569 ret = qemu_file_get_error(ms->to_dst_file); 2570 if (ret) { 2571 error_setg(errp, "postcopy_start: Migration stream errored (pre package)"); 2572 goto fail_closefb; 2573 } 2574 2575 restart_block = false; 2576 2577 /* Now send that blob */ 2578 if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) { 2579 goto fail_closefb; 2580 } 2581 qemu_fclose(fb); 2582 2583 /* Send a notify to give a chance for anything that needs to happen 2584 * at the transition to postcopy and after the device state; in particular 2585 * spice needs to trigger a transition now 2586 */ 2587 migration_call_notifiers(ms, MIG_EVENT_PRECOPY_DONE, NULL); 2588 2589 migration_downtime_end(ms); 2590 2591 bql_unlock(); 2592 2593 if (migrate_postcopy_ram()) { 2594 /* 2595 * Although this ping is just for debug, it could potentially be 2596 * used for getting a better measurement of downtime at the source. 2597 */ 2598 qemu_savevm_send_ping(ms->to_dst_file, 4); 2599 } 2600 2601 if (migrate_release_ram()) { 2602 ram_postcopy_migrated_memory_release(ms); 2603 } 2604 2605 ret = qemu_file_get_error(ms->to_dst_file); 2606 if (ret) { 2607 error_setg_errno(errp, -ret, "postcopy_start: Migration stream error"); 2608 bql_lock(); 2609 goto fail; 2610 } 2611 trace_postcopy_preempt_enabled(migrate_postcopy_preempt()); 2612 2613 return ret; 2614 2615 fail_closefb: 2616 qemu_fclose(fb); 2617 fail: 2618 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 2619 MIGRATION_STATUS_FAILED); 2620 if (restart_block) { 2621 /* A failure happened early enough that we know the destination hasn't 2622 * accessed block devices, so we're safe to recover. 2623 */ 2624 Error *local_err = NULL; 2625 2626 bdrv_activate_all(&local_err); 2627 if (local_err) { 2628 error_report_err(local_err); 2629 } 2630 } 2631 migration_call_notifiers(ms, MIG_EVENT_PRECOPY_FAILED, NULL); 2632 bql_unlock(); 2633 return -1; 2634 } 2635 2636 /** 2637 * migration_maybe_pause: Pause if required to by 2638 * migrate_pause_before_switchover called with the BQL locked 2639 * Returns: 0 on success 2640 */ 2641 static int migration_maybe_pause(MigrationState *s, 2642 int *current_active_state, 2643 int new_state) 2644 { 2645 if (!migrate_pause_before_switchover()) { 2646 return 0; 2647 } 2648 2649 /* Since leaving this state is not atomic with posting the semaphore 2650 * it's possible that someone could have issued multiple migrate_continue 2651 * and the semaphore is incorrectly positive at this point; 2652 * the docs say it's undefined to reinit a semaphore that's already 2653 * init'd, so use timedwait to eat up any existing posts. 
2654 */ 2655 while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) { 2656 /* This block intentionally left blank */ 2657 } 2658 2659 /* 2660 * If the migration is cancelled when it is in the completion phase, 2661 * the migration state is set to MIGRATION_STATUS_CANCELLING. 2662 * So we don't need to wait a semaphore, otherwise we would always 2663 * wait for the 'pause_sem' semaphore. 2664 */ 2665 if (s->state != MIGRATION_STATUS_CANCELLING) { 2666 bql_unlock(); 2667 migrate_set_state(&s->state, *current_active_state, 2668 MIGRATION_STATUS_PRE_SWITCHOVER); 2669 qemu_sem_wait(&s->pause_sem); 2670 migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER, 2671 new_state); 2672 *current_active_state = new_state; 2673 bql_lock(); 2674 } 2675 2676 return s->state == new_state ? 0 : -EINVAL; 2677 } 2678 2679 static int migration_completion_precopy(MigrationState *s, 2680 int *current_active_state) 2681 { 2682 int ret; 2683 2684 bql_lock(); 2685 2686 if (!migrate_mode_is_cpr(s)) { 2687 ret = migration_stop_vm(s, RUN_STATE_FINISH_MIGRATE); 2688 if (ret < 0) { 2689 goto out_unlock; 2690 } 2691 } 2692 2693 ret = migration_maybe_pause(s, current_active_state, 2694 MIGRATION_STATUS_DEVICE); 2695 if (ret < 0) { 2696 goto out_unlock; 2697 } 2698 2699 /* 2700 * Inactivate disks except in COLO, and track that we have done so in order 2701 * to remember to reactivate them if migration fails or is cancelled. 2702 */ 2703 s->block_inactive = !migrate_colo(); 2704 migration_rate_set(RATE_LIMIT_DISABLED); 2705 ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false, 2706 s->block_inactive); 2707 out_unlock: 2708 bql_unlock(); 2709 return ret; 2710 } 2711 2712 static void migration_completion_postcopy(MigrationState *s) 2713 { 2714 trace_migration_completion_postcopy_end(); 2715 2716 bql_lock(); 2717 qemu_savevm_state_complete_postcopy(s->to_dst_file); 2718 bql_unlock(); 2719 2720 /* 2721 * Shutdown the postcopy fast path thread. This is only needed when dest 2722 * QEMU binary is old (7.1/7.2). QEMU 8.0+ doesn't need this. 2723 */ 2724 if (migrate_postcopy_preempt() && s->preempt_pre_7_2) { 2725 postcopy_preempt_shutdown_file(s); 2726 } 2727 2728 trace_migration_completion_postcopy_end_after_complete(); 2729 } 2730 2731 static void migration_completion_failed(MigrationState *s, 2732 int current_active_state) 2733 { 2734 if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE || 2735 s->state == MIGRATION_STATUS_DEVICE)) { 2736 /* 2737 * If not doing postcopy, vm_start() will be called: let's 2738 * regain control on images. 2739 */ 2740 Error *local_err = NULL; 2741 2742 bql_lock(); 2743 bdrv_activate_all(&local_err); 2744 if (local_err) { 2745 error_report_err(local_err); 2746 } else { 2747 s->block_inactive = false; 2748 } 2749 bql_unlock(); 2750 } 2751 2752 migrate_set_state(&s->state, current_active_state, 2753 MIGRATION_STATUS_FAILED); 2754 } 2755 2756 /** 2757 * migration_completion: Used by migration_thread when there's not much left. 2758 * The caller 'breaks' the loop when this returns. 
2759  *
2760  * @s: Current migration state
2761  */
2762 static void migration_completion(MigrationState *s)
2763 {
2764     int ret = 0;
2765     int current_active_state = s->state;
2766
2767     if (s->state == MIGRATION_STATUS_ACTIVE) {
2768         ret = migration_completion_precopy(s, &current_active_state);
2769     } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
2770         migration_completion_postcopy(s);
2771     } else {
2772         ret = -1;
2773     }
2774
2775     if (ret < 0) {
2776         goto fail;
2777     }
2778
2779     if (close_return_path_on_source(s)) {
2780         goto fail;
2781     }
2782
2783     if (qemu_file_get_error(s->to_dst_file)) {
2784         trace_migration_completion_file_err();
2785         goto fail;
2786     }
2787
2788     if (migrate_colo() && s->state == MIGRATION_STATUS_ACTIVE) {
2789         /* COLO does not support postcopy */
2790         migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
2791                           MIGRATION_STATUS_COLO);
2792     } else {
2793         migration_completion_end(s);
2794     }
2795
2796     return;
2797
2798 fail:
2799     migration_completion_failed(s, current_active_state);
2800 }
2801
2802 /**
2803  * bg_migration_completion: Used by bg_migration_thread after all the
2804  * RAM has been saved. The caller 'breaks' the loop when this returns.
2805  *
2806  * @s: Current migration state
2807  */
2808 static void bg_migration_completion(MigrationState *s)
2809 {
2810     int current_active_state = s->state;
2811
2812     if (s->state == MIGRATION_STATUS_ACTIVE) {
2813         /*
2814          * By this moment we have RAM content saved into the migration stream.
2815          * The next step is to flush the non-RAM content (device state)
2816          * right after the ram content. The device state has been stored into
2817          * the temporary buffer before RAM saving started.
2818          */
2819         qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage);
2820         qemu_fflush(s->to_dst_file);
2821     } else if (s->state == MIGRATION_STATUS_CANCELLING) {
2822         goto fail;
2823     }
2824
2825     if (qemu_file_get_error(s->to_dst_file)) {
2826         trace_migration_completion_file_err();
2827         goto fail;
2828     }
2829
2830     migration_completion_end(s);
2831     return;
2832
2833 fail:
2834     migrate_set_state(&s->state, current_active_state,
2835                       MIGRATION_STATUS_FAILED);
2836 }
2837
2838 typedef enum MigThrError {
2839     /* No error detected */
2840     MIG_THR_ERR_NONE = 0,
2841     /* Detected error, but resumed successfully */
2842     MIG_THR_ERR_RECOVERED = 1,
2843     /* Detected fatal error, need to exit */
2844     MIG_THR_ERR_FATAL = 2,
2845 } MigThrError;
2846
2847 static int postcopy_resume_handshake(MigrationState *s)
2848 {
2849     qemu_savevm_send_postcopy_resume(s->to_dst_file);
2850
2851     while (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
2852         if (migration_rp_wait(s)) {
2853             return -1;
2854         }
2855     }
2856
2857     if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
2858         return 0;
2859     }
2860
2861     return -1;
2862 }
2863
2864 /* Return zero on success, or <0 on error */
2865 static int postcopy_do_resume(MigrationState *s)
2866 {
2867     int ret;
2868
2869     /*
2870      * Call all the resume_prepare() hooks, so that modules can be
2871      * ready for the migration resume.
2872      */
2873     ret = qemu_savevm_state_resume_prepare(s);
2874     if (ret) {
2875         error_report("%s: resume_prepare() failure detected: %d",
2876                      __func__, ret);
2877         return ret;
2878     }
2879
2880     /*
2881      * If preempt is enabled, re-establish the preempt channel. Note that
2882      * we do it after resume prepare to make sure the main channel will be
2883      * created before the preempt channel. E.g.
with weak network, the 2884 * dest QEMU may get messed up with the preempt and main channels on 2885 * the order of connection setup. This guarantees the correct order. 2886 */ 2887 ret = postcopy_preempt_establish_channel(s); 2888 if (ret) { 2889 error_report("%s: postcopy_preempt_establish_channel(): %d", 2890 __func__, ret); 2891 return ret; 2892 } 2893 2894 /* 2895 * Last handshake with destination on the resume (destination will 2896 * switch to postcopy-active afterwards) 2897 */ 2898 ret = postcopy_resume_handshake(s); 2899 if (ret) { 2900 error_report("%s: handshake failed: %d", __func__, ret); 2901 return ret; 2902 } 2903 2904 return 0; 2905 } 2906 2907 /* 2908 * We don't return until we are in a safe state to continue current 2909 * postcopy migration. Returns MIG_THR_ERR_RECOVERED if recovered, or 2910 * MIG_THR_ERR_FATAL if unrecovery failure happened. 2911 */ 2912 static MigThrError postcopy_pause(MigrationState *s) 2913 { 2914 assert(s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE); 2915 2916 while (true) { 2917 QEMUFile *file; 2918 2919 /* 2920 * We're already pausing, so ignore any errors on the return 2921 * path and just wait for the thread to finish. It will be 2922 * re-created when we resume. 2923 */ 2924 close_return_path_on_source(s); 2925 2926 /* 2927 * Current channel is possibly broken. Release it. Note that this is 2928 * guaranteed even without lock because to_dst_file should only be 2929 * modified by the migration thread. That also guarantees that the 2930 * unregister of yank is safe too without the lock. It should be safe 2931 * even to be within the qemu_file_lock, but we didn't do that to avoid 2932 * taking more mutex (yank_lock) within qemu_file_lock. TL;DR: we make 2933 * the qemu_file_lock critical section as small as possible. 2934 */ 2935 assert(s->to_dst_file); 2936 migration_ioc_unregister_yank_from_file(s->to_dst_file); 2937 qemu_mutex_lock(&s->qemu_file_lock); 2938 file = s->to_dst_file; 2939 s->to_dst_file = NULL; 2940 qemu_mutex_unlock(&s->qemu_file_lock); 2941 2942 qemu_file_shutdown(file); 2943 qemu_fclose(file); 2944 2945 migrate_set_state(&s->state, s->state, 2946 MIGRATION_STATUS_POSTCOPY_PAUSED); 2947 2948 error_report("Detected IO failure for postcopy. " 2949 "Migration paused."); 2950 2951 /* 2952 * We wait until things fixed up. Then someone will setup the 2953 * status back for us. 2954 */ 2955 while (s->state == MIGRATION_STATUS_POSTCOPY_PAUSED) { 2956 qemu_sem_wait(&s->postcopy_pause_sem); 2957 } 2958 2959 if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { 2960 /* Woken up by a recover procedure. Give it a shot */ 2961 2962 /* Do the resume logic */ 2963 if (postcopy_do_resume(s) == 0) { 2964 /* Let's continue! */ 2965 trace_postcopy_pause_continued(); 2966 return MIG_THR_ERR_RECOVERED; 2967 } else { 2968 /* 2969 * Something wrong happened during the recovery, let's 2970 * pause again. Pause is always better than throwing 2971 * data away. 2972 */ 2973 continue; 2974 } 2975 } else { 2976 /* This is not right... Time to quit. */ 2977 return MIG_THR_ERR_FATAL; 2978 } 2979 } 2980 } 2981 2982 static MigThrError migration_detect_error(MigrationState *s) 2983 { 2984 int ret; 2985 int state = s->state; 2986 Error *local_error = NULL; 2987 2988 if (state == MIGRATION_STATUS_CANCELLING || 2989 state == MIGRATION_STATUS_CANCELLED) { 2990 /* End the migration, but don't set the state to failed */ 2991 return MIG_THR_ERR_FATAL; 2992 } 2993 2994 /* 2995 * Try to detect any file errors. 
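 *
 * When one of these errors shows up while postcopy is live, the code
 * below parks the migration in postcopy_pause() instead of failing it;
 * the admin can later recover along these lines (illustrative QMP, with
 * a made-up port and hostname):
 *
 *     (dst) -> { "execute": "migrate-recover",
 *                "arguments": { "uri": "tcp:0:4444" } }
 *     (src) -> { "execute": "migrate",
 *                "arguments": { "uri": "tcp:dst-host:4444",
 *                               "resume": true } }
 *
 * which funnels back into migrate_prepare(resume=true) and eventually
 * postcopy_do_resume() above.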
Note that postcopy_qemufile_src will 2996 * be NULL when postcopy preempt is not enabled. 2997 */ 2998 ret = qemu_file_get_error_obj_any(s->to_dst_file, 2999 s->postcopy_qemufile_src, 3000 &local_error); 3001 if (!ret) { 3002 /* Everything is fine */ 3003 assert(!local_error); 3004 return MIG_THR_ERR_NONE; 3005 } 3006 3007 if (local_error) { 3008 migrate_set_error(s, local_error); 3009 error_free(local_error); 3010 } 3011 3012 if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE && ret) { 3013 /* 3014 * For postcopy, we allow the network to be down for a 3015 * while. After that, it can be continued by a 3016 * recovery phase. 3017 */ 3018 return postcopy_pause(s); 3019 } else { 3020 /* 3021 * For precopy (or postcopy with error outside IO), we fail 3022 * with no time. 3023 */ 3024 migrate_set_state(&s->state, state, MIGRATION_STATUS_FAILED); 3025 trace_migration_thread_file_err(); 3026 3027 /* Time to stop the migration, now. */ 3028 return MIG_THR_ERR_FATAL; 3029 } 3030 } 3031 3032 static void migration_completion_end(MigrationState *s) 3033 { 3034 uint64_t bytes = migration_transferred_bytes(); 3035 int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 3036 int64_t transfer_time; 3037 3038 /* 3039 * Take the BQL here so that query-migrate on the QMP thread sees: 3040 * - atomic update of s->total_time and s->mbps; 3041 * - correct ordering of s->mbps update vs. s->state; 3042 */ 3043 bql_lock(); 3044 migration_downtime_end(s); 3045 s->total_time = end_time - s->start_time; 3046 transfer_time = s->total_time - s->setup_time; 3047 if (transfer_time) { 3048 s->mbps = ((double) bytes * 8.0) / transfer_time / 1000; 3049 } 3050 3051 migrate_set_state(&s->state, s->state, 3052 MIGRATION_STATUS_COMPLETED); 3053 bql_unlock(); 3054 } 3055 3056 static void update_iteration_initial_status(MigrationState *s) 3057 { 3058 /* 3059 * Update these three fields at the same time to avoid mismatch info lead 3060 * wrong speed calculation. 3061 */ 3062 s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 3063 s->iteration_initial_bytes = migration_transferred_bytes(); 3064 s->iteration_initial_pages = ram_get_total_transferred_pages(); 3065 } 3066 3067 static void migration_update_counters(MigrationState *s, 3068 int64_t current_time) 3069 { 3070 uint64_t transferred, transferred_pages, time_spent; 3071 uint64_t current_bytes; /* bytes transferred since the beginning */ 3072 uint64_t switchover_bw; 3073 /* Expected bandwidth when switching over to destination QEMU */ 3074 double expected_bw_per_ms; 3075 double bandwidth; 3076 3077 if (current_time < s->iteration_start_time + BUFFER_DELAY) { 3078 return; 3079 } 3080 3081 switchover_bw = migrate_avail_switchover_bandwidth(); 3082 current_bytes = migration_transferred_bytes(); 3083 transferred = current_bytes - s->iteration_initial_bytes; 3084 time_spent = current_time - s->iteration_start_time; 3085 bandwidth = (double)transferred / time_spent; 3086 3087 if (switchover_bw) { 3088 /* 3089 * If the user specified a switchover bandwidth, let's trust the 3090 * user so that can be more accurate than what we estimated. 
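 *
 * A worked example of the arithmetic below, with purely illustrative
 * numbers: avail-switchover-bandwidth set to 1.25e9 bytes/s (a 10Gbps
 * link) gives expected_bw_per_ms = 1.25e6 bytes. With a 300ms
 * downtime-limit, threshold_size becomes 1.25e6 * 300 = 375e6 bytes,
 * i.e. switchover is only attempted once the remaining data fits in
 * roughly 375MB; and if the last sync left 2e9 dirty bytes,
 * expected_downtime comes out as 2e9 / 1.25e6 = 1600ms.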
3091 */ 3092 expected_bw_per_ms = switchover_bw / 1000; 3093 } else { 3094 /* If the user doesn't specify bandwidth, we use the estimated */ 3095 expected_bw_per_ms = bandwidth; 3096 } 3097 3098 s->threshold_size = expected_bw_per_ms * migrate_downtime_limit(); 3099 3100 s->mbps = (((double) transferred * 8.0) / 3101 ((double) time_spent / 1000.0)) / 1000.0 / 1000.0; 3102 3103 transferred_pages = ram_get_total_transferred_pages() - 3104 s->iteration_initial_pages; 3105 s->pages_per_second = (double) transferred_pages / 3106 (((double) time_spent / 1000.0)); 3107 3108 /* 3109 * if we haven't sent anything, we don't want to 3110 * recalculate. 10000 is a small enough number for our purposes 3111 */ 3112 if (stat64_get(&mig_stats.dirty_pages_rate) && 3113 transferred > 10000) { 3114 s->expected_downtime = 3115 stat64_get(&mig_stats.dirty_bytes_last_sync) / expected_bw_per_ms; 3116 } 3117 3118 migration_rate_reset(); 3119 3120 update_iteration_initial_status(s); 3121 3122 trace_migrate_transferred(transferred, time_spent, 3123 /* Both in unit bytes/ms */ 3124 bandwidth, switchover_bw / 1000, 3125 s->threshold_size); 3126 } 3127 3128 static bool migration_can_switchover(MigrationState *s) 3129 { 3130 if (!migrate_switchover_ack()) { 3131 return true; 3132 } 3133 3134 /* No reason to wait for switchover ACK if VM is stopped */ 3135 if (!runstate_is_running()) { 3136 return true; 3137 } 3138 3139 return s->switchover_acked; 3140 } 3141 3142 /* Migration thread iteration status */ 3143 typedef enum { 3144 MIG_ITERATE_RESUME, /* Resume current iteration */ 3145 MIG_ITERATE_SKIP, /* Skip current iteration */ 3146 MIG_ITERATE_BREAK, /* Break the loop */ 3147 } MigIterateState; 3148 3149 /* 3150 * Return true if continue to the next iteration directly, false 3151 * otherwise. 3152 */ 3153 static MigIterateState migration_iteration_run(MigrationState *s) 3154 { 3155 uint64_t must_precopy, can_postcopy; 3156 Error *local_err = NULL; 3157 bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE; 3158 bool can_switchover = migration_can_switchover(s); 3159 3160 qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy); 3161 uint64_t pending_size = must_precopy + can_postcopy; 3162 3163 trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy); 3164 3165 if (must_precopy <= s->threshold_size) { 3166 qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy); 3167 pending_size = must_precopy + can_postcopy; 3168 trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy); 3169 } 3170 3171 if ((!pending_size || pending_size < s->threshold_size) && can_switchover) { 3172 trace_migration_thread_low_pending(pending_size); 3173 migration_completion(s); 3174 return MIG_ITERATE_BREAK; 3175 } 3176 3177 /* Still a significant amount to transfer */ 3178 if (!in_postcopy && must_precopy <= s->threshold_size && can_switchover && 3179 qatomic_read(&s->start_postcopy)) { 3180 if (postcopy_start(s, &local_err)) { 3181 migrate_set_error(s, local_err); 3182 error_report_err(local_err); 3183 } 3184 return MIG_ITERATE_SKIP; 3185 } 3186 3187 /* Just another iteration step */ 3188 qemu_savevm_state_iterate(s->to_dst_file, in_postcopy); 3189 return MIG_ITERATE_RESUME; 3190 } 3191 3192 static void migration_iteration_finish(MigrationState *s) 3193 { 3194 /* If we enabled cpu throttling for auto-converge, turn it off. 
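 *
 * "Enabled" here means ram.c's auto-converge logic may have been
 * ratcheting the throttle up while precopy failed to converge,
 * conceptually:
 *
 *     cpu_throttle_set(pct);   // pct grows as convergence keeps failing
 *     ...
 *     cpu_throttle_stop();     // undone here on every exit path
 *
 * so this runs regardless of whether we end up in COMPLETED, FAILED or
 * CANCELLED below.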
*/ 3195 cpu_throttle_stop(); 3196 3197 bql_lock(); 3198 switch (s->state) { 3199 case MIGRATION_STATUS_COMPLETED: 3200 runstate_set(RUN_STATE_POSTMIGRATE); 3201 break; 3202 case MIGRATION_STATUS_COLO: 3203 assert(migrate_colo()); 3204 migrate_start_colo_process(s); 3205 s->vm_old_state = RUN_STATE_RUNNING; 3206 /* Fallthrough */ 3207 case MIGRATION_STATUS_FAILED: 3208 case MIGRATION_STATUS_CANCELLED: 3209 case MIGRATION_STATUS_CANCELLING: 3210 if (runstate_is_live(s->vm_old_state)) { 3211 if (!runstate_check(RUN_STATE_SHUTDOWN)) { 3212 vm_start(); 3213 } 3214 } else { 3215 if (runstate_check(RUN_STATE_FINISH_MIGRATE)) { 3216 runstate_set(s->vm_old_state); 3217 } 3218 } 3219 break; 3220 3221 default: 3222 /* Should not reach here, but if so, forgive the VM. */ 3223 error_report("%s: Unknown ending state %d", __func__, s->state); 3224 break; 3225 } 3226 3227 migration_bh_schedule(migrate_fd_cleanup_bh, s); 3228 bql_unlock(); 3229 } 3230 3231 static void bg_migration_iteration_finish(MigrationState *s) 3232 { 3233 /* 3234 * Stop tracking RAM writes - un-protect memory, un-register UFFD 3235 * memory ranges, flush kernel wait queues and wake up threads 3236 * waiting for write fault to be resolved. 3237 */ 3238 ram_write_tracking_stop(); 3239 3240 bql_lock(); 3241 switch (s->state) { 3242 case MIGRATION_STATUS_COMPLETED: 3243 case MIGRATION_STATUS_ACTIVE: 3244 case MIGRATION_STATUS_FAILED: 3245 case MIGRATION_STATUS_CANCELLED: 3246 case MIGRATION_STATUS_CANCELLING: 3247 break; 3248 3249 default: 3250 /* Should not reach here, but if so, forgive the VM. */ 3251 error_report("%s: Unknown ending state %d", __func__, s->state); 3252 break; 3253 } 3254 3255 migration_bh_schedule(migrate_fd_cleanup_bh, s); 3256 bql_unlock(); 3257 } 3258 3259 /* 3260 * Return true if continue to the next iteration directly, false 3261 * otherwise. 3262 */ 3263 static MigIterateState bg_migration_iteration_run(MigrationState *s) 3264 { 3265 int res; 3266 3267 res = qemu_savevm_state_iterate(s->to_dst_file, false); 3268 if (res > 0) { 3269 bg_migration_completion(s); 3270 return MIG_ITERATE_BREAK; 3271 } 3272 3273 return MIG_ITERATE_RESUME; 3274 } 3275 3276 void migration_make_urgent_request(void) 3277 { 3278 qemu_sem_post(&migrate_get_current()->rate_limit_sem); 3279 } 3280 3281 void migration_consume_urgent_request(void) 3282 { 3283 qemu_sem_wait(&migrate_get_current()->rate_limit_sem); 3284 } 3285 3286 /* Returns true if the rate limiting was broken by an urgent request */ 3287 bool migration_rate_limit(void) 3288 { 3289 int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 3290 MigrationState *s = migrate_get_current(); 3291 3292 bool urgent = false; 3293 migration_update_counters(s, now); 3294 if (migration_rate_exceeded(s->to_dst_file)) { 3295 3296 if (qemu_file_get_error(s->to_dst_file)) { 3297 return false; 3298 } 3299 /* 3300 * Wait for a delay to do rate limiting OR 3301 * something urgent to post the semaphore. 3302 */ 3303 int ms = s->iteration_start_time + BUFFER_DELAY - now; 3304 trace_migration_rate_limit_pre(ms); 3305 if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) { 3306 /* 3307 * We were woken by one or more urgent things but 3308 * the timedwait will have consumed one of them. 3309 * The service routine for the urgent wake will dec 3310 * the semaphore itself for each item it consumes, 3311 * so add this one we just eat back. 
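 *
 * In other words the urgent-request protocol pairs up like this (a
 * sketch; the real callers live in the RAM save path):
 *
 *     // requester, e.g. when a postcopy page request must not wait
 *     // for the bandwidth limit:
 *     migration_make_urgent_request();     // posts rate_limit_sem
 *
 *     // migration thread, once that urgent work has been serviced:
 *     migration_consume_urgent_request();  // waits on rate_limit_sem
 *
 * and the qemu_sem_post() below keeps the accounting balanced when the
 * timedwait here happens to swallow one of those posts.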
3312 */ 3313 qemu_sem_post(&s->rate_limit_sem); 3314 urgent = true; 3315 } 3316 trace_migration_rate_limit_post(urgent); 3317 } 3318 return urgent; 3319 } 3320 3321 /* 3322 * if failover devices are present, wait they are completely 3323 * unplugged 3324 */ 3325 3326 static void qemu_savevm_wait_unplug(MigrationState *s, int old_state, 3327 int new_state) 3328 { 3329 if (qemu_savevm_state_guest_unplug_pending()) { 3330 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_WAIT_UNPLUG); 3331 3332 while (s->state == MIGRATION_STATUS_WAIT_UNPLUG && 3333 qemu_savevm_state_guest_unplug_pending()) { 3334 qemu_sem_timedwait(&s->wait_unplug_sem, 250); 3335 } 3336 if (s->state != MIGRATION_STATUS_WAIT_UNPLUG) { 3337 int timeout = 120; /* 30 seconds */ 3338 /* 3339 * migration has been canceled 3340 * but as we have started an unplug we must wait the end 3341 * to be able to plug back the card 3342 */ 3343 while (timeout-- && qemu_savevm_state_guest_unplug_pending()) { 3344 qemu_sem_timedwait(&s->wait_unplug_sem, 250); 3345 } 3346 if (qemu_savevm_state_guest_unplug_pending() && 3347 !qtest_enabled()) { 3348 warn_report("migration: partially unplugged device on " 3349 "failure"); 3350 } 3351 } 3352 3353 migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG, new_state); 3354 } else { 3355 migrate_set_state(&s->state, old_state, new_state); 3356 } 3357 } 3358 3359 /* 3360 * Master migration thread on the source VM. 3361 * It drives the migration and pumps the data down the outgoing channel. 3362 */ 3363 static void *migration_thread(void *opaque) 3364 { 3365 MigrationState *s = opaque; 3366 MigrationThread *thread = NULL; 3367 int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST); 3368 MigThrError thr_error; 3369 bool urgent = false; 3370 3371 thread = migration_threads_add("live_migration", qemu_get_thread_id()); 3372 3373 rcu_register_thread(); 3374 3375 object_ref(OBJECT(s)); 3376 update_iteration_initial_status(s); 3377 3378 if (!multifd_send_setup()) { 3379 goto out; 3380 } 3381 3382 bql_lock(); 3383 qemu_savevm_state_header(s->to_dst_file); 3384 bql_unlock(); 3385 3386 /* 3387 * If we opened the return path, we need to make sure dst has it 3388 * opened as well. 3389 */ 3390 if (s->rp_state.rp_thread_created) { 3391 /* Now tell the dest that it should open its end so it can reply */ 3392 qemu_savevm_send_open_return_path(s->to_dst_file); 3393 3394 /* And do a ping that will make stuff easier to debug */ 3395 qemu_savevm_send_ping(s->to_dst_file, 1); 3396 } 3397 3398 if (migrate_postcopy()) { 3399 /* 3400 * Tell the destination that we *might* want to do postcopy later; 3401 * if the other end can't do postcopy it should fail now, nice and 3402 * early. 
3403 */ 3404 qemu_savevm_send_postcopy_advise(s->to_dst_file); 3405 } 3406 3407 if (migrate_colo()) { 3408 /* Notify migration destination that we enable COLO */ 3409 qemu_savevm_send_colo_enable(s->to_dst_file); 3410 } 3411 3412 bql_lock(); 3413 qemu_savevm_state_setup(s->to_dst_file); 3414 bql_unlock(); 3415 3416 qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP, 3417 MIGRATION_STATUS_ACTIVE); 3418 3419 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; 3420 3421 trace_migration_thread_setup_complete(); 3422 3423 while (migration_is_active(s)) { 3424 if (urgent || !migration_rate_exceeded(s->to_dst_file)) { 3425 MigIterateState iter_state = migration_iteration_run(s); 3426 if (iter_state == MIG_ITERATE_SKIP) { 3427 continue; 3428 } else if (iter_state == MIG_ITERATE_BREAK) { 3429 break; 3430 } 3431 } 3432 3433 /* 3434 * Try to detect any kind of failures, and see whether we 3435 * should stop the migration now. 3436 */ 3437 thr_error = migration_detect_error(s); 3438 if (thr_error == MIG_THR_ERR_FATAL) { 3439 /* Stop migration */ 3440 break; 3441 } else if (thr_error == MIG_THR_ERR_RECOVERED) { 3442 /* 3443 * Just recovered from a e.g. network failure, reset all 3444 * the local variables. This is important to avoid 3445 * breaking transferred_bytes and bandwidth calculation 3446 */ 3447 update_iteration_initial_status(s); 3448 } 3449 3450 urgent = migration_rate_limit(); 3451 } 3452 3453 out: 3454 trace_migration_thread_after_loop(); 3455 migration_iteration_finish(s); 3456 object_unref(OBJECT(s)); 3457 rcu_unregister_thread(); 3458 migration_threads_remove(thread); 3459 return NULL; 3460 } 3461 3462 static void bg_migration_vm_start_bh(void *opaque) 3463 { 3464 MigrationState *s = opaque; 3465 3466 vm_resume(s->vm_old_state); 3467 migration_downtime_end(s); 3468 } 3469 3470 /** 3471 * Background snapshot thread, based on live migration code. 3472 * This is an alternative implementation of live migration mechanism 3473 * introduced specifically to support background snapshots. 3474 * 3475 * It takes advantage of userfault_fd write protection mechanism introduced 3476 * in v5.7 kernel. Compared to existing dirty page logging migration much 3477 * lesser stream traffic is produced resulting in smaller snapshot images, 3478 * simply cause of no page duplicates can get into the stream. 3479 * 3480 * Another key point is that generated vmstate stream reflects machine state 3481 * 'frozen' at the beginning of snapshot creation compared to dirty page logging 3482 * mechanism, which effectively results in that saved snapshot is the state of VM 3483 * at the end of the process. 3484 */ 3485 static void *bg_migration_thread(void *opaque) 3486 { 3487 MigrationState *s = opaque; 3488 int64_t setup_start; 3489 MigThrError thr_error; 3490 QEMUFile *fb; 3491 bool early_fail = true; 3492 3493 rcu_register_thread(); 3494 object_ref(OBJECT(s)); 3495 3496 migration_rate_set(RATE_LIMIT_DISABLED); 3497 3498 setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST); 3499 /* 3500 * We want to save vmstate for the moment when migration has been 3501 * initiated but also we want to save RAM content while VM is running. 3502 * The RAM content should appear first in the vmstate. So, we first 3503 * stash the non-RAM part of the vmstate to the temporary buffer, 3504 * then write RAM part of the vmstate to the migration stream 3505 * with vCPUs running and, finally, write stashed non-RAM part of 3506 * the vmstate from the buffer to the migration stream. 
3507 */ 3508 s->bioc = qio_channel_buffer_new(512 * 1024); 3509 qio_channel_set_name(QIO_CHANNEL(s->bioc), "vmstate-buffer"); 3510 fb = qemu_file_new_output(QIO_CHANNEL(s->bioc)); 3511 object_unref(OBJECT(s->bioc)); 3512 3513 update_iteration_initial_status(s); 3514 3515 /* 3516 * Prepare for tracking memory writes with UFFD-WP - populate 3517 * RAM pages before protecting. 3518 */ 3519 #ifdef __linux__ 3520 ram_write_tracking_prepare(); 3521 #endif 3522 3523 bql_lock(); 3524 qemu_savevm_state_header(s->to_dst_file); 3525 qemu_savevm_state_setup(s->to_dst_file); 3526 bql_unlock(); 3527 3528 qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP, 3529 MIGRATION_STATUS_ACTIVE); 3530 3531 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; 3532 3533 trace_migration_thread_setup_complete(); 3534 3535 bql_lock(); 3536 3537 if (migration_stop_vm(s, RUN_STATE_PAUSED)) { 3538 goto fail; 3539 } 3540 /* 3541 * Put vCPUs in sync with shadow context structures, then 3542 * save their state to channel-buffer along with devices. 3543 */ 3544 cpu_synchronize_all_states(); 3545 if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) { 3546 goto fail; 3547 } 3548 /* 3549 * Since we are going to get non-iterable state data directly 3550 * from s->bioc->data, explicit flush is needed here. 3551 */ 3552 qemu_fflush(fb); 3553 3554 /* Now initialize UFFD context and start tracking RAM writes */ 3555 if (ram_write_tracking_start()) { 3556 goto fail; 3557 } 3558 early_fail = false; 3559 3560 /* 3561 * Start VM from BH handler to avoid write-fault lock here. 3562 * UFFD-WP protection for the whole RAM is already enabled so 3563 * calling VM state change notifiers from vm_start() would initiate 3564 * writes to virtio VQs memory which is in write-protected region. 3565 */ 3566 migration_bh_schedule(bg_migration_vm_start_bh, s); 3567 bql_unlock(); 3568 3569 while (migration_is_active(s)) { 3570 MigIterateState iter_state = bg_migration_iteration_run(s); 3571 if (iter_state == MIG_ITERATE_SKIP) { 3572 continue; 3573 } else if (iter_state == MIG_ITERATE_BREAK) { 3574 break; 3575 } 3576 3577 /* 3578 * Try to detect any kind of failures, and see whether we 3579 * should stop the migration now. 3580 */ 3581 thr_error = migration_detect_error(s); 3582 if (thr_error == MIG_THR_ERR_FATAL) { 3583 /* Stop migration */ 3584 break; 3585 } 3586 3587 migration_update_counters(s, qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); 3588 } 3589 3590 trace_migration_thread_after_loop(); 3591 3592 fail: 3593 if (early_fail) { 3594 migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE, 3595 MIGRATION_STATUS_FAILED); 3596 bql_unlock(); 3597 } 3598 3599 bg_migration_iteration_finish(s); 3600 3601 qemu_fclose(fb); 3602 object_unref(OBJECT(s)); 3603 rcu_unregister_thread(); 3604 3605 return NULL; 3606 } 3607 3608 void migrate_fd_connect(MigrationState *s, Error *error_in) 3609 { 3610 Error *local_err = NULL; 3611 uint64_t rate_limit; 3612 bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED; 3613 int ret; 3614 3615 /* 3616 * If there's a previous error, free it and prepare for another one. 3617 * Meanwhile if migration completes successfully, there won't have an error 3618 * dumped when calling migrate_fd_cleanup(). 3619 */ 3620 migrate_error_free(s); 3621 3622 s->expected_downtime = migrate_downtime_limit(); 3623 if (error_in) { 3624 migrate_fd_error(s, error_in); 3625 if (resume) { 3626 /* 3627 * Don't do cleanup for resume if channel is invalid, but only dump 3628 * the error. 
We wait for another channel connect from the user. 3629 * The error_report still gives HMP user a hint on what failed. 3630 * It's normally done in migrate_fd_cleanup(), but call it here 3631 * explicitly. 3632 */ 3633 error_report_err(error_copy(s->error)); 3634 } else { 3635 migrate_fd_cleanup(s); 3636 } 3637 return; 3638 } 3639 3640 if (resume) { 3641 /* This is a resumed migration */ 3642 rate_limit = migrate_max_postcopy_bandwidth(); 3643 } else { 3644 /* This is a fresh new migration */ 3645 rate_limit = migrate_max_bandwidth(); 3646 3647 /* Notify before starting migration thread */ 3648 if (migration_call_notifiers(s, MIG_EVENT_PRECOPY_SETUP, &local_err)) { 3649 goto fail; 3650 } 3651 } 3652 3653 migration_rate_set(rate_limit); 3654 qemu_file_set_blocking(s->to_dst_file, true); 3655 3656 /* 3657 * Open the return path. For postcopy, it is used exclusively. For 3658 * precopy, only if user specified "return-path" capability would 3659 * QEMU uses the return path. 3660 */ 3661 if (migrate_postcopy_ram() || migrate_return_path()) { 3662 if (open_return_path_on_source(s)) { 3663 error_setg(&local_err, "Unable to open return-path for postcopy"); 3664 goto fail; 3665 } 3666 } 3667 3668 /* 3669 * This needs to be done before resuming a postcopy. Note: for newer 3670 * QEMUs we will delay the channel creation until postcopy_start(), to 3671 * avoid disorder of channel creations. 3672 */ 3673 if (migrate_postcopy_preempt() && s->preempt_pre_7_2) { 3674 postcopy_preempt_setup(s); 3675 } 3676 3677 if (resume) { 3678 /* Wakeup the main migration thread to do the recovery */ 3679 migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED, 3680 MIGRATION_STATUS_POSTCOPY_RECOVER); 3681 qemu_sem_post(&s->postcopy_pause_sem); 3682 return; 3683 } 3684 3685 if (migrate_mode_is_cpr(s)) { 3686 ret = migration_stop_vm(s, RUN_STATE_FINISH_MIGRATE); 3687 if (ret < 0) { 3688 error_setg(&local_err, "migration_stop_vm failed, error %d", -ret); 3689 goto fail; 3690 } 3691 } 3692 3693 if (migrate_background_snapshot()) { 3694 qemu_thread_create(&s->thread, "bg_snapshot", 3695 bg_migration_thread, s, QEMU_THREAD_JOINABLE); 3696 } else { 3697 qemu_thread_create(&s->thread, "live_migration", 3698 migration_thread, s, QEMU_THREAD_JOINABLE); 3699 } 3700 s->migration_thread_running = true; 3701 return; 3702 3703 fail: 3704 migrate_set_error(s, local_err); 3705 migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED); 3706 error_report_err(local_err); 3707 migrate_fd_cleanup(s); 3708 } 3709 3710 static void migration_class_init(ObjectClass *klass, void *data) 3711 { 3712 DeviceClass *dc = DEVICE_CLASS(klass); 3713 3714 dc->user_creatable = false; 3715 device_class_set_props(dc, migration_properties); 3716 } 3717 3718 static void migration_instance_finalize(Object *obj) 3719 { 3720 MigrationState *ms = MIGRATION_OBJ(obj); 3721 3722 qemu_mutex_destroy(&ms->error_mutex); 3723 qemu_mutex_destroy(&ms->qemu_file_lock); 3724 qemu_sem_destroy(&ms->wait_unplug_sem); 3725 qemu_sem_destroy(&ms->rate_limit_sem); 3726 qemu_sem_destroy(&ms->pause_sem); 3727 qemu_sem_destroy(&ms->postcopy_pause_sem); 3728 qemu_sem_destroy(&ms->rp_state.rp_sem); 3729 qemu_sem_destroy(&ms->rp_state.rp_pong_acks); 3730 qemu_sem_destroy(&ms->postcopy_qemufile_src_sem); 3731 error_free(ms->error); 3732 } 3733 3734 static void migration_instance_init(Object *obj) 3735 { 3736 MigrationState *ms = MIGRATION_OBJ(obj); 3737 3738 ms->state = MIGRATION_STATUS_NONE; 3739 ms->mbps = -1; 3740 ms->pages_per_second = -1; 3741 qemu_sem_init(&ms->pause_sem, 
0); 3742 qemu_mutex_init(&ms->error_mutex); 3743 3744 migrate_params_init(&ms->parameters); 3745 3746 qemu_sem_init(&ms->postcopy_pause_sem, 0); 3747 qemu_sem_init(&ms->rp_state.rp_sem, 0); 3748 qemu_sem_init(&ms->rp_state.rp_pong_acks, 0); 3749 qemu_sem_init(&ms->rate_limit_sem, 0); 3750 qemu_sem_init(&ms->wait_unplug_sem, 0); 3751 qemu_sem_init(&ms->postcopy_qemufile_src_sem, 0); 3752 qemu_mutex_init(&ms->qemu_file_lock); 3753 } 3754 3755 /* 3756 * Return true if check pass, false otherwise. Error will be put 3757 * inside errp if provided. 3758 */ 3759 static bool migration_object_check(MigrationState *ms, Error **errp) 3760 { 3761 /* Assuming all off */ 3762 bool old_caps[MIGRATION_CAPABILITY__MAX] = { 0 }; 3763 3764 if (!migrate_params_check(&ms->parameters, errp)) { 3765 return false; 3766 } 3767 3768 return migrate_caps_check(old_caps, ms->capabilities, errp); 3769 } 3770 3771 static const TypeInfo migration_type = { 3772 .name = TYPE_MIGRATION, 3773 /* 3774 * NOTE: TYPE_MIGRATION is not really a device, as the object is 3775 * not created using qdev_new(), it is not attached to the qdev 3776 * device tree, and it is never realized. 3777 * 3778 * TODO: Make this TYPE_OBJECT once QOM provides something like 3779 * TYPE_DEVICE's "-global" properties. 3780 */ 3781 .parent = TYPE_DEVICE, 3782 .class_init = migration_class_init, 3783 .class_size = sizeof(MigrationClass), 3784 .instance_size = sizeof(MigrationState), 3785 .instance_init = migration_instance_init, 3786 .instance_finalize = migration_instance_finalize, 3787 }; 3788 3789 static void register_migration_types(void) 3790 { 3791 type_register_static(&migration_type); 3792 } 3793 3794 type_init(register_migration_types); 3795
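/*
 * For illustration only: because TYPE_MIGRATION is (for now) a
 * TYPE_DEVICE, its properties can be tweaked from the command line via
 * -global, which is what the TODO above refers to, e.g.:
 *
 *     qemu-system-x86_64 ... -global migration.store-global-state=off
 *
 * The same mechanism is what machine compat properties use to flip
 * migration defaults for older machine types.
 */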