1 /* 2 * QEMU live migration 3 * 4 * Copyright IBM, Corp. 2008 5 * 6 * Authors: 7 * Anthony Liguori <aliguori@us.ibm.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 * Contributions after 2012-01-13 are licensed under the terms of the 13 * GNU GPL, version 2 or (at your option) any later version. 14 */ 15 16 #include "qemu/osdep.h" 17 #include "qemu/cutils.h" 18 #include "qemu/error-report.h" 19 #include "qemu/main-loop.h" 20 #include "migration/blocker.h" 21 #include "exec.h" 22 #include "fd.h" 23 #include "file.h" 24 #include "socket.h" 25 #include "sysemu/runstate.h" 26 #include "sysemu/sysemu.h" 27 #include "sysemu/cpu-throttle.h" 28 #include "rdma.h" 29 #include "ram.h" 30 #include "ram-compress.h" 31 #include "migration/global_state.h" 32 #include "migration/misc.h" 33 #include "migration.h" 34 #include "migration-stats.h" 35 #include "savevm.h" 36 #include "qemu-file.h" 37 #include "channel.h" 38 #include "migration/vmstate.h" 39 #include "block/block.h" 40 #include "qapi/error.h" 41 #include "qapi/clone-visitor.h" 42 #include "qapi/qapi-visit-migration.h" 43 #include "qapi/qapi-visit-sockets.h" 44 #include "qapi/qapi-commands-migration.h" 45 #include "qapi/qapi-events-migration.h" 46 #include "qapi/qmp/qerror.h" 47 #include "qapi/qmp/qnull.h" 48 #include "qemu/rcu.h" 49 #include "block.h" 50 #include "postcopy-ram.h" 51 #include "qemu/thread.h" 52 #include "trace.h" 53 #include "exec/target_page.h" 54 #include "io/channel-buffer.h" 55 #include "io/channel-tls.h" 56 #include "migration/colo.h" 57 #include "hw/boards.h" 58 #include "monitor/monitor.h" 59 #include "net/announce.h" 60 #include "qemu/queue.h" 61 #include "multifd.h" 62 #include "threadinfo.h" 63 #include "qemu/yank.h" 64 #include "sysemu/cpus.h" 65 #include "yank_functions.h" 66 #include "sysemu/qtest.h" 67 #include "options.h" 68 #include "sysemu/dirtylimit.h" 69 #include "qemu/sockets.h" 70 #include "sysemu/kvm.h" 71 72 #define NOTIFIER_ELEM_INIT(array, elem) \ 73 [elem] = NOTIFIER_WITH_RETURN_LIST_INITIALIZER((array)[elem]) 74 75 static NotifierWithReturnList migration_state_notifiers[] = { 76 NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_NORMAL), 77 NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_CPR_REBOOT), 78 }; 79 80 /* Messages sent on the return path from destination to source */ 81 enum mig_rp_message_type { 82 MIG_RP_MSG_INVALID = 0, /* Must be 0 */ 83 MIG_RP_MSG_SHUT, /* sibling will not send any more RP messages */ 84 MIG_RP_MSG_PONG, /* Response to a PING; data (seq: be32 ) */ 85 86 MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */ 87 MIG_RP_MSG_REQ_PAGES, /* data (start: be64, len: be32) */ 88 MIG_RP_MSG_RECV_BITMAP, /* send recved_bitmap back to source */ 89 MIG_RP_MSG_RESUME_ACK, /* tell source that we are ready to resume */ 90 MIG_RP_MSG_SWITCHOVER_ACK, /* Tell source it's OK to do switchover */ 91 92 MIG_RP_MSG_MAX 93 }; 94 95 /* When we add fault tolerance, we could have several 96 migrations at once. For now we don't need to add 97 dynamic creation of migration */ 98 99 static MigrationState *current_migration; 100 static MigrationIncomingState *current_incoming; 101 102 static GSList *migration_blockers[MIG_MODE__MAX]; 103 104 static bool migration_object_check(MigrationState *ms, Error **errp); 105 static int migration_maybe_pause(MigrationState *s, 106 int *current_active_state, 107 int new_state); 108 static void migrate_fd_cancel(MigrationState *s); 109 static bool close_return_path_on_source(MigrationState *s); 110 static void migration_completion_end(MigrationState *s); 111 112 static void migration_downtime_start(MigrationState *s) 113 { 114 trace_vmstate_downtime_checkpoint("src-downtime-start"); 115 s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 116 } 117 118 static void migration_downtime_end(MigrationState *s) 119 { 120 int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 121 122 /* 123 * If downtime already set, should mean that postcopy already set it, 124 * then that should be the real downtime already. 125 */ 126 if (!s->downtime) { 127 s->downtime = now - s->downtime_start; 128 } 129 130 trace_vmstate_downtime_checkpoint("src-downtime-end"); 131 } 132 133 static bool migration_needs_multiple_sockets(void) 134 { 135 return migrate_multifd() || migrate_postcopy_preempt(); 136 } 137 138 static bool transport_supports_multi_channels(MigrationAddress *addr) 139 { 140 if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) { 141 SocketAddress *saddr = &addr->u.socket; 142 143 return saddr->type == SOCKET_ADDRESS_TYPE_INET || 144 saddr->type == SOCKET_ADDRESS_TYPE_UNIX || 145 saddr->type == SOCKET_ADDRESS_TYPE_VSOCK; 146 } 147 148 return false; 149 } 150 151 static bool 152 migration_channels_and_transport_compatible(MigrationAddress *addr, 153 Error **errp) 154 { 155 if (migration_needs_multiple_sockets() && 156 !transport_supports_multi_channels(addr)) { 157 error_setg(errp, "Migration requires multi-channel URIs (e.g. tcp)"); 158 return false; 159 } 160 161 return true; 162 } 163 164 static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp) 165 { 166 uintptr_t a = (uintptr_t) ap, b = (uintptr_t) bp; 167 168 return (a > b) - (a < b); 169 } 170 171 static int migration_stop_vm(MigrationState *s, RunState state) 172 { 173 int ret; 174 175 migration_downtime_start(s); 176 177 s->vm_old_state = runstate_get(); 178 global_state_store(); 179 180 ret = vm_stop_force_state(state); 181 182 trace_vmstate_downtime_checkpoint("src-vm-stopped"); 183 trace_migration_completion_vm_stop(ret); 184 185 return ret; 186 } 187 188 void migration_object_init(void) 189 { 190 /* This can only be called once. */ 191 assert(!current_migration); 192 current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION)); 193 194 /* 195 * Init the migrate incoming object as well no matter whether 196 * we'll use it or not. 197 */ 198 assert(!current_incoming); 199 current_incoming = g_new0(MigrationIncomingState, 1); 200 current_incoming->state = MIGRATION_STATUS_NONE; 201 current_incoming->postcopy_remote_fds = 202 g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD)); 203 qemu_mutex_init(¤t_incoming->rp_mutex); 204 qemu_mutex_init(¤t_incoming->postcopy_prio_thread_mutex); 205 qemu_event_init(¤t_incoming->main_thread_load_event, false); 206 qemu_sem_init(¤t_incoming->postcopy_pause_sem_dst, 0); 207 qemu_sem_init(¤t_incoming->postcopy_pause_sem_fault, 0); 208 qemu_sem_init(¤t_incoming->postcopy_pause_sem_fast_load, 0); 209 qemu_sem_init(¤t_incoming->postcopy_qemufile_dst_done, 0); 210 211 qemu_mutex_init(¤t_incoming->page_request_mutex); 212 qemu_cond_init(¤t_incoming->page_request_cond); 213 current_incoming->page_requested = g_tree_new(page_request_addr_cmp); 214 215 migration_object_check(current_migration, &error_fatal); 216 217 blk_mig_init(); 218 ram_mig_init(); 219 dirty_bitmap_mig_init(); 220 } 221 222 typedef struct { 223 QEMUBH *bh; 224 QEMUBHFunc *cb; 225 void *opaque; 226 } MigrationBH; 227 228 static void migration_bh_dispatch_bh(void *opaque) 229 { 230 MigrationState *s = migrate_get_current(); 231 MigrationBH *migbh = opaque; 232 233 /* cleanup this BH */ 234 qemu_bh_delete(migbh->bh); 235 migbh->bh = NULL; 236 237 /* dispatch the other one */ 238 migbh->cb(migbh->opaque); 239 object_unref(OBJECT(s)); 240 241 g_free(migbh); 242 } 243 244 void migration_bh_schedule(QEMUBHFunc *cb, void *opaque) 245 { 246 MigrationState *s = migrate_get_current(); 247 MigrationBH *migbh = g_new0(MigrationBH, 1); 248 QEMUBH *bh = qemu_bh_new(migration_bh_dispatch_bh, migbh); 249 250 /* Store these to dispatch when the BH runs */ 251 migbh->bh = bh; 252 migbh->cb = cb; 253 migbh->opaque = opaque; 254 255 /* 256 * Ref the state for bh, because it may be called when 257 * there're already no other refs 258 */ 259 object_ref(OBJECT(s)); 260 qemu_bh_schedule(bh); 261 } 262 263 void migration_cancel(const Error *error) 264 { 265 if (error) { 266 migrate_set_error(current_migration, error); 267 } 268 if (migrate_dirty_limit()) { 269 qmp_cancel_vcpu_dirty_limit(false, -1, NULL); 270 } 271 migrate_fd_cancel(current_migration); 272 } 273 274 void migration_shutdown(void) 275 { 276 /* 277 * When the QEMU main thread exit, the COLO thread 278 * may wait a semaphore. So, we should wakeup the 279 * COLO thread before migration shutdown. 280 */ 281 colo_shutdown(); 282 /* 283 * Cancel the current migration - that will (eventually) 284 * stop the migration using this structure 285 */ 286 migration_cancel(NULL); 287 object_unref(OBJECT(current_migration)); 288 289 /* 290 * Cancel outgoing migration of dirty bitmaps. It should 291 * at least unref used block nodes. 292 */ 293 dirty_bitmap_mig_cancel_outgoing(); 294 295 /* 296 * Cancel incoming migration of dirty bitmaps. Dirty bitmaps 297 * are non-critical data, and their loss never considered as 298 * something serious. 299 */ 300 dirty_bitmap_mig_cancel_incoming(); 301 } 302 303 /* For outgoing */ 304 MigrationState *migrate_get_current(void) 305 { 306 /* This can only be called after the object created. */ 307 assert(current_migration); 308 return current_migration; 309 } 310 311 MigrationIncomingState *migration_incoming_get_current(void) 312 { 313 assert(current_incoming); 314 return current_incoming; 315 } 316 317 void migration_incoming_transport_cleanup(MigrationIncomingState *mis) 318 { 319 if (mis->socket_address_list) { 320 qapi_free_SocketAddressList(mis->socket_address_list); 321 mis->socket_address_list = NULL; 322 } 323 324 if (mis->transport_cleanup) { 325 mis->transport_cleanup(mis->transport_data); 326 mis->transport_data = mis->transport_cleanup = NULL; 327 } 328 } 329 330 void migration_incoming_state_destroy(void) 331 { 332 struct MigrationIncomingState *mis = migration_incoming_get_current(); 333 334 multifd_recv_cleanup(); 335 compress_threads_load_cleanup(); 336 337 if (mis->to_src_file) { 338 /* Tell source that we are done */ 339 migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0); 340 qemu_fclose(mis->to_src_file); 341 mis->to_src_file = NULL; 342 } 343 344 if (mis->from_src_file) { 345 migration_ioc_unregister_yank_from_file(mis->from_src_file); 346 qemu_fclose(mis->from_src_file); 347 mis->from_src_file = NULL; 348 } 349 if (mis->postcopy_remote_fds) { 350 g_array_free(mis->postcopy_remote_fds, TRUE); 351 mis->postcopy_remote_fds = NULL; 352 } 353 354 migration_incoming_transport_cleanup(mis); 355 qemu_event_reset(&mis->main_thread_load_event); 356 357 if (mis->page_requested) { 358 g_tree_destroy(mis->page_requested); 359 mis->page_requested = NULL; 360 } 361 362 if (mis->postcopy_qemufile_dst) { 363 migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst); 364 qemu_fclose(mis->postcopy_qemufile_dst); 365 mis->postcopy_qemufile_dst = NULL; 366 } 367 368 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 369 } 370 371 static void migrate_generate_event(int new_state) 372 { 373 if (migrate_events()) { 374 qapi_event_send_migration(new_state); 375 } 376 } 377 378 /* 379 * Send a message on the return channel back to the source 380 * of the migration. 381 */ 382 static int migrate_send_rp_message(MigrationIncomingState *mis, 383 enum mig_rp_message_type message_type, 384 uint16_t len, void *data) 385 { 386 int ret = 0; 387 388 trace_migrate_send_rp_message((int)message_type, len); 389 QEMU_LOCK_GUARD(&mis->rp_mutex); 390 391 /* 392 * It's possible that the file handle got lost due to network 393 * failures. 394 */ 395 if (!mis->to_src_file) { 396 ret = -EIO; 397 return ret; 398 } 399 400 qemu_put_be16(mis->to_src_file, (unsigned int)message_type); 401 qemu_put_be16(mis->to_src_file, len); 402 qemu_put_buffer(mis->to_src_file, data, len); 403 return qemu_fflush(mis->to_src_file); 404 } 405 406 /* Request one page from the source VM at the given start address. 407 * rb: the RAMBlock to request the page in 408 * Start: Address offset within the RB 409 * Len: Length in bytes required - must be a multiple of pagesize 410 */ 411 int migrate_send_rp_message_req_pages(MigrationIncomingState *mis, 412 RAMBlock *rb, ram_addr_t start) 413 { 414 uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */ 415 size_t msglen = 12; /* start + len */ 416 size_t len = qemu_ram_pagesize(rb); 417 enum mig_rp_message_type msg_type; 418 const char *rbname; 419 int rbname_len; 420 421 *(uint64_t *)bufc = cpu_to_be64((uint64_t)start); 422 *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len); 423 424 /* 425 * We maintain the last ramblock that we requested for page. Note that we 426 * don't need locking because this function will only be called within the 427 * postcopy ram fault thread. 428 */ 429 if (rb != mis->last_rb) { 430 mis->last_rb = rb; 431 432 rbname = qemu_ram_get_idstr(rb); 433 rbname_len = strlen(rbname); 434 435 assert(rbname_len < 256); 436 437 bufc[msglen++] = rbname_len; 438 memcpy(bufc + msglen, rbname, rbname_len); 439 msglen += rbname_len; 440 msg_type = MIG_RP_MSG_REQ_PAGES_ID; 441 } else { 442 msg_type = MIG_RP_MSG_REQ_PAGES; 443 } 444 445 return migrate_send_rp_message(mis, msg_type, msglen, bufc); 446 } 447 448 int migrate_send_rp_req_pages(MigrationIncomingState *mis, 449 RAMBlock *rb, ram_addr_t start, uint64_t haddr) 450 { 451 void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb)); 452 bool received = false; 453 454 WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) { 455 received = ramblock_recv_bitmap_test_byte_offset(rb, start); 456 if (!received && !g_tree_lookup(mis->page_requested, aligned)) { 457 /* 458 * The page has not been received, and it's not yet in the page 459 * request list. Queue it. Set the value of element to 1, so that 460 * things like g_tree_lookup() will return TRUE (1) when found. 461 */ 462 g_tree_insert(mis->page_requested, aligned, (gpointer)1); 463 qatomic_inc(&mis->page_requested_count); 464 trace_postcopy_page_req_add(aligned, mis->page_requested_count); 465 } 466 } 467 468 /* 469 * If the page is there, skip sending the message. We don't even need the 470 * lock because as long as the page arrived, it'll be there forever. 471 */ 472 if (received) { 473 return 0; 474 } 475 476 return migrate_send_rp_message_req_pages(mis, rb, start); 477 } 478 479 static bool migration_colo_enabled; 480 bool migration_incoming_colo_enabled(void) 481 { 482 return migration_colo_enabled; 483 } 484 485 void migration_incoming_disable_colo(void) 486 { 487 ram_block_discard_disable(false); 488 migration_colo_enabled = false; 489 } 490 491 int migration_incoming_enable_colo(void) 492 { 493 #ifndef CONFIG_REPLICATION 494 error_report("ENABLE_COLO command come in migration stream, but COLO " 495 "module is not built in"); 496 return -ENOTSUP; 497 #endif 498 499 if (!migrate_colo()) { 500 error_report("ENABLE_COLO command come in migration stream, but c-colo " 501 "capability is not set"); 502 return -EINVAL; 503 } 504 505 if (ram_block_discard_disable(true)) { 506 error_report("COLO: cannot disable RAM discard"); 507 return -EBUSY; 508 } 509 migration_colo_enabled = true; 510 return 0; 511 } 512 513 void migrate_add_address(SocketAddress *address) 514 { 515 MigrationIncomingState *mis = migration_incoming_get_current(); 516 517 QAPI_LIST_PREPEND(mis->socket_address_list, 518 QAPI_CLONE(SocketAddress, address)); 519 } 520 521 bool migrate_uri_parse(const char *uri, MigrationChannel **channel, 522 Error **errp) 523 { 524 g_autoptr(MigrationChannel) val = g_new0(MigrationChannel, 1); 525 g_autoptr(MigrationAddress) addr = g_new0(MigrationAddress, 1); 526 InetSocketAddress *isock = &addr->u.rdma; 527 strList **tail = &addr->u.exec.args; 528 529 if (strstart(uri, "exec:", NULL)) { 530 addr->transport = MIGRATION_ADDRESS_TYPE_EXEC; 531 #ifdef WIN32 532 QAPI_LIST_APPEND(tail, g_strdup(exec_get_cmd_path())); 533 QAPI_LIST_APPEND(tail, g_strdup("/c")); 534 #else 535 QAPI_LIST_APPEND(tail, g_strdup("/bin/sh")); 536 QAPI_LIST_APPEND(tail, g_strdup("-c")); 537 #endif 538 QAPI_LIST_APPEND(tail, g_strdup(uri + strlen("exec:"))); 539 } else if (strstart(uri, "rdma:", NULL)) { 540 if (inet_parse(isock, uri + strlen("rdma:"), errp)) { 541 qapi_free_InetSocketAddress(isock); 542 return false; 543 } 544 addr->transport = MIGRATION_ADDRESS_TYPE_RDMA; 545 } else if (strstart(uri, "tcp:", NULL) || 546 strstart(uri, "unix:", NULL) || 547 strstart(uri, "vsock:", NULL) || 548 strstart(uri, "fd:", NULL)) { 549 addr->transport = MIGRATION_ADDRESS_TYPE_SOCKET; 550 SocketAddress *saddr = socket_parse(uri, errp); 551 if (!saddr) { 552 return false; 553 } 554 addr->u.socket.type = saddr->type; 555 addr->u.socket.u = saddr->u; 556 /* Don't free the objects inside; their ownership moved to "addr" */ 557 g_free(saddr); 558 } else if (strstart(uri, "file:", NULL)) { 559 addr->transport = MIGRATION_ADDRESS_TYPE_FILE; 560 addr->u.file.filename = g_strdup(uri + strlen("file:")); 561 if (file_parse_offset(addr->u.file.filename, &addr->u.file.offset, 562 errp)) { 563 return false; 564 } 565 } else { 566 error_setg(errp, "unknown migration protocol: %s", uri); 567 return false; 568 } 569 570 val->channel_type = MIGRATION_CHANNEL_TYPE_MAIN; 571 val->addr = g_steal_pointer(&addr); 572 *channel = g_steal_pointer(&val); 573 return true; 574 } 575 576 static void qemu_start_incoming_migration(const char *uri, bool has_channels, 577 MigrationChannelList *channels, 578 Error **errp) 579 { 580 g_autoptr(MigrationChannel) channel = NULL; 581 MigrationAddress *addr = NULL; 582 MigrationIncomingState *mis = migration_incoming_get_current(); 583 584 /* 585 * Having preliminary checks for uri and channel 586 */ 587 if (!uri == !channels) { 588 error_setg(errp, "need either 'uri' or 'channels' argument"); 589 return; 590 } 591 592 if (channels) { 593 /* To verify that Migrate channel list has only item */ 594 if (channels->next) { 595 error_setg(errp, "Channel list has more than one entries"); 596 return; 597 } 598 addr = channels->value->addr; 599 } 600 601 if (uri) { 602 /* caller uses the old URI syntax */ 603 if (!migrate_uri_parse(uri, &channel, errp)) { 604 return; 605 } 606 addr = channel->addr; 607 } 608 609 /* transport mechanism not suitable for migration? */ 610 if (!migration_channels_and_transport_compatible(addr, errp)) { 611 return; 612 } 613 614 migrate_set_state(&mis->state, MIGRATION_STATUS_NONE, 615 MIGRATION_STATUS_SETUP); 616 617 if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) { 618 SocketAddress *saddr = &addr->u.socket; 619 if (saddr->type == SOCKET_ADDRESS_TYPE_INET || 620 saddr->type == SOCKET_ADDRESS_TYPE_UNIX || 621 saddr->type == SOCKET_ADDRESS_TYPE_VSOCK) { 622 socket_start_incoming_migration(saddr, errp); 623 } else if (saddr->type == SOCKET_ADDRESS_TYPE_FD) { 624 fd_start_incoming_migration(saddr->u.fd.str, errp); 625 } 626 #ifdef CONFIG_RDMA 627 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) { 628 if (migrate_compress()) { 629 error_setg(errp, "RDMA and compression can't be used together"); 630 return; 631 } 632 if (migrate_xbzrle()) { 633 error_setg(errp, "RDMA and XBZRLE can't be used together"); 634 return; 635 } 636 if (migrate_multifd()) { 637 error_setg(errp, "RDMA and multifd can't be used together"); 638 return; 639 } 640 rdma_start_incoming_migration(&addr->u.rdma, errp); 641 #endif 642 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_EXEC) { 643 exec_start_incoming_migration(addr->u.exec.args, errp); 644 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) { 645 file_start_incoming_migration(&addr->u.file, errp); 646 } else { 647 error_setg(errp, "unknown migration protocol: %s", uri); 648 } 649 } 650 651 static void process_incoming_migration_bh(void *opaque) 652 { 653 Error *local_err = NULL; 654 MigrationIncomingState *mis = opaque; 655 656 trace_vmstate_downtime_checkpoint("dst-precopy-bh-enter"); 657 658 /* If capability late_block_activate is set: 659 * Only fire up the block code now if we're going to restart the 660 * VM, else 'cont' will do it. 661 * This causes file locking to happen; so we don't want it to happen 662 * unless we really are starting the VM. 663 */ 664 if (!migrate_late_block_activate() || 665 (autostart && (!global_state_received() || 666 runstate_is_live(global_state_get_runstate())))) { 667 /* Make sure all file formats throw away their mutable metadata. 668 * If we get an error here, just don't restart the VM yet. */ 669 bdrv_activate_all(&local_err); 670 if (local_err) { 671 error_report_err(local_err); 672 local_err = NULL; 673 autostart = false; 674 } 675 } 676 677 /* 678 * This must happen after all error conditions are dealt with and 679 * we're sure the VM is going to be running on this host. 680 */ 681 qemu_announce_self(&mis->announce_timer, migrate_announce_params()); 682 683 trace_vmstate_downtime_checkpoint("dst-precopy-bh-announced"); 684 685 multifd_recv_shutdown(); 686 687 dirty_bitmap_mig_before_vm_start(); 688 689 if (!global_state_received() || 690 runstate_is_live(global_state_get_runstate())) { 691 if (autostart) { 692 vm_start(); 693 } else { 694 runstate_set(RUN_STATE_PAUSED); 695 } 696 } else if (migration_incoming_colo_enabled()) { 697 migration_incoming_disable_colo(); 698 vm_start(); 699 } else { 700 runstate_set(global_state_get_runstate()); 701 } 702 trace_vmstate_downtime_checkpoint("dst-precopy-bh-vm-started"); 703 /* 704 * This must happen after any state changes since as soon as an external 705 * observer sees this event they might start to prod at the VM assuming 706 * it's ready to use. 707 */ 708 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, 709 MIGRATION_STATUS_COMPLETED); 710 migration_incoming_state_destroy(); 711 } 712 713 static void coroutine_fn 714 process_incoming_migration_co(void *opaque) 715 { 716 MigrationIncomingState *mis = migration_incoming_get_current(); 717 PostcopyState ps; 718 int ret; 719 720 assert(mis->from_src_file); 721 722 if (compress_threads_load_setup(mis->from_src_file)) { 723 error_report("Failed to setup decompress threads"); 724 goto fail; 725 } 726 727 mis->largest_page_size = qemu_ram_pagesize_largest(); 728 postcopy_state_set(POSTCOPY_INCOMING_NONE); 729 migrate_set_state(&mis->state, MIGRATION_STATUS_SETUP, 730 MIGRATION_STATUS_ACTIVE); 731 732 mis->loadvm_co = qemu_coroutine_self(); 733 ret = qemu_loadvm_state(mis->from_src_file); 734 mis->loadvm_co = NULL; 735 736 trace_vmstate_downtime_checkpoint("dst-precopy-loadvm-completed"); 737 738 ps = postcopy_state_get(); 739 trace_process_incoming_migration_co_end(ret, ps); 740 if (ps != POSTCOPY_INCOMING_NONE) { 741 if (ps == POSTCOPY_INCOMING_ADVISE) { 742 /* 743 * Where a migration had postcopy enabled (and thus went to advise) 744 * but managed to complete within the precopy period, we can use 745 * the normal exit. 746 */ 747 postcopy_ram_incoming_cleanup(mis); 748 } else if (ret >= 0) { 749 /* 750 * Postcopy was started, cleanup should happen at the end of the 751 * postcopy thread. 752 */ 753 trace_process_incoming_migration_co_postcopy_end_main(); 754 return; 755 } 756 /* Else if something went wrong then just fall out of the normal exit */ 757 } 758 759 if (ret < 0) { 760 MigrationState *s = migrate_get_current(); 761 762 if (migrate_has_error(s)) { 763 WITH_QEMU_LOCK_GUARD(&s->error_mutex) { 764 error_report_err(s->error); 765 } 766 } 767 error_report("load of migration failed: %s", strerror(-ret)); 768 goto fail; 769 } 770 771 if (colo_incoming_co() < 0) { 772 goto fail; 773 } 774 775 migration_bh_schedule(process_incoming_migration_bh, mis); 776 return; 777 fail: 778 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, 779 MIGRATION_STATUS_FAILED); 780 qemu_fclose(mis->from_src_file); 781 782 multifd_recv_cleanup(); 783 compress_threads_load_cleanup(); 784 785 exit(EXIT_FAILURE); 786 } 787 788 /** 789 * migration_incoming_setup: Setup incoming migration 790 * @f: file for main migration channel 791 */ 792 static void migration_incoming_setup(QEMUFile *f) 793 { 794 MigrationIncomingState *mis = migration_incoming_get_current(); 795 796 if (!mis->from_src_file) { 797 mis->from_src_file = f; 798 } 799 qemu_file_set_blocking(f, false); 800 } 801 802 void migration_incoming_process(void) 803 { 804 Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, NULL); 805 qemu_coroutine_enter(co); 806 } 807 808 /* Returns true if recovered from a paused migration, otherwise false */ 809 static bool postcopy_try_recover(void) 810 { 811 MigrationIncomingState *mis = migration_incoming_get_current(); 812 813 if (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) { 814 /* Resumed from a paused postcopy migration */ 815 816 /* This should be set already in migration_incoming_setup() */ 817 assert(mis->from_src_file); 818 /* Postcopy has standalone thread to do vm load */ 819 qemu_file_set_blocking(mis->from_src_file, true); 820 821 /* Re-configure the return path */ 822 mis->to_src_file = qemu_file_get_return_path(mis->from_src_file); 823 824 migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_PAUSED, 825 MIGRATION_STATUS_POSTCOPY_RECOVER); 826 827 /* 828 * Here, we only wake up the main loading thread (while the 829 * rest threads will still be waiting), so that we can receive 830 * commands from source now, and answer it if needed. The 831 * rest threads will be woken up afterwards until we are sure 832 * that source is ready to reply to page requests. 833 */ 834 qemu_sem_post(&mis->postcopy_pause_sem_dst); 835 return true; 836 } 837 838 return false; 839 } 840 841 void migration_fd_process_incoming(QEMUFile *f) 842 { 843 migration_incoming_setup(f); 844 if (postcopy_try_recover()) { 845 return; 846 } 847 migration_incoming_process(); 848 } 849 850 /* 851 * Returns true when we want to start a new incoming migration process, 852 * false otherwise. 853 */ 854 static bool migration_should_start_incoming(bool main_channel) 855 { 856 /* Multifd doesn't start unless all channels are established */ 857 if (migrate_multifd()) { 858 return migration_has_all_channels(); 859 } 860 861 /* Preempt channel only starts when the main channel is created */ 862 if (migrate_postcopy_preempt()) { 863 return main_channel; 864 } 865 866 /* 867 * For all the rest types of migration, we should only reach here when 868 * it's the main channel that's being created, and we should always 869 * proceed with this channel. 870 */ 871 assert(main_channel); 872 return true; 873 } 874 875 void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) 876 { 877 MigrationIncomingState *mis = migration_incoming_get_current(); 878 Error *local_err = NULL; 879 QEMUFile *f; 880 bool default_channel = true; 881 uint32_t channel_magic = 0; 882 int ret = 0; 883 884 if (migrate_multifd() && !migrate_postcopy_ram() && 885 qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) { 886 /* 887 * With multiple channels, it is possible that we receive channels 888 * out of order on destination side, causing incorrect mapping of 889 * source channels on destination side. Check channel MAGIC to 890 * decide type of channel. Please note this is best effort, postcopy 891 * preempt channel does not send any magic number so avoid it for 892 * postcopy live migration. Also tls live migration already does 893 * tls handshake while initializing main channel so with tls this 894 * issue is not possible. 895 */ 896 ret = migration_channel_read_peek(ioc, (void *)&channel_magic, 897 sizeof(channel_magic), errp); 898 899 if (ret != 0) { 900 return; 901 } 902 903 default_channel = (channel_magic == cpu_to_be32(QEMU_VM_FILE_MAGIC)); 904 } else { 905 default_channel = !mis->from_src_file; 906 } 907 908 if (multifd_recv_setup(errp) != 0) { 909 return; 910 } 911 912 if (default_channel) { 913 f = qemu_file_new_input(ioc); 914 migration_incoming_setup(f); 915 } else { 916 /* Multiple connections */ 917 assert(migration_needs_multiple_sockets()); 918 if (migrate_multifd()) { 919 multifd_recv_new_channel(ioc, &local_err); 920 } else { 921 assert(migrate_postcopy_preempt()); 922 f = qemu_file_new_input(ioc); 923 postcopy_preempt_new_channel(mis, f); 924 } 925 if (local_err) { 926 error_propagate(errp, local_err); 927 return; 928 } 929 } 930 931 if (migration_should_start_incoming(default_channel)) { 932 /* If it's a recovery, we're done */ 933 if (postcopy_try_recover()) { 934 return; 935 } 936 migration_incoming_process(); 937 } 938 } 939 940 /** 941 * @migration_has_all_channels: We have received all channels that we need 942 * 943 * Returns true when we have got connections to all the channels that 944 * we need for migration. 945 */ 946 bool migration_has_all_channels(void) 947 { 948 MigrationIncomingState *mis = migration_incoming_get_current(); 949 950 if (!mis->from_src_file) { 951 return false; 952 } 953 954 if (migrate_multifd()) { 955 return multifd_recv_all_channels_created(); 956 } 957 958 if (migrate_postcopy_preempt()) { 959 return mis->postcopy_qemufile_dst != NULL; 960 } 961 962 return true; 963 } 964 965 int migrate_send_rp_switchover_ack(MigrationIncomingState *mis) 966 { 967 return migrate_send_rp_message(mis, MIG_RP_MSG_SWITCHOVER_ACK, 0, NULL); 968 } 969 970 /* 971 * Send a 'SHUT' message on the return channel with the given value 972 * to indicate that we've finished with the RP. Non-0 value indicates 973 * error. 974 */ 975 void migrate_send_rp_shut(MigrationIncomingState *mis, 976 uint32_t value) 977 { 978 uint32_t buf; 979 980 buf = cpu_to_be32(value); 981 migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf); 982 } 983 984 /* 985 * Send a 'PONG' message on the return channel with the given value 986 * (normally in response to a 'PING') 987 */ 988 void migrate_send_rp_pong(MigrationIncomingState *mis, 989 uint32_t value) 990 { 991 uint32_t buf; 992 993 buf = cpu_to_be32(value); 994 migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf); 995 } 996 997 void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis, 998 char *block_name) 999 { 1000 char buf[512]; 1001 int len; 1002 int64_t res; 1003 1004 /* 1005 * First, we send the header part. It contains only the len of 1006 * idstr, and the idstr itself. 1007 */ 1008 len = strlen(block_name); 1009 buf[0] = len; 1010 memcpy(buf + 1, block_name, len); 1011 1012 if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) { 1013 error_report("%s: MSG_RP_RECV_BITMAP only used for recovery", 1014 __func__); 1015 return; 1016 } 1017 1018 migrate_send_rp_message(mis, MIG_RP_MSG_RECV_BITMAP, len + 1, buf); 1019 1020 /* 1021 * Next, we dump the received bitmap to the stream. 1022 * 1023 * TODO: currently we are safe since we are the only one that is 1024 * using the to_src_file handle (fault thread is still paused), 1025 * and it's ok even not taking the mutex. However the best way is 1026 * to take the lock before sending the message header, and release 1027 * the lock after sending the bitmap. 1028 */ 1029 qemu_mutex_lock(&mis->rp_mutex); 1030 res = ramblock_recv_bitmap_send(mis->to_src_file, block_name); 1031 qemu_mutex_unlock(&mis->rp_mutex); 1032 1033 trace_migrate_send_rp_recv_bitmap(block_name, res); 1034 } 1035 1036 void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value) 1037 { 1038 uint32_t buf; 1039 1040 buf = cpu_to_be32(value); 1041 migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf); 1042 } 1043 1044 /* 1045 * Return true if we're already in the middle of a migration 1046 * (i.e. any of the active or setup states) 1047 */ 1048 bool migration_is_setup_or_active(int state) 1049 { 1050 switch (state) { 1051 case MIGRATION_STATUS_ACTIVE: 1052 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1053 case MIGRATION_STATUS_POSTCOPY_PAUSED: 1054 case MIGRATION_STATUS_POSTCOPY_RECOVER: 1055 case MIGRATION_STATUS_SETUP: 1056 case MIGRATION_STATUS_PRE_SWITCHOVER: 1057 case MIGRATION_STATUS_DEVICE: 1058 case MIGRATION_STATUS_WAIT_UNPLUG: 1059 case MIGRATION_STATUS_COLO: 1060 return true; 1061 1062 default: 1063 return false; 1064 1065 } 1066 } 1067 1068 bool migration_is_running(int state) 1069 { 1070 switch (state) { 1071 case MIGRATION_STATUS_ACTIVE: 1072 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1073 case MIGRATION_STATUS_POSTCOPY_PAUSED: 1074 case MIGRATION_STATUS_POSTCOPY_RECOVER: 1075 case MIGRATION_STATUS_SETUP: 1076 case MIGRATION_STATUS_PRE_SWITCHOVER: 1077 case MIGRATION_STATUS_DEVICE: 1078 case MIGRATION_STATUS_WAIT_UNPLUG: 1079 case MIGRATION_STATUS_CANCELLING: 1080 return true; 1081 1082 default: 1083 return false; 1084 1085 } 1086 } 1087 1088 static bool migrate_show_downtime(MigrationState *s) 1089 { 1090 return (s->state == MIGRATION_STATUS_COMPLETED) || migration_in_postcopy(); 1091 } 1092 1093 static void populate_time_info(MigrationInfo *info, MigrationState *s) 1094 { 1095 info->has_status = true; 1096 info->has_setup_time = true; 1097 info->setup_time = s->setup_time; 1098 1099 if (s->state == MIGRATION_STATUS_COMPLETED) { 1100 info->has_total_time = true; 1101 info->total_time = s->total_time; 1102 } else { 1103 info->has_total_time = true; 1104 info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - 1105 s->start_time; 1106 } 1107 1108 if (migrate_show_downtime(s)) { 1109 info->has_downtime = true; 1110 info->downtime = s->downtime; 1111 } else { 1112 info->has_expected_downtime = true; 1113 info->expected_downtime = s->expected_downtime; 1114 } 1115 } 1116 1117 static void populate_ram_info(MigrationInfo *info, MigrationState *s) 1118 { 1119 size_t page_size = qemu_target_page_size(); 1120 1121 info->ram = g_malloc0(sizeof(*info->ram)); 1122 info->ram->transferred = migration_transferred_bytes(); 1123 info->ram->total = ram_bytes_total(); 1124 info->ram->duplicate = stat64_get(&mig_stats.zero_pages); 1125 /* legacy value. It is not used anymore */ 1126 info->ram->skipped = 0; 1127 info->ram->normal = stat64_get(&mig_stats.normal_pages); 1128 info->ram->normal_bytes = info->ram->normal * page_size; 1129 info->ram->mbps = s->mbps; 1130 info->ram->dirty_sync_count = 1131 stat64_get(&mig_stats.dirty_sync_count); 1132 info->ram->dirty_sync_missed_zero_copy = 1133 stat64_get(&mig_stats.dirty_sync_missed_zero_copy); 1134 info->ram->postcopy_requests = 1135 stat64_get(&mig_stats.postcopy_requests); 1136 info->ram->page_size = page_size; 1137 info->ram->multifd_bytes = stat64_get(&mig_stats.multifd_bytes); 1138 info->ram->pages_per_second = s->pages_per_second; 1139 info->ram->precopy_bytes = stat64_get(&mig_stats.precopy_bytes); 1140 info->ram->downtime_bytes = stat64_get(&mig_stats.downtime_bytes); 1141 info->ram->postcopy_bytes = stat64_get(&mig_stats.postcopy_bytes); 1142 1143 if (migrate_xbzrle()) { 1144 info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache)); 1145 info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size(); 1146 info->xbzrle_cache->bytes = xbzrle_counters.bytes; 1147 info->xbzrle_cache->pages = xbzrle_counters.pages; 1148 info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss; 1149 info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate; 1150 info->xbzrle_cache->encoding_rate = xbzrle_counters.encoding_rate; 1151 info->xbzrle_cache->overflow = xbzrle_counters.overflow; 1152 } 1153 1154 populate_compress(info); 1155 1156 if (cpu_throttle_active()) { 1157 info->has_cpu_throttle_percentage = true; 1158 info->cpu_throttle_percentage = cpu_throttle_get_percentage(); 1159 } 1160 1161 if (s->state != MIGRATION_STATUS_COMPLETED) { 1162 info->ram->remaining = ram_bytes_remaining(); 1163 info->ram->dirty_pages_rate = 1164 stat64_get(&mig_stats.dirty_pages_rate); 1165 } 1166 1167 if (migrate_dirty_limit() && dirtylimit_in_service()) { 1168 info->has_dirty_limit_throttle_time_per_round = true; 1169 info->dirty_limit_throttle_time_per_round = 1170 dirtylimit_throttle_time_per_round(); 1171 1172 info->has_dirty_limit_ring_full_time = true; 1173 info->dirty_limit_ring_full_time = dirtylimit_ring_full_time(); 1174 } 1175 } 1176 1177 static void populate_disk_info(MigrationInfo *info) 1178 { 1179 if (blk_mig_active()) { 1180 info->disk = g_malloc0(sizeof(*info->disk)); 1181 info->disk->transferred = blk_mig_bytes_transferred(); 1182 info->disk->remaining = blk_mig_bytes_remaining(); 1183 info->disk->total = blk_mig_bytes_total(); 1184 } 1185 } 1186 1187 static void fill_source_migration_info(MigrationInfo *info) 1188 { 1189 MigrationState *s = migrate_get_current(); 1190 int state = qatomic_read(&s->state); 1191 GSList *cur_blocker = migration_blockers[migrate_mode()]; 1192 1193 info->blocked_reasons = NULL; 1194 1195 /* 1196 * There are two types of reasons a migration might be blocked; 1197 * a) devices marked in VMState as non-migratable, and 1198 * b) Explicit migration blockers 1199 * We need to add both of them here. 1200 */ 1201 qemu_savevm_non_migratable_list(&info->blocked_reasons); 1202 1203 while (cur_blocker) { 1204 QAPI_LIST_PREPEND(info->blocked_reasons, 1205 g_strdup(error_get_pretty(cur_blocker->data))); 1206 cur_blocker = g_slist_next(cur_blocker); 1207 } 1208 info->has_blocked_reasons = info->blocked_reasons != NULL; 1209 1210 switch (state) { 1211 case MIGRATION_STATUS_NONE: 1212 /* no migration has happened ever */ 1213 /* do not overwrite destination migration status */ 1214 return; 1215 case MIGRATION_STATUS_SETUP: 1216 info->has_status = true; 1217 info->has_total_time = false; 1218 break; 1219 case MIGRATION_STATUS_ACTIVE: 1220 case MIGRATION_STATUS_CANCELLING: 1221 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1222 case MIGRATION_STATUS_PRE_SWITCHOVER: 1223 case MIGRATION_STATUS_DEVICE: 1224 case MIGRATION_STATUS_POSTCOPY_PAUSED: 1225 case MIGRATION_STATUS_POSTCOPY_RECOVER: 1226 /* TODO add some postcopy stats */ 1227 populate_time_info(info, s); 1228 populate_ram_info(info, s); 1229 populate_disk_info(info); 1230 migration_populate_vfio_info(info); 1231 break; 1232 case MIGRATION_STATUS_COLO: 1233 info->has_status = true; 1234 /* TODO: display COLO specific information (checkpoint info etc.) */ 1235 break; 1236 case MIGRATION_STATUS_COMPLETED: 1237 populate_time_info(info, s); 1238 populate_ram_info(info, s); 1239 migration_populate_vfio_info(info); 1240 break; 1241 case MIGRATION_STATUS_FAILED: 1242 info->has_status = true; 1243 break; 1244 case MIGRATION_STATUS_CANCELLED: 1245 info->has_status = true; 1246 break; 1247 case MIGRATION_STATUS_WAIT_UNPLUG: 1248 info->has_status = true; 1249 break; 1250 } 1251 info->status = state; 1252 1253 QEMU_LOCK_GUARD(&s->error_mutex); 1254 if (s->error) { 1255 info->error_desc = g_strdup(error_get_pretty(s->error)); 1256 } 1257 } 1258 1259 static void fill_destination_migration_info(MigrationInfo *info) 1260 { 1261 MigrationIncomingState *mis = migration_incoming_get_current(); 1262 1263 if (mis->socket_address_list) { 1264 info->has_socket_address = true; 1265 info->socket_address = 1266 QAPI_CLONE(SocketAddressList, mis->socket_address_list); 1267 } 1268 1269 switch (mis->state) { 1270 case MIGRATION_STATUS_NONE: 1271 return; 1272 case MIGRATION_STATUS_SETUP: 1273 case MIGRATION_STATUS_CANCELLING: 1274 case MIGRATION_STATUS_CANCELLED: 1275 case MIGRATION_STATUS_ACTIVE: 1276 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1277 case MIGRATION_STATUS_POSTCOPY_PAUSED: 1278 case MIGRATION_STATUS_POSTCOPY_RECOVER: 1279 case MIGRATION_STATUS_FAILED: 1280 case MIGRATION_STATUS_COLO: 1281 info->has_status = true; 1282 break; 1283 case MIGRATION_STATUS_COMPLETED: 1284 info->has_status = true; 1285 fill_destination_postcopy_migration_info(info); 1286 break; 1287 } 1288 info->status = mis->state; 1289 } 1290 1291 MigrationInfo *qmp_query_migrate(Error **errp) 1292 { 1293 MigrationInfo *info = g_malloc0(sizeof(*info)); 1294 1295 fill_destination_migration_info(info); 1296 fill_source_migration_info(info); 1297 1298 return info; 1299 } 1300 1301 void qmp_migrate_start_postcopy(Error **errp) 1302 { 1303 MigrationState *s = migrate_get_current(); 1304 1305 if (!migrate_postcopy()) { 1306 error_setg(errp, "Enable postcopy with migrate_set_capability before" 1307 " the start of migration"); 1308 return; 1309 } 1310 1311 if (s->state == MIGRATION_STATUS_NONE) { 1312 error_setg(errp, "Postcopy must be started after migration has been" 1313 " started"); 1314 return; 1315 } 1316 /* 1317 * we don't error if migration has finished since that would be racy 1318 * with issuing this command. 1319 */ 1320 qatomic_set(&s->start_postcopy, true); 1321 } 1322 1323 /* shared migration helpers */ 1324 1325 void migrate_set_state(int *state, int old_state, int new_state) 1326 { 1327 assert(new_state < MIGRATION_STATUS__MAX); 1328 if (qatomic_cmpxchg(state, old_state, new_state) == old_state) { 1329 trace_migrate_set_state(MigrationStatus_str(new_state)); 1330 migrate_generate_event(new_state); 1331 } 1332 } 1333 1334 static void migrate_fd_cleanup(MigrationState *s) 1335 { 1336 MigrationEventType type; 1337 1338 g_free(s->hostname); 1339 s->hostname = NULL; 1340 json_writer_free(s->vmdesc); 1341 s->vmdesc = NULL; 1342 1343 qemu_savevm_state_cleanup(); 1344 1345 close_return_path_on_source(s); 1346 1347 if (s->to_dst_file) { 1348 QEMUFile *tmp; 1349 1350 trace_migrate_fd_cleanup(); 1351 bql_unlock(); 1352 if (s->migration_thread_running) { 1353 qemu_thread_join(&s->thread); 1354 s->migration_thread_running = false; 1355 } 1356 bql_lock(); 1357 1358 multifd_send_shutdown(); 1359 qemu_mutex_lock(&s->qemu_file_lock); 1360 tmp = s->to_dst_file; 1361 s->to_dst_file = NULL; 1362 qemu_mutex_unlock(&s->qemu_file_lock); 1363 /* 1364 * Close the file handle without the lock to make sure the 1365 * critical section won't block for long. 1366 */ 1367 migration_ioc_unregister_yank_from_file(tmp); 1368 qemu_fclose(tmp); 1369 } 1370 1371 assert(!migration_is_active(s)); 1372 1373 if (s->state == MIGRATION_STATUS_CANCELLING) { 1374 migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING, 1375 MIGRATION_STATUS_CANCELLED); 1376 } 1377 1378 if (s->error) { 1379 /* It is used on info migrate. We can't free it */ 1380 error_report_err(error_copy(s->error)); 1381 } 1382 type = migration_has_failed(s) ? MIG_EVENT_PRECOPY_FAILED : 1383 MIG_EVENT_PRECOPY_DONE; 1384 migration_call_notifiers(s, type, NULL); 1385 block_cleanup_parameters(); 1386 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 1387 } 1388 1389 static void migrate_fd_cleanup_bh(void *opaque) 1390 { 1391 migrate_fd_cleanup(opaque); 1392 } 1393 1394 void migrate_set_error(MigrationState *s, const Error *error) 1395 { 1396 QEMU_LOCK_GUARD(&s->error_mutex); 1397 if (!s->error) { 1398 s->error = error_copy(error); 1399 } 1400 } 1401 1402 bool migrate_has_error(MigrationState *s) 1403 { 1404 /* The lock is not helpful here, but still follow the rule */ 1405 QEMU_LOCK_GUARD(&s->error_mutex); 1406 return qatomic_read(&s->error); 1407 } 1408 1409 static void migrate_error_free(MigrationState *s) 1410 { 1411 QEMU_LOCK_GUARD(&s->error_mutex); 1412 if (s->error) { 1413 error_free(s->error); 1414 s->error = NULL; 1415 } 1416 } 1417 1418 static void migrate_fd_error(MigrationState *s, const Error *error) 1419 { 1420 trace_migrate_fd_error(error_get_pretty(error)); 1421 assert(s->to_dst_file == NULL); 1422 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, 1423 MIGRATION_STATUS_FAILED); 1424 migrate_set_error(s, error); 1425 } 1426 1427 static void migrate_fd_cancel(MigrationState *s) 1428 { 1429 int old_state ; 1430 1431 trace_migrate_fd_cancel(); 1432 1433 WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) { 1434 if (s->rp_state.from_dst_file) { 1435 /* shutdown the rp socket, so causing the rp thread to shutdown */ 1436 qemu_file_shutdown(s->rp_state.from_dst_file); 1437 } 1438 } 1439 1440 do { 1441 old_state = s->state; 1442 if (!migration_is_running(old_state)) { 1443 break; 1444 } 1445 /* If the migration is paused, kick it out of the pause */ 1446 if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) { 1447 qemu_sem_post(&s->pause_sem); 1448 } 1449 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING); 1450 } while (s->state != MIGRATION_STATUS_CANCELLING); 1451 1452 /* 1453 * If we're unlucky the migration code might be stuck somewhere in a 1454 * send/write while the network has failed and is waiting to timeout; 1455 * if we've got shutdown(2) available then we can force it to quit. 1456 */ 1457 if (s->state == MIGRATION_STATUS_CANCELLING) { 1458 WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) { 1459 if (s->to_dst_file) { 1460 qemu_file_shutdown(s->to_dst_file); 1461 } 1462 } 1463 } 1464 if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) { 1465 Error *local_err = NULL; 1466 1467 bdrv_activate_all(&local_err); 1468 if (local_err) { 1469 error_report_err(local_err); 1470 } else { 1471 s->block_inactive = false; 1472 } 1473 } 1474 } 1475 1476 void migration_add_notifier_mode(NotifierWithReturn *notify, 1477 MigrationNotifyFunc func, MigMode mode) 1478 { 1479 notify->notify = (NotifierWithReturnFunc)func; 1480 notifier_with_return_list_add(&migration_state_notifiers[mode], notify); 1481 } 1482 1483 void migration_add_notifier(NotifierWithReturn *notify, 1484 MigrationNotifyFunc func) 1485 { 1486 migration_add_notifier_mode(notify, func, MIG_MODE_NORMAL); 1487 } 1488 1489 void migration_remove_notifier(NotifierWithReturn *notify) 1490 { 1491 if (notify->notify) { 1492 notifier_with_return_remove(notify); 1493 notify->notify = NULL; 1494 } 1495 } 1496 1497 int migration_call_notifiers(MigrationState *s, MigrationEventType type, 1498 Error **errp) 1499 { 1500 MigMode mode = s->parameters.mode; 1501 MigrationEvent e; 1502 int ret; 1503 1504 e.type = type; 1505 ret = notifier_with_return_list_notify(&migration_state_notifiers[mode], 1506 &e, errp); 1507 assert(!ret || type == MIG_EVENT_PRECOPY_SETUP); 1508 return ret; 1509 } 1510 1511 bool migration_in_setup(MigrationState *s) 1512 { 1513 return s->state == MIGRATION_STATUS_SETUP; 1514 } 1515 1516 bool migration_has_finished(MigrationState *s) 1517 { 1518 return s->state == MIGRATION_STATUS_COMPLETED; 1519 } 1520 1521 bool migration_has_failed(MigrationState *s) 1522 { 1523 return (s->state == MIGRATION_STATUS_CANCELLED || 1524 s->state == MIGRATION_STATUS_FAILED); 1525 } 1526 1527 bool migration_in_postcopy(void) 1528 { 1529 MigrationState *s = migrate_get_current(); 1530 1531 switch (s->state) { 1532 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1533 case MIGRATION_STATUS_POSTCOPY_PAUSED: 1534 case MIGRATION_STATUS_POSTCOPY_RECOVER: 1535 return true; 1536 default: 1537 return false; 1538 } 1539 } 1540 1541 bool migration_postcopy_is_alive(int state) 1542 { 1543 switch (state) { 1544 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1545 case MIGRATION_STATUS_POSTCOPY_RECOVER: 1546 return true; 1547 default: 1548 return false; 1549 } 1550 } 1551 1552 bool migration_in_incoming_postcopy(void) 1553 { 1554 PostcopyState ps = postcopy_state_get(); 1555 1556 return ps >= POSTCOPY_INCOMING_DISCARD && ps < POSTCOPY_INCOMING_END; 1557 } 1558 1559 bool migration_incoming_postcopy_advised(void) 1560 { 1561 PostcopyState ps = postcopy_state_get(); 1562 1563 return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END; 1564 } 1565 1566 bool migration_in_bg_snapshot(void) 1567 { 1568 MigrationState *s = migrate_get_current(); 1569 1570 return migrate_background_snapshot() && 1571 migration_is_setup_or_active(s->state); 1572 } 1573 1574 bool migration_is_idle(void) 1575 { 1576 MigrationState *s = current_migration; 1577 1578 if (!s) { 1579 return true; 1580 } 1581 1582 switch (s->state) { 1583 case MIGRATION_STATUS_NONE: 1584 case MIGRATION_STATUS_CANCELLED: 1585 case MIGRATION_STATUS_COMPLETED: 1586 case MIGRATION_STATUS_FAILED: 1587 return true; 1588 case MIGRATION_STATUS_SETUP: 1589 case MIGRATION_STATUS_CANCELLING: 1590 case MIGRATION_STATUS_ACTIVE: 1591 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1592 case MIGRATION_STATUS_COLO: 1593 case MIGRATION_STATUS_PRE_SWITCHOVER: 1594 case MIGRATION_STATUS_DEVICE: 1595 case MIGRATION_STATUS_WAIT_UNPLUG: 1596 return false; 1597 case MIGRATION_STATUS__MAX: 1598 g_assert_not_reached(); 1599 } 1600 1601 return false; 1602 } 1603 1604 bool migration_is_active(MigrationState *s) 1605 { 1606 return (s->state == MIGRATION_STATUS_ACTIVE || 1607 s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE); 1608 } 1609 1610 bool migrate_mode_is_cpr(MigrationState *s) 1611 { 1612 return s->parameters.mode == MIG_MODE_CPR_REBOOT; 1613 } 1614 1615 int migrate_init(MigrationState *s, Error **errp) 1616 { 1617 int ret; 1618 1619 ret = qemu_savevm_state_prepare(errp); 1620 if (ret) { 1621 return ret; 1622 } 1623 1624 /* 1625 * Reinitialise all migration state, except 1626 * parameters/capabilities that the user set, and 1627 * locks. 1628 */ 1629 s->to_dst_file = NULL; 1630 s->state = MIGRATION_STATUS_NONE; 1631 s->rp_state.from_dst_file = NULL; 1632 s->mbps = 0.0; 1633 s->pages_per_second = 0.0; 1634 s->downtime = 0; 1635 s->expected_downtime = 0; 1636 s->setup_time = 0; 1637 s->start_postcopy = false; 1638 s->migration_thread_running = false; 1639 error_free(s->error); 1640 s->error = NULL; 1641 s->vmdesc = NULL; 1642 1643 migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP); 1644 1645 s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 1646 s->total_time = 0; 1647 s->vm_old_state = -1; 1648 s->iteration_initial_bytes = 0; 1649 s->threshold_size = 0; 1650 s->switchover_acked = false; 1651 s->rdma_migration = false; 1652 /* 1653 * set mig_stats memory to zero for a new migration 1654 */ 1655 memset(&mig_stats, 0, sizeof(mig_stats)); 1656 migration_reset_vfio_bytes_transferred(); 1657 1658 return 0; 1659 } 1660 1661 static bool is_busy(Error **reasonp, Error **errp) 1662 { 1663 ERRP_GUARD(); 1664 1665 /* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. */ 1666 if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) { 1667 error_propagate_prepend(errp, *reasonp, 1668 "disallowing migration blocker " 1669 "(migration/snapshot in progress) for: "); 1670 *reasonp = NULL; 1671 return true; 1672 } 1673 return false; 1674 } 1675 1676 static bool is_only_migratable(Error **reasonp, Error **errp, int modes) 1677 { 1678 ERRP_GUARD(); 1679 1680 if (only_migratable && (modes & BIT(MIG_MODE_NORMAL))) { 1681 error_propagate_prepend(errp, *reasonp, 1682 "disallowing migration blocker " 1683 "(--only-migratable) for: "); 1684 *reasonp = NULL; 1685 return true; 1686 } 1687 return false; 1688 } 1689 1690 static int get_modes(MigMode mode, va_list ap) 1691 { 1692 int modes = 0; 1693 1694 while (mode != -1 && mode != MIG_MODE_ALL) { 1695 assert(mode >= MIG_MODE_NORMAL && mode < MIG_MODE__MAX); 1696 modes |= BIT(mode); 1697 mode = va_arg(ap, MigMode); 1698 } 1699 if (mode == MIG_MODE_ALL) { 1700 modes = BIT(MIG_MODE__MAX) - 1; 1701 } 1702 return modes; 1703 } 1704 1705 static int add_blockers(Error **reasonp, Error **errp, int modes) 1706 { 1707 for (MigMode mode = 0; mode < MIG_MODE__MAX; mode++) { 1708 if (modes & BIT(mode)) { 1709 migration_blockers[mode] = g_slist_prepend(migration_blockers[mode], 1710 *reasonp); 1711 } 1712 } 1713 return 0; 1714 } 1715 1716 int migrate_add_blocker(Error **reasonp, Error **errp) 1717 { 1718 return migrate_add_blocker_modes(reasonp, errp, MIG_MODE_ALL); 1719 } 1720 1721 int migrate_add_blocker_normal(Error **reasonp, Error **errp) 1722 { 1723 return migrate_add_blocker_modes(reasonp, errp, MIG_MODE_NORMAL, -1); 1724 } 1725 1726 int migrate_add_blocker_modes(Error **reasonp, Error **errp, MigMode mode, ...) 1727 { 1728 int modes; 1729 va_list ap; 1730 1731 va_start(ap, mode); 1732 modes = get_modes(mode, ap); 1733 va_end(ap); 1734 1735 if (is_only_migratable(reasonp, errp, modes)) { 1736 return -EACCES; 1737 } else if (is_busy(reasonp, errp)) { 1738 return -EBUSY; 1739 } 1740 return add_blockers(reasonp, errp, modes); 1741 } 1742 1743 int migrate_add_blocker_internal(Error **reasonp, Error **errp) 1744 { 1745 int modes = BIT(MIG_MODE__MAX) - 1; 1746 1747 if (is_busy(reasonp, errp)) { 1748 return -EBUSY; 1749 } 1750 return add_blockers(reasonp, errp, modes); 1751 } 1752 1753 void migrate_del_blocker(Error **reasonp) 1754 { 1755 if (*reasonp) { 1756 for (MigMode mode = 0; mode < MIG_MODE__MAX; mode++) { 1757 migration_blockers[mode] = g_slist_remove(migration_blockers[mode], 1758 *reasonp); 1759 } 1760 error_free(*reasonp); 1761 *reasonp = NULL; 1762 } 1763 } 1764 1765 void qmp_migrate_incoming(const char *uri, bool has_channels, 1766 MigrationChannelList *channels, Error **errp) 1767 { 1768 Error *local_err = NULL; 1769 static bool once = true; 1770 1771 if (!once) { 1772 error_setg(errp, "The incoming migration has already been started"); 1773 return; 1774 } 1775 if (!runstate_check(RUN_STATE_INMIGRATE)) { 1776 error_setg(errp, "'-incoming' was not specified on the command line"); 1777 return; 1778 } 1779 1780 if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) { 1781 return; 1782 } 1783 1784 qemu_start_incoming_migration(uri, has_channels, channels, &local_err); 1785 1786 if (local_err) { 1787 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 1788 error_propagate(errp, local_err); 1789 return; 1790 } 1791 1792 once = false; 1793 } 1794 1795 void qmp_migrate_recover(const char *uri, Error **errp) 1796 { 1797 MigrationIncomingState *mis = migration_incoming_get_current(); 1798 1799 /* 1800 * Don't even bother to use ERRP_GUARD() as it _must_ always be set by 1801 * callers (no one should ignore a recover failure); if there is, it's a 1802 * programming error. 1803 */ 1804 assert(errp); 1805 1806 if (mis->state != MIGRATION_STATUS_POSTCOPY_PAUSED) { 1807 error_setg(errp, "Migrate recover can only be run " 1808 "when postcopy is paused."); 1809 return; 1810 } 1811 1812 /* If there's an existing transport, release it */ 1813 migration_incoming_transport_cleanup(mis); 1814 1815 /* 1816 * Note that this call will never start a real migration; it will 1817 * only re-setup the migration stream and poke existing migration 1818 * to continue using that newly established channel. 1819 */ 1820 qemu_start_incoming_migration(uri, false, NULL, errp); 1821 } 1822 1823 void qmp_migrate_pause(Error **errp) 1824 { 1825 MigrationState *ms = migrate_get_current(); 1826 MigrationIncomingState *mis = migration_incoming_get_current(); 1827 int ret = 0; 1828 1829 if (migration_postcopy_is_alive(ms->state)) { 1830 /* Source side, during postcopy */ 1831 Error *error = NULL; 1832 1833 /* Tell the core migration that we're pausing */ 1834 error_setg(&error, "Postcopy migration is paused by the user"); 1835 migrate_set_error(ms, error); 1836 error_free(error); 1837 1838 qemu_mutex_lock(&ms->qemu_file_lock); 1839 if (ms->to_dst_file) { 1840 ret = qemu_file_shutdown(ms->to_dst_file); 1841 } 1842 qemu_mutex_unlock(&ms->qemu_file_lock); 1843 if (ret) { 1844 error_setg(errp, "Failed to pause source migration"); 1845 } 1846 1847 /* 1848 * Kick the migration thread out of any waiting windows (on behalf 1849 * of the rp thread). 1850 */ 1851 migration_rp_kick(ms); 1852 1853 return; 1854 } 1855 1856 if (migration_postcopy_is_alive(mis->state)) { 1857 ret = qemu_file_shutdown(mis->from_src_file); 1858 if (ret) { 1859 error_setg(errp, "Failed to pause destination migration"); 1860 } 1861 return; 1862 } 1863 1864 error_setg(errp, "migrate-pause is currently only supported " 1865 "during postcopy-active or postcopy-recover state"); 1866 } 1867 1868 bool migration_is_blocked(Error **errp) 1869 { 1870 GSList *blockers = migration_blockers[migrate_mode()]; 1871 1872 if (qemu_savevm_state_blocked(errp)) { 1873 return true; 1874 } 1875 1876 if (blockers) { 1877 error_propagate(errp, error_copy(blockers->data)); 1878 return true; 1879 } 1880 1881 return false; 1882 } 1883 1884 /* Returns true if continue to migrate, or false if error detected */ 1885 static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc, 1886 bool resume, Error **errp) 1887 { 1888 if (blk_inc) { 1889 warn_report("parameter 'inc' is deprecated;" 1890 " use blockdev-mirror with NBD instead"); 1891 } 1892 1893 if (blk) { 1894 warn_report("parameter 'blk' is deprecated;" 1895 " use blockdev-mirror with NBD instead"); 1896 } 1897 1898 if (resume) { 1899 if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) { 1900 error_setg(errp, "Cannot resume if there is no " 1901 "paused migration"); 1902 return false; 1903 } 1904 1905 /* 1906 * Postcopy recovery won't work well with release-ram 1907 * capability since release-ram will drop the page buffer as 1908 * long as the page is put into the send buffer. So if there 1909 * is a network failure happened, any page buffers that have 1910 * not yet reached the destination VM but have already been 1911 * sent from the source VM will be lost forever. Let's refuse 1912 * the client from resuming such a postcopy migration. 1913 * Luckily release-ram was designed to only be used when src 1914 * and destination VMs are on the same host, so it should be 1915 * fine. 1916 */ 1917 if (migrate_release_ram()) { 1918 error_setg(errp, "Postcopy recovery cannot work " 1919 "when release-ram capability is set"); 1920 return false; 1921 } 1922 1923 /* This is a resume, skip init status */ 1924 return true; 1925 } 1926 1927 if (migration_is_running(s->state)) { 1928 error_setg(errp, QERR_MIGRATION_ACTIVE); 1929 return false; 1930 } 1931 1932 if (runstate_check(RUN_STATE_INMIGRATE)) { 1933 error_setg(errp, "Guest is waiting for an incoming migration"); 1934 return false; 1935 } 1936 1937 if (runstate_check(RUN_STATE_POSTMIGRATE)) { 1938 error_setg(errp, "Can't migrate the vm that was paused due to " 1939 "previous migration"); 1940 return false; 1941 } 1942 1943 if (kvm_hwpoisoned_mem()) { 1944 error_setg(errp, "Can't migrate this vm with hardware poisoned memory, " 1945 "please reboot the vm and try again"); 1946 return false; 1947 } 1948 1949 if (migration_is_blocked(errp)) { 1950 return false; 1951 } 1952 1953 if (migrate_mode_is_cpr(s)) { 1954 const char *conflict = NULL; 1955 1956 if (migrate_postcopy()) { 1957 conflict = "postcopy"; 1958 } else if (migrate_background_snapshot()) { 1959 conflict = "background snapshot"; 1960 } else if (migrate_colo()) { 1961 conflict = "COLO"; 1962 } 1963 1964 if (conflict) { 1965 error_setg(errp, "Cannot use %s with CPR", conflict); 1966 return false; 1967 } 1968 } 1969 1970 if (blk || blk_inc) { 1971 if (migrate_colo()) { 1972 error_setg(errp, "No disk migration is required in COLO mode"); 1973 return false; 1974 } 1975 if (migrate_block() || migrate_block_incremental()) { 1976 error_setg(errp, "Command options are incompatible with " 1977 "current migration capabilities"); 1978 return false; 1979 } 1980 if (!migrate_cap_set(MIGRATION_CAPABILITY_BLOCK, true, errp)) { 1981 return false; 1982 } 1983 s->must_remove_block_options = true; 1984 } 1985 1986 if (blk_inc) { 1987 migrate_set_block_incremental(true); 1988 } 1989 1990 if (migrate_init(s, errp)) { 1991 return false; 1992 } 1993 1994 return true; 1995 } 1996 1997 void qmp_migrate(const char *uri, bool has_channels, 1998 MigrationChannelList *channels, bool has_blk, bool blk, 1999 bool has_inc, bool inc, bool has_detach, bool detach, 2000 bool has_resume, bool resume, Error **errp) 2001 { 2002 bool resume_requested; 2003 Error *local_err = NULL; 2004 MigrationState *s = migrate_get_current(); 2005 g_autoptr(MigrationChannel) channel = NULL; 2006 MigrationAddress *addr = NULL; 2007 2008 /* 2009 * Having preliminary checks for uri and channel 2010 */ 2011 if (!uri == !channels) { 2012 error_setg(errp, "need either 'uri' or 'channels' argument"); 2013 return; 2014 } 2015 2016 if (channels) { 2017 /* To verify that Migrate channel list has only item */ 2018 if (channels->next) { 2019 error_setg(errp, "Channel list has more than one entries"); 2020 return; 2021 } 2022 addr = channels->value->addr; 2023 } 2024 2025 if (uri) { 2026 /* caller uses the old URI syntax */ 2027 if (!migrate_uri_parse(uri, &channel, errp)) { 2028 return; 2029 } 2030 addr = channel->addr; 2031 } 2032 2033 /* transport mechanism not suitable for migration? */ 2034 if (!migration_channels_and_transport_compatible(addr, errp)) { 2035 return; 2036 } 2037 2038 resume_requested = has_resume && resume; 2039 if (!migrate_prepare(s, has_blk && blk, has_inc && inc, 2040 resume_requested, errp)) { 2041 /* Error detected, put into errp */ 2042 return; 2043 } 2044 2045 if (!resume_requested) { 2046 if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) { 2047 return; 2048 } 2049 } 2050 2051 if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) { 2052 SocketAddress *saddr = &addr->u.socket; 2053 if (saddr->type == SOCKET_ADDRESS_TYPE_INET || 2054 saddr->type == SOCKET_ADDRESS_TYPE_UNIX || 2055 saddr->type == SOCKET_ADDRESS_TYPE_VSOCK) { 2056 socket_start_outgoing_migration(s, saddr, &local_err); 2057 } else if (saddr->type == SOCKET_ADDRESS_TYPE_FD) { 2058 fd_start_outgoing_migration(s, saddr->u.fd.str, &local_err); 2059 } 2060 #ifdef CONFIG_RDMA 2061 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) { 2062 rdma_start_outgoing_migration(s, &addr->u.rdma, &local_err); 2063 #endif 2064 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_EXEC) { 2065 exec_start_outgoing_migration(s, addr->u.exec.args, &local_err); 2066 } else if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) { 2067 file_start_outgoing_migration(s, &addr->u.file, &local_err); 2068 } else { 2069 error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE, "uri", 2070 "a valid migration protocol"); 2071 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, 2072 MIGRATION_STATUS_FAILED); 2073 block_cleanup_parameters(); 2074 } 2075 2076 if (local_err) { 2077 if (!resume_requested) { 2078 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 2079 } 2080 migrate_fd_error(s, local_err); 2081 error_propagate(errp, local_err); 2082 return; 2083 } 2084 } 2085 2086 void qmp_migrate_cancel(Error **errp) 2087 { 2088 migration_cancel(NULL); 2089 } 2090 2091 void qmp_migrate_continue(MigrationStatus state, Error **errp) 2092 { 2093 MigrationState *s = migrate_get_current(); 2094 if (s->state != state) { 2095 error_setg(errp, "Migration not in expected state: %s", 2096 MigrationStatus_str(s->state)); 2097 return; 2098 } 2099 qemu_sem_post(&s->pause_sem); 2100 } 2101 2102 int migration_rp_wait(MigrationState *s) 2103 { 2104 /* If migration has failure already, ignore the wait */ 2105 if (migrate_has_error(s)) { 2106 return -1; 2107 } 2108 2109 qemu_sem_wait(&s->rp_state.rp_sem); 2110 2111 /* After wait, double check that there's no failure */ 2112 if (migrate_has_error(s)) { 2113 return -1; 2114 } 2115 2116 return 0; 2117 } 2118 2119 void migration_rp_kick(MigrationState *s) 2120 { 2121 qemu_sem_post(&s->rp_state.rp_sem); 2122 } 2123 2124 static struct rp_cmd_args { 2125 ssize_t len; /* -1 = variable */ 2126 const char *name; 2127 } rp_cmd_args[] = { 2128 [MIG_RP_MSG_INVALID] = { .len = -1, .name = "INVALID" }, 2129 [MIG_RP_MSG_SHUT] = { .len = 4, .name = "SHUT" }, 2130 [MIG_RP_MSG_PONG] = { .len = 4, .name = "PONG" }, 2131 [MIG_RP_MSG_REQ_PAGES] = { .len = 12, .name = "REQ_PAGES" }, 2132 [MIG_RP_MSG_REQ_PAGES_ID] = { .len = -1, .name = "REQ_PAGES_ID" }, 2133 [MIG_RP_MSG_RECV_BITMAP] = { .len = -1, .name = "RECV_BITMAP" }, 2134 [MIG_RP_MSG_RESUME_ACK] = { .len = 4, .name = "RESUME_ACK" }, 2135 [MIG_RP_MSG_SWITCHOVER_ACK] = { .len = 0, .name = "SWITCHOVER_ACK" }, 2136 [MIG_RP_MSG_MAX] = { .len = -1, .name = "MAX" }, 2137 }; 2138 2139 /* 2140 * Process a request for pages received on the return path, 2141 * We're allowed to send more than requested (e.g. to round to our page size) 2142 * and we don't need to send pages that have already been sent. 2143 */ 2144 static void 2145 migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname, 2146 ram_addr_t start, size_t len, Error **errp) 2147 { 2148 long our_host_ps = qemu_real_host_page_size(); 2149 2150 trace_migrate_handle_rp_req_pages(rbname, start, len); 2151 2152 /* 2153 * Since we currently insist on matching page sizes, just sanity check 2154 * we're being asked for whole host pages. 2155 */ 2156 if (!QEMU_IS_ALIGNED(start, our_host_ps) || 2157 !QEMU_IS_ALIGNED(len, our_host_ps)) { 2158 error_setg(errp, "MIG_RP_MSG_REQ_PAGES: Misaligned page request, start:" 2159 RAM_ADDR_FMT " len: %zd", start, len); 2160 return; 2161 } 2162 2163 ram_save_queue_pages(rbname, start, len, errp); 2164 } 2165 2166 static bool migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name, 2167 Error **errp) 2168 { 2169 RAMBlock *block = qemu_ram_block_by_name(block_name); 2170 2171 if (!block) { 2172 error_setg(errp, "MIG_RP_MSG_RECV_BITMAP has invalid block name '%s'", 2173 block_name); 2174 return false; 2175 } 2176 2177 /* Fetch the received bitmap and refresh the dirty bitmap */ 2178 return ram_dirty_bitmap_reload(s, block, errp); 2179 } 2180 2181 static bool migrate_handle_rp_resume_ack(MigrationState *s, 2182 uint32_t value, Error **errp) 2183 { 2184 trace_source_return_path_thread_resume_ack(value); 2185 2186 if (value != MIGRATION_RESUME_ACK_VALUE) { 2187 error_setg(errp, "illegal resume_ack value %"PRIu32, value); 2188 return false; 2189 } 2190 2191 /* Now both sides are active. */ 2192 migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_RECOVER, 2193 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2194 2195 /* Notify send thread that time to continue send pages */ 2196 migration_rp_kick(s); 2197 2198 return true; 2199 } 2200 2201 /* 2202 * Release ms->rp_state.from_dst_file (and postcopy_qemufile_src if 2203 * existed) in a safe way. 2204 */ 2205 static void migration_release_dst_files(MigrationState *ms) 2206 { 2207 QEMUFile *file; 2208 2209 WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) { 2210 /* 2211 * Reset the from_dst_file pointer first before releasing it, as we 2212 * can't block within lock section 2213 */ 2214 file = ms->rp_state.from_dst_file; 2215 ms->rp_state.from_dst_file = NULL; 2216 } 2217 2218 /* 2219 * Do the same to postcopy fast path socket too if there is. No 2220 * locking needed because this qemufile should only be managed by 2221 * return path thread. 2222 */ 2223 if (ms->postcopy_qemufile_src) { 2224 migration_ioc_unregister_yank_from_file(ms->postcopy_qemufile_src); 2225 qemu_file_shutdown(ms->postcopy_qemufile_src); 2226 qemu_fclose(ms->postcopy_qemufile_src); 2227 ms->postcopy_qemufile_src = NULL; 2228 } 2229 2230 qemu_fclose(file); 2231 } 2232 2233 /* 2234 * Handles messages sent on the return path towards the source VM 2235 * 2236 */ 2237 static void *source_return_path_thread(void *opaque) 2238 { 2239 MigrationState *ms = opaque; 2240 QEMUFile *rp = ms->rp_state.from_dst_file; 2241 uint16_t header_len, header_type; 2242 uint8_t buf[512]; 2243 uint32_t tmp32, sibling_error; 2244 ram_addr_t start = 0; /* =0 to silence warning */ 2245 size_t len = 0, expected_len; 2246 Error *err = NULL; 2247 int res; 2248 2249 trace_source_return_path_thread_entry(); 2250 rcu_register_thread(); 2251 2252 while (migration_is_setup_or_active(ms->state)) { 2253 trace_source_return_path_thread_loop_top(); 2254 2255 header_type = qemu_get_be16(rp); 2256 header_len = qemu_get_be16(rp); 2257 2258 if (qemu_file_get_error(rp)) { 2259 qemu_file_get_error_obj(rp, &err); 2260 goto out; 2261 } 2262 2263 if (header_type >= MIG_RP_MSG_MAX || 2264 header_type == MIG_RP_MSG_INVALID) { 2265 error_setg(&err, "Received invalid message 0x%04x length 0x%04x", 2266 header_type, header_len); 2267 goto out; 2268 } 2269 2270 if ((rp_cmd_args[header_type].len != -1 && 2271 header_len != rp_cmd_args[header_type].len) || 2272 header_len > sizeof(buf)) { 2273 error_setg(&err, "Received '%s' message (0x%04x) with" 2274 "incorrect length %d expecting %zu", 2275 rp_cmd_args[header_type].name, header_type, header_len, 2276 (size_t)rp_cmd_args[header_type].len); 2277 goto out; 2278 } 2279 2280 /* We know we've got a valid header by this point */ 2281 res = qemu_get_buffer(rp, buf, header_len); 2282 if (res != header_len) { 2283 error_setg(&err, "Failed reading data for message 0x%04x" 2284 " read %d expected %d", 2285 header_type, res, header_len); 2286 goto out; 2287 } 2288 2289 /* OK, we have the message and the data */ 2290 switch (header_type) { 2291 case MIG_RP_MSG_SHUT: 2292 sibling_error = ldl_be_p(buf); 2293 trace_source_return_path_thread_shut(sibling_error); 2294 if (sibling_error) { 2295 error_setg(&err, "Sibling indicated error %d", sibling_error); 2296 } 2297 /* 2298 * We'll let the main thread deal with closing the RP 2299 * we could do a shutdown(2) on it, but we're the only user 2300 * anyway, so there's nothing gained. 2301 */ 2302 goto out; 2303 2304 case MIG_RP_MSG_PONG: 2305 tmp32 = ldl_be_p(buf); 2306 trace_source_return_path_thread_pong(tmp32); 2307 qemu_sem_post(&ms->rp_state.rp_pong_acks); 2308 break; 2309 2310 case MIG_RP_MSG_REQ_PAGES: 2311 start = ldq_be_p(buf); 2312 len = ldl_be_p(buf + 8); 2313 migrate_handle_rp_req_pages(ms, NULL, start, len, &err); 2314 if (err) { 2315 goto out; 2316 } 2317 break; 2318 2319 case MIG_RP_MSG_REQ_PAGES_ID: 2320 expected_len = 12 + 1; /* header + termination */ 2321 2322 if (header_len >= expected_len) { 2323 start = ldq_be_p(buf); 2324 len = ldl_be_p(buf + 8); 2325 /* Now we expect an idstr */ 2326 tmp32 = buf[12]; /* Length of the following idstr */ 2327 buf[13 + tmp32] = '\0'; 2328 expected_len += tmp32; 2329 } 2330 if (header_len != expected_len) { 2331 error_setg(&err, "Req_Page_id with length %d expecting %zd", 2332 header_len, expected_len); 2333 goto out; 2334 } 2335 migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len, 2336 &err); 2337 if (err) { 2338 goto out; 2339 } 2340 break; 2341 2342 case MIG_RP_MSG_RECV_BITMAP: 2343 if (header_len < 1) { 2344 error_setg(&err, "MIG_RP_MSG_RECV_BITMAP missing block name"); 2345 goto out; 2346 } 2347 /* Format: len (1B) + idstr (<255B). This ends the idstr. */ 2348 buf[buf[0] + 1] = '\0'; 2349 if (!migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1), &err)) { 2350 goto out; 2351 } 2352 break; 2353 2354 case MIG_RP_MSG_RESUME_ACK: 2355 tmp32 = ldl_be_p(buf); 2356 if (!migrate_handle_rp_resume_ack(ms, tmp32, &err)) { 2357 goto out; 2358 } 2359 break; 2360 2361 case MIG_RP_MSG_SWITCHOVER_ACK: 2362 ms->switchover_acked = true; 2363 trace_source_return_path_thread_switchover_acked(); 2364 break; 2365 2366 default: 2367 break; 2368 } 2369 } 2370 2371 out: 2372 if (err) { 2373 migrate_set_error(ms, err); 2374 error_free(err); 2375 trace_source_return_path_thread_bad_end(); 2376 } 2377 2378 if (ms->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { 2379 /* 2380 * this will be extremely unlikely: that we got yet another network 2381 * issue during recovering of the 1st network failure.. during this 2382 * period the main migration thread can be waiting on rp_sem for 2383 * this thread to sync with the other side. 2384 * 2385 * When this happens, explicitly kick the migration thread out of 2386 * RECOVER stage and back to PAUSED, so the admin can try 2387 * everything again. 2388 */ 2389 migration_rp_kick(ms); 2390 } 2391 2392 trace_source_return_path_thread_end(); 2393 rcu_unregister_thread(); 2394 2395 return NULL; 2396 } 2397 2398 static int open_return_path_on_source(MigrationState *ms) 2399 { 2400 ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file); 2401 if (!ms->rp_state.from_dst_file) { 2402 return -1; 2403 } 2404 2405 trace_open_return_path_on_source(); 2406 2407 qemu_thread_create(&ms->rp_state.rp_thread, "return path", 2408 source_return_path_thread, ms, QEMU_THREAD_JOINABLE); 2409 ms->rp_state.rp_thread_created = true; 2410 2411 trace_open_return_path_on_source_continue(); 2412 2413 return 0; 2414 } 2415 2416 /* Return true if error detected, or false otherwise */ 2417 static bool close_return_path_on_source(MigrationState *ms) 2418 { 2419 if (!ms->rp_state.rp_thread_created) { 2420 return false; 2421 } 2422 2423 trace_migration_return_path_end_before(); 2424 2425 /* 2426 * If this is a normal exit then the destination will send a SHUT 2427 * and the rp_thread will exit, however if there's an error we 2428 * need to cause it to exit. shutdown(2), if we have it, will 2429 * cause it to unblock if it's stuck waiting for the destination. 2430 */ 2431 WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) { 2432 if (migrate_has_error(ms) && ms->rp_state.from_dst_file) { 2433 qemu_file_shutdown(ms->rp_state.from_dst_file); 2434 } 2435 } 2436 2437 qemu_thread_join(&ms->rp_state.rp_thread); 2438 ms->rp_state.rp_thread_created = false; 2439 migration_release_dst_files(ms); 2440 trace_migration_return_path_end_after(); 2441 2442 /* Return path will persist the error in MigrationState when quit */ 2443 return migrate_has_error(ms); 2444 } 2445 2446 static inline void 2447 migration_wait_main_channel(MigrationState *ms) 2448 { 2449 /* Wait until one PONG message received */ 2450 qemu_sem_wait(&ms->rp_state.rp_pong_acks); 2451 } 2452 2453 /* 2454 * Switch from normal iteration to postcopy 2455 * Returns non-0 on error 2456 */ 2457 static int postcopy_start(MigrationState *ms, Error **errp) 2458 { 2459 int ret; 2460 QIOChannelBuffer *bioc; 2461 QEMUFile *fb; 2462 uint64_t bandwidth = migrate_max_postcopy_bandwidth(); 2463 bool restart_block = false; 2464 int cur_state = MIGRATION_STATUS_ACTIVE; 2465 2466 if (migrate_postcopy_preempt()) { 2467 migration_wait_main_channel(ms); 2468 if (postcopy_preempt_establish_channel(ms)) { 2469 migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED); 2470 return -1; 2471 } 2472 } 2473 2474 if (!migrate_pause_before_switchover()) { 2475 migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE, 2476 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2477 } 2478 2479 trace_postcopy_start(); 2480 bql_lock(); 2481 trace_postcopy_start_set_run(); 2482 2483 ret = migration_stop_vm(ms, RUN_STATE_FINISH_MIGRATE); 2484 if (ret < 0) { 2485 goto fail; 2486 } 2487 2488 ret = migration_maybe_pause(ms, &cur_state, 2489 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2490 if (ret < 0) { 2491 goto fail; 2492 } 2493 2494 ret = bdrv_inactivate_all(); 2495 if (ret < 0) { 2496 goto fail; 2497 } 2498 restart_block = true; 2499 2500 /* 2501 * Cause any non-postcopiable, but iterative devices to 2502 * send out their final data. 2503 */ 2504 qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false); 2505 2506 /* 2507 * in Finish migrate and with the io-lock held everything should 2508 * be quiet, but we've potentially still got dirty pages and we 2509 * need to tell the destination to throw any pages it's already received 2510 * that are dirty 2511 */ 2512 if (migrate_postcopy_ram()) { 2513 ram_postcopy_send_discard_bitmap(ms); 2514 } 2515 2516 /* 2517 * send rest of state - note things that are doing postcopy 2518 * will notice we're in POSTCOPY_ACTIVE and not actually 2519 * wrap their state up here 2520 */ 2521 migration_rate_set(bandwidth); 2522 if (migrate_postcopy_ram()) { 2523 /* Ping just for debugging, helps line traces up */ 2524 qemu_savevm_send_ping(ms->to_dst_file, 2); 2525 } 2526 2527 /* 2528 * While loading the device state we may trigger page transfer 2529 * requests and the fd must be free to process those, and thus 2530 * the destination must read the whole device state off the fd before 2531 * it starts processing it. Unfortunately the ad-hoc migration format 2532 * doesn't allow the destination to know the size to read without fully 2533 * parsing it through each devices load-state code (especially the open 2534 * coded devices that use get/put). 2535 * So we wrap the device state up in a package with a length at the start; 2536 * to do this we use a qemu_buf to hold the whole of the device state. 2537 */ 2538 bioc = qio_channel_buffer_new(4096); 2539 qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer"); 2540 fb = qemu_file_new_output(QIO_CHANNEL(bioc)); 2541 object_unref(OBJECT(bioc)); 2542 2543 /* 2544 * Make sure the receiver can get incoming pages before we send the rest 2545 * of the state 2546 */ 2547 qemu_savevm_send_postcopy_listen(fb); 2548 2549 qemu_savevm_state_complete_precopy(fb, false, false); 2550 if (migrate_postcopy_ram()) { 2551 qemu_savevm_send_ping(fb, 3); 2552 } 2553 2554 qemu_savevm_send_postcopy_run(fb); 2555 2556 /* <><> end of stuff going into the package */ 2557 2558 /* Last point of recovery; as soon as we send the package the destination 2559 * can open devices and potentially start running. 2560 * Lets just check again we've not got any errors. 2561 */ 2562 ret = qemu_file_get_error(ms->to_dst_file); 2563 if (ret) { 2564 error_setg(errp, "postcopy_start: Migration stream errored (pre package)"); 2565 goto fail_closefb; 2566 } 2567 2568 restart_block = false; 2569 2570 /* Now send that blob */ 2571 if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) { 2572 goto fail_closefb; 2573 } 2574 qemu_fclose(fb); 2575 2576 /* Send a notify to give a chance for anything that needs to happen 2577 * at the transition to postcopy and after the device state; in particular 2578 * spice needs to trigger a transition now 2579 */ 2580 migration_call_notifiers(ms, MIG_EVENT_PRECOPY_DONE, NULL); 2581 2582 migration_downtime_end(ms); 2583 2584 bql_unlock(); 2585 2586 if (migrate_postcopy_ram()) { 2587 /* 2588 * Although this ping is just for debug, it could potentially be 2589 * used for getting a better measurement of downtime at the source. 2590 */ 2591 qemu_savevm_send_ping(ms->to_dst_file, 4); 2592 } 2593 2594 if (migrate_release_ram()) { 2595 ram_postcopy_migrated_memory_release(ms); 2596 } 2597 2598 ret = qemu_file_get_error(ms->to_dst_file); 2599 if (ret) { 2600 error_setg_errno(errp, -ret, "postcopy_start: Migration stream error"); 2601 bql_lock(); 2602 goto fail; 2603 } 2604 trace_postcopy_preempt_enabled(migrate_postcopy_preempt()); 2605 2606 return ret; 2607 2608 fail_closefb: 2609 qemu_fclose(fb); 2610 fail: 2611 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 2612 MIGRATION_STATUS_FAILED); 2613 if (restart_block) { 2614 /* A failure happened early enough that we know the destination hasn't 2615 * accessed block devices, so we're safe to recover. 2616 */ 2617 Error *local_err = NULL; 2618 2619 bdrv_activate_all(&local_err); 2620 if (local_err) { 2621 error_report_err(local_err); 2622 } 2623 } 2624 migration_call_notifiers(ms, MIG_EVENT_PRECOPY_FAILED, NULL); 2625 bql_unlock(); 2626 return -1; 2627 } 2628 2629 /** 2630 * migration_maybe_pause: Pause if required to by 2631 * migrate_pause_before_switchover called with the BQL locked 2632 * Returns: 0 on success 2633 */ 2634 static int migration_maybe_pause(MigrationState *s, 2635 int *current_active_state, 2636 int new_state) 2637 { 2638 if (!migrate_pause_before_switchover()) { 2639 return 0; 2640 } 2641 2642 /* Since leaving this state is not atomic with posting the semaphore 2643 * it's possible that someone could have issued multiple migrate_continue 2644 * and the semaphore is incorrectly positive at this point; 2645 * the docs say it's undefined to reinit a semaphore that's already 2646 * init'd, so use timedwait to eat up any existing posts. 2647 */ 2648 while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) { 2649 /* This block intentionally left blank */ 2650 } 2651 2652 /* 2653 * If the migration is cancelled when it is in the completion phase, 2654 * the migration state is set to MIGRATION_STATUS_CANCELLING. 2655 * So we don't need to wait a semaphore, otherwise we would always 2656 * wait for the 'pause_sem' semaphore. 2657 */ 2658 if (s->state != MIGRATION_STATUS_CANCELLING) { 2659 bql_unlock(); 2660 migrate_set_state(&s->state, *current_active_state, 2661 MIGRATION_STATUS_PRE_SWITCHOVER); 2662 qemu_sem_wait(&s->pause_sem); 2663 migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER, 2664 new_state); 2665 *current_active_state = new_state; 2666 bql_lock(); 2667 } 2668 2669 return s->state == new_state ? 0 : -EINVAL; 2670 } 2671 2672 static int migration_completion_precopy(MigrationState *s, 2673 int *current_active_state) 2674 { 2675 int ret; 2676 2677 bql_lock(); 2678 2679 if (!migrate_mode_is_cpr(s)) { 2680 ret = migration_stop_vm(s, RUN_STATE_FINISH_MIGRATE); 2681 if (ret < 0) { 2682 goto out_unlock; 2683 } 2684 } 2685 2686 ret = migration_maybe_pause(s, current_active_state, 2687 MIGRATION_STATUS_DEVICE); 2688 if (ret < 0) { 2689 goto out_unlock; 2690 } 2691 2692 /* 2693 * Inactivate disks except in COLO, and track that we have done so in order 2694 * to remember to reactivate them if migration fails or is cancelled. 2695 */ 2696 s->block_inactive = !migrate_colo(); 2697 migration_rate_set(RATE_LIMIT_DISABLED); 2698 ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false, 2699 s->block_inactive); 2700 out_unlock: 2701 bql_unlock(); 2702 return ret; 2703 } 2704 2705 static void migration_completion_postcopy(MigrationState *s) 2706 { 2707 trace_migration_completion_postcopy_end(); 2708 2709 bql_lock(); 2710 qemu_savevm_state_complete_postcopy(s->to_dst_file); 2711 bql_unlock(); 2712 2713 /* 2714 * Shutdown the postcopy fast path thread. This is only needed when dest 2715 * QEMU binary is old (7.1/7.2). QEMU 8.0+ doesn't need this. 2716 */ 2717 if (migrate_postcopy_preempt() && s->preempt_pre_7_2) { 2718 postcopy_preempt_shutdown_file(s); 2719 } 2720 2721 trace_migration_completion_postcopy_end_after_complete(); 2722 } 2723 2724 static void migration_completion_failed(MigrationState *s, 2725 int current_active_state) 2726 { 2727 if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE || 2728 s->state == MIGRATION_STATUS_DEVICE)) { 2729 /* 2730 * If not doing postcopy, vm_start() will be called: let's 2731 * regain control on images. 2732 */ 2733 Error *local_err = NULL; 2734 2735 bql_lock(); 2736 bdrv_activate_all(&local_err); 2737 if (local_err) { 2738 error_report_err(local_err); 2739 } else { 2740 s->block_inactive = false; 2741 } 2742 bql_unlock(); 2743 } 2744 2745 migrate_set_state(&s->state, current_active_state, 2746 MIGRATION_STATUS_FAILED); 2747 } 2748 2749 /** 2750 * migration_completion: Used by migration_thread when there's not much left. 2751 * The caller 'breaks' the loop when this returns. 2752 * 2753 * @s: Current migration state 2754 */ 2755 static void migration_completion(MigrationState *s) 2756 { 2757 int ret = 0; 2758 int current_active_state = s->state; 2759 2760 if (s->state == MIGRATION_STATUS_ACTIVE) { 2761 ret = migration_completion_precopy(s, ¤t_active_state); 2762 } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { 2763 migration_completion_postcopy(s); 2764 } else { 2765 ret = -1; 2766 } 2767 2768 if (ret < 0) { 2769 goto fail; 2770 } 2771 2772 if (close_return_path_on_source(s)) { 2773 goto fail; 2774 } 2775 2776 if (qemu_file_get_error(s->to_dst_file)) { 2777 trace_migration_completion_file_err(); 2778 goto fail; 2779 } 2780 2781 if (migrate_colo() && s->state == MIGRATION_STATUS_ACTIVE) { 2782 /* COLO does not support postcopy */ 2783 migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE, 2784 MIGRATION_STATUS_COLO); 2785 } else { 2786 migration_completion_end(s); 2787 } 2788 2789 return; 2790 2791 fail: 2792 migration_completion_failed(s, current_active_state); 2793 } 2794 2795 /** 2796 * bg_migration_completion: Used by bg_migration_thread when after all the 2797 * RAM has been saved. The caller 'breaks' the loop when this returns. 2798 * 2799 * @s: Current migration state 2800 */ 2801 static void bg_migration_completion(MigrationState *s) 2802 { 2803 int current_active_state = s->state; 2804 2805 if (s->state == MIGRATION_STATUS_ACTIVE) { 2806 /* 2807 * By this moment we have RAM content saved into the migration stream. 2808 * The next step is to flush the non-RAM content (device state) 2809 * right after the ram content. The device state has been stored into 2810 * the temporary buffer before RAM saving started. 2811 */ 2812 qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage); 2813 qemu_fflush(s->to_dst_file); 2814 } else if (s->state == MIGRATION_STATUS_CANCELLING) { 2815 goto fail; 2816 } 2817 2818 if (qemu_file_get_error(s->to_dst_file)) { 2819 trace_migration_completion_file_err(); 2820 goto fail; 2821 } 2822 2823 migration_completion_end(s); 2824 return; 2825 2826 fail: 2827 migrate_set_state(&s->state, current_active_state, 2828 MIGRATION_STATUS_FAILED); 2829 } 2830 2831 typedef enum MigThrError { 2832 /* No error detected */ 2833 MIG_THR_ERR_NONE = 0, 2834 /* Detected error, but resumed successfully */ 2835 MIG_THR_ERR_RECOVERED = 1, 2836 /* Detected fatal error, need to exit */ 2837 MIG_THR_ERR_FATAL = 2, 2838 } MigThrError; 2839 2840 static int postcopy_resume_handshake(MigrationState *s) 2841 { 2842 qemu_savevm_send_postcopy_resume(s->to_dst_file); 2843 2844 while (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { 2845 if (migration_rp_wait(s)) { 2846 return -1; 2847 } 2848 } 2849 2850 if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { 2851 return 0; 2852 } 2853 2854 return -1; 2855 } 2856 2857 /* Return zero if success, or <0 for error */ 2858 static int postcopy_do_resume(MigrationState *s) 2859 { 2860 int ret; 2861 2862 /* 2863 * Call all the resume_prepare() hooks, so that modules can be 2864 * ready for the migration resume. 2865 */ 2866 ret = qemu_savevm_state_resume_prepare(s); 2867 if (ret) { 2868 error_report("%s: resume_prepare() failure detected: %d", 2869 __func__, ret); 2870 return ret; 2871 } 2872 2873 /* 2874 * If preempt is enabled, re-establish the preempt channel. Note that 2875 * we do it after resume prepare to make sure the main channel will be 2876 * created before the preempt channel. E.g. with weak network, the 2877 * dest QEMU may get messed up with the preempt and main channels on 2878 * the order of connection setup. This guarantees the correct order. 2879 */ 2880 ret = postcopy_preempt_establish_channel(s); 2881 if (ret) { 2882 error_report("%s: postcopy_preempt_establish_channel(): %d", 2883 __func__, ret); 2884 return ret; 2885 } 2886 2887 /* 2888 * Last handshake with destination on the resume (destination will 2889 * switch to postcopy-active afterwards) 2890 */ 2891 ret = postcopy_resume_handshake(s); 2892 if (ret) { 2893 error_report("%s: handshake failed: %d", __func__, ret); 2894 return ret; 2895 } 2896 2897 return 0; 2898 } 2899 2900 /* 2901 * We don't return until we are in a safe state to continue current 2902 * postcopy migration. Returns MIG_THR_ERR_RECOVERED if recovered, or 2903 * MIG_THR_ERR_FATAL if unrecovery failure happened. 2904 */ 2905 static MigThrError postcopy_pause(MigrationState *s) 2906 { 2907 assert(s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE); 2908 2909 while (true) { 2910 QEMUFile *file; 2911 2912 /* 2913 * We're already pausing, so ignore any errors on the return 2914 * path and just wait for the thread to finish. It will be 2915 * re-created when we resume. 2916 */ 2917 close_return_path_on_source(s); 2918 2919 /* 2920 * Current channel is possibly broken. Release it. Note that this is 2921 * guaranteed even without lock because to_dst_file should only be 2922 * modified by the migration thread. That also guarantees that the 2923 * unregister of yank is safe too without the lock. It should be safe 2924 * even to be within the qemu_file_lock, but we didn't do that to avoid 2925 * taking more mutex (yank_lock) within qemu_file_lock. TL;DR: we make 2926 * the qemu_file_lock critical section as small as possible. 2927 */ 2928 assert(s->to_dst_file); 2929 migration_ioc_unregister_yank_from_file(s->to_dst_file); 2930 qemu_mutex_lock(&s->qemu_file_lock); 2931 file = s->to_dst_file; 2932 s->to_dst_file = NULL; 2933 qemu_mutex_unlock(&s->qemu_file_lock); 2934 2935 qemu_file_shutdown(file); 2936 qemu_fclose(file); 2937 2938 migrate_set_state(&s->state, s->state, 2939 MIGRATION_STATUS_POSTCOPY_PAUSED); 2940 2941 error_report("Detected IO failure for postcopy. " 2942 "Migration paused."); 2943 2944 /* 2945 * We wait until things fixed up. Then someone will setup the 2946 * status back for us. 2947 */ 2948 while (s->state == MIGRATION_STATUS_POSTCOPY_PAUSED) { 2949 qemu_sem_wait(&s->postcopy_pause_sem); 2950 } 2951 2952 if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { 2953 /* Woken up by a recover procedure. Give it a shot */ 2954 2955 /* Do the resume logic */ 2956 if (postcopy_do_resume(s) == 0) { 2957 /* Let's continue! */ 2958 trace_postcopy_pause_continued(); 2959 return MIG_THR_ERR_RECOVERED; 2960 } else { 2961 /* 2962 * Something wrong happened during the recovery, let's 2963 * pause again. Pause is always better than throwing 2964 * data away. 2965 */ 2966 continue; 2967 } 2968 } else { 2969 /* This is not right... Time to quit. */ 2970 return MIG_THR_ERR_FATAL; 2971 } 2972 } 2973 } 2974 2975 static MigThrError migration_detect_error(MigrationState *s) 2976 { 2977 int ret; 2978 int state = s->state; 2979 Error *local_error = NULL; 2980 2981 if (state == MIGRATION_STATUS_CANCELLING || 2982 state == MIGRATION_STATUS_CANCELLED) { 2983 /* End the migration, but don't set the state to failed */ 2984 return MIG_THR_ERR_FATAL; 2985 } 2986 2987 /* 2988 * Try to detect any file errors. Note that postcopy_qemufile_src will 2989 * be NULL when postcopy preempt is not enabled. 2990 */ 2991 ret = qemu_file_get_error_obj_any(s->to_dst_file, 2992 s->postcopy_qemufile_src, 2993 &local_error); 2994 if (!ret) { 2995 /* Everything is fine */ 2996 assert(!local_error); 2997 return MIG_THR_ERR_NONE; 2998 } 2999 3000 if (local_error) { 3001 migrate_set_error(s, local_error); 3002 error_free(local_error); 3003 } 3004 3005 if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE && ret) { 3006 /* 3007 * For postcopy, we allow the network to be down for a 3008 * while. After that, it can be continued by a 3009 * recovery phase. 3010 */ 3011 return postcopy_pause(s); 3012 } else { 3013 /* 3014 * For precopy (or postcopy with error outside IO), we fail 3015 * with no time. 3016 */ 3017 migrate_set_state(&s->state, state, MIGRATION_STATUS_FAILED); 3018 trace_migration_thread_file_err(); 3019 3020 /* Time to stop the migration, now. */ 3021 return MIG_THR_ERR_FATAL; 3022 } 3023 } 3024 3025 static void migration_completion_end(MigrationState *s) 3026 { 3027 uint64_t bytes = migration_transferred_bytes(); 3028 int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 3029 int64_t transfer_time; 3030 3031 /* 3032 * Take the BQL here so that query-migrate on the QMP thread sees: 3033 * - atomic update of s->total_time and s->mbps; 3034 * - correct ordering of s->mbps update vs. s->state; 3035 */ 3036 bql_lock(); 3037 migration_downtime_end(s); 3038 s->total_time = end_time - s->start_time; 3039 transfer_time = s->total_time - s->setup_time; 3040 if (transfer_time) { 3041 s->mbps = ((double) bytes * 8.0) / transfer_time / 1000; 3042 } 3043 3044 migrate_set_state(&s->state, s->state, 3045 MIGRATION_STATUS_COMPLETED); 3046 bql_unlock(); 3047 } 3048 3049 static void update_iteration_initial_status(MigrationState *s) 3050 { 3051 /* 3052 * Update these three fields at the same time to avoid mismatch info lead 3053 * wrong speed calculation. 3054 */ 3055 s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 3056 s->iteration_initial_bytes = migration_transferred_bytes(); 3057 s->iteration_initial_pages = ram_get_total_transferred_pages(); 3058 } 3059 3060 static void migration_update_counters(MigrationState *s, 3061 int64_t current_time) 3062 { 3063 uint64_t transferred, transferred_pages, time_spent; 3064 uint64_t current_bytes; /* bytes transferred since the beginning */ 3065 uint64_t switchover_bw; 3066 /* Expected bandwidth when switching over to destination QEMU */ 3067 double expected_bw_per_ms; 3068 double bandwidth; 3069 3070 if (current_time < s->iteration_start_time + BUFFER_DELAY) { 3071 return; 3072 } 3073 3074 switchover_bw = migrate_avail_switchover_bandwidth(); 3075 current_bytes = migration_transferred_bytes(); 3076 transferred = current_bytes - s->iteration_initial_bytes; 3077 time_spent = current_time - s->iteration_start_time; 3078 bandwidth = (double)transferred / time_spent; 3079 3080 if (switchover_bw) { 3081 /* 3082 * If the user specified a switchover bandwidth, let's trust the 3083 * user so that can be more accurate than what we estimated. 3084 */ 3085 expected_bw_per_ms = switchover_bw / 1000; 3086 } else { 3087 /* If the user doesn't specify bandwidth, we use the estimated */ 3088 expected_bw_per_ms = bandwidth; 3089 } 3090 3091 s->threshold_size = expected_bw_per_ms * migrate_downtime_limit(); 3092 3093 s->mbps = (((double) transferred * 8.0) / 3094 ((double) time_spent / 1000.0)) / 1000.0 / 1000.0; 3095 3096 transferred_pages = ram_get_total_transferred_pages() - 3097 s->iteration_initial_pages; 3098 s->pages_per_second = (double) transferred_pages / 3099 (((double) time_spent / 1000.0)); 3100 3101 /* 3102 * if we haven't sent anything, we don't want to 3103 * recalculate. 10000 is a small enough number for our purposes 3104 */ 3105 if (stat64_get(&mig_stats.dirty_pages_rate) && 3106 transferred > 10000) { 3107 s->expected_downtime = 3108 stat64_get(&mig_stats.dirty_bytes_last_sync) / expected_bw_per_ms; 3109 } 3110 3111 migration_rate_reset(); 3112 3113 update_iteration_initial_status(s); 3114 3115 trace_migrate_transferred(transferred, time_spent, 3116 /* Both in unit bytes/ms */ 3117 bandwidth, switchover_bw / 1000, 3118 s->threshold_size); 3119 } 3120 3121 static bool migration_can_switchover(MigrationState *s) 3122 { 3123 if (!migrate_switchover_ack()) { 3124 return true; 3125 } 3126 3127 /* No reason to wait for switchover ACK if VM is stopped */ 3128 if (!runstate_is_running()) { 3129 return true; 3130 } 3131 3132 return s->switchover_acked; 3133 } 3134 3135 /* Migration thread iteration status */ 3136 typedef enum { 3137 MIG_ITERATE_RESUME, /* Resume current iteration */ 3138 MIG_ITERATE_SKIP, /* Skip current iteration */ 3139 MIG_ITERATE_BREAK, /* Break the loop */ 3140 } MigIterateState; 3141 3142 /* 3143 * Return true if continue to the next iteration directly, false 3144 * otherwise. 3145 */ 3146 static MigIterateState migration_iteration_run(MigrationState *s) 3147 { 3148 uint64_t must_precopy, can_postcopy; 3149 Error *local_err = NULL; 3150 bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE; 3151 bool can_switchover = migration_can_switchover(s); 3152 3153 qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy); 3154 uint64_t pending_size = must_precopy + can_postcopy; 3155 3156 trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy); 3157 3158 if (must_precopy <= s->threshold_size) { 3159 qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy); 3160 pending_size = must_precopy + can_postcopy; 3161 trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy); 3162 } 3163 3164 if ((!pending_size || pending_size < s->threshold_size) && can_switchover) { 3165 trace_migration_thread_low_pending(pending_size); 3166 migration_completion(s); 3167 return MIG_ITERATE_BREAK; 3168 } 3169 3170 /* Still a significant amount to transfer */ 3171 if (!in_postcopy && must_precopy <= s->threshold_size && can_switchover && 3172 qatomic_read(&s->start_postcopy)) { 3173 if (postcopy_start(s, &local_err)) { 3174 migrate_set_error(s, local_err); 3175 error_report_err(local_err); 3176 } 3177 return MIG_ITERATE_SKIP; 3178 } 3179 3180 /* Just another iteration step */ 3181 qemu_savevm_state_iterate(s->to_dst_file, in_postcopy); 3182 return MIG_ITERATE_RESUME; 3183 } 3184 3185 static void migration_iteration_finish(MigrationState *s) 3186 { 3187 /* If we enabled cpu throttling for auto-converge, turn it off. */ 3188 cpu_throttle_stop(); 3189 3190 bql_lock(); 3191 switch (s->state) { 3192 case MIGRATION_STATUS_COMPLETED: 3193 runstate_set(RUN_STATE_POSTMIGRATE); 3194 break; 3195 case MIGRATION_STATUS_COLO: 3196 assert(migrate_colo()); 3197 migrate_start_colo_process(s); 3198 s->vm_old_state = RUN_STATE_RUNNING; 3199 /* Fallthrough */ 3200 case MIGRATION_STATUS_FAILED: 3201 case MIGRATION_STATUS_CANCELLED: 3202 case MIGRATION_STATUS_CANCELLING: 3203 if (runstate_is_live(s->vm_old_state)) { 3204 if (!runstate_check(RUN_STATE_SHUTDOWN)) { 3205 vm_start(); 3206 } 3207 } else { 3208 if (runstate_check(RUN_STATE_FINISH_MIGRATE)) { 3209 runstate_set(s->vm_old_state); 3210 } 3211 } 3212 break; 3213 3214 default: 3215 /* Should not reach here, but if so, forgive the VM. */ 3216 error_report("%s: Unknown ending state %d", __func__, s->state); 3217 break; 3218 } 3219 3220 migration_bh_schedule(migrate_fd_cleanup_bh, s); 3221 bql_unlock(); 3222 } 3223 3224 static void bg_migration_iteration_finish(MigrationState *s) 3225 { 3226 /* 3227 * Stop tracking RAM writes - un-protect memory, un-register UFFD 3228 * memory ranges, flush kernel wait queues and wake up threads 3229 * waiting for write fault to be resolved. 3230 */ 3231 ram_write_tracking_stop(); 3232 3233 bql_lock(); 3234 switch (s->state) { 3235 case MIGRATION_STATUS_COMPLETED: 3236 case MIGRATION_STATUS_ACTIVE: 3237 case MIGRATION_STATUS_FAILED: 3238 case MIGRATION_STATUS_CANCELLED: 3239 case MIGRATION_STATUS_CANCELLING: 3240 break; 3241 3242 default: 3243 /* Should not reach here, but if so, forgive the VM. */ 3244 error_report("%s: Unknown ending state %d", __func__, s->state); 3245 break; 3246 } 3247 3248 migration_bh_schedule(migrate_fd_cleanup_bh, s); 3249 bql_unlock(); 3250 } 3251 3252 /* 3253 * Return true if continue to the next iteration directly, false 3254 * otherwise. 3255 */ 3256 static MigIterateState bg_migration_iteration_run(MigrationState *s) 3257 { 3258 int res; 3259 3260 res = qemu_savevm_state_iterate(s->to_dst_file, false); 3261 if (res > 0) { 3262 bg_migration_completion(s); 3263 return MIG_ITERATE_BREAK; 3264 } 3265 3266 return MIG_ITERATE_RESUME; 3267 } 3268 3269 void migration_make_urgent_request(void) 3270 { 3271 qemu_sem_post(&migrate_get_current()->rate_limit_sem); 3272 } 3273 3274 void migration_consume_urgent_request(void) 3275 { 3276 qemu_sem_wait(&migrate_get_current()->rate_limit_sem); 3277 } 3278 3279 /* Returns true if the rate limiting was broken by an urgent request */ 3280 bool migration_rate_limit(void) 3281 { 3282 int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 3283 MigrationState *s = migrate_get_current(); 3284 3285 bool urgent = false; 3286 migration_update_counters(s, now); 3287 if (migration_rate_exceeded(s->to_dst_file)) { 3288 3289 if (qemu_file_get_error(s->to_dst_file)) { 3290 return false; 3291 } 3292 /* 3293 * Wait for a delay to do rate limiting OR 3294 * something urgent to post the semaphore. 3295 */ 3296 int ms = s->iteration_start_time + BUFFER_DELAY - now; 3297 trace_migration_rate_limit_pre(ms); 3298 if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) { 3299 /* 3300 * We were woken by one or more urgent things but 3301 * the timedwait will have consumed one of them. 3302 * The service routine for the urgent wake will dec 3303 * the semaphore itself for each item it consumes, 3304 * so add this one we just eat back. 3305 */ 3306 qemu_sem_post(&s->rate_limit_sem); 3307 urgent = true; 3308 } 3309 trace_migration_rate_limit_post(urgent); 3310 } 3311 return urgent; 3312 } 3313 3314 /* 3315 * if failover devices are present, wait they are completely 3316 * unplugged 3317 */ 3318 3319 static void qemu_savevm_wait_unplug(MigrationState *s, int old_state, 3320 int new_state) 3321 { 3322 if (qemu_savevm_state_guest_unplug_pending()) { 3323 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_WAIT_UNPLUG); 3324 3325 while (s->state == MIGRATION_STATUS_WAIT_UNPLUG && 3326 qemu_savevm_state_guest_unplug_pending()) { 3327 qemu_sem_timedwait(&s->wait_unplug_sem, 250); 3328 } 3329 if (s->state != MIGRATION_STATUS_WAIT_UNPLUG) { 3330 int timeout = 120; /* 30 seconds */ 3331 /* 3332 * migration has been canceled 3333 * but as we have started an unplug we must wait the end 3334 * to be able to plug back the card 3335 */ 3336 while (timeout-- && qemu_savevm_state_guest_unplug_pending()) { 3337 qemu_sem_timedwait(&s->wait_unplug_sem, 250); 3338 } 3339 if (qemu_savevm_state_guest_unplug_pending() && 3340 !qtest_enabled()) { 3341 warn_report("migration: partially unplugged device on " 3342 "failure"); 3343 } 3344 } 3345 3346 migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG, new_state); 3347 } else { 3348 migrate_set_state(&s->state, old_state, new_state); 3349 } 3350 } 3351 3352 /* 3353 * Master migration thread on the source VM. 3354 * It drives the migration and pumps the data down the outgoing channel. 3355 */ 3356 static void *migration_thread(void *opaque) 3357 { 3358 MigrationState *s = opaque; 3359 MigrationThread *thread = NULL; 3360 int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST); 3361 MigThrError thr_error; 3362 bool urgent = false; 3363 3364 thread = migration_threads_add("live_migration", qemu_get_thread_id()); 3365 3366 rcu_register_thread(); 3367 3368 object_ref(OBJECT(s)); 3369 update_iteration_initial_status(s); 3370 3371 if (!multifd_send_setup()) { 3372 goto out; 3373 } 3374 3375 bql_lock(); 3376 qemu_savevm_state_header(s->to_dst_file); 3377 bql_unlock(); 3378 3379 /* 3380 * If we opened the return path, we need to make sure dst has it 3381 * opened as well. 3382 */ 3383 if (s->rp_state.rp_thread_created) { 3384 /* Now tell the dest that it should open its end so it can reply */ 3385 qemu_savevm_send_open_return_path(s->to_dst_file); 3386 3387 /* And do a ping that will make stuff easier to debug */ 3388 qemu_savevm_send_ping(s->to_dst_file, 1); 3389 } 3390 3391 if (migrate_postcopy()) { 3392 /* 3393 * Tell the destination that we *might* want to do postcopy later; 3394 * if the other end can't do postcopy it should fail now, nice and 3395 * early. 3396 */ 3397 qemu_savevm_send_postcopy_advise(s->to_dst_file); 3398 } 3399 3400 if (migrate_colo()) { 3401 /* Notify migration destination that we enable COLO */ 3402 qemu_savevm_send_colo_enable(s->to_dst_file); 3403 } 3404 3405 bql_lock(); 3406 qemu_savevm_state_setup(s->to_dst_file); 3407 bql_unlock(); 3408 3409 qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP, 3410 MIGRATION_STATUS_ACTIVE); 3411 3412 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; 3413 3414 trace_migration_thread_setup_complete(); 3415 3416 while (migration_is_active(s)) { 3417 if (urgent || !migration_rate_exceeded(s->to_dst_file)) { 3418 MigIterateState iter_state = migration_iteration_run(s); 3419 if (iter_state == MIG_ITERATE_SKIP) { 3420 continue; 3421 } else if (iter_state == MIG_ITERATE_BREAK) { 3422 break; 3423 } 3424 } 3425 3426 /* 3427 * Try to detect any kind of failures, and see whether we 3428 * should stop the migration now. 3429 */ 3430 thr_error = migration_detect_error(s); 3431 if (thr_error == MIG_THR_ERR_FATAL) { 3432 /* Stop migration */ 3433 break; 3434 } else if (thr_error == MIG_THR_ERR_RECOVERED) { 3435 /* 3436 * Just recovered from a e.g. network failure, reset all 3437 * the local variables. This is important to avoid 3438 * breaking transferred_bytes and bandwidth calculation 3439 */ 3440 update_iteration_initial_status(s); 3441 } 3442 3443 urgent = migration_rate_limit(); 3444 } 3445 3446 out: 3447 trace_migration_thread_after_loop(); 3448 migration_iteration_finish(s); 3449 object_unref(OBJECT(s)); 3450 rcu_unregister_thread(); 3451 migration_threads_remove(thread); 3452 return NULL; 3453 } 3454 3455 static void bg_migration_vm_start_bh(void *opaque) 3456 { 3457 MigrationState *s = opaque; 3458 3459 vm_resume(s->vm_old_state); 3460 migration_downtime_end(s); 3461 } 3462 3463 /** 3464 * Background snapshot thread, based on live migration code. 3465 * This is an alternative implementation of live migration mechanism 3466 * introduced specifically to support background snapshots. 3467 * 3468 * It takes advantage of userfault_fd write protection mechanism introduced 3469 * in v5.7 kernel. Compared to existing dirty page logging migration much 3470 * lesser stream traffic is produced resulting in smaller snapshot images, 3471 * simply cause of no page duplicates can get into the stream. 3472 * 3473 * Another key point is that generated vmstate stream reflects machine state 3474 * 'frozen' at the beginning of snapshot creation compared to dirty page logging 3475 * mechanism, which effectively results in that saved snapshot is the state of VM 3476 * at the end of the process. 3477 */ 3478 static void *bg_migration_thread(void *opaque) 3479 { 3480 MigrationState *s = opaque; 3481 int64_t setup_start; 3482 MigThrError thr_error; 3483 QEMUFile *fb; 3484 bool early_fail = true; 3485 3486 rcu_register_thread(); 3487 object_ref(OBJECT(s)); 3488 3489 migration_rate_set(RATE_LIMIT_DISABLED); 3490 3491 setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST); 3492 /* 3493 * We want to save vmstate for the moment when migration has been 3494 * initiated but also we want to save RAM content while VM is running. 3495 * The RAM content should appear first in the vmstate. So, we first 3496 * stash the non-RAM part of the vmstate to the temporary buffer, 3497 * then write RAM part of the vmstate to the migration stream 3498 * with vCPUs running and, finally, write stashed non-RAM part of 3499 * the vmstate from the buffer to the migration stream. 3500 */ 3501 s->bioc = qio_channel_buffer_new(512 * 1024); 3502 qio_channel_set_name(QIO_CHANNEL(s->bioc), "vmstate-buffer"); 3503 fb = qemu_file_new_output(QIO_CHANNEL(s->bioc)); 3504 object_unref(OBJECT(s->bioc)); 3505 3506 update_iteration_initial_status(s); 3507 3508 /* 3509 * Prepare for tracking memory writes with UFFD-WP - populate 3510 * RAM pages before protecting. 3511 */ 3512 #ifdef __linux__ 3513 ram_write_tracking_prepare(); 3514 #endif 3515 3516 bql_lock(); 3517 qemu_savevm_state_header(s->to_dst_file); 3518 qemu_savevm_state_setup(s->to_dst_file); 3519 bql_unlock(); 3520 3521 qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP, 3522 MIGRATION_STATUS_ACTIVE); 3523 3524 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; 3525 3526 trace_migration_thread_setup_complete(); 3527 3528 bql_lock(); 3529 3530 if (migration_stop_vm(s, RUN_STATE_PAUSED)) { 3531 goto fail; 3532 } 3533 /* 3534 * Put vCPUs in sync with shadow context structures, then 3535 * save their state to channel-buffer along with devices. 3536 */ 3537 cpu_synchronize_all_states(); 3538 if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) { 3539 goto fail; 3540 } 3541 /* 3542 * Since we are going to get non-iterable state data directly 3543 * from s->bioc->data, explicit flush is needed here. 3544 */ 3545 qemu_fflush(fb); 3546 3547 /* Now initialize UFFD context and start tracking RAM writes */ 3548 if (ram_write_tracking_start()) { 3549 goto fail; 3550 } 3551 early_fail = false; 3552 3553 /* 3554 * Start VM from BH handler to avoid write-fault lock here. 3555 * UFFD-WP protection for the whole RAM is already enabled so 3556 * calling VM state change notifiers from vm_start() would initiate 3557 * writes to virtio VQs memory which is in write-protected region. 3558 */ 3559 migration_bh_schedule(bg_migration_vm_start_bh, s); 3560 bql_unlock(); 3561 3562 while (migration_is_active(s)) { 3563 MigIterateState iter_state = bg_migration_iteration_run(s); 3564 if (iter_state == MIG_ITERATE_SKIP) { 3565 continue; 3566 } else if (iter_state == MIG_ITERATE_BREAK) { 3567 break; 3568 } 3569 3570 /* 3571 * Try to detect any kind of failures, and see whether we 3572 * should stop the migration now. 3573 */ 3574 thr_error = migration_detect_error(s); 3575 if (thr_error == MIG_THR_ERR_FATAL) { 3576 /* Stop migration */ 3577 break; 3578 } 3579 3580 migration_update_counters(s, qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); 3581 } 3582 3583 trace_migration_thread_after_loop(); 3584 3585 fail: 3586 if (early_fail) { 3587 migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE, 3588 MIGRATION_STATUS_FAILED); 3589 bql_unlock(); 3590 } 3591 3592 bg_migration_iteration_finish(s); 3593 3594 qemu_fclose(fb); 3595 object_unref(OBJECT(s)); 3596 rcu_unregister_thread(); 3597 3598 return NULL; 3599 } 3600 3601 void migrate_fd_connect(MigrationState *s, Error *error_in) 3602 { 3603 Error *local_err = NULL; 3604 uint64_t rate_limit; 3605 bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED; 3606 int ret; 3607 3608 /* 3609 * If there's a previous error, free it and prepare for another one. 3610 * Meanwhile if migration completes successfully, there won't have an error 3611 * dumped when calling migrate_fd_cleanup(). 3612 */ 3613 migrate_error_free(s); 3614 3615 s->expected_downtime = migrate_downtime_limit(); 3616 if (error_in) { 3617 migrate_fd_error(s, error_in); 3618 if (resume) { 3619 /* 3620 * Don't do cleanup for resume if channel is invalid, but only dump 3621 * the error. We wait for another channel connect from the user. 3622 * The error_report still gives HMP user a hint on what failed. 3623 * It's normally done in migrate_fd_cleanup(), but call it here 3624 * explicitly. 3625 */ 3626 error_report_err(error_copy(s->error)); 3627 } else { 3628 migrate_fd_cleanup(s); 3629 } 3630 return; 3631 } 3632 3633 if (resume) { 3634 /* This is a resumed migration */ 3635 rate_limit = migrate_max_postcopy_bandwidth(); 3636 } else { 3637 /* This is a fresh new migration */ 3638 rate_limit = migrate_max_bandwidth(); 3639 3640 /* Notify before starting migration thread */ 3641 if (migration_call_notifiers(s, MIG_EVENT_PRECOPY_SETUP, &local_err)) { 3642 goto fail; 3643 } 3644 } 3645 3646 migration_rate_set(rate_limit); 3647 qemu_file_set_blocking(s->to_dst_file, true); 3648 3649 /* 3650 * Open the return path. For postcopy, it is used exclusively. For 3651 * precopy, only if user specified "return-path" capability would 3652 * QEMU uses the return path. 3653 */ 3654 if (migrate_postcopy_ram() || migrate_return_path()) { 3655 if (open_return_path_on_source(s)) { 3656 error_setg(&local_err, "Unable to open return-path for postcopy"); 3657 goto fail; 3658 } 3659 } 3660 3661 /* 3662 * This needs to be done before resuming a postcopy. Note: for newer 3663 * QEMUs we will delay the channel creation until postcopy_start(), to 3664 * avoid disorder of channel creations. 3665 */ 3666 if (migrate_postcopy_preempt() && s->preempt_pre_7_2) { 3667 postcopy_preempt_setup(s); 3668 } 3669 3670 if (resume) { 3671 /* Wakeup the main migration thread to do the recovery */ 3672 migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED, 3673 MIGRATION_STATUS_POSTCOPY_RECOVER); 3674 qemu_sem_post(&s->postcopy_pause_sem); 3675 return; 3676 } 3677 3678 if (migrate_mode_is_cpr(s)) { 3679 ret = migration_stop_vm(s, RUN_STATE_FINISH_MIGRATE); 3680 if (ret < 0) { 3681 error_setg(&local_err, "migration_stop_vm failed, error %d", -ret); 3682 goto fail; 3683 } 3684 } 3685 3686 if (migrate_background_snapshot()) { 3687 qemu_thread_create(&s->thread, "bg_snapshot", 3688 bg_migration_thread, s, QEMU_THREAD_JOINABLE); 3689 } else { 3690 qemu_thread_create(&s->thread, "live_migration", 3691 migration_thread, s, QEMU_THREAD_JOINABLE); 3692 } 3693 s->migration_thread_running = true; 3694 return; 3695 3696 fail: 3697 migrate_set_error(s, local_err); 3698 migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED); 3699 error_report_err(local_err); 3700 migrate_fd_cleanup(s); 3701 } 3702 3703 static void migration_class_init(ObjectClass *klass, void *data) 3704 { 3705 DeviceClass *dc = DEVICE_CLASS(klass); 3706 3707 dc->user_creatable = false; 3708 device_class_set_props(dc, migration_properties); 3709 } 3710 3711 static void migration_instance_finalize(Object *obj) 3712 { 3713 MigrationState *ms = MIGRATION_OBJ(obj); 3714 3715 qemu_mutex_destroy(&ms->error_mutex); 3716 qemu_mutex_destroy(&ms->qemu_file_lock); 3717 qemu_sem_destroy(&ms->wait_unplug_sem); 3718 qemu_sem_destroy(&ms->rate_limit_sem); 3719 qemu_sem_destroy(&ms->pause_sem); 3720 qemu_sem_destroy(&ms->postcopy_pause_sem); 3721 qemu_sem_destroy(&ms->rp_state.rp_sem); 3722 qemu_sem_destroy(&ms->rp_state.rp_pong_acks); 3723 qemu_sem_destroy(&ms->postcopy_qemufile_src_sem); 3724 error_free(ms->error); 3725 } 3726 3727 static void migration_instance_init(Object *obj) 3728 { 3729 MigrationState *ms = MIGRATION_OBJ(obj); 3730 3731 ms->state = MIGRATION_STATUS_NONE; 3732 ms->mbps = -1; 3733 ms->pages_per_second = -1; 3734 qemu_sem_init(&ms->pause_sem, 0); 3735 qemu_mutex_init(&ms->error_mutex); 3736 3737 migrate_params_init(&ms->parameters); 3738 3739 qemu_sem_init(&ms->postcopy_pause_sem, 0); 3740 qemu_sem_init(&ms->rp_state.rp_sem, 0); 3741 qemu_sem_init(&ms->rp_state.rp_pong_acks, 0); 3742 qemu_sem_init(&ms->rate_limit_sem, 0); 3743 qemu_sem_init(&ms->wait_unplug_sem, 0); 3744 qemu_sem_init(&ms->postcopy_qemufile_src_sem, 0); 3745 qemu_mutex_init(&ms->qemu_file_lock); 3746 } 3747 3748 /* 3749 * Return true if check pass, false otherwise. Error will be put 3750 * inside errp if provided. 3751 */ 3752 static bool migration_object_check(MigrationState *ms, Error **errp) 3753 { 3754 /* Assuming all off */ 3755 bool old_caps[MIGRATION_CAPABILITY__MAX] = { 0 }; 3756 3757 if (!migrate_params_check(&ms->parameters, errp)) { 3758 return false; 3759 } 3760 3761 return migrate_caps_check(old_caps, ms->capabilities, errp); 3762 } 3763 3764 static const TypeInfo migration_type = { 3765 .name = TYPE_MIGRATION, 3766 /* 3767 * NOTE: TYPE_MIGRATION is not really a device, as the object is 3768 * not created using qdev_new(), it is not attached to the qdev 3769 * device tree, and it is never realized. 3770 * 3771 * TODO: Make this TYPE_OBJECT once QOM provides something like 3772 * TYPE_DEVICE's "-global" properties. 3773 */ 3774 .parent = TYPE_DEVICE, 3775 .class_init = migration_class_init, 3776 .class_size = sizeof(MigrationClass), 3777 .instance_size = sizeof(MigrationState), 3778 .instance_init = migration_instance_init, 3779 .instance_finalize = migration_instance_finalize, 3780 }; 3781 3782 static void register_migration_types(void) 3783 { 3784 type_register_static(&migration_type); 3785 } 3786 3787 type_init(register_migration_types); 3788