/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/blocker.h"
#include "exec.h"
#include "fd.h"
#include "socket.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpu-throttle.h"
#include "rdma.h"
#include "ram.h"
#include "migration/global_state.h"
#include "migration/misc.h"
#include "migration.h"
#include "savevm.h"
#include "qemu-file.h"
#include "channel.h"
#include "migration/vmstate.h"
#include "block/block.h"
#include "qapi/error.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-migration.h"
#include "qapi/qapi-visit-sockets.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qnull.h"
#include "qemu/rcu.h"
#include "block.h"
#include "postcopy-ram.h"
#include "qemu/thread.h"
#include "trace.h"
#include "exec/target_page.h"
#include "io/channel-buffer.h"
#include "io/channel-tls.h"
#include "migration/colo.h"
#include "hw/boards.h"
#include "monitor/monitor.h"
#include "net/announce.h"
#include "qemu/queue.h"
#include "multifd.h"
#include "threadinfo.h"
#include "qemu/yank.h"
#include "sysemu/cpus.h"
#include "yank_functions.h"
#include "sysemu/qtest.h"
#include "options.h"

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

/* Messages sent on the return path from destination to source */
enum mig_rp_message_type {
    MIG_RP_MSG_INVALID = 0,  /* Must be 0 */
    MIG_RP_MSG_SHUT,         /* sibling will not send any more RP messages */
    MIG_RP_MSG_PONG,         /* Response to a PING; data (seq: be32 ) */

    MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */
    MIG_RP_MSG_REQ_PAGES,    /* data (start: be64, len: be32) */
    MIG_RP_MSG_RECV_BITMAP,  /* send recved_bitmap back to source */
    MIG_RP_MSG_RESUME_ACK,   /* tell source that we are ready to resume */

    MIG_RP_MSG_MAX
};

/* When we add fault tolerance, we could have several
   migrations at once.
   For now we don't need to add
   dynamic creation of migration */

static MigrationState *current_migration;
static MigrationIncomingState *current_incoming;

static GSList *migration_blockers;

static bool migration_object_check(MigrationState *ms, Error **errp);
static int migration_maybe_pause(MigrationState *s,
                                 int *current_active_state,
                                 int new_state);
static void migrate_fd_cancel(MigrationState *s);

static bool migration_needs_multiple_sockets(void)
{
    return migrate_multifd() || migrate_postcopy_preempt();
}

static bool uri_supports_multi_channels(const char *uri)
{
    return strstart(uri, "tcp:", NULL) || strstart(uri, "unix:", NULL) ||
           strstart(uri, "vsock:", NULL);
}

static bool
migration_channels_and_uri_compatible(const char *uri, Error **errp)
{
    if (migration_needs_multiple_sockets() &&
        !uri_supports_multi_channels(uri)) {
        error_setg(errp, "Migration requires multi-channel URIs (e.g. tcp)");
        return false;
    }

    return true;
}

static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp)
{
    uintptr_t a = (uintptr_t) ap, b = (uintptr_t) bp;

    return (a > b) - (a < b);
}

void migration_object_init(void)
{
    /* This can only be called once. */
    assert(!current_migration);
    current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION));

    /*
     * Init the migrate incoming object as well no matter whether
     * we'll use it or not.
     */
    assert(!current_incoming);
    current_incoming = g_new0(MigrationIncomingState, 1);
    current_incoming->state = MIGRATION_STATUS_NONE;
    current_incoming->postcopy_remote_fds =
        g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD));
    qemu_mutex_init(&current_incoming->rp_mutex);
    qemu_mutex_init(&current_incoming->postcopy_prio_thread_mutex);
    qemu_event_init(&current_incoming->main_thread_load_event, false);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_dst, 0);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_fault, 0);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_fast_load, 0);
    qemu_sem_init(&current_incoming->postcopy_qemufile_dst_done, 0);

    qemu_mutex_init(&current_incoming->page_request_mutex);
    current_incoming->page_requested = g_tree_new(page_request_addr_cmp);

    migration_object_check(current_migration, &error_fatal);

    blk_mig_init();
    ram_mig_init();
    dirty_bitmap_mig_init();
}

void migration_cancel(const Error *error)
{
    if (error) {
        migrate_set_error(current_migration, error);
    }
    migrate_fd_cancel(current_migration);
}

void migration_shutdown(void)
{
    /*
     * When the QEMU main thread exits, the COLO thread
     * may be waiting on a semaphore, so we should wake up the
     * COLO thread before migration shutdown.
     */
    colo_shutdown();
    /*
     * Cancel the current migration - that will (eventually)
     * stop the migration using this structure
     */
    migration_cancel(NULL);
    object_unref(OBJECT(current_migration));

    /*
     * Cancel outgoing migration of dirty bitmaps. It should
     * at least unref used block nodes.
     */
    dirty_bitmap_mig_cancel_outgoing();

    /*
     * Cancel incoming migration of dirty bitmaps. Dirty bitmaps
     * are non-critical data, and their loss is never considered
     * something serious.
     */
    dirty_bitmap_mig_cancel_incoming();
}

/* For outgoing */
MigrationState *migrate_get_current(void)
{
    /* This can only be called after the object is created. */
    assert(current_migration);
    return current_migration;
}

MigrationIncomingState *migration_incoming_get_current(void)
{
    assert(current_incoming);
    return current_incoming;
}

void migration_incoming_transport_cleanup(MigrationIncomingState *mis)
{
    if (mis->socket_address_list) {
        qapi_free_SocketAddressList(mis->socket_address_list);
        mis->socket_address_list = NULL;
    }

    if (mis->transport_cleanup) {
        mis->transport_cleanup(mis->transport_data);
        mis->transport_data = mis->transport_cleanup = NULL;
    }
}

void migration_incoming_state_destroy(void)
{
    struct MigrationIncomingState *mis = migration_incoming_get_current();

    multifd_load_cleanup();

    if (mis->to_src_file) {
        /* Tell source that we are done */
        migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
        qemu_fclose(mis->to_src_file);
        mis->to_src_file = NULL;
    }

    if (mis->from_src_file) {
        migration_ioc_unregister_yank_from_file(mis->from_src_file);
        qemu_fclose(mis->from_src_file);
        mis->from_src_file = NULL;
    }
    if (mis->postcopy_remote_fds) {
        g_array_free(mis->postcopy_remote_fds, TRUE);
        mis->postcopy_remote_fds = NULL;
    }

    migration_incoming_transport_cleanup(mis);
    qemu_event_reset(&mis->main_thread_load_event);

    if (mis->page_requested) {
        g_tree_destroy(mis->page_requested);
        mis->page_requested = NULL;
    }

    if (mis->postcopy_qemufile_dst) {
        migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst);
        qemu_fclose(mis->postcopy_qemufile_dst);
        mis->postcopy_qemufile_dst = NULL;
    }

    yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_events()) {
        qapi_event_send_migration(new_state);
    }
}

/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
static int migrate_send_rp_message(MigrationIncomingState *mis,
                                   enum mig_rp_message_type message_type,
                                   uint16_t len, void *data)
{
    int ret = 0;

    trace_migrate_send_rp_message((int)message_type, len);
    QEMU_LOCK_GUARD(&mis->rp_mutex);

    /*
     * It's possible that the file handle got lost due to network
     * failures.
     */
    if (!mis->to_src_file) {
        ret = -EIO;
        return ret;
    }

    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);

    /* It's possible that the qemu file got an error during sending */
    ret = qemu_file_get_error(mis->to_src_file);

    return ret;
}

/* Request one page from the source VM at the given start address.
 *   rb: the RAMBlock to request the page in
 *   Start: Address offset within the RB
 *   Len: Length in bytes required - must be a multiple of pagesize
 */
int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
                                      RAMBlock *rb, ram_addr_t start)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
    size_t msglen = 12; /* start + len */
    size_t len = qemu_ram_pagesize(rb);
    enum mig_rp_message_type msg_type;
    const char *rbname;
    int rbname_len;

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    /*
     * We maintain the last ramblock that we requested a page for. Note that we
     * don't need locking because this function will only be called within the
     * postcopy ram fault thread.
     */
    if (rb != mis->last_rb) {
        mis->last_rb = rb;

        rbname = qemu_ram_get_idstr(rb);
        rbname_len = strlen(rbname);

        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        msg_type = MIG_RP_MSG_REQ_PAGES_ID;
    } else {
        msg_type = MIG_RP_MSG_REQ_PAGES;
    }

    return migrate_send_rp_message(mis, msg_type, msglen, bufc);
}

int migrate_send_rp_req_pages(MigrationIncomingState *mis,
                              RAMBlock *rb, ram_addr_t start, uint64_t haddr)
{
    void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
    bool received = false;

    WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
        received = ramblock_recv_bitmap_test_byte_offset(rb, start);
        if (!received && !g_tree_lookup(mis->page_requested, aligned)) {
            /*
             * The page has not been received, and it's not yet in the page
             * request list. Queue it. Set the value of element to 1, so that
             * things like g_tree_lookup() will return TRUE (1) when found.
             */
            g_tree_insert(mis->page_requested, aligned, (gpointer)1);
            mis->page_requested_count++;
            trace_postcopy_page_req_add(aligned, mis->page_requested_count);
        }
    }

    /*
     * If the page is there, skip sending the message. We don't even need the
     * lock because as long as the page arrived, it'll be there forever.
     */
    if (received) {
        return 0;
    }

    return migrate_send_rp_message_req_pages(mis, rb, start);
}

static bool migration_colo_enabled;
bool migration_incoming_colo_enabled(void)
{
    return migration_colo_enabled;
}

void migration_incoming_disable_colo(void)
{
    ram_block_discard_disable(false);
    migration_colo_enabled = false;
}

int migration_incoming_enable_colo(void)
{
    if (ram_block_discard_disable(true)) {
        error_report("COLO: cannot disable RAM discard");
        return -EBUSY;
    }
    migration_colo_enabled = true;
    return 0;
}

void migrate_add_address(SocketAddress *address)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    QAPI_LIST_PREPEND(mis->socket_address_list,
                      QAPI_CLONE(SocketAddress, address));
}

static void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p = NULL;

    /* URI is not suitable for migration? */
    if (!migration_channels_and_uri_compatible(uri, errp)) {
        return;
    }

    qapi_event_send_migration(MIGRATION_STATUS_SETUP);
    if (strstart(uri, "tcp:", &p) ||
        strstart(uri, "unix:", NULL) ||
        strstart(uri, "vsock:", NULL)) {
        socket_start_incoming_migration(p ? p : uri, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}

static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    /* If capability late_block_activate is set:
     * Only fire up the block code now if we're going to restart the
     * VM, else 'cont' will do it.
     * This causes file locking to happen; so we don't want it to happen
     * unless we really are starting the VM.
     */
    if (!migrate_late_block_activate() ||
         (autostart && (!global_state_received() ||
            global_state_get_runstate() == RUN_STATE_RUNNING))) {
        /* Make sure all file formats throw away their mutable metadata.
         * If we get an error here, just don't restart the VM yet. */
        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
            local_err = NULL;
            autostart = false;
        }
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self(&mis->announce_timer, migrate_announce_params());

    multifd_load_shutdown();

    dirty_bitmap_mig_before_vm_start();

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else if (migration_incoming_colo_enabled()) {
        migration_incoming_disable_colo();
        vm_start();
    } else {
        runstate_set(global_state_get_runstate());
    }
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    qemu_bh_delete(mis->bh);
    migration_incoming_state_destroy();
}

static void coroutine_fn
process_incoming_migration_co(void *opaque)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyState ps;
    int ret;
    Error *local_err = NULL;

    assert(mis->from_src_file);
    mis->migration_incoming_co = qemu_coroutine_self();
    mis->largest_page_size = qemu_ram_pagesize_largest();
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_ACTIVE);
    ret = qemu_loadvm_state(mis->from_src_file);

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    /* we get COLO info, and know if we are in COLO mode */
    if (!ret && migration_incoming_colo_enabled()) {
        /* Make sure all file formats throw away their mutable metadata */
        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
            goto fail;
        }

        qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
                           colo_process_incoming_thread, mis,
                           QEMU_THREAD_JOINABLE);
        mis->have_colo_incoming_thread = true;
        qemu_coroutine_yield();

        qemu_mutex_unlock_iothread();
        /* Wait for the checkpoint incoming thread to exit before freeing resources */
        qemu_thread_join(&mis->colo_incoming_thread);
        qemu_mutex_lock_iothread();
        /* We hold the global iothread lock, so it is safe here */
        colo_release_ram_cache();
    }

    if (ret < 0) {
        error_report("load of migration failed: %s", strerror(-ret));
        goto fail;
    }
    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
    qemu_bh_schedule(mis->bh);
    mis->migration_incoming_co = NULL;
    return;
fail:
    local_err = NULL;
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    qemu_fclose(mis->from_src_file);

    multifd_load_cleanup();

    exit(EXIT_FAILURE);
}

/**
 * migration_incoming_setup: Setup incoming migration
 * @f: file for main migration channel
 * @errp: where to put errors
 *
 * Returns: %true on success, %false on error.
 */
static bool migration_incoming_setup(QEMUFile *f, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!mis->from_src_file) {
        mis->from_src_file = f;
    }
    qemu_file_set_blocking(f, false);
    return true;
}

void migration_incoming_process(void)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, NULL);
    qemu_coroutine_enter(co);
}

/* Returns true if recovered from a paused migration, otherwise false */
static bool postcopy_try_recover(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
        /* Resumed from a paused postcopy migration */

        /* This should be set already in migration_incoming_setup() */
        assert(mis->from_src_file);
        /* Postcopy has standalone thread to do vm load */
        qemu_file_set_blocking(mis->from_src_file, true);

        /* Re-configure the return path */
        mis->to_src_file = qemu_file_get_return_path(mis->from_src_file);

        migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
                          MIGRATION_STATUS_POSTCOPY_RECOVER);

        /*
         * Here, we only wake up the main loading thread (while the
         * other threads will still be waiting), so that we can receive
         * commands from the source now, and answer them if needed. The
         * other threads will be woken up afterwards, once we are sure
         * that the source is ready to reply to page requests.
         */
        qemu_sem_post(&mis->postcopy_pause_sem_dst);
        return true;
    }

    return false;
}

void migration_fd_process_incoming(QEMUFile *f, Error **errp)
{
    if (!migration_incoming_setup(f, errp)) {
        return;
    }
    if (postcopy_try_recover()) {
        return;
    }
    migration_incoming_process();
}

/*
 * Returns true when we want to start a new incoming migration process,
 * false otherwise.
 */
static bool migration_should_start_incoming(bool main_channel)
{
    /* Multifd doesn't start unless all channels are established */
    if (migrate_multifd()) {
        return migration_has_all_channels();
    }

    /* Preempt channel only starts when the main channel is created */
    if (migrate_postcopy_preempt()) {
        return main_channel;
    }

    /*
     * For all other types of migration, we should only reach here when
     * it's the main channel that's being created, and we should always
     * proceed with this channel.
     */
    assert(main_channel);
    return true;
}

void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    Error *local_err = NULL;
    QEMUFile *f;
    bool default_channel = true;
    uint32_t channel_magic = 0;
    int ret = 0;

    if (migrate_multifd() && !migrate_postcopy_ram() &&
        qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) {
        /*
         * With multiple channels, it is possible that we receive channels
         * out of order on destination side, causing incorrect mapping of
         * source channels on destination side. Check channel MAGIC to
         * decide type of channel. Please note this is best effort, postcopy
         * preempt channel does not send any magic number so avoid it for
         * postcopy live migration. Also tls live migration already does
         * tls handshake while initializing main channel so with tls this
         * issue is not possible.
         */
        ret = migration_channel_read_peek(ioc, (void *)&channel_magic,
                                          sizeof(channel_magic), &local_err);

        if (ret != 0) {
            error_propagate(errp, local_err);
            return;
        }

        default_channel = (channel_magic == cpu_to_be32(QEMU_VM_FILE_MAGIC));
    } else {
        default_channel = !mis->from_src_file;
    }

    if (multifd_load_setup(errp) != 0) {
        error_setg(errp, "Failed to setup multifd channels");
        return;
    }

    if (default_channel) {
        f = qemu_file_new_input(ioc);

        if (!migration_incoming_setup(f, errp)) {
            return;
        }
    } else {
        /* Multiple connections */
        assert(migration_needs_multiple_sockets());
        if (migrate_multifd()) {
            multifd_recv_new_channel(ioc, &local_err);
        } else {
            assert(migrate_postcopy_preempt());
            f = qemu_file_new_input(ioc);
            postcopy_preempt_new_channel(mis, f);
        }
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if (migration_should_start_incoming(default_channel)) {
        /* If it's a recovery, we're done */
        if (postcopy_try_recover()) {
            return;
        }
        migration_incoming_process();
    }
}

/**
 * @migration_has_all_channels: We have received all channels that we need
 *
 * Returns true when we have got connections to all the channels that
 * we need for migration.
 */
bool migration_has_all_channels(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!mis->from_src_file) {
        return false;
    }

    if (migrate_multifd()) {
        return multifd_recv_all_channels_created();
    }

    if (migrate_postcopy_preempt()) {
        return mis->postcopy_qemufile_dst != NULL;
    }

    return true;
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP. Non-0 value indicates
 * error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
                                 char *block_name)
{
    char buf[512];
    int len;
    int64_t res;

    /*
     * First, we send the header part. It contains only the len of
     * idstr, and the idstr itself.
     */
    len = strlen(block_name);
    buf[0] = len;
    memcpy(buf + 1, block_name, len);

    if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: MSG_RP_RECV_BITMAP only used for recovery",
                     __func__);
        return;
    }

    migrate_send_rp_message(mis, MIG_RP_MSG_RECV_BITMAP, len + 1, buf);

    /*
     * Next, we dump the received bitmap to the stream.
     *
     * TODO: currently we are safe since we are the only one that is
     * using the to_src_file handle (fault thread is still paused),
     * and it's ok even not taking the mutex. However the best way is
     * to take the lock before sending the message header, and release
     * the lock after sending the bitmap.
     */
    qemu_mutex_lock(&mis->rp_mutex);
    res = ramblock_recv_bitmap_send(mis->to_src_file, block_name);
    qemu_mutex_unlock(&mis->rp_mutex);

    trace_migrate_send_rp_recv_bitmap(block_name, res);
}

void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf);
}

/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_COLO:
        return true;

    default:
        return false;

    }
}

bool migration_is_running(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_CANCELLING:
        return true;

    default:
        return false;

    }
}

static bool migrate_show_downtime(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_COMPLETED) || migration_in_postcopy();
}

static void populate_time_info(MigrationInfo *info, MigrationState *s)
{
    info->has_status = true;
    info->has_setup_time = true;
    info->setup_time = s->setup_time;

    if (s->state == MIGRATION_STATUS_COMPLETED) {
        info->has_total_time = true;
        info->total_time = s->total_time;
    } else {
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                           s->start_time;
    }

    if (migrate_show_downtime(s)) {
        info->has_downtime = true;
        info->downtime = s->downtime;
    } else {
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
    }
}

static void populate_ram_info(MigrationInfo *info, MigrationState *s)
{
    size_t page_size = qemu_target_page_size();

    info->ram = g_malloc0(sizeof(*info->ram));
    info->ram->transferred = stat64_get(&ram_counters.transferred);
    info->ram->total = ram_bytes_total();
    info->ram->duplicate = stat64_get(&ram_counters.zero_pages);
    /* legacy value.  It is not used anymore */
    info->ram->skipped = 0;
    info->ram->normal = stat64_get(&ram_counters.normal_pages);
    info->ram->normal_bytes = info->ram->normal * page_size;
    info->ram->mbps = s->mbps;
    info->ram->dirty_sync_count =
        stat64_get(&ram_counters.dirty_sync_count);
    info->ram->dirty_sync_missed_zero_copy =
        stat64_get(&ram_counters.dirty_sync_missed_zero_copy);
    info->ram->postcopy_requests =
        stat64_get(&ram_counters.postcopy_requests);
    info->ram->page_size = page_size;
    info->ram->multifd_bytes = stat64_get(&ram_counters.multifd_bytes);
    info->ram->pages_per_second = s->pages_per_second;
    info->ram->precopy_bytes = stat64_get(&ram_counters.precopy_bytes);
    info->ram->downtime_bytes = stat64_get(&ram_counters.downtime_bytes);
    info->ram->postcopy_bytes = stat64_get(&ram_counters.postcopy_bytes);

    if (migrate_xbzrle()) {
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_counters.bytes;
        info->xbzrle_cache->pages = xbzrle_counters.pages;
        info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss;
        info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate;
        info->xbzrle_cache->encoding_rate = xbzrle_counters.encoding_rate;
        info->xbzrle_cache->overflow = xbzrle_counters.overflow;
    }

    if (migrate_compress()) {
        info->compression = g_malloc0(sizeof(*info->compression));
        info->compression->pages = compression_counters.pages;
        info->compression->busy = compression_counters.busy;
        info->compression->busy_rate = compression_counters.busy_rate;
        info->compression->compressed_size =
            compression_counters.compressed_size;
        info->compression->compression_rate =
            compression_counters.compression_rate;
    }

    if (cpu_throttle_active()) {
        info->has_cpu_throttle_percentage = true;
        info->cpu_throttle_percentage = cpu_throttle_get_percentage();
    }

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        info->ram->remaining = ram_bytes_remaining();
        info->ram->dirty_pages_rate =
            stat64_get(&ram_counters.dirty_pages_rate);
    }
}

static void populate_disk_info(MigrationInfo *info)
{
    if (blk_mig_active()) {
        info->disk = g_malloc0(sizeof(*info->disk));
        info->disk->transferred = blk_mig_bytes_transferred();
        info->disk->remaining = blk_mig_bytes_remaining();
        info->disk->total = blk_mig_bytes_total();
    }
}

static void fill_source_migration_info(MigrationInfo *info)
{
    MigrationState *s = migrate_get_current();
    int state = qatomic_read(&s->state);
    GSList *cur_blocker = migration_blockers;

    info->blocked_reasons = NULL;

    /*
     * There are two types of reasons a migration might be blocked;
     * a) devices marked in VMState as non-migratable, and
     * b) Explicit migration blockers
     * We need to add both of them here.
     */
    qemu_savevm_non_migratable_list(&info->blocked_reasons);

    while (cur_blocker) {
        QAPI_LIST_PREPEND(info->blocked_reasons,
                          g_strdup(error_get_pretty(cur_blocker->data)));
        cur_blocker = g_slist_next(cur_blocker);
    }
    info->has_blocked_reasons = info->blocked_reasons != NULL;

    switch (state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        /* do not overwrite destination migration status */
        return;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        /* TODO add some postcopy stats */
        populate_time_info(info, s);
        populate_ram_info(info, s);
        populate_disk_info(info);
        populate_vfio_info(info);
        break;
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        /* TODO: display COLO specific information (checkpoint info etc.) */
        break;
    case MIGRATION_STATUS_COMPLETED:
        populate_time_info(info, s);
        populate_ram_info(info, s);
        populate_vfio_info(info);
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        if (s->error) {
            info->error_desc = g_strdup(error_get_pretty(s->error));
        }
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_WAIT_UNPLUG:
        info->has_status = true;
        break;
    }
    info->status = state;
}

static void fill_destination_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->socket_address_list) {
        info->has_socket_address = true;
        info->socket_address =
            QAPI_CLONE(SocketAddressList, mis->socket_address_list);
    }

    switch (mis->state) {
    case MIGRATION_STATUS_NONE:
        return;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_FAILED:
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_COMPLETED:
        info->has_status = true;
        fill_destination_postcopy_migration_info(info);
        break;
    }
    info->status = mis->state;
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));

    fill_destination_migration_info(info);
    fill_source_migration_info(info);

    return info;
}

void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                   " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                   " started");
        return;
    }
    /*
     * we don't error if migration has finished since that would be racy
     * with issuing this command.
     */
    qatomic_set(&s->start_postcopy, true);
}

/* shared migration helpers */

void migrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < MIGRATION_STATUS__MAX);
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        trace_migrate_set_state(MigrationStatus_str(new_state));
        migrate_generate_event(new_state);
    }
}

static void migrate_fd_cleanup(MigrationState *s)
{
    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    g_free(s->hostname);
    s->hostname = NULL;
    json_writer_free(s->vmdesc);
    s->vmdesc = NULL;

    qemu_savevm_state_cleanup();

    if (s->to_dst_file) {
        QEMUFile *tmp;

        trace_migrate_fd_cleanup();
        qemu_mutex_unlock_iothread();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        qemu_mutex_lock_iothread();

        multifd_save_cleanup();
        qemu_mutex_lock(&s->qemu_file_lock);
        tmp = s->to_dst_file;
        s->to_dst_file = NULL;
        qemu_mutex_unlock(&s->qemu_file_lock);
        /*
         * Close the file handle without the lock to make sure the
         * critical section won't block for long.
         */
        migration_ioc_unregister_yank_from_file(tmp);
        qemu_fclose(tmp);
    }

    if (s->postcopy_qemufile_src) {
        migration_ioc_unregister_yank_from_file(s->postcopy_qemufile_src);
        qemu_fclose(s->postcopy_qemufile_src);
        s->postcopy_qemufile_src = NULL;
    }

    assert(!migration_is_active(s));

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    if (s->error) {
        /* It is used on info migrate.  We can't free it */
        error_report_err(error_copy(s->error));
    }
    notifier_list_notify(&migration_state_notifiers, s);
    block_cleanup_parameters();
    yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}

static void migrate_fd_cleanup_schedule(MigrationState *s)
{
    /*
     * Ref the state for bh, because it may be called when
     * there're already no other refs
     */
    object_ref(OBJECT(s));
    qemu_bh_schedule(s->cleanup_bh);
}

static void migrate_fd_cleanup_bh(void *opaque)
{
    MigrationState *s = opaque;
    migrate_fd_cleanup(s);
    object_unref(OBJECT(s));
}

void migrate_set_error(MigrationState *s, const Error *error)
{
    QEMU_LOCK_GUARD(&s->error_mutex);
    if (!s->error) {
        s->error = error_copy(error);
    }
}

static void migrate_error_free(MigrationState *s)
{
    QEMU_LOCK_GUARD(&s->error_mutex);
    if (s->error) {
        error_free(s->error);
        s->error = NULL;
    }
}

void migrate_fd_error(MigrationState *s, const Error *error)
{
    trace_migrate_fd_error(error_get_pretty(error));
    assert(s->to_dst_file == NULL);
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_FAILED);
    migrate_set_error(s, error);
}

static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->to_dst_file;
    trace_migrate_fd_cancel();

    WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
        if (s->rp_state.from_dst_file) {
            /* shutdown the rp socket, so causing the rp thread to shutdown */
            qemu_file_shutdown(s->rp_state.from_dst_file);
        }
    }

    do {
        old_state = s->state;
        if (!migration_is_running(old_state)) {
            break;
        }
        /* If the migration is paused, kick it out of the pause */
        if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) {
            qemu_sem_post(&s->pause_sem);
        }
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
        Error *local_err = NULL;

        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
    }
}

void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}

bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}

bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}

bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}

bool migration_in_postcopy(void)
{
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        return true;
    default:
        return false;
    }
}

bool migration_in_postcopy_after_devices(MigrationState *s)
{
    return migration_in_postcopy() && s->postcopy_after_devices;
}

bool migration_in_incoming_postcopy(void)
{
    PostcopyState ps = postcopy_state_get();

    return ps >= POSTCOPY_INCOMING_DISCARD && ps < POSTCOPY_INCOMING_END;
}

bool migration_incoming_postcopy_advised(void)
{
    PostcopyState ps = postcopy_state_get();

    return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
}

bool migration_in_bg_snapshot(void)
{
    MigrationState *s = migrate_get_current();

    return migrate_background_snapshot() &&
           migration_is_setup_or_active(s->state);
}

bool migration_is_idle(void)
{
    MigrationState *s = current_migration;

    if (!s) {
        return true;
    }

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_COMPLETED:
    case MIGRATION_STATUS_FAILED:
        return true;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_COLO:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
        return false;
    case MIGRATION_STATUS__MAX:
        g_assert_not_reached();
    }

    return false;
}

bool migration_is_active(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_ACTIVE ||
            s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

void migrate_init(MigrationState *s)
{
    /*
     * Reinitialise all migration state, except
     * parameters/capabilities that the user set, and
     * locks.
     */
    s->cleanup_bh = 0;
    s->vm_start_bh = 0;
    s->to_dst_file = NULL;
    s->state = MIGRATION_STATUS_NONE;
    s->rp_state.from_dst_file = NULL;
    s->rp_state.error = false;
    s->mbps = 0.0;
    s->pages_per_second = 0.0;
    s->downtime = 0;
    s->expected_downtime = 0;
    s->setup_time = 0;
    s->start_postcopy = false;
    s->postcopy_after_devices = false;
    s->migration_thread_running = false;
    error_free(s->error);
    s->error = NULL;
    s->hostname = NULL;

    migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    s->total_time = 0;
    s->vm_was_running = false;
    s->iteration_initial_bytes = 0;
    s->threshold_size = 0;
}

int migrate_add_blocker_internal(Error *reason, Error **errp)
{
    /* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. */
    if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) {
        error_propagate_prepend(errp, error_copy(reason),
                                "disallowing migration blocker "
                                "(migration/snapshot in progress) for: ");
        return -EBUSY;
    }

    migration_blockers = g_slist_prepend(migration_blockers, reason);
    return 0;
}

int migrate_add_blocker(Error *reason, Error **errp)
{
    if (only_migratable) {
        error_propagate_prepend(errp, error_copy(reason),
                                "disallowing migration blocker "
                                "(--only-migratable) for: ");
        return -EACCES;
    }

    return migrate_add_blocker_internal(reason, errp);
}

void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}

void qmp_migrate_incoming(const char *uri, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;

    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
        return;
    }
    if (!runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "'-incoming' was not specified on the command line");
        return;
    }

    if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
        return;
    }

    qemu_start_incoming_migration(uri, &local_err);

    if (local_err) {
        yank_unregister_instance(MIGRATION_YANK_INSTANCE);
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}

void qmp_migrate_recover(const char *uri, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    /*
     * Don't even bother to use ERRP_GUARD() as it _must_ always be set by
     * callers (no one should ignore a recover failure); if there is, it's a
     * programming error.
     */
    assert(errp);

    if (mis->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
        error_setg(errp, "Migrate recover can only be run "
                   "when postcopy is paused.");
        return;
    }

    /* If there's an existing transport, release it */
    migration_incoming_transport_cleanup(mis);

    /*
     * Note that this call will never start a real migration; it will
     * only re-setup the migration stream and poke existing migration
     * to continue using that newly established channel.
     */
    qemu_start_incoming_migration(uri, errp);
}

void qmp_migrate_pause(Error **errp)
{
    MigrationState *ms = migrate_get_current();
    MigrationIncomingState *mis = migration_incoming_get_current();
    int ret;

    if (ms->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        /* Source side, during postcopy */
        qemu_mutex_lock(&ms->qemu_file_lock);
        ret = qemu_file_shutdown(ms->to_dst_file);
        qemu_mutex_unlock(&ms->qemu_file_lock);
        if (ret) {
            error_setg(errp, "Failed to pause source migration");
        }
        return;
    }

    if (mis->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        ret = qemu_file_shutdown(mis->from_src_file);
        if (ret) {
            error_setg(errp, "Failed to pause destination migration");
        }
        return;
    }

    error_setg(errp, "migrate-pause is currently only supported "
               "during postcopy-active state");
}

bool migration_is_blocked(Error **errp)
{
    if (qemu_savevm_state_blocked(errp)) {
        return true;
    }

    if (migration_blockers) {
        error_propagate(errp, error_copy(migration_blockers->data));
        return true;
    }

    return false;
}

/* Returns true if continue to migrate, or false if error detected */
static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc,
                            bool resume, Error **errp)
{
    Error *local_err = NULL;

    if (resume) {
        if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
            error_setg(errp, "Cannot resume if there is no "
                       "paused migration");
            return false;
        }

        /*
         * Postcopy recovery won't work well with release-ram
         * capability since release-ram will drop the page buffer as
         * long as the page is put into the send buffer. So if a
         * network failure happens, any page buffers that have
         * not yet reached the destination VM but have already been
         * sent from the source VM will be lost forever. Let's refuse
         * to let the client resume such a postcopy migration.
         * Luckily release-ram was designed to only be used when src
         * and destination VMs are on the same host, so it should be
         * fine.
         */
        if (migrate_release_ram()) {
            error_setg(errp, "Postcopy recovery cannot work "
                       "when release-ram capability is set");
            return false;
        }

        /* This is a resume, skip init status */
        return true;
    }

    if (migration_is_running(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return false;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return false;
    }

    if (runstate_check(RUN_STATE_POSTMIGRATE)) {
        error_setg(errp, "Can't migrate the vm that was paused due to "
                   "previous migration");
        return false;
    }

    if (migration_is_blocked(errp)) {
        return false;
    }

    if (blk || blk_inc) {
        if (migrate_colo()) {
            error_setg(errp, "No disk migration is required in COLO mode");
            return false;
        }
        if (migrate_block() || migrate_block_incremental()) {
            error_setg(errp, "Command options are incompatible with "
                       "current migration capabilities");
            return false;
        }
        if (!migrate_cap_set(MIGRATION_CAPABILITY_BLOCK, true, &local_err)) {
            error_propagate(errp, local_err);
            return false;
        }
        s->must_remove_block_options = true;
    }

    if (blk_inc) {
        migrate_set_block_incremental(true);
    }

    migrate_init(s);
    /*
     * set ram_counters and compression_counters memory to zero for a
     * new migration
     */
    memset(&ram_counters, 0, sizeof(ram_counters));
    memset(&compression_counters, 0, sizeof(compression_counters));

    return true;
}

void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 bool has_resume, bool resume, Error **errp)
{
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    const char *p = NULL;

    /* URI is not suitable for migration? */
    if (!migration_channels_and_uri_compatible(uri, errp)) {
        return;
    }

    if (!migrate_prepare(s, has_blk && blk, has_inc && inc,
                         has_resume && resume, errp)) {
        /* Error detected, put into errp */
        return;
    }

    if (!(has_resume && resume)) {
        if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
            return;
        }
    }

    if (strstart(uri, "tcp:", &p) ||
        strstart(uri, "unix:", NULL) ||
        strstart(uri, "vsock:", NULL)) {
        socket_start_outgoing_migration(s, p ? p : uri, &local_err);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
    } else {
        if (!(has_resume && resume)) {
            yank_unregister_instance(MIGRATION_YANK_INSTANCE);
        }
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                          MIGRATION_STATUS_FAILED);
        block_cleanup_parameters();
        return;
    }

    if (local_err) {
        if (!(has_resume && resume)) {
            yank_unregister_instance(MIGRATION_YANK_INSTANCE);
        }
        migrate_fd_error(s, local_err);
        error_propagate(errp, local_err);
        return;
    }
}

void qmp_migrate_cancel(Error **errp)
{
    migration_cancel(NULL);
}

void qmp_migrate_continue(MigrationStatus state, Error **errp)
{
    MigrationState *s = migrate_get_current();
    if (s->state != state) {
        error_setg(errp, "Migration not in expected state: %s",
                   MigrationStatus_str(s->state));
        return;
    }
    qemu_sem_post(&s->pause_sem);
}

/* migration thread support */
/*
 * Something bad happened to the RP stream, mark an error
 * The caller shall print or trace something to indicate why
 */
static void mark_source_rp_bad(MigrationState *s)
{
    s->rp_state.error = true;
}

static struct rp_cmd_args {
    ssize_t     len; /* -1 = variable */
    const char *name;
} rp_cmd_args[] = {
    [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
    [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
    [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
    [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
    [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
    [MIG_RP_MSG_RECV_BITMAP]    = { .len = -1, .name = "RECV_BITMAP" },
    [MIG_RP_MSG_RESUME_ACK]     = { .len =  4, .name = "RESUME_ACK" },
    [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
};

/*
 * Process a request for pages received on the return path,
 * We're allowed to send more than requested (e.g. to round to our page size)
 * and we don't need to send pages that have already been sent.
 */
static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
                                        ram_addr_t start, size_t len)
{
    long our_host_ps = qemu_real_host_page_size();

    trace_migrate_handle_rp_req_pages(rbname, start, len);

    /*
     * Since we currently insist on matching page sizes, just sanity check
     * we're being asked for whole host pages.
     */
    if (!QEMU_IS_ALIGNED(start, our_host_ps) ||
        !QEMU_IS_ALIGNED(len, our_host_ps)) {
        error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
                     " len: %zd", __func__, start, len);
        mark_source_rp_bad(ms);
        return;
    }

    if (ram_save_queue_pages(rbname, start, len)) {
        mark_source_rp_bad(ms);
    }
}

/* Return true to retry, false to quit */
static bool postcopy_pause_return_path_thread(MigrationState *s)
{
    trace_postcopy_pause_return_path();

    qemu_sem_wait(&s->postcopy_pause_rp_sem);

    trace_postcopy_pause_return_path_continued();

    return true;
}

static int migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);

    if (!block) {
        error_report("%s: invalid block name '%s'", __func__, block_name);
        return -EINVAL;
    }

    /* Fetch the received bitmap and refresh the dirty bitmap */
    return ram_dirty_bitmap_reload(s, block);
}

static int migrate_handle_rp_resume_ack(MigrationState *s, uint32_t value)
{
    trace_source_return_path_thread_resume_ack(value);

    if (value != MIGRATION_RESUME_ACK_VALUE) {
        error_report("%s: illegal resume_ack value %"PRIu32,
                     __func__, value);
        return -1;
    }

    /* Now both sides are active. */
    migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_RECOVER,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    /* Notify the send thread that it is time to continue sending pages */
    qemu_sem_post(&s->rp_state.rp_sem);

    return 0;
}

/*
 * Release ms->rp_state.from_dst_file (and postcopy_qemufile_src if
 * existed) in a safe way.
 */
static void migration_release_dst_files(MigrationState *ms)
{
    QEMUFile *file;

    WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
        /*
         * Reset the from_dst_file pointer first before releasing it, as we
         * can't block within lock section
         */
        file = ms->rp_state.from_dst_file;
        ms->rp_state.from_dst_file = NULL;
    }

    /*
     * Do the same to postcopy fast path socket too if there is. No
     * locking needed because this qemufile should only be managed by
     * return path thread.
     */
    if (ms->postcopy_qemufile_src) {
        migration_ioc_unregister_yank_from_file(ms->postcopy_qemufile_src);
        qemu_file_shutdown(ms->postcopy_qemufile_src);
        qemu_fclose(ms->postcopy_qemufile_src);
        ms->postcopy_qemufile_src = NULL;
    }

    qemu_fclose(file);
}

/*
 * Handles messages sent on the return path towards the source VM
 *
 */
static void *source_return_path_thread(void *opaque)
{
    MigrationState *ms = opaque;
    QEMUFile *rp = ms->rp_state.from_dst_file;
    uint16_t header_len, header_type;
    uint8_t buf[512];
    uint32_t tmp32, sibling_error;
    ram_addr_t start = 0; /* =0 to silence warning */
    size_t len = 0, expected_len;
    int res;

    trace_source_return_path_thread_entry();
    rcu_register_thread();

retry:
    while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
           migration_is_setup_or_active(ms->state)) {
        trace_source_return_path_thread_loop_top();
        header_type = qemu_get_be16(rp);
        header_len = qemu_get_be16(rp);

        if (qemu_file_get_error(rp)) {
            mark_source_rp_bad(ms);
            goto out;
        }

        if (header_type >= MIG_RP_MSG_MAX ||
            header_type == MIG_RP_MSG_INVALID) {
            error_report("RP: Received invalid message 0x%04x length 0x%04x",
                         header_type, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        if ((rp_cmd_args[header_type].len != -1 &&
             header_len != rp_cmd_args[header_type].len) ||
            header_len > sizeof(buf)) {
            error_report("RP: Received '%s' message (0x%04x) with "
                         "incorrect length %d expecting %zu",
                         rp_cmd_args[header_type].name, header_type, header_len,
                         (size_t)rp_cmd_args[header_type].len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* We know we've got a valid header by this point */
        res = qemu_get_buffer(rp, buf, header_len);
        if (res != header_len) {
            error_report("RP: Failed reading data for message 0x%04x"
                         " read %d expected %d",
                         header_type, res, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* OK, we have the message and the data */
        switch (header_type) {
        case MIG_RP_MSG_SHUT:
            sibling_error = ldl_be_p(buf);
            trace_source_return_path_thread_shut(sibling_error);
            if (sibling_error) {
                error_report("RP: Sibling indicated error %d", sibling_error);
                mark_source_rp_bad(ms);
            }
            /*
             * We'll let the main thread deal with closing the RP
             * we could do a shutdown(2) on it, but we're the only user
             * anyway, so there's nothing gained.
1901 */ 1902 goto out; 1903 1904 case MIG_RP_MSG_PONG: 1905 tmp32 = ldl_be_p(buf); 1906 trace_source_return_path_thread_pong(tmp32); 1907 qemu_sem_post(&ms->rp_state.rp_pong_acks); 1908 break; 1909 1910 case MIG_RP_MSG_REQ_PAGES: 1911 start = ldq_be_p(buf); 1912 len = ldl_be_p(buf + 8); 1913 migrate_handle_rp_req_pages(ms, NULL, start, len); 1914 break; 1915 1916 case MIG_RP_MSG_REQ_PAGES_ID: 1917 expected_len = 12 + 1; /* header + termination */ 1918 1919 if (header_len >= expected_len) { 1920 start = ldq_be_p(buf); 1921 len = ldl_be_p(buf + 8); 1922 /* Now we expect an idstr */ 1923 tmp32 = buf[12]; /* Length of the following idstr */ 1924 buf[13 + tmp32] = '\0'; 1925 expected_len += tmp32; 1926 } 1927 if (header_len != expected_len) { 1928 error_report("RP: Req_Page_id with length %d expecting %zd", 1929 header_len, expected_len); 1930 mark_source_rp_bad(ms); 1931 goto out; 1932 } 1933 migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len); 1934 break; 1935 1936 case MIG_RP_MSG_RECV_BITMAP: 1937 if (header_len < 1) { 1938 error_report("%s: missing block name", __func__); 1939 mark_source_rp_bad(ms); 1940 goto out; 1941 } 1942 /* Format: len (1B) + idstr (<255B). This ends the idstr. */ 1943 buf[buf[0] + 1] = '\0'; 1944 if (migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1))) { 1945 mark_source_rp_bad(ms); 1946 goto out; 1947 } 1948 break; 1949 1950 case MIG_RP_MSG_RESUME_ACK: 1951 tmp32 = ldl_be_p(buf); 1952 if (migrate_handle_rp_resume_ack(ms, tmp32)) { 1953 mark_source_rp_bad(ms); 1954 goto out; 1955 } 1956 break; 1957 1958 default: 1959 break; 1960 } 1961 } 1962 1963 out: 1964 res = qemu_file_get_error(rp); 1965 if (res) { 1966 if (res && migration_in_postcopy()) { 1967 /* 1968 * Maybe there is something we can do: it looks like a 1969 * network down issue, and we pause for a recovery. 1970 */ 1971 migration_release_dst_files(ms); 1972 rp = NULL; 1973 if (postcopy_pause_return_path_thread(ms)) { 1974 /* 1975 * Reload rp, reset the rest. Referencing it is safe since 1976 * it's reset only by us above, or when migration completes 1977 */ 1978 rp = ms->rp_state.from_dst_file; 1979 ms->rp_state.error = false; 1980 goto retry; 1981 } 1982 } 1983 1984 trace_source_return_path_thread_bad_end(); 1985 mark_source_rp_bad(ms); 1986 } 1987 1988 trace_source_return_path_thread_end(); 1989 migration_release_dst_files(ms); 1990 rcu_unregister_thread(); 1991 return NULL; 1992 } 1993 1994 static int open_return_path_on_source(MigrationState *ms, 1995 bool create_thread) 1996 { 1997 ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file); 1998 if (!ms->rp_state.from_dst_file) { 1999 return -1; 2000 } 2001 2002 trace_open_return_path_on_source(); 2003 2004 if (!create_thread) { 2005 /* We're done */ 2006 return 0; 2007 } 2008 2009 qemu_thread_create(&ms->rp_state.rp_thread, "return path", 2010 source_return_path_thread, ms, QEMU_THREAD_JOINABLE); 2011 ms->rp_state.rp_thread_created = true; 2012 2013 trace_open_return_path_on_source_continue(); 2014 2015 return 0; 2016 } 2017 2018 /* Returns 0 if the RP was ok, otherwise there was an error on the RP */ 2019 static int await_return_path_close_on_source(MigrationState *ms) 2020 { 2021 /* 2022 * If this is a normal exit then the destination will send a SHUT and the 2023 * rp_thread will exit, however if there's an error we need to cause 2024 * it to exit. 
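     *
     * The forced exit works because qemu_file_shutdown() below makes any
     * read that source_return_path_thread() is blocked in return an error,
     * which the qemu_file_get_error() checks in its receive loop then turn
     * into a clean thread exit.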
2025 */ 2026 if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) { 2027 /* 2028 * shutdown(2), if we have it, will cause it to unblock if it's stuck 2029 * waiting for the destination. 2030 */ 2031 qemu_file_shutdown(ms->rp_state.from_dst_file); 2032 mark_source_rp_bad(ms); 2033 } 2034 trace_await_return_path_close_on_source_joining(); 2035 qemu_thread_join(&ms->rp_state.rp_thread); 2036 ms->rp_state.rp_thread_created = false; 2037 trace_await_return_path_close_on_source_close(); 2038 return ms->rp_state.error; 2039 } 2040 2041 static inline void 2042 migration_wait_main_channel(MigrationState *ms) 2043 { 2044 /* Wait until one PONG message received */ 2045 qemu_sem_wait(&ms->rp_state.rp_pong_acks); 2046 } 2047 2048 /* 2049 * Switch from normal iteration to postcopy 2050 * Returns non-0 on error 2051 */ 2052 static int postcopy_start(MigrationState *ms) 2053 { 2054 int ret; 2055 QIOChannelBuffer *bioc; 2056 QEMUFile *fb; 2057 int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 2058 int64_t bandwidth = migrate_max_postcopy_bandwidth(); 2059 bool restart_block = false; 2060 int cur_state = MIGRATION_STATUS_ACTIVE; 2061 2062 if (migrate_postcopy_preempt()) { 2063 migration_wait_main_channel(ms); 2064 if (postcopy_preempt_establish_channel(ms)) { 2065 migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED); 2066 return -1; 2067 } 2068 } 2069 2070 if (!migrate_pause_before_switchover()) { 2071 migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE, 2072 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2073 } 2074 2075 trace_postcopy_start(); 2076 qemu_mutex_lock_iothread(); 2077 trace_postcopy_start_set_run(); 2078 2079 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); 2080 global_state_store(); 2081 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); 2082 if (ret < 0) { 2083 goto fail; 2084 } 2085 2086 ret = migration_maybe_pause(ms, &cur_state, 2087 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2088 if (ret < 0) { 2089 goto fail; 2090 } 2091 2092 ret = bdrv_inactivate_all(); 2093 if (ret < 0) { 2094 goto fail; 2095 } 2096 restart_block = true; 2097 2098 /* 2099 * Cause any non-postcopiable, but iterative devices to 2100 * send out their final data. 2101 */ 2102 qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false); 2103 2104 /* 2105 * in Finish migrate and with the io-lock held everything should 2106 * be quiet, but we've potentially still got dirty pages and we 2107 * need to tell the destination to throw any pages it's already received 2108 * that are dirty 2109 */ 2110 if (migrate_postcopy_ram()) { 2111 ram_postcopy_send_discard_bitmap(ms); 2112 } 2113 2114 /* 2115 * send rest of state - note things that are doing postcopy 2116 * will notice we're in POSTCOPY_ACTIVE and not actually 2117 * wrap their state up here 2118 */ 2119 /* 0 max-postcopy-bandwidth means unlimited */ 2120 if (!bandwidth) { 2121 qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX); 2122 } else { 2123 qemu_file_set_rate_limit(ms->to_dst_file, bandwidth / XFER_LIMIT_RATIO); 2124 } 2125 if (migrate_postcopy_ram()) { 2126 /* Ping just for debugging, helps line traces up */ 2127 qemu_savevm_send_ping(ms->to_dst_file, 2); 2128 } 2129 2130 /* 2131 * While loading the device state we may trigger page transfer 2132 * requests and the fd must be free to process those, and thus 2133 * the destination must read the whole device state off the fd before 2134 * it starts processing it. 
Unfortunately the ad-hoc migration format 2135 * doesn't allow the destination to know the size to read without fully 2136 * parsing it through each devices load-state code (especially the open 2137 * coded devices that use get/put). 2138 * So we wrap the device state up in a package with a length at the start; 2139 * to do this we use a qemu_buf to hold the whole of the device state. 2140 */ 2141 bioc = qio_channel_buffer_new(4096); 2142 qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer"); 2143 fb = qemu_file_new_output(QIO_CHANNEL(bioc)); 2144 object_unref(OBJECT(bioc)); 2145 2146 /* 2147 * Make sure the receiver can get incoming pages before we send the rest 2148 * of the state 2149 */ 2150 qemu_savevm_send_postcopy_listen(fb); 2151 2152 qemu_savevm_state_complete_precopy(fb, false, false); 2153 if (migrate_postcopy_ram()) { 2154 qemu_savevm_send_ping(fb, 3); 2155 } 2156 2157 qemu_savevm_send_postcopy_run(fb); 2158 2159 /* <><> end of stuff going into the package */ 2160 2161 /* Last point of recovery; as soon as we send the package the destination 2162 * can open devices and potentially start running. 2163 * Lets just check again we've not got any errors. 2164 */ 2165 ret = qemu_file_get_error(ms->to_dst_file); 2166 if (ret) { 2167 error_report("postcopy_start: Migration stream errored (pre package)"); 2168 goto fail_closefb; 2169 } 2170 2171 restart_block = false; 2172 2173 /* Now send that blob */ 2174 if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) { 2175 goto fail_closefb; 2176 } 2177 qemu_fclose(fb); 2178 2179 /* Send a notify to give a chance for anything that needs to happen 2180 * at the transition to postcopy and after the device state; in particular 2181 * spice needs to trigger a transition now 2182 */ 2183 ms->postcopy_after_devices = true; 2184 notifier_list_notify(&migration_state_notifiers, ms); 2185 2186 ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop; 2187 2188 qemu_mutex_unlock_iothread(); 2189 2190 if (migrate_postcopy_ram()) { 2191 /* 2192 * Although this ping is just for debug, it could potentially be 2193 * used for getting a better measurement of downtime at the source. 2194 */ 2195 qemu_savevm_send_ping(ms->to_dst_file, 4); 2196 } 2197 2198 if (migrate_release_ram()) { 2199 ram_postcopy_migrated_memory_release(ms); 2200 } 2201 2202 ret = qemu_file_get_error(ms->to_dst_file); 2203 if (ret) { 2204 error_report("postcopy_start: Migration stream errored"); 2205 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 2206 MIGRATION_STATUS_FAILED); 2207 } 2208 2209 trace_postcopy_preempt_enabled(migrate_postcopy_preempt()); 2210 2211 return ret; 2212 2213 fail_closefb: 2214 qemu_fclose(fb); 2215 fail: 2216 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 2217 MIGRATION_STATUS_FAILED); 2218 if (restart_block) { 2219 /* A failure happened early enough that we know the destination hasn't 2220 * accessed block devices, so we're safe to recover. 
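     *
     * Recovery here means re-activating the images that
     * bdrv_inactivate_all() released earlier in this function, so the
     * source VM can carry on running with its disks after the failed
     * switchover attempt.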
2221 */ 2222 Error *local_err = NULL; 2223 2224 bdrv_activate_all(&local_err); 2225 if (local_err) { 2226 error_report_err(local_err); 2227 } 2228 } 2229 qemu_mutex_unlock_iothread(); 2230 return -1; 2231 } 2232 2233 /** 2234 * migration_maybe_pause: Pause if required to by 2235 * migrate_pause_before_switchover called with the iothread locked 2236 * Returns: 0 on success 2237 */ 2238 static int migration_maybe_pause(MigrationState *s, 2239 int *current_active_state, 2240 int new_state) 2241 { 2242 if (!migrate_pause_before_switchover()) { 2243 return 0; 2244 } 2245 2246 /* Since leaving this state is not atomic with posting the semaphore 2247 * it's possible that someone could have issued multiple migrate_continue 2248 * and the semaphore is incorrectly positive at this point; 2249 * the docs say it's undefined to reinit a semaphore that's already 2250 * init'd, so use timedwait to eat up any existing posts. 2251 */ 2252 while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) { 2253 /* This block intentionally left blank */ 2254 } 2255 2256 /* 2257 * If the migration is cancelled when it is in the completion phase, 2258 * the migration state is set to MIGRATION_STATUS_CANCELLING. 2259 * So we don't need to wait a semaphore, otherwise we would always 2260 * wait for the 'pause_sem' semaphore. 2261 */ 2262 if (s->state != MIGRATION_STATUS_CANCELLING) { 2263 qemu_mutex_unlock_iothread(); 2264 migrate_set_state(&s->state, *current_active_state, 2265 MIGRATION_STATUS_PRE_SWITCHOVER); 2266 qemu_sem_wait(&s->pause_sem); 2267 migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER, 2268 new_state); 2269 *current_active_state = new_state; 2270 qemu_mutex_lock_iothread(); 2271 } 2272 2273 return s->state == new_state ? 0 : -EINVAL; 2274 } 2275 2276 /** 2277 * migration_completion: Used by migration_thread when there's not much left. 2278 * The caller 'breaks' the loop when this returns. 2279 * 2280 * @s: Current migration state 2281 */ 2282 static void migration_completion(MigrationState *s) 2283 { 2284 int ret; 2285 int current_active_state = s->state; 2286 2287 if (s->state == MIGRATION_STATUS_ACTIVE) { 2288 qemu_mutex_lock_iothread(); 2289 s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 2290 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); 2291 s->vm_was_running = runstate_is_running(); 2292 ret = global_state_store(); 2293 2294 if (!ret) { 2295 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); 2296 trace_migration_completion_vm_stop(ret); 2297 if (ret >= 0) { 2298 ret = migration_maybe_pause(s, ¤t_active_state, 2299 MIGRATION_STATUS_DEVICE); 2300 } 2301 if (ret >= 0) { 2302 s->block_inactive = !migrate_colo(); 2303 qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX); 2304 ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false, 2305 s->block_inactive); 2306 } 2307 } 2308 qemu_mutex_unlock_iothread(); 2309 2310 if (ret < 0) { 2311 goto fail; 2312 } 2313 } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { 2314 trace_migration_completion_postcopy_end(); 2315 2316 qemu_mutex_lock_iothread(); 2317 qemu_savevm_state_complete_postcopy(s->to_dst_file); 2318 qemu_mutex_unlock_iothread(); 2319 2320 /* 2321 * Shutdown the postcopy fast path thread. This is only needed 2322 * when dest QEMU binary is old (7.1/7.2). QEMU 8.0+ doesn't need 2323 * this. 
2324 */ 2325 if (migrate_postcopy_preempt() && s->preempt_pre_7_2) { 2326 postcopy_preempt_shutdown_file(s); 2327 } 2328 2329 trace_migration_completion_postcopy_end_after_complete(); 2330 } else { 2331 goto fail; 2332 } 2333 2334 /* 2335 * If rp was opened we must clean up the thread before 2336 * cleaning everything else up (since if there are no failures 2337 * it will wait for the destination to send it's status in 2338 * a SHUT command). 2339 */ 2340 if (s->rp_state.rp_thread_created) { 2341 int rp_error; 2342 trace_migration_return_path_end_before(); 2343 rp_error = await_return_path_close_on_source(s); 2344 trace_migration_return_path_end_after(rp_error); 2345 if (rp_error) { 2346 goto fail_invalidate; 2347 } 2348 } 2349 2350 if (qemu_file_get_error(s->to_dst_file)) { 2351 trace_migration_completion_file_err(); 2352 goto fail_invalidate; 2353 } 2354 2355 if (migrate_colo() && s->state == MIGRATION_STATUS_ACTIVE) { 2356 /* COLO does not support postcopy */ 2357 migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE, 2358 MIGRATION_STATUS_COLO); 2359 } else { 2360 migrate_set_state(&s->state, current_active_state, 2361 MIGRATION_STATUS_COMPLETED); 2362 } 2363 2364 return; 2365 2366 fail_invalidate: 2367 /* If not doing postcopy, vm_start() will be called: let's regain 2368 * control on images. 2369 */ 2370 if (s->state == MIGRATION_STATUS_ACTIVE || 2371 s->state == MIGRATION_STATUS_DEVICE) { 2372 Error *local_err = NULL; 2373 2374 qemu_mutex_lock_iothread(); 2375 bdrv_activate_all(&local_err); 2376 if (local_err) { 2377 error_report_err(local_err); 2378 s->block_inactive = true; 2379 } else { 2380 s->block_inactive = false; 2381 } 2382 qemu_mutex_unlock_iothread(); 2383 } 2384 2385 fail: 2386 migrate_set_state(&s->state, current_active_state, 2387 MIGRATION_STATUS_FAILED); 2388 } 2389 2390 /** 2391 * bg_migration_completion: Used by bg_migration_thread when after all the 2392 * RAM has been saved. The caller 'breaks' the loop when this returns. 2393 * 2394 * @s: Current migration state 2395 */ 2396 static void bg_migration_completion(MigrationState *s) 2397 { 2398 int current_active_state = s->state; 2399 2400 /* 2401 * Stop tracking RAM writes - un-protect memory, un-register UFFD 2402 * memory ranges, flush kernel wait queues and wake up threads 2403 * waiting for write fault to be resolved. 2404 */ 2405 ram_write_tracking_stop(); 2406 2407 if (s->state == MIGRATION_STATUS_ACTIVE) { 2408 /* 2409 * By this moment we have RAM content saved into the migration stream. 2410 * The next step is to flush the non-RAM content (device state) 2411 * right after the ram content. The device state has been stored into 2412 * the temporary buffer before RAM saving started. 
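         *
         * The resulting stream layout is therefore: vmstate header, RAM
         * sections written while the guest kept running, then the device
         * state that was stashed in s->bioc before the guest was resumed.
         * See bg_migration_thread() for where that buffer is filled.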
2413 */ 2414 qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage); 2415 qemu_fflush(s->to_dst_file); 2416 } else if (s->state == MIGRATION_STATUS_CANCELLING) { 2417 goto fail; 2418 } 2419 2420 if (qemu_file_get_error(s->to_dst_file)) { 2421 trace_migration_completion_file_err(); 2422 goto fail; 2423 } 2424 2425 migrate_set_state(&s->state, current_active_state, 2426 MIGRATION_STATUS_COMPLETED); 2427 return; 2428 2429 fail: 2430 migrate_set_state(&s->state, current_active_state, 2431 MIGRATION_STATUS_FAILED); 2432 } 2433 2434 typedef enum MigThrError { 2435 /* No error detected */ 2436 MIG_THR_ERR_NONE = 0, 2437 /* Detected error, but resumed successfully */ 2438 MIG_THR_ERR_RECOVERED = 1, 2439 /* Detected fatal error, need to exit */ 2440 MIG_THR_ERR_FATAL = 2, 2441 } MigThrError; 2442 2443 static int postcopy_resume_handshake(MigrationState *s) 2444 { 2445 qemu_savevm_send_postcopy_resume(s->to_dst_file); 2446 2447 while (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { 2448 qemu_sem_wait(&s->rp_state.rp_sem); 2449 } 2450 2451 if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { 2452 return 0; 2453 } 2454 2455 return -1; 2456 } 2457 2458 /* Return zero if success, or <0 for error */ 2459 static int postcopy_do_resume(MigrationState *s) 2460 { 2461 int ret; 2462 2463 /* 2464 * Call all the resume_prepare() hooks, so that modules can be 2465 * ready for the migration resume. 2466 */ 2467 ret = qemu_savevm_state_resume_prepare(s); 2468 if (ret) { 2469 error_report("%s: resume_prepare() failure detected: %d", 2470 __func__, ret); 2471 return ret; 2472 } 2473 2474 /* 2475 * If preempt is enabled, re-establish the preempt channel. Note that 2476 * we do it after resume prepare to make sure the main channel will be 2477 * created before the preempt channel. E.g. with weak network, the 2478 * dest QEMU may get messed up with the preempt and main channels on 2479 * the order of connection setup. This guarantees the correct order. 2480 */ 2481 ret = postcopy_preempt_establish_channel(s); 2482 if (ret) { 2483 error_report("%s: postcopy_preempt_establish_channel(): %d", 2484 __func__, ret); 2485 return ret; 2486 } 2487 2488 /* 2489 * Last handshake with destination on the resume (destination will 2490 * switch to postcopy-active afterwards) 2491 */ 2492 ret = postcopy_resume_handshake(s); 2493 if (ret) { 2494 error_report("%s: handshake failed: %d", __func__, ret); 2495 return ret; 2496 } 2497 2498 return 0; 2499 } 2500 2501 /* 2502 * We don't return until we are in a safe state to continue current 2503 * postcopy migration. Returns MIG_THR_ERR_RECOVERED if recovered, or 2504 * MIG_THR_ERR_FATAL if unrecovery failure happened. 2505 */ 2506 static MigThrError postcopy_pause(MigrationState *s) 2507 { 2508 assert(s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE); 2509 2510 while (true) { 2511 QEMUFile *file; 2512 2513 /* 2514 * Current channel is possibly broken. Release it. Note that this is 2515 * guaranteed even without lock because to_dst_file should only be 2516 * modified by the migration thread. That also guarantees that the 2517 * unregister of yank is safe too without the lock. It should be safe 2518 * even to be within the qemu_file_lock, but we didn't do that to avoid 2519 * taking more mutex (yank_lock) within qemu_file_lock. TL;DR: we make 2520 * the qemu_file_lock critical section as small as possible. 
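         *
         * In other words, the pattern is: take qemu_file_lock only long
         * enough to detach the pointer (file = s->to_dst_file;
         * s->to_dst_file = NULL;), then do the potentially blocking
         * qemu_file_shutdown() and qemu_fclose() on the detached handle
         * outside the lock.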
         */
        assert(s->to_dst_file);
        migration_ioc_unregister_yank_from_file(s->to_dst_file);
        qemu_mutex_lock(&s->qemu_file_lock);
        file = s->to_dst_file;
        s->to_dst_file = NULL;
        qemu_mutex_unlock(&s->qemu_file_lock);

        qemu_file_shutdown(file);
        qemu_fclose(file);

        migrate_set_state(&s->state, s->state,
                          MIGRATION_STATUS_POSTCOPY_PAUSED);

        error_report("Detected IO failure for postcopy. "
                     "Migration paused.");

        /*
         * We wait until things are fixed up. Then someone will set the
         * status back for us.
         */
        while (s->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
            qemu_sem_wait(&s->postcopy_pause_sem);
        }

        if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
            /* Woken up by a recover procedure. Give it a shot */

            /*
             * Firstly, let's wake up the return path now, with a new
             * return path channel.
             */
            qemu_sem_post(&s->postcopy_pause_rp_sem);

            /* Do the resume logic */
            if (postcopy_do_resume(s) == 0) {
                /* Let's continue! */
                trace_postcopy_pause_continued();
                return MIG_THR_ERR_RECOVERED;
            } else {
                /*
                 * Something wrong happened during the recovery, let's
                 * pause again. Pause is always better than throwing
                 * data away.
                 */
                continue;
            }
        } else {
            /* This is not right... Time to quit. */
            return MIG_THR_ERR_FATAL;
        }
    }
}

static MigThrError migration_detect_error(MigrationState *s)
{
    int ret;
    int state = s->state;
    Error *local_error = NULL;

    if (state == MIGRATION_STATUS_CANCELLING ||
        state == MIGRATION_STATUS_CANCELLED) {
        /* End the migration, but don't set the state to failed */
        return MIG_THR_ERR_FATAL;
    }

    /*
     * Try to detect any file errors. Note that postcopy_qemufile_src will
     * be NULL when postcopy preempt is not enabled.
     */
    ret = qemu_file_get_error_obj_any(s->to_dst_file,
                                      s->postcopy_qemufile_src,
                                      &local_error);
    if (!ret) {
        /* Everything is fine */
        assert(!local_error);
        return MIG_THR_ERR_NONE;
    }

    if (local_error) {
        migrate_set_error(s, local_error);
        error_free(local_error);
    }

    if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE && ret) {
        /*
         * For postcopy, we allow the network to be down for a
         * while. After that, it can be continued by a
         * recovery phase.
         */
        return postcopy_pause(s);
    } else {
        /*
         * For precopy (or postcopy with an error outside IO), we fail
         * immediately.
         */
        migrate_set_state(&s->state, state, MIGRATION_STATUS_FAILED);
        trace_migration_thread_file_err();

        /* Time to stop the migration, now. */
        return MIG_THR_ERR_FATAL;
    }
}

/* How many bytes have we transferred since the beginning of the migration */
static uint64_t migration_total_bytes(MigrationState *s)
{
    return qemu_file_total_transferred(s->to_dst_file) +
        stat64_get(&ram_counters.multifd_bytes);
}

static void migration_calculate_complete(MigrationState *s)
{
    uint64_t bytes = migration_total_bytes(s);
    int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t transfer_time;

    s->total_time = end_time - s->start_time;
    if (!s->downtime) {
        /*
         * It's still not set, so this is a precopy migration. For
         * postcopy, downtime is calculated during postcopy_start().
         */
        s->downtime = end_time - s->downtime_start;
    }

    transfer_time = s->total_time - s->setup_time;
    if (transfer_time) {
        s->mbps = ((double) bytes * 8.0) / transfer_time / 1000;
    }
}

static void update_iteration_initial_status(MigrationState *s)
{
    /*
     * Update these three fields at the same time to avoid using
     * mismatched data in the speed calculation.
     */
    s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    s->iteration_initial_bytes = migration_total_bytes(s);
    s->iteration_initial_pages = ram_get_total_transferred_pages();
}

static void migration_update_counters(MigrationState *s,
                                      int64_t current_time)
{
    uint64_t transferred, transferred_pages, time_spent;
    uint64_t current_bytes; /* bytes transferred since the beginning */
    double bandwidth;

    if (current_time < s->iteration_start_time + BUFFER_DELAY) {
        return;
    }

    current_bytes = migration_total_bytes(s);
    transferred = current_bytes - s->iteration_initial_bytes;
    time_spent = current_time - s->iteration_start_time;
    bandwidth = (double)transferred / time_spent;
    s->threshold_size = bandwidth * migrate_downtime_limit();

    s->mbps = (((double) transferred * 8.0) /
               ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;

    transferred_pages = ram_get_total_transferred_pages() -
                        s->iteration_initial_pages;
    s->pages_per_second = (double) transferred_pages /
                          (((double) time_spent / 1000.0));

    /*
     * If we haven't sent anything, we don't want to
     * recalculate. 10000 is a small enough number for our purposes.
     */
    if (stat64_get(&ram_counters.dirty_pages_rate) &&
        transferred > 10000) {
        s->expected_downtime =
            stat64_get(&ram_counters.dirty_bytes_last_sync) / bandwidth;
    }

    qemu_file_reset_rate_limit(s->to_dst_file);

    update_iteration_initial_status(s);

    trace_migrate_transferred(transferred, time_spent,
                              bandwidth, s->threshold_size);
}

/* Migration thread iteration status */
typedef enum {
    MIG_ITERATE_RESUME, /* Resume current iteration */
    MIG_ITERATE_SKIP,   /* Skip current iteration */
    MIG_ITERATE_BREAK,  /* Break the loop */
} MigIterateState;

/*
 * Run a single iteration of the migration state machine and report, via
 * MigIterateState, whether the caller should resume iterating, skip this
 * iteration, or break out of the loop.
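 *
 * The comparison against s->threshold_size is what decides convergence:
 * migration_update_counters() recomputes it every BUFFER_DELAY ms as
 * bandwidth (bytes/ms) * migrate_downtime_limit() (ms).  As a rough,
 * purely illustrative example: sustaining 1 MiB per millisecond (about
 * 1 GiB/s) with a 300 ms downtime limit gives a threshold of about
 * 300 MiB of outstanding data before the final stop-and-copy is attempted.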
2717 */ 2718 static MigIterateState migration_iteration_run(MigrationState *s) 2719 { 2720 uint64_t must_precopy, can_postcopy; 2721 bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE; 2722 2723 qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy); 2724 uint64_t pending_size = must_precopy + can_postcopy; 2725 2726 trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy); 2727 2728 if (must_precopy <= s->threshold_size) { 2729 qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy); 2730 pending_size = must_precopy + can_postcopy; 2731 trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy); 2732 } 2733 2734 if (!pending_size || pending_size < s->threshold_size) { 2735 trace_migration_thread_low_pending(pending_size); 2736 migration_completion(s); 2737 return MIG_ITERATE_BREAK; 2738 } 2739 2740 /* Still a significant amount to transfer */ 2741 if (!in_postcopy && must_precopy <= s->threshold_size && 2742 qatomic_read(&s->start_postcopy)) { 2743 if (postcopy_start(s)) { 2744 error_report("%s: postcopy failed to start", __func__); 2745 } 2746 return MIG_ITERATE_SKIP; 2747 } 2748 2749 /* Just another iteration step */ 2750 qemu_savevm_state_iterate(s->to_dst_file, in_postcopy); 2751 return MIG_ITERATE_RESUME; 2752 } 2753 2754 static void migration_iteration_finish(MigrationState *s) 2755 { 2756 /* If we enabled cpu throttling for auto-converge, turn it off. */ 2757 cpu_throttle_stop(); 2758 2759 qemu_mutex_lock_iothread(); 2760 switch (s->state) { 2761 case MIGRATION_STATUS_COMPLETED: 2762 migration_calculate_complete(s); 2763 runstate_set(RUN_STATE_POSTMIGRATE); 2764 break; 2765 case MIGRATION_STATUS_COLO: 2766 if (!migrate_colo()) { 2767 error_report("%s: critical error: calling COLO code without " 2768 "COLO enabled", __func__); 2769 } 2770 migrate_start_colo_process(s); 2771 s->vm_was_running = true; 2772 /* Fallthrough */ 2773 case MIGRATION_STATUS_FAILED: 2774 case MIGRATION_STATUS_CANCELLED: 2775 case MIGRATION_STATUS_CANCELLING: 2776 if (s->vm_was_running) { 2777 if (!runstate_check(RUN_STATE_SHUTDOWN)) { 2778 vm_start(); 2779 } 2780 } else { 2781 if (runstate_check(RUN_STATE_FINISH_MIGRATE)) { 2782 runstate_set(RUN_STATE_POSTMIGRATE); 2783 } 2784 } 2785 break; 2786 2787 default: 2788 /* Should not reach here, but if so, forgive the VM. */ 2789 error_report("%s: Unknown ending state %d", __func__, s->state); 2790 break; 2791 } 2792 migrate_fd_cleanup_schedule(s); 2793 qemu_mutex_unlock_iothread(); 2794 } 2795 2796 static void bg_migration_iteration_finish(MigrationState *s) 2797 { 2798 qemu_mutex_lock_iothread(); 2799 switch (s->state) { 2800 case MIGRATION_STATUS_COMPLETED: 2801 migration_calculate_complete(s); 2802 break; 2803 2804 case MIGRATION_STATUS_ACTIVE: 2805 case MIGRATION_STATUS_FAILED: 2806 case MIGRATION_STATUS_CANCELLED: 2807 case MIGRATION_STATUS_CANCELLING: 2808 break; 2809 2810 default: 2811 /* Should not reach here, but if so, forgive the VM. */ 2812 error_report("%s: Unknown ending state %d", __func__, s->state); 2813 break; 2814 } 2815 2816 migrate_fd_cleanup_schedule(s); 2817 qemu_mutex_unlock_iothread(); 2818 } 2819 2820 /* 2821 * Return true if continue to the next iteration directly, false 2822 * otherwise. 
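 *
 * (As with migration_iteration_run() above, the value actually returned
 * is a MigIterateState.)  For the background snapshot there is no dirty
 * set to converge on: guest writes are intercepted by UFFD-WP instead,
 * so we simply iterate until qemu_savevm_state_iterate() reports
 * completion and then flush the buffered device state via
 * bg_migration_completion().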
 */
static MigIterateState bg_migration_iteration_run(MigrationState *s)
{
    int res;

    res = qemu_savevm_state_iterate(s->to_dst_file, false);
    if (res > 0) {
        bg_migration_completion(s);
        return MIG_ITERATE_BREAK;
    }

    return MIG_ITERATE_RESUME;
}

void migration_make_urgent_request(void)
{
    qemu_sem_post(&migrate_get_current()->rate_limit_sem);
}

void migration_consume_urgent_request(void)
{
    qemu_sem_wait(&migrate_get_current()->rate_limit_sem);
}

/* Returns true if the rate limiting was broken by an urgent request */
bool migration_rate_limit(void)
{
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    MigrationState *s = migrate_get_current();

    bool urgent = false;
    migration_update_counters(s, now);
    if (qemu_file_rate_limit(s->to_dst_file)) {

        if (qemu_file_get_error(s->to_dst_file)) {
            return false;
        }
        /*
         * Wait for a delay to do rate limiting OR
         * something urgent to post the semaphore.
         */
        int ms = s->iteration_start_time + BUFFER_DELAY - now;
        trace_migration_rate_limit_pre(ms);
        if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) {
            /*
             * We were woken by one or more urgent things but
             * the timedwait will have consumed one of them.
             * The service routine for the urgent wake will dec
             * the semaphore itself for each item it consumes,
             * so add back the one we just consumed here.
             */
            qemu_sem_post(&s->rate_limit_sem);
            urgent = true;
        }
        trace_migration_rate_limit_post(urgent);
    }
    return urgent;
}

/*
 * If failover devices are present, wait until they are completely
 * unplugged.
 */
static void qemu_savevm_wait_unplug(MigrationState *s, int old_state,
                                    int new_state)
{
    if (qemu_savevm_state_guest_unplug_pending()) {
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_WAIT_UNPLUG);

        while (s->state == MIGRATION_STATUS_WAIT_UNPLUG &&
               qemu_savevm_state_guest_unplug_pending()) {
            qemu_sem_timedwait(&s->wait_unplug_sem, 250);
        }
        if (s->state != MIGRATION_STATUS_WAIT_UNPLUG) {
            int timeout = 120; /* 30 seconds */
            /*
             * Migration has been cancelled, but as we have started an
             * unplug we must wait for it to finish in order to be able
             * to plug the card back in.
             */
            while (timeout-- && qemu_savevm_state_guest_unplug_pending()) {
                qemu_sem_timedwait(&s->wait_unplug_sem, 250);
            }
            if (qemu_savevm_state_guest_unplug_pending() &&
                !qtest_enabled()) {
                warn_report("migration: partially unplugged device on "
                            "failure");
            }
        }

        migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG, new_state);
    } else {
        migrate_set_state(&s->state, old_state, new_state);
    }
}

/*
 * Master migration thread on the source VM.
 * It drives the migration and pumps the data down the outgoing channel.
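 *
 * Each pass of its main loop either performs one migration_iteration_run()
 * step (when the rate limit allows it, or an urgent request is pending) or
 * sleeps in migration_rate_limit(), which wakes early whenever
 * migration_make_urgent_request() posts rate_limit_sem, e.g. for an
 * incoming postcopy page request.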
 */
static void *migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    MigrationThread *thread = NULL;
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    MigThrError thr_error;
    bool urgent = false;

    thread = MigrationThreadAdd("live_migration", qemu_get_thread_id());

    rcu_register_thread();

    object_ref(OBJECT(s));
    update_iteration_initial_status(s);

    qemu_savevm_state_header(s->to_dst_file);

    /*
     * If we opened the return path, we need to make sure dst has it
     * opened as well.
     */
    if (s->rp_state.rp_thread_created) {
        /* Now tell the dest that it should open its end so it can reply */
        qemu_savevm_send_open_return_path(s->to_dst_file);

        /* And do a ping that will make stuff easier to debug */
        qemu_savevm_send_ping(s->to_dst_file, 1);
    }

    if (migrate_postcopy()) {
        /*
         * Tell the destination that we *might* want to do postcopy later;
         * if the other end can't do postcopy it should fail now, nice and
         * early.
         */
        qemu_savevm_send_postcopy_advise(s->to_dst_file);
    }

    if (migrate_colo()) {
        /* Notify migration destination that we enable COLO */
        qemu_savevm_send_colo_enable(s->to_dst_file);
    }

    qemu_savevm_state_setup(s->to_dst_file);

    qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
                            MIGRATION_STATUS_ACTIVE);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;

    trace_migration_thread_setup_complete();

    while (migration_is_active(s)) {
        if (urgent || !qemu_file_rate_limit(s->to_dst_file)) {
            MigIterateState iter_state = migration_iteration_run(s);
            if (iter_state == MIG_ITERATE_SKIP) {
                continue;
            } else if (iter_state == MIG_ITERATE_BREAK) {
                break;
            }
        }

        /*
         * Try to detect any kind of failures, and see whether we
         * should stop the migration now.
         */
        thr_error = migration_detect_error(s);
        if (thr_error == MIG_THR_ERR_FATAL) {
            /* Stop migration */
            break;
        } else if (thr_error == MIG_THR_ERR_RECOVERED) {
            /*
             * Just recovered from, e.g., a network failure; reset all
             * the local variables. This is important to avoid breaking
             * the transferred_bytes and bandwidth calculations.
             */
            update_iteration_initial_status(s);
        }

        urgent = migration_rate_limit();
    }

    trace_migration_thread_after_loop();
    migration_iteration_finish(s);
    object_unref(OBJECT(s));
    rcu_unregister_thread();
    MigrationThreadDel(thread);
    return NULL;
}

static void bg_migration_vm_start_bh(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->vm_start_bh);
    s->vm_start_bh = NULL;

    vm_start();
    s->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - s->downtime_start;
}

/**
 * Background snapshot thread, based on live migration code.
 * This is an alternative implementation of the live migration mechanism,
 * introduced specifically to support background snapshots.
 *
 * It takes advantage of the userfault_fd write protection mechanism
 * introduced in the v5.7 kernel. Compared to existing dirty page logging
 * migration, much less stream traffic is produced, resulting in smaller
 * snapshot images, simply because no page duplicates can get into the
 * stream.
 *
 * Another key point is that the generated vmstate stream reflects the
 * machine state 'frozen' at the beginning of snapshot creation, whereas
 * with the dirty page logging mechanism the saved snapshot is effectively
 * the state of the VM at the end of the process.
 */
static void *bg_migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    int64_t setup_start;
    MigThrError thr_error;
    QEMUFile *fb;
    bool early_fail = true;

    rcu_register_thread();
    object_ref(OBJECT(s));

    qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);

    setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    /*
     * We want to save the vmstate for the moment when migration has been
     * initiated, but we also want to save RAM content while the VM is
     * running. The RAM content should appear first in the vmstate. So, we
     * first stash the non-RAM part of the vmstate to a temporary buffer,
     * then write the RAM part of the vmstate to the migration stream with
     * vCPUs running and, finally, write the stashed non-RAM part of the
     * vmstate from the buffer to the migration stream.
     */
    s->bioc = qio_channel_buffer_new(512 * 1024);
    qio_channel_set_name(QIO_CHANNEL(s->bioc), "vmstate-buffer");
    fb = qemu_file_new_output(QIO_CHANNEL(s->bioc));
    object_unref(OBJECT(s->bioc));

    update_iteration_initial_status(s);

    /*
     * Prepare for tracking memory writes with UFFD-WP - populate
     * RAM pages before protecting.
     */
#ifdef __linux__
    ram_write_tracking_prepare();
#endif

    qemu_savevm_state_header(s->to_dst_file);
    qemu_savevm_state_setup(s->to_dst_file);

    qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
                            MIGRATION_STATUS_ACTIVE);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;

    trace_migration_thread_setup_complete();
    s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    qemu_mutex_lock_iothread();

    /*
     * If the VM is currently in a suspended state, then, to make a valid
     * runstate transition in vm_stop_force_state(), we need to wake it up.
     */
    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
    s->vm_was_running = runstate_is_running();

    if (global_state_store()) {
        goto fail;
    }
    /* Forcibly stop VM before saving state of vCPUs and devices */
    if (vm_stop_force_state(RUN_STATE_PAUSED)) {
        goto fail;
    }
    /*
     * Put vCPUs in sync with shadow context structures, then
     * save their state to channel-buffer along with devices.
     */
    cpu_synchronize_all_states();
    if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) {
        goto fail;
    }
    /*
     * Since we are going to get non-iterable state data directly
     * from s->bioc->data, explicit flush is needed here.
     */
    qemu_fflush(fb);

    /* Now initialize UFFD context and start tracking RAM writes */
    if (ram_write_tracking_start()) {
        goto fail;
    }
    early_fail = false;

    /*
     * Start the VM from the BH handler to avoid a write-fault lock here.
     * UFFD-WP protection for the whole RAM is already enabled, so calling
     * VM state change notifiers from vm_start() would initiate writes to
     * virtio VQ memory which is in the write-protected region.
     */
    s->vm_start_bh = qemu_bh_new(bg_migration_vm_start_bh, s);
    qemu_bh_schedule(s->vm_start_bh);

    qemu_mutex_unlock_iothread();

    while (migration_is_active(s)) {
        MigIterateState iter_state = bg_migration_iteration_run(s);
        if (iter_state == MIG_ITERATE_SKIP) {
            continue;
        } else if (iter_state == MIG_ITERATE_BREAK) {
            break;
        }

        /*
         * Try to detect any kind of failures, and see whether we
         * should stop the migration now.
         */
        thr_error = migration_detect_error(s);
        if (thr_error == MIG_THR_ERR_FATAL) {
            /* Stop migration */
            break;
        }

        migration_update_counters(s, qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    }

    trace_migration_thread_after_loop();

fail:
    if (early_fail) {
        migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        qemu_mutex_unlock_iothread();
    }

    bg_migration_iteration_finish(s);

    qemu_fclose(fb);
    object_unref(OBJECT(s));
    rcu_unregister_thread();

    return NULL;
}

void migrate_fd_connect(MigrationState *s, Error *error_in)
{
    Error *local_err = NULL;
    int64_t rate_limit;
    bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;

    /*
     * If there's a previous error, free it and prepare for another one.
     * Meanwhile, if migration completes successfully, there won't be an
     * error dumped when calling migrate_fd_cleanup().
     */
    migrate_error_free(s);

    s->expected_downtime = migrate_downtime_limit();
    if (resume) {
        assert(s->cleanup_bh);
    } else {
        assert(!s->cleanup_bh);
        s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup_bh, s);
    }
    if (error_in) {
        migrate_fd_error(s, error_in);
        if (resume) {
            /*
             * Don't do cleanup for resume if the channel is invalid, but
             * only dump the error. We wait for another channel connect
             * from the user. The error_report still gives the HMP user a
             * hint on what failed. It's normally done in
             * migrate_fd_cleanup(), but call it here explicitly.
             */
            error_report_err(error_copy(s->error));
        } else {
            migrate_fd_cleanup(s);
        }
        return;
    }

    if (resume) {
        /* This is a resumed migration */
        rate_limit = migrate_max_postcopy_bandwidth() /
            XFER_LIMIT_RATIO;
    } else {
        /* This is a fresh new migration */
        rate_limit = migrate_max_bandwidth() / XFER_LIMIT_RATIO;

        /* Notify before starting migration thread */
        notifier_list_notify(&migration_state_notifiers, s);
    }

    qemu_file_set_rate_limit(s->to_dst_file, rate_limit);
    qemu_file_set_blocking(s->to_dst_file, true);

    /*
     * Open the return path. For postcopy, it is used exclusively. For
     * precopy, the return path is only used if the user enabled the
     * "return-path" capability.
     */
    if (migrate_postcopy_ram() || migrate_return_path()) {
        if (open_return_path_on_source(s, !resume)) {
            error_report("Unable to open return-path for postcopy");
            migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
            migrate_fd_cleanup(s);
            return;
        }
    }

    /*
     * This needs to be done before resuming a postcopy. Note: for newer
     * QEMUs we will delay the channel creation until postcopy_start(), to
     * avoid disorder of channel creations.
3245 */ 3246 if (migrate_postcopy_preempt() && s->preempt_pre_7_2) { 3247 postcopy_preempt_setup(s); 3248 } 3249 3250 if (resume) { 3251 /* Wakeup the main migration thread to do the recovery */ 3252 migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED, 3253 MIGRATION_STATUS_POSTCOPY_RECOVER); 3254 qemu_sem_post(&s->postcopy_pause_sem); 3255 return; 3256 } 3257 3258 if (multifd_save_setup(&local_err) != 0) { 3259 error_report_err(local_err); 3260 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, 3261 MIGRATION_STATUS_FAILED); 3262 migrate_fd_cleanup(s); 3263 return; 3264 } 3265 3266 if (migrate_background_snapshot()) { 3267 qemu_thread_create(&s->thread, "bg_snapshot", 3268 bg_migration_thread, s, QEMU_THREAD_JOINABLE); 3269 } else { 3270 qemu_thread_create(&s->thread, "live_migration", 3271 migration_thread, s, QEMU_THREAD_JOINABLE); 3272 } 3273 s->migration_thread_running = true; 3274 } 3275 3276 static void migration_class_init(ObjectClass *klass, void *data) 3277 { 3278 DeviceClass *dc = DEVICE_CLASS(klass); 3279 3280 dc->user_creatable = false; 3281 device_class_set_props(dc, migration_properties); 3282 } 3283 3284 static void migration_instance_finalize(Object *obj) 3285 { 3286 MigrationState *ms = MIGRATION_OBJ(obj); 3287 3288 qemu_mutex_destroy(&ms->error_mutex); 3289 qemu_mutex_destroy(&ms->qemu_file_lock); 3290 qemu_sem_destroy(&ms->wait_unplug_sem); 3291 qemu_sem_destroy(&ms->rate_limit_sem); 3292 qemu_sem_destroy(&ms->pause_sem); 3293 qemu_sem_destroy(&ms->postcopy_pause_sem); 3294 qemu_sem_destroy(&ms->postcopy_pause_rp_sem); 3295 qemu_sem_destroy(&ms->rp_state.rp_sem); 3296 qemu_sem_destroy(&ms->rp_state.rp_pong_acks); 3297 qemu_sem_destroy(&ms->postcopy_qemufile_src_sem); 3298 error_free(ms->error); 3299 } 3300 3301 static void migration_instance_init(Object *obj) 3302 { 3303 MigrationState *ms = MIGRATION_OBJ(obj); 3304 3305 ms->state = MIGRATION_STATUS_NONE; 3306 ms->mbps = -1; 3307 ms->pages_per_second = -1; 3308 qemu_sem_init(&ms->pause_sem, 0); 3309 qemu_mutex_init(&ms->error_mutex); 3310 3311 migrate_params_init(&ms->parameters); 3312 3313 qemu_sem_init(&ms->postcopy_pause_sem, 0); 3314 qemu_sem_init(&ms->postcopy_pause_rp_sem, 0); 3315 qemu_sem_init(&ms->rp_state.rp_sem, 0); 3316 qemu_sem_init(&ms->rp_state.rp_pong_acks, 0); 3317 qemu_sem_init(&ms->rate_limit_sem, 0); 3318 qemu_sem_init(&ms->wait_unplug_sem, 0); 3319 qemu_sem_init(&ms->postcopy_qemufile_src_sem, 0); 3320 qemu_mutex_init(&ms->qemu_file_lock); 3321 } 3322 3323 /* 3324 * Return true if check pass, false otherwise. Error will be put 3325 * inside errp if provided. 3326 */ 3327 static bool migration_object_check(MigrationState *ms, Error **errp) 3328 { 3329 /* Assuming all off */ 3330 bool old_caps[MIGRATION_CAPABILITY__MAX] = { 0 }; 3331 3332 if (!migrate_params_check(&ms->parameters, errp)) { 3333 return false; 3334 } 3335 3336 return migrate_caps_check(old_caps, ms->capabilities, errp); 3337 } 3338 3339 static const TypeInfo migration_type = { 3340 .name = TYPE_MIGRATION, 3341 /* 3342 * NOTE: TYPE_MIGRATION is not really a device, as the object is 3343 * not created using qdev_new(), it is not attached to the qdev 3344 * device tree, and it is never realized. 3345 * 3346 * TODO: Make this TYPE_OBJECT once QOM provides something like 3347 * TYPE_DEVICE's "-global" properties. 
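     *
     * In practice this means the migration tunables declared in
     * migration_properties can typically be overridden from the command
     * line with -global migration.<property>=<value>, which is why the
     * type still derives from TYPE_DEVICE for now.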
3348 */ 3349 .parent = TYPE_DEVICE, 3350 .class_init = migration_class_init, 3351 .class_size = sizeof(MigrationClass), 3352 .instance_size = sizeof(MigrationState), 3353 .instance_init = migration_instance_init, 3354 .instance_finalize = migration_instance_finalize, 3355 }; 3356 3357 static void register_migration_types(void) 3358 { 3359 type_register_static(&migration_type); 3360 } 3361 3362 type_init(register_migration_types); 3363