/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/blocker.h"
#include "exec.h"
#include "fd.h"
#include "socket.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpu-throttle.h"
#include "rdma.h"
#include "ram.h"
#include "migration/global_state.h"
#include "migration/misc.h"
#include "migration.h"
#include "savevm.h"
#include "qemu-file-channel.h"
#include "qemu-file.h"
#include "migration/vmstate.h"
#include "block/block.h"
#include "qapi/error.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-migration.h"
#include "qapi/qapi-visit-sockets.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qnull.h"
#include "qemu/rcu.h"
#include "block.h"
#include "postcopy-ram.h"
#include "qemu/thread.h"
#include "trace.h"
#include "exec/target_page.h"
#include "io/channel-buffer.h"
#include "migration/colo.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "monitor/monitor.h"
#include "net/announce.h"
#include "qemu/queue.h"
#include "multifd.h"
#include "qemu/yank.h"
#include "sysemu/cpus.h"
#include "yank_functions.h"
#include "sysemu/qtest.h"

#define MAX_THROTTLE  (128 << 20)      /* Migration transfer speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)

/* Time in milliseconds we are allowed to stop the source,
 * for sending the last part */
#define DEFAULT_MIGRATE_SET_DOWNTIME 300

/* Maximum migrate downtime set to 2000 seconds */
#define MAX_MIGRATE_DOWNTIME_SECONDS 2000
#define MAX_MIGRATE_DOWNTIME (MAX_MIGRATE_DOWNTIME_SECONDS * 1000)

/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count, usually decompression is at
 * least 4 times as fast as compression.*/
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/* 0: means nocompress, 1: best speed, ... 9: best compress ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
/* Define default autoconverge cpu throttle migration parameters */
#define DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD 50
#define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
#define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10
#define DEFAULT_MIGRATE_MAX_CPU_THROTTLE 99

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE (64 * 1024 * 1024)

/* The delay time (in ms) between two COLO checkpoints */
#define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY (200 * 100)
#define DEFAULT_MIGRATE_MULTIFD_CHANNELS 2
#define DEFAULT_MIGRATE_MULTIFD_COMPRESSION MULTIFD_COMPRESSION_NONE
/* 0: means nocompress, 1: best speed, ... 9: best compress ratio */
#define DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL 1
/* 0: means nocompress, 1: best speed, ... 20: best compress ratio */
#define DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL 1

/* Background transfer rate for postcopy, 0 means unlimited, note
 * that page requests can still exceed this limit.
 */
#define DEFAULT_MIGRATE_MAX_POSTCOPY_BANDWIDTH 0

/*
 * Parameters for self_announce_delay giving a stream of RARP/ARP
 * packets after migration.
 */
#define DEFAULT_MIGRATE_ANNOUNCE_INITIAL  50
#define DEFAULT_MIGRATE_ANNOUNCE_MAX     550
#define DEFAULT_MIGRATE_ANNOUNCE_ROUNDS    5
#define DEFAULT_MIGRATE_ANNOUNCE_STEP    100

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

/* Messages sent on the return path from destination to source */
enum mig_rp_message_type {
    MIG_RP_MSG_INVALID = 0,  /* Must be 0 */
    MIG_RP_MSG_SHUT,         /* sibling will not send any more RP messages */
    MIG_RP_MSG_PONG,         /* Response to a PING; data (seq: be32 ) */

    MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */
    MIG_RP_MSG_REQ_PAGES,    /* data (start: be64, len: be32) */
    MIG_RP_MSG_RECV_BITMAP,  /* send recved_bitmap back to source */
    MIG_RP_MSG_RESUME_ACK,   /* tell source that we are ready to resume */

    MIG_RP_MSG_MAX
};

/* Migration capabilities set */
struct MigrateCapsSet {
    int size;                       /* Capability set size */
    MigrationCapability caps[];     /* Variadic array of capabilities */
};
typedef struct MigrateCapsSet MigrateCapsSet;

/* Define and initialize MigrateCapsSet */
#define INITIALIZE_MIGRATE_CAPS_SET(_name, ...)   \
    MigrateCapsSet _name = {                      \
        .size = sizeof((int []) { __VA_ARGS__ }) / sizeof(int), \
        .caps = { __VA_ARGS__ }                   \
    }

/* Background-snapshot compatibility check list */
static const
INITIALIZE_MIGRATE_CAPS_SET(check_caps_background_snapshot,
    MIGRATION_CAPABILITY_POSTCOPY_RAM,
    MIGRATION_CAPABILITY_DIRTY_BITMAPS,
    MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME,
    MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE,
    MIGRATION_CAPABILITY_RETURN_PATH,
    MIGRATION_CAPABILITY_MULTIFD,
    MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER,
    MIGRATION_CAPABILITY_AUTO_CONVERGE,
    MIGRATION_CAPABILITY_RELEASE_RAM,
    MIGRATION_CAPABILITY_RDMA_PIN_ALL,
    MIGRATION_CAPABILITY_COMPRESS,
    MIGRATION_CAPABILITY_XBZRLE,
    MIGRATION_CAPABILITY_X_COLO,
    MIGRATION_CAPABILITY_VALIDATE_UUID);

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */

static MigrationState *current_migration;
static MigrationIncomingState *current_incoming;

static GSList *migration_blockers;

static bool migration_object_check(MigrationState *ms, Error **errp);
static int migration_maybe_pause(MigrationState *s,
                                 int *current_active_state,
                                 int new_state);
static void migrate_fd_cancel(MigrationState *s);

static bool migrate_allow_multi_channels = true;

void migrate_protocol_allow_multi_channels(bool allow)
{
    migrate_allow_multi_channels = allow;
}

bool migrate_multi_channels_is_allowed(void)
{
    return migrate_allow_multi_channels;
}

static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp)
{
    uintptr_t a = (uintptr_t) ap, b = (uintptr_t) bp;

    return (a > b) - (a < b);
}

void migration_object_init(void)
{
    /* This can only be called once. */
    assert(!current_migration);
    current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION));

    /*
     * Init the migrate incoming object as well no matter whether
     * we'll use it or not.
     */
    assert(!current_incoming);
    current_incoming = g_new0(MigrationIncomingState, 1);
    current_incoming->state = MIGRATION_STATUS_NONE;
    current_incoming->postcopy_remote_fds =
        g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD));
    qemu_mutex_init(&current_incoming->rp_mutex);
    qemu_event_init(&current_incoming->main_thread_load_event, false);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_dst, 0);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_fault, 0);
    qemu_mutex_init(&current_incoming->page_request_mutex);
    current_incoming->page_requested = g_tree_new(page_request_addr_cmp);

    migration_object_check(current_migration, &error_fatal);

    blk_mig_init();
    ram_mig_init();
    dirty_bitmap_mig_init();
}

void migration_cancel(const Error *error)
{
    if (error) {
        migrate_set_error(current_migration, error);
    }
    migrate_fd_cancel(current_migration);
}

void migration_shutdown(void)
{
    /*
     * When the QEMU main thread exits, the COLO thread may be waiting
     * on a semaphore, so we should wake up the COLO thread before
     * migration shutdown.
     */
    colo_shutdown();
    /*
     * Cancel the current migration - that will (eventually)
     * stop the migration using this structure
     */
    migration_cancel(NULL);
    object_unref(OBJECT(current_migration));

    /*
     * Cancel outgoing migration of dirty bitmaps. It should
     * at least unref used block nodes.
     */
    dirty_bitmap_mig_cancel_outgoing();

    /*
     * Cancel incoming migration of dirty bitmaps. Dirty bitmaps
     * are non-critical data, and their loss is never considered
     * serious.
     */
    dirty_bitmap_mig_cancel_incoming();
}

/* For outgoing */
MigrationState *migrate_get_current(void)
{
    /* This can only be called after the object is created. */
    assert(current_migration);
    return current_migration;
}

MigrationIncomingState *migration_incoming_get_current(void)
{
    assert(current_incoming);
    return current_incoming;
}

void migration_incoming_transport_cleanup(MigrationIncomingState *mis)
{
    if (mis->socket_address_list) {
        qapi_free_SocketAddressList(mis->socket_address_list);
        mis->socket_address_list = NULL;
    }

    if (mis->transport_cleanup) {
        mis->transport_cleanup(mis->transport_data);
        mis->transport_data = mis->transport_cleanup = NULL;
    }
}

void migration_incoming_state_destroy(void)
{
    struct MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->to_src_file) {
        /* Tell source that we are done */
        migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
        qemu_fclose(mis->to_src_file);
        mis->to_src_file = NULL;
    }

    if (mis->from_src_file) {
        migration_ioc_unregister_yank_from_file(mis->from_src_file);
        qemu_fclose(mis->from_src_file);
        mis->from_src_file = NULL;
    }
    if (mis->postcopy_remote_fds) {
        g_array_free(mis->postcopy_remote_fds, TRUE);
        mis->postcopy_remote_fds = NULL;
    }

    migration_incoming_transport_cleanup(mis);
    qemu_event_reset(&mis->main_thread_load_event);

    if (mis->page_requested) {
        g_tree_destroy(mis->page_requested);
        mis->page_requested = NULL;
    }

    yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_use_events()) {
        qapi_event_send_migration(new_state);
    }
}

static bool migrate_late_block_activate(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[
        MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE];
}

/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
static int migrate_send_rp_message(MigrationIncomingState *mis,
                                   enum mig_rp_message_type message_type,
                                   uint16_t len, void *data)
{
    int ret = 0;

    trace_migrate_send_rp_message((int)message_type, len);
    QEMU_LOCK_GUARD(&mis->rp_mutex);

    /*
     * It's possible that the file handle got lost due to network
     * failures.
     */
    if (!mis->to_src_file) {
        ret = -EIO;
        return ret;
    }

    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);

    /* It's possible that qemu file got error during sending */
    ret = qemu_file_get_error(mis->to_src_file);

    return ret;
}

/* Request one page from the source VM at the given start address.
 *   rb: the RAMBlock to request the page in
 *   Start: Address offset within the RB
 *   Len: Length in bytes required - must be a multiple of pagesize
 */
int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
                                      RAMBlock *rb, ram_addr_t start)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
    size_t msglen = 12; /* start + len */
    size_t len = qemu_ram_pagesize(rb);
    enum mig_rp_message_type msg_type;
    const char *rbname;
    int rbname_len;

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    /*
     * We maintain the last ramblock that we requested for page.  Note that we
     * don't need locking because this function will only be called within the
     * postcopy ram fault thread.
     */
    if (rb != mis->last_rb) {
        mis->last_rb = rb;

        rbname = qemu_ram_get_idstr(rb);
        rbname_len = strlen(rbname);

        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        msg_type = MIG_RP_MSG_REQ_PAGES_ID;
    } else {
        msg_type = MIG_RP_MSG_REQ_PAGES;
    }

    return migrate_send_rp_message(mis, msg_type, msglen, bufc);
}

int migrate_send_rp_req_pages(MigrationIncomingState *mis,
                              RAMBlock *rb, ram_addr_t start, uint64_t haddr)
{
    void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
    bool received = false;

    WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
        received = ramblock_recv_bitmap_test_byte_offset(rb, start);
        if (!received && !g_tree_lookup(mis->page_requested, aligned)) {
            /*
             * The page has not been received, and it's not yet in the page
             * request list.  Queue it.  Set the value of element to 1, so that
             * things like g_tree_lookup() will return TRUE (1) when found.
             */
            g_tree_insert(mis->page_requested, aligned, (gpointer)1);
            mis->page_requested_count++;
            trace_postcopy_page_req_add(aligned, mis->page_requested_count);
        }
    }

    /*
     * If the page is there, skip sending the message.  We don't even need the
     * lock because as long as the page arrived, it'll be there forever.
     */
    if (received) {
        return 0;
    }

    return migrate_send_rp_message_req_pages(mis, rb, start);
}

static bool migration_colo_enabled;
bool migration_incoming_colo_enabled(void)
{
    return migration_colo_enabled;
}

void migration_incoming_disable_colo(void)
{
    ram_block_discard_disable(false);
    migration_colo_enabled = false;
}

int migration_incoming_enable_colo(void)
{
    if (ram_block_discard_disable(true)) {
        error_report("COLO: cannot disable RAM discard");
        return -EBUSY;
    }
    migration_colo_enabled = true;
    return 0;
}

void migrate_add_address(SocketAddress *address)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    QAPI_LIST_PREPEND(mis->socket_address_list,
                      QAPI_CLONE(SocketAddress, address));
}

static void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p = NULL;

    migrate_protocol_allow_multi_channels(false); /* reset it anyway */
    qapi_event_send_migration(MIGRATION_STATUS_SETUP);
    if (strstart(uri, "tcp:", &p) ||
        strstart(uri, "unix:", NULL) ||
        strstart(uri, "vsock:", NULL)) {
        migrate_protocol_allow_multi_channels(true);
        socket_start_incoming_migration(p ? p : uri, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}

static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    /* If capability late_block_activate is set:
     * Only fire up the block code now if we're going to restart the
     * VM, else 'cont' will do it.
     * This causes file locking to happen; so we don't want it to happen
     * unless we really are starting the VM.
     */
    if (!migrate_late_block_activate() ||
         (autostart && (!global_state_received() ||
            global_state_get_runstate() == RUN_STATE_RUNNING))) {
        /* Make sure all file formats throw away their mutable metadata.
         * If we get an error here, just don't restart the VM yet. */
        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
            local_err = NULL;
            autostart = false;
        }
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self(&mis->announce_timer, migrate_announce_params());

    if (multifd_load_cleanup(&local_err) != 0) {
        error_report_err(local_err);
        autostart = false;
    }
    /* If global state section was not received or we are in running
       state, we need to obey autostart. Any other state is set with
       runstate_set. */

    dirty_bitmap_mig_before_vm_start();

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else if (migration_incoming_colo_enabled()) {
        migration_incoming_disable_colo();
        vm_start();
    } else {
        runstate_set(global_state_get_runstate());
    }
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    qemu_bh_delete(mis->bh);
    migration_incoming_state_destroy();
}

static void process_incoming_migration_co(void *opaque)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyState ps;
    int ret;
    Error *local_err = NULL;

    assert(mis->from_src_file);
    mis->migration_incoming_co = qemu_coroutine_self();
    mis->largest_page_size = qemu_ram_pagesize_largest();
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_ACTIVE);
    ret = qemu_loadvm_state(mis->from_src_file);

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    /* we get COLO info, and know if we are in COLO mode */
    if (!ret && migration_incoming_colo_enabled()) {
        /* Make sure all file formats throw away their mutable metadata */
        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
            goto fail;
        }

        qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
             colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE);
        mis->have_colo_incoming_thread = true;
        qemu_coroutine_yield();

        qemu_mutex_unlock_iothread();
        /* Wait checkpoint incoming thread exit before free resource */
        qemu_thread_join(&mis->colo_incoming_thread);
        qemu_mutex_lock_iothread();
        /* We hold the global iothread lock, so it is safe here */
        colo_release_ram_cache();
    }

    if (ret < 0) {
        error_report("load of migration failed: %s", strerror(-ret));
        goto fail;
    }
    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
    qemu_bh_schedule(mis->bh);
    mis->migration_incoming_co = NULL;
    return;
fail:
    local_err = NULL;
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    qemu_fclose(mis->from_src_file);
    if (multifd_load_cleanup(&local_err) != 0) {
        error_report_err(local_err);
    }
    exit(EXIT_FAILURE);
}

/**
 * migration_incoming_setup: Setup incoming migration
 * @f: file for main migration channel
 * @errp: where to put errors
 *
 * Returns: %true on success, %false on error.
 */
static bool migration_incoming_setup(QEMUFile *f, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (multifd_load_setup(errp) != 0) {
        return false;
    }

    if (!mis->from_src_file) {
        mis->from_src_file = f;
    }
    qemu_file_set_blocking(f, false);
    return true;
}

void migration_incoming_process(void)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, NULL);
    qemu_coroutine_enter(co);
}

/* Returns true if recovered from a paused migration, otherwise false */
static bool postcopy_try_recover(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
        /* Resumed from a paused postcopy migration */

        /* This should be set already in migration_incoming_setup() */
        assert(mis->from_src_file);
        /* Postcopy has standalone thread to do vm load */
        qemu_file_set_blocking(mis->from_src_file, true);

        /* Re-configure the return path */
        mis->to_src_file = qemu_file_get_return_path(mis->from_src_file);

        migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
                          MIGRATION_STATUS_POSTCOPY_RECOVER);

        /*
         * Here, we only wake up the main loading thread (while the
         * fault thread will still be waiting), so that we can receive
         * commands from source now, and answer it if needed. The
         * fault thread will be woken up afterwards until we are sure
         * that source is ready to reply to page requests.
         */
        qemu_sem_post(&mis->postcopy_pause_sem_dst);
        return true;
    }

    return false;
}

void migration_fd_process_incoming(QEMUFile *f, Error **errp)
{
    if (!migration_incoming_setup(f, errp)) {
        return;
    }
    if (postcopy_try_recover()) {
        return;
    }
    migration_incoming_process();
}

void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    Error *local_err = NULL;
    bool start_migration;

    if (!mis->from_src_file) {
        /* The first connection (multifd may have multiple) */
        QEMUFile *f = qemu_fopen_channel_input(ioc);

        if (!migration_incoming_setup(f, errp)) {
            return;
        }

        /*
         * Common migration only needs one channel, so we can start
         * right now.  Multifd needs more than one channel, we wait.
         */
        start_migration = !migrate_use_multifd();
    } else {
        /* Multiple connections */
        assert(migrate_use_multifd());
        start_migration = multifd_recv_new_channel(ioc, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if (start_migration) {
        /* If it's a recovery, we're done */
        if (postcopy_try_recover()) {
            return;
        }
        migration_incoming_process();
    }
}

/**
 * @migration_has_all_channels: We have received all channels that we need
 *
 * Returns true when we have got connections to all the channels that
 * we need for migration.
 */
bool migration_has_all_channels(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    bool all_channels;

    all_channels = multifd_recv_all_channels_created();

    return all_channels && mis->from_src_file != NULL;
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  Non-0 value indicates
 * error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
                                 char *block_name)
{
    char buf[512];
    int len;
    int64_t res;

    /*
     * First, we send the header part. It contains only the len of
     * idstr, and the idstr itself.
     */
    len = strlen(block_name);
    buf[0] = len;
    memcpy(buf + 1, block_name, len);

    if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: MSG_RP_RECV_BITMAP only used for recovery",
                     __func__);
        return;
    }

    migrate_send_rp_message(mis, MIG_RP_MSG_RECV_BITMAP, len + 1, buf);

    /*
     * Next, we dump the received bitmap to the stream.
     *
     * TODO: currently we are safe since we are the only one that is
     * using the to_src_file handle (fault thread is still paused),
     * and it's ok even not taking the mutex. However the best way is
     * to take the lock before sending the message header, and release
     * the lock after sending the bitmap.
     */
    qemu_mutex_lock(&mis->rp_mutex);
    res = ramblock_recv_bitmap_send(mis->to_src_file, block_name);
    qemu_mutex_unlock(&mis->rp_mutex);

    trace_migrate_send_rp_recv_bitmap(block_name, res);
}

void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf);
}

MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL, **tail = &head;
    MigrationCapabilityStatus *caps;
    MigrationState *s = migrate_get_current();
    int i;

    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
#ifndef CONFIG_LIVE_BLOCK_MIGRATION
        if (i == MIGRATION_CAPABILITY_BLOCK) {
            continue;
        }
#endif
        caps = g_malloc0(sizeof(*caps));
        caps->capability = i;
        caps->state = s->enabled_capabilities[i];
        QAPI_LIST_APPEND(tail, caps);
    }

    return head;
}

MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    /* TODO use QAPI_CLONE() instead of duplicating it inline */
    params = g_malloc0(sizeof(*params));
    params->has_compress_level = true;
    params->compress_level = s->parameters.compress_level;
    params->has_compress_threads = true;
    params->compress_threads = s->parameters.compress_threads;
    params->has_compress_wait_thread = true;
    params->compress_wait_thread = s->parameters.compress_wait_thread;
    params->has_decompress_threads = true;
    params->decompress_threads = s->parameters.decompress_threads;
    params->has_throttle_trigger_threshold = true;
    params->throttle_trigger_threshold = s->parameters.throttle_trigger_threshold;
    params->has_cpu_throttle_initial = true;
    params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
    params->has_cpu_throttle_increment = true;
    params->cpu_throttle_increment = s->parameters.cpu_throttle_increment;
    params->has_cpu_throttle_tailslow = true;
    params->cpu_throttle_tailslow = s->parameters.cpu_throttle_tailslow;
    params->has_tls_creds = true;
    params->tls_creds = g_strdup(s->parameters.tls_creds);
    params->has_tls_hostname = true;
    params->tls_hostname = g_strdup(s->parameters.tls_hostname);
    params->has_tls_authz = true;
    params->tls_authz = g_strdup(s->parameters.tls_authz ?
                                 s->parameters.tls_authz : "");
    params->has_max_bandwidth = true;
    params->max_bandwidth = s->parameters.max_bandwidth;
    params->has_downtime_limit = true;
    params->downtime_limit = s->parameters.downtime_limit;
    params->has_x_checkpoint_delay = true;
    params->x_checkpoint_delay = s->parameters.x_checkpoint_delay;
    params->has_block_incremental = true;
    params->block_incremental = s->parameters.block_incremental;
    params->has_multifd_channels = true;
    params->multifd_channels = s->parameters.multifd_channels;
    params->has_multifd_compression = true;
    params->multifd_compression = s->parameters.multifd_compression;
    params->has_multifd_zlib_level = true;
    params->multifd_zlib_level = s->parameters.multifd_zlib_level;
    params->has_multifd_zstd_level = true;
    params->multifd_zstd_level = s->parameters.multifd_zstd_level;
#ifdef CONFIG_LINUX
    params->has_zero_copy_send = true;
    params->zero_copy_send = s->parameters.zero_copy_send;
#endif
    params->has_xbzrle_cache_size = true;
    params->xbzrle_cache_size = s->parameters.xbzrle_cache_size;
    params->has_max_postcopy_bandwidth = true;
    params->max_postcopy_bandwidth = s->parameters.max_postcopy_bandwidth;
    params->has_max_cpu_throttle = true;
    params->max_cpu_throttle = s->parameters.max_cpu_throttle;
    params->has_announce_initial = true;
    params->announce_initial = s->parameters.announce_initial;
    params->has_announce_max = true;
    params->announce_max = s->parameters.announce_max;
    params->has_announce_rounds = true;
    params->announce_rounds = s->parameters.announce_rounds;
    params->has_announce_step = true;
    params->announce_step = s->parameters.announce_step;

    if (s->parameters.has_block_bitmap_mapping) {
        params->has_block_bitmap_mapping = true;
        params->block_bitmap_mapping =
            QAPI_CLONE(BitmapMigrationNodeAliasList,
                       s->parameters.block_bitmap_mapping);
    }

    return params;
}

AnnounceParameters *migrate_announce_params(void)
{
    static AnnounceParameters ap;

    MigrationState *s = migrate_get_current();

    ap.initial = s->parameters.announce_initial;
    ap.max = s->parameters.announce_max;
    ap.rounds = s->parameters.announce_rounds;
    ap.step = s->parameters.announce_step;

    return &ap;
}

/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_COLO:
        return true;

    default:
        return false;

    }
}

bool migration_is_running(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_CANCELLING:
        return true;

    default:
        return false;

    }
}

static void populate_time_info(MigrationInfo *info, MigrationState *s)
{
    info->has_status = true;
    info->has_setup_time = true;
    info->setup_time = s->setup_time;
    if (s->state == MIGRATION_STATUS_COMPLETED) {
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
    } else {
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                           s->start_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
    }
}

static void populate_ram_info(MigrationInfo *info, MigrationState *s)
{
    size_t page_size = qemu_target_page_size();

    info->has_ram = true;
    info->ram = g_malloc0(sizeof(*info->ram));
    info->ram->transferred = ram_counters.transferred;
    info->ram->total = ram_bytes_total();
    info->ram->duplicate = ram_counters.duplicate;
    /* legacy value.  It is not used anymore */
    info->ram->skipped = 0;
    info->ram->normal = ram_counters.normal;
    info->ram->normal_bytes = ram_counters.normal * page_size;
    info->ram->mbps = s->mbps;
    info->ram->dirty_sync_count = ram_counters.dirty_sync_count;
    info->ram->postcopy_requests = ram_counters.postcopy_requests;
    info->ram->page_size = page_size;
    info->ram->multifd_bytes = ram_counters.multifd_bytes;
    info->ram->pages_per_second = s->pages_per_second;
    info->ram->precopy_bytes = ram_counters.precopy_bytes;
    info->ram->downtime_bytes = ram_counters.downtime_bytes;
    info->ram->postcopy_bytes = ram_counters.postcopy_bytes;

    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_counters.bytes;
        info->xbzrle_cache->pages = xbzrle_counters.pages;
        info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss;
        info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate;
        info->xbzrle_cache->encoding_rate = xbzrle_counters.encoding_rate;
        info->xbzrle_cache->overflow = xbzrle_counters.overflow;
    }

    if (migrate_use_compression()) {
        info->has_compression = true;
        info->compression = g_malloc0(sizeof(*info->compression));
        info->compression->pages = compression_counters.pages;
        info->compression->busy = compression_counters.busy;
        info->compression->busy_rate = compression_counters.busy_rate;
        info->compression->compressed_size =
            compression_counters.compressed_size;
        info->compression->compression_rate =
            compression_counters.compression_rate;
    }

    if (cpu_throttle_active()) {
        info->has_cpu_throttle_percentage = true;
        info->cpu_throttle_percentage = cpu_throttle_get_percentage();
    }

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        info->ram->remaining = ram_bytes_remaining();
        info->ram->dirty_pages_rate = ram_counters.dirty_pages_rate;
    }
}

static void populate_disk_info(MigrationInfo *info)
{
    if (blk_mig_active()) {
        info->has_disk = true;
        info->disk = g_malloc0(sizeof(*info->disk));
        info->disk->transferred = blk_mig_bytes_transferred();
        info->disk->remaining = blk_mig_bytes_remaining();
        info->disk->total = blk_mig_bytes_total();
    }
}

static void fill_source_migration_info(MigrationInfo *info)
{
    MigrationState *s = migrate_get_current();
    int state = qatomic_read(&s->state);
    GSList *cur_blocker = migration_blockers;

    info->blocked_reasons = NULL;

    /*
     * There are two types of reasons a migration might be blocked;
     * a) devices marked in VMState as non-migratable, and
     * b) Explicit migration blockers
     * We need to add both of them here.
     */
    qemu_savevm_non_migratable_list(&info->blocked_reasons);

    while (cur_blocker) {
        QAPI_LIST_PREPEND(info->blocked_reasons,
                          g_strdup(error_get_pretty(cur_blocker->data)));
        cur_blocker = g_slist_next(cur_blocker);
    }
    info->has_blocked_reasons = info->blocked_reasons != NULL;

    switch (state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        /* do not overwrite destination migration status */
        return;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        /* TODO add some postcopy stats */
        populate_time_info(info, s);
        populate_ram_info(info, s);
        populate_disk_info(info);
        populate_vfio_info(info);
        break;
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        /* TODO: display COLO specific information (checkpoint info etc.) */
        break;
    case MIGRATION_STATUS_COMPLETED:
        populate_time_info(info, s);
        populate_ram_info(info, s);
        populate_vfio_info(info);
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        if (s->error) {
            info->has_error_desc = true;
            info->error_desc = g_strdup(error_get_pretty(s->error));
        }
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_WAIT_UNPLUG:
        info->has_status = true;
        break;
    }
    info->status = state;
}

typedef enum WriteTrackingSupport {
    WT_SUPPORT_UNKNOWN = 0,
    WT_SUPPORT_ABSENT,
    WT_SUPPORT_AVAILABLE,
    WT_SUPPORT_COMPATIBLE
} WriteTrackingSupport;

static
WriteTrackingSupport migrate_query_write_tracking(void)
{
    /* Check if kernel supports required UFFD features */
    if (!ram_write_tracking_available()) {
        return WT_SUPPORT_ABSENT;
    }
    /*
     * Check if current memory configuration is
     * compatible with required UFFD features.
     */
    if (!ram_write_tracking_compatible()) {
        return WT_SUPPORT_AVAILABLE;
    }

    return WT_SUPPORT_COMPATIBLE;
}

/**
 * @migration_caps_check - check capability validity
 *
 * @cap_list: old capability list, array of bool
 * @params: new capabilities to be applied soon
 * @errp: set *errp if the check failed, with reason
 *
 * Returns true if check passed, otherwise false.
 */
static bool migrate_caps_check(bool *cap_list,
                               MigrationCapabilityStatusList *params,
                               Error **errp)
{
    MigrationCapabilityStatusList *cap;
    bool old_postcopy_cap;
    MigrationIncomingState *mis = migration_incoming_get_current();

    old_postcopy_cap = cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM];

    for (cap = params; cap; cap = cap->next) {
        cap_list[cap->value->capability] = cap->value->state;
    }

#ifndef CONFIG_LIVE_BLOCK_MIGRATION
    if (cap_list[MIGRATION_CAPABILITY_BLOCK]) {
        error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) "
                   "block migration");
        error_append_hint(errp, "Use drive_mirror+NBD instead.\n");
        return false;
    }
#endif

#ifndef CONFIG_REPLICATION
    if (cap_list[MIGRATION_CAPABILITY_X_COLO]) {
        error_setg(errp, "QEMU compiled without replication module"
                   " can't enable COLO");
        error_append_hint(errp, "Please enable replication before COLO.\n");
        return false;
    }
#endif

    if (cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
        /* This check is reasonably expensive, so only when it's being
         * set the first time, also it's only the destination that needs
         * special support.
         */
        if (!old_postcopy_cap && runstate_check(RUN_STATE_INMIGRATE) &&
            !postcopy_ram_supported_by_host(mis)) {
            /* postcopy_ram_supported_by_host will have emitted a more
             * detailed message
             */
            error_setg(errp, "Postcopy is not supported");
            return false;
        }

        if (cap_list[MIGRATION_CAPABILITY_X_IGNORE_SHARED]) {
            error_setg(errp, "Postcopy is not compatible with ignore-shared");
            return false;
        }
    }

    if (cap_list[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]) {
        WriteTrackingSupport wt_support;
        int idx;
        /*
         * Check if 'background-snapshot' capability is supported by
         * host kernel and compatible with guest memory configuration.
         */
        wt_support = migrate_query_write_tracking();
        if (wt_support < WT_SUPPORT_AVAILABLE) {
            error_setg(errp, "Background-snapshot is not supported by host kernel");
            return false;
        }
        if (wt_support < WT_SUPPORT_COMPATIBLE) {
            error_setg(errp, "Background-snapshot is not compatible "
                       "with guest memory configuration");
            return false;
        }

        /*
         * Check if there are any migration capabilities
         * incompatible with 'background-snapshot'.
         */
        for (idx = 0; idx < check_caps_background_snapshot.size; idx++) {
            int incomp_cap = check_caps_background_snapshot.caps[idx];
            if (cap_list[incomp_cap]) {
                error_setg(errp,
                           "Background-snapshot is not compatible with %s",
                           MigrationCapability_str(incomp_cap));
                return false;
            }
        }
    }

    /* incoming side only */
    if (runstate_check(RUN_STATE_INMIGRATE) &&
        !migrate_multi_channels_is_allowed() &&
        cap_list[MIGRATION_CAPABILITY_MULTIFD]) {
        error_setg(errp, "multifd is not supported by current protocol");
        return false;
    }

    return true;
}

static void fill_destination_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->socket_address_list) {
        info->has_socket_address = true;
        info->socket_address =
            QAPI_CLONE(SocketAddressList, mis->socket_address_list);
    }

    switch (mis->state) {
    case MIGRATION_STATUS_NONE:
        return;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_FAILED:
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_COMPLETED:
        info->has_status = true;
        fill_destination_postcopy_migration_info(info);
        break;
    }
    info->status = mis->state;
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));

    fill_destination_migration_info(info);
    fill_source_migration_info(info);

    return info;
}

void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;
    bool cap_list[MIGRATION_CAPABILITY__MAX];

    if (migration_is_running(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    memcpy(cap_list, s->enabled_capabilities, sizeof(cap_list));
    if (!migrate_caps_check(cap_list, params, errp)) {
        return;
    }

    for (cap = params; cap; cap = cap->next) {
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }
}

/*
 * Check whether the parameters are valid. Error will be put into errp
 * (if provided). Return true if valid, otherwise false.
 */
static bool migrate_params_check(MigrationParameters *params, Error **errp)
{
    if (params->has_compress_level &&
        (params->compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "a value between 0 and 9");
        return false;
    }

    if (params->has_compress_threads && (params->compress_threads < 1)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "compress_threads",
                   "a value between 1 and 255");
        return false;
    }

    if (params->has_decompress_threads && (params->decompress_threads < 1)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "a value between 1 and 255");
        return false;
    }

    if (params->has_throttle_trigger_threshold &&
        (params->throttle_trigger_threshold < 1 ||
         params->throttle_trigger_threshold > 100)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "throttle_trigger_threshold",
                   "an integer in the range of 1 to 100");
        return false;
    }

    if (params->has_cpu_throttle_initial &&
        (params->cpu_throttle_initial < 1 ||
         params->cpu_throttle_initial > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_initial",
                   "an integer in the range of 1 to 99");
        return false;
    }

    if (params->has_cpu_throttle_increment &&
        (params->cpu_throttle_increment < 1 ||
         params->cpu_throttle_increment > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_increment",
                   "an integer in the range of 1 to 99");
        return false;
    }

    if (params->has_max_bandwidth && (params->max_bandwidth > SIZE_MAX)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "max_bandwidth",
                   "an integer in the range of 0 to "stringify(SIZE_MAX)
                   " bytes/second");
        return false;
    }

    if (params->has_downtime_limit &&
        (params->downtime_limit > MAX_MIGRATE_DOWNTIME)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "downtime_limit",
                   "an integer in the range of 0 to "
                    stringify(MAX_MIGRATE_DOWNTIME)" ms");
        return false;
    }

    /* x_checkpoint_delay is now always positive */

    if (params->has_multifd_channels && (params->multifd_channels < 1)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "multifd_channels",
                   "a value between 1 and 255");
        return false;
    }

    if (params->has_multifd_zlib_level &&
        (params->multifd_zlib_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zlib_level",
                   "a value between 0 and 9");
        return false;
    }

    if (params->has_multifd_zstd_level &&
        (params->multifd_zstd_level > 20)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zstd_level",
                   "a value between 0 and 20");
        return false;
    }

    if (params->has_xbzrle_cache_size &&
        (params->xbzrle_cache_size < qemu_target_page_size() ||
         !is_power_of_2(params->xbzrle_cache_size))) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "xbzrle_cache_size",
                   "a power of two no less than the target page size");
        return false;
    }

    if (params->has_max_cpu_throttle &&
        (params->max_cpu_throttle < params->cpu_throttle_initial ||
         params->max_cpu_throttle > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "max_cpu_throttle",
                   "an integer in the range of cpu_throttle_initial to 99");
        return false;
    }

    if (params->has_announce_initial &&
        params->announce_initial > 100000) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "announce_initial",
                   "a value between 0 and 100000");
        return false;
    }
    if (params->has_announce_max &&
        params->announce_max > 100000) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "announce_max",
                   "a value between 0 and 100000");
        return false;
    }
    if (params->has_announce_rounds &&
        params->announce_rounds > 1000) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "announce_rounds",
                   "a value between 0 and 1000");
        return false;
    }
    if (params->has_announce_step &&
        (params->announce_step < 1 ||
        params->announce_step > 10000)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "announce_step",
                   "a value between 0 and 10000");
        return false;
    }

    if (params->has_block_bitmap_mapping &&
        !check_dirty_bitmap_mig_alias_map(params->block_bitmap_mapping, errp)) {
        error_prepend(errp, "Invalid mapping given for block-bitmap-mapping: ");
        return false;
    }

    return true;
}

static void migrate_params_test_apply(MigrateSetParameters *params,
                                      MigrationParameters *dest)
{
    *dest = migrate_get_current()->parameters;

    /* TODO use QAPI_CLONE() instead of duplicating it inline */

    if (params->has_compress_level) {
        dest->compress_level = params->compress_level;
    }

    if (params->has_compress_threads) {
        dest->compress_threads = params->compress_threads;
    }

    if (params->has_compress_wait_thread) {
        dest->compress_wait_thread = params->compress_wait_thread;
    }

    if (params->has_decompress_threads) {
        dest->decompress_threads = params->decompress_threads;
    }

    if (params->has_throttle_trigger_threshold) {
        dest->throttle_trigger_threshold = params->throttle_trigger_threshold;
    }

    if (params->has_cpu_throttle_initial) {
        dest->cpu_throttle_initial = params->cpu_throttle_initial;
    }

    if (params->has_cpu_throttle_increment) {
        dest->cpu_throttle_increment = params->cpu_throttle_increment;
    }

    if (params->has_cpu_throttle_tailslow) {
        dest->cpu_throttle_tailslow = params->cpu_throttle_tailslow;
    }

    if (params->has_tls_creds) {
        assert(params->tls_creds->type == QTYPE_QSTRING);
        dest->tls_creds = params->tls_creds->u.s;
    }

    if (params->has_tls_hostname) {
        assert(params->tls_hostname->type == QTYPE_QSTRING);
        dest->tls_hostname = params->tls_hostname->u.s;
    }

    if (params->has_max_bandwidth) {
        dest->max_bandwidth = params->max_bandwidth;
    }

    if (params->has_downtime_limit) {
        dest->downtime_limit = params->downtime_limit;
    }

    if (params->has_x_checkpoint_delay) {
        dest->x_checkpoint_delay = params->x_checkpoint_delay;
    }

    if (params->has_block_incremental) {
        dest->block_incremental = params->block_incremental;
    }
    if (params->has_multifd_channels) {
        dest->multifd_channels = params->multifd_channels;
    }
    if (params->has_multifd_compression) {
        dest->multifd_compression = params->multifd_compression;
    }
#ifdef CONFIG_LINUX
    if (params->has_zero_copy_send) {
        dest->zero_copy_send = params->zero_copy_send;
    }
#endif
    if (params->has_xbzrle_cache_size) {
        dest->xbzrle_cache_size = params->xbzrle_cache_size;
    }
    if (params->has_max_postcopy_bandwidth) {
        dest->max_postcopy_bandwidth = params->max_postcopy_bandwidth;
    }
    if (params->has_max_cpu_throttle) {
        dest->max_cpu_throttle = params->max_cpu_throttle;
    }
    if (params->has_announce_initial) {
        dest->announce_initial = params->announce_initial;
    }
    if (params->has_announce_max) {
        dest->announce_max = params->announce_max;
    }
    if (params->has_announce_rounds) {
        dest->announce_rounds = params->announce_rounds;
    }
    if (params->has_announce_step) {
        dest->announce_step = params->announce_step;
    }

    if (params->has_block_bitmap_mapping) {
        dest->has_block_bitmap_mapping = true;
        dest->block_bitmap_mapping = params->block_bitmap_mapping;
    }
}

static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
{
    MigrationState *s = migrate_get_current();

    /* TODO use QAPI_CLONE() instead of duplicating it inline */

    if (params->has_compress_level) {
        s->parameters.compress_level = params->compress_level;
    }

    if (params->has_compress_threads) {
        s->parameters.compress_threads = params->compress_threads;
    }

    if (params->has_compress_wait_thread) {
        s->parameters.compress_wait_thread = params->compress_wait_thread;
    }

    if (params->has_decompress_threads) {
        s->parameters.decompress_threads = params->decompress_threads;
    }

    if (params->has_throttle_trigger_threshold) {
        s->parameters.throttle_trigger_threshold = params->throttle_trigger_threshold;
    }

    if (params->has_cpu_throttle_initial) {
        s->parameters.cpu_throttle_initial = params->cpu_throttle_initial;
    }

    if (params->has_cpu_throttle_increment) {
        s->parameters.cpu_throttle_increment = params->cpu_throttle_increment;
    }

    if (params->has_cpu_throttle_tailslow) {
        s->parameters.cpu_throttle_tailslow = params->cpu_throttle_tailslow;
    }

    if (params->has_tls_creds) {
        g_free(s->parameters.tls_creds);
        assert(params->tls_creds->type == QTYPE_QSTRING);
        s->parameters.tls_creds = g_strdup(params->tls_creds->u.s);
    }

    if (params->has_tls_hostname) {
        g_free(s->parameters.tls_hostname);
        assert(params->tls_hostname->type == QTYPE_QSTRING);
        s->parameters.tls_hostname = g_strdup(params->tls_hostname->u.s);
    }

    if (params->has_tls_authz) {
        g_free(s->parameters.tls_authz);
        assert(params->tls_authz->type == QTYPE_QSTRING);
        s->parameters.tls_authz = g_strdup(params->tls_authz->u.s);
    }

    if (params->has_max_bandwidth) {
        s->parameters.max_bandwidth = params->max_bandwidth;
        if (s->to_dst_file && !migration_in_postcopy()) {
            qemu_file_set_rate_limit(s->to_dst_file,
                                s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
        }
    }

    if (params->has_downtime_limit) {
        s->parameters.downtime_limit = params->downtime_limit;
    }

    if (params->has_x_checkpoint_delay) {
        s->parameters.x_checkpoint_delay = params->x_checkpoint_delay;
        if (migration_in_colo_state()) {
            colo_checkpoint_notify(s);
        }
    }

    if (params->has_block_incremental) {
        s->parameters.block_incremental = params->block_incremental;
    }
    if (params->has_multifd_channels) {
        s->parameters.multifd_channels = params->multifd_channels;
    }
    if (params->has_multifd_compression) {
        s->parameters.multifd_compression = params->multifd_compression;
    }
#ifdef CONFIG_LINUX
    if (params->has_zero_copy_send) {
        s->parameters.zero_copy_send = params->zero_copy_send;
    }
#endif
    if (params->has_xbzrle_cache_size) {
        s->parameters.xbzrle_cache_size = params->xbzrle_cache_size;
        xbzrle_cache_resize(params->xbzrle_cache_size, errp);
    }
    if (params->has_max_postcopy_bandwidth) {
        s->parameters.max_postcopy_bandwidth = params->max_postcopy_bandwidth;
        if (s->to_dst_file && migration_in_postcopy()) {
            qemu_file_set_rate_limit(s->to_dst_file,
                    s->parameters.max_postcopy_bandwidth / XFER_LIMIT_RATIO);
        }
    }
    if (params->has_max_cpu_throttle) {
        s->parameters.max_cpu_throttle = params->max_cpu_throttle;
    }
    if (params->has_announce_initial) {
        s->parameters.announce_initial = params->announce_initial;
    }
    if (params->has_announce_max) {
        s->parameters.announce_max = params->announce_max;
    }
    if (params->has_announce_rounds) {
        s->parameters.announce_rounds = params->announce_rounds;
    }
    if (params->has_announce_step) {
        s->parameters.announce_step = params->announce_step;
    }

    if (params->has_block_bitmap_mapping) {
        qapi_free_BitmapMigrationNodeAliasList(
            s->parameters.block_bitmap_mapping);

        s->parameters.has_block_bitmap_mapping = true;
        s->parameters.block_bitmap_mapping =
            QAPI_CLONE(BitmapMigrationNodeAliasList,
                       params->block_bitmap_mapping);
    }
}

void qmp_migrate_set_parameters(MigrateSetParameters *params, Error **errp)
{
    MigrationParameters tmp;

    /* TODO Rewrite "" to null instead */
    if (params->has_tls_creds
        && params->tls_creds->type == QTYPE_QNULL) {
        qobject_unref(params->tls_creds->u.n);
        params->tls_creds->type = QTYPE_QSTRING;
        params->tls_creds->u.s = strdup("");
    }
    /* TODO Rewrite "" to null instead */
    if (params->has_tls_hostname
        && params->tls_hostname->type == QTYPE_QNULL) {
        qobject_unref(params->tls_hostname->u.n);
        params->tls_hostname->type = QTYPE_QSTRING;
        params->tls_hostname->u.s = strdup("");
    }

    migrate_params_test_apply(params, &tmp);

    if (!migrate_params_check(&tmp, errp)) {
        /* Invalid parameter */
        return;
    }

    migrate_params_apply(params, errp);
}


void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                         " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                         " started");
        return;
    }
    /*
     * we don't error if migration has finished since that would be racy
     * with issuing this command.
     */
1782 */ 1783 qatomic_set(&s->start_postcopy, true); 1784 } 1785 1786 /* shared migration helpers */ 1787 1788 void migrate_set_state(int *state, int old_state, int new_state) 1789 { 1790 assert(new_state < MIGRATION_STATUS__MAX); 1791 if (qatomic_cmpxchg(state, old_state, new_state) == old_state) { 1792 trace_migrate_set_state(MigrationStatus_str(new_state)); 1793 migrate_generate_event(new_state); 1794 } 1795 } 1796 1797 static MigrationCapabilityStatus *migrate_cap_add(MigrationCapability index, 1798 bool state) 1799 { 1800 MigrationCapabilityStatus *cap; 1801 1802 cap = g_new0(MigrationCapabilityStatus, 1); 1803 cap->capability = index; 1804 cap->state = state; 1805 1806 return cap; 1807 } 1808 1809 void migrate_set_block_enabled(bool value, Error **errp) 1810 { 1811 MigrationCapabilityStatusList *cap = NULL; 1812 1813 QAPI_LIST_PREPEND(cap, migrate_cap_add(MIGRATION_CAPABILITY_BLOCK, value)); 1814 qmp_migrate_set_capabilities(cap, errp); 1815 qapi_free_MigrationCapabilityStatusList(cap); 1816 } 1817 1818 static void migrate_set_block_incremental(MigrationState *s, bool value) 1819 { 1820 s->parameters.block_incremental = value; 1821 } 1822 1823 static void block_cleanup_parameters(MigrationState *s) 1824 { 1825 if (s->must_remove_block_options) { 1826 /* setting to false can never fail */ 1827 migrate_set_block_enabled(false, &error_abort); 1828 migrate_set_block_incremental(s, false); 1829 s->must_remove_block_options = false; 1830 } 1831 } 1832 1833 static void migrate_fd_cleanup(MigrationState *s) 1834 { 1835 qemu_bh_delete(s->cleanup_bh); 1836 s->cleanup_bh = NULL; 1837 1838 g_free(s->hostname); 1839 s->hostname = NULL; 1840 1841 qemu_savevm_state_cleanup(); 1842 1843 if (s->to_dst_file) { 1844 QEMUFile *tmp; 1845 1846 trace_migrate_fd_cleanup(); 1847 qemu_mutex_unlock_iothread(); 1848 if (s->migration_thread_running) { 1849 qemu_thread_join(&s->thread); 1850 s->migration_thread_running = false; 1851 } 1852 qemu_mutex_lock_iothread(); 1853 1854 multifd_save_cleanup(); 1855 qemu_mutex_lock(&s->qemu_file_lock); 1856 tmp = s->to_dst_file; 1857 s->to_dst_file = NULL; 1858 qemu_mutex_unlock(&s->qemu_file_lock); 1859 /* 1860 * Close the file handle without the lock to make sure the 1861 * critical section won't block for long. 1862 */ 1863 migration_ioc_unregister_yank_from_file(tmp); 1864 qemu_fclose(tmp); 1865 } 1866 1867 assert(!migration_is_active(s)); 1868 1869 if (s->state == MIGRATION_STATUS_CANCELLING) { 1870 migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING, 1871 MIGRATION_STATUS_CANCELLED); 1872 } 1873 1874 if (s->error) { 1875 /* It is used on info migrate. 
We can't free it */ 1876 error_report_err(error_copy(s->error)); 1877 } 1878 notifier_list_notify(&migration_state_notifiers, s); 1879 block_cleanup_parameters(s); 1880 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 1881 } 1882 1883 static void migrate_fd_cleanup_schedule(MigrationState *s) 1884 { 1885 /* 1886 * Ref the state for bh, because it may be called when 1887 * there're already no other refs 1888 */ 1889 object_ref(OBJECT(s)); 1890 qemu_bh_schedule(s->cleanup_bh); 1891 } 1892 1893 static void migrate_fd_cleanup_bh(void *opaque) 1894 { 1895 MigrationState *s = opaque; 1896 migrate_fd_cleanup(s); 1897 object_unref(OBJECT(s)); 1898 } 1899 1900 void migrate_set_error(MigrationState *s, const Error *error) 1901 { 1902 QEMU_LOCK_GUARD(&s->error_mutex); 1903 if (!s->error) { 1904 s->error = error_copy(error); 1905 } 1906 } 1907 1908 static void migrate_error_free(MigrationState *s) 1909 { 1910 QEMU_LOCK_GUARD(&s->error_mutex); 1911 if (s->error) { 1912 error_free(s->error); 1913 s->error = NULL; 1914 } 1915 } 1916 1917 void migrate_fd_error(MigrationState *s, const Error *error) 1918 { 1919 trace_migrate_fd_error(error_get_pretty(error)); 1920 assert(s->to_dst_file == NULL); 1921 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, 1922 MIGRATION_STATUS_FAILED); 1923 migrate_set_error(s, error); 1924 } 1925 1926 static void migrate_fd_cancel(MigrationState *s) 1927 { 1928 int old_state ; 1929 QEMUFile *f = migrate_get_current()->to_dst_file; 1930 trace_migrate_fd_cancel(); 1931 1932 WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) { 1933 if (s->rp_state.from_dst_file) { 1934 /* shutdown the rp socket, so causing the rp thread to shutdown */ 1935 qemu_file_shutdown(s->rp_state.from_dst_file); 1936 } 1937 } 1938 1939 do { 1940 old_state = s->state; 1941 if (!migration_is_running(old_state)) { 1942 break; 1943 } 1944 /* If the migration is paused, kick it out of the pause */ 1945 if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) { 1946 qemu_sem_post(&s->pause_sem); 1947 } 1948 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING); 1949 } while (s->state != MIGRATION_STATUS_CANCELLING); 1950 1951 /* 1952 * If we're unlucky the migration code might be stuck somewhere in a 1953 * send/write while the network has failed and is waiting to timeout; 1954 * if we've got shutdown(2) available then we can force it to quit. 1955 * The outgoing qemu file gets closed in migrate_fd_cleanup that is 1956 * called in a bh, so there is no race against this cancel. 
1957 */ 1958 if (s->state == MIGRATION_STATUS_CANCELLING && f) { 1959 qemu_file_shutdown(f); 1960 } 1961 if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) { 1962 Error *local_err = NULL; 1963 1964 bdrv_activate_all(&local_err); 1965 if (local_err) { 1966 error_report_err(local_err); 1967 } else { 1968 s->block_inactive = false; 1969 } 1970 } 1971 } 1972 1973 void add_migration_state_change_notifier(Notifier *notify) 1974 { 1975 notifier_list_add(&migration_state_notifiers, notify); 1976 } 1977 1978 void remove_migration_state_change_notifier(Notifier *notify) 1979 { 1980 notifier_remove(notify); 1981 } 1982 1983 bool migration_in_setup(MigrationState *s) 1984 { 1985 return s->state == MIGRATION_STATUS_SETUP; 1986 } 1987 1988 bool migration_has_finished(MigrationState *s) 1989 { 1990 return s->state == MIGRATION_STATUS_COMPLETED; 1991 } 1992 1993 bool migration_has_failed(MigrationState *s) 1994 { 1995 return (s->state == MIGRATION_STATUS_CANCELLED || 1996 s->state == MIGRATION_STATUS_FAILED); 1997 } 1998 1999 bool migration_in_postcopy(void) 2000 { 2001 MigrationState *s = migrate_get_current(); 2002 2003 switch (s->state) { 2004 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 2005 case MIGRATION_STATUS_POSTCOPY_PAUSED: 2006 case MIGRATION_STATUS_POSTCOPY_RECOVER: 2007 return true; 2008 default: 2009 return false; 2010 } 2011 } 2012 2013 bool migration_in_postcopy_after_devices(MigrationState *s) 2014 { 2015 return migration_in_postcopy() && s->postcopy_after_devices; 2016 } 2017 2018 bool migration_in_incoming_postcopy(void) 2019 { 2020 PostcopyState ps = postcopy_state_get(); 2021 2022 return ps >= POSTCOPY_INCOMING_DISCARD && ps < POSTCOPY_INCOMING_END; 2023 } 2024 2025 bool migration_in_bg_snapshot(void) 2026 { 2027 MigrationState *s = migrate_get_current(); 2028 2029 return migrate_background_snapshot() && 2030 migration_is_setup_or_active(s->state); 2031 } 2032 2033 bool migration_is_idle(void) 2034 { 2035 MigrationState *s = current_migration; 2036 2037 if (!s) { 2038 return true; 2039 } 2040 2041 switch (s->state) { 2042 case MIGRATION_STATUS_NONE: 2043 case MIGRATION_STATUS_CANCELLED: 2044 case MIGRATION_STATUS_COMPLETED: 2045 case MIGRATION_STATUS_FAILED: 2046 return true; 2047 case MIGRATION_STATUS_SETUP: 2048 case MIGRATION_STATUS_CANCELLING: 2049 case MIGRATION_STATUS_ACTIVE: 2050 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 2051 case MIGRATION_STATUS_COLO: 2052 case MIGRATION_STATUS_PRE_SWITCHOVER: 2053 case MIGRATION_STATUS_DEVICE: 2054 case MIGRATION_STATUS_WAIT_UNPLUG: 2055 return false; 2056 case MIGRATION_STATUS__MAX: 2057 g_assert_not_reached(); 2058 } 2059 2060 return false; 2061 } 2062 2063 bool migration_is_active(MigrationState *s) 2064 { 2065 return (s->state == MIGRATION_STATUS_ACTIVE || 2066 s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE); 2067 } 2068 2069 void migrate_init(MigrationState *s) 2070 { 2071 /* 2072 * Reinitialise all migration state, except 2073 * parameters/capabilities that the user set, and 2074 * locks. 
2075 */ 2076 s->cleanup_bh = 0; 2077 s->vm_start_bh = 0; 2078 s->to_dst_file = NULL; 2079 s->state = MIGRATION_STATUS_NONE; 2080 s->rp_state.from_dst_file = NULL; 2081 s->rp_state.error = false; 2082 s->mbps = 0.0; 2083 s->pages_per_second = 0.0; 2084 s->downtime = 0; 2085 s->expected_downtime = 0; 2086 s->setup_time = 0; 2087 s->start_postcopy = false; 2088 s->postcopy_after_devices = false; 2089 s->migration_thread_running = false; 2090 error_free(s->error); 2091 s->error = NULL; 2092 s->hostname = NULL; 2093 2094 migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP); 2095 2096 s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 2097 s->total_time = 0; 2098 s->vm_was_running = false; 2099 s->iteration_initial_bytes = 0; 2100 s->threshold_size = 0; 2101 } 2102 2103 int migrate_add_blocker_internal(Error *reason, Error **errp) 2104 { 2105 /* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. */ 2106 if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) { 2107 error_propagate_prepend(errp, error_copy(reason), 2108 "disallowing migration blocker " 2109 "(migration/snapshot in progress) for: "); 2110 return -EBUSY; 2111 } 2112 2113 migration_blockers = g_slist_prepend(migration_blockers, reason); 2114 return 0; 2115 } 2116 2117 int migrate_add_blocker(Error *reason, Error **errp) 2118 { 2119 if (only_migratable) { 2120 error_propagate_prepend(errp, error_copy(reason), 2121 "disallowing migration blocker " 2122 "(--only-migratable) for: "); 2123 return -EACCES; 2124 } 2125 2126 return migrate_add_blocker_internal(reason, errp); 2127 } 2128 2129 void migrate_del_blocker(Error *reason) 2130 { 2131 migration_blockers = g_slist_remove(migration_blockers, reason); 2132 } 2133 2134 void qmp_migrate_incoming(const char *uri, Error **errp) 2135 { 2136 Error *local_err = NULL; 2137 static bool once = true; 2138 2139 if (!once) { 2140 error_setg(errp, "The incoming migration has already been started"); 2141 return; 2142 } 2143 if (!runstate_check(RUN_STATE_INMIGRATE)) { 2144 error_setg(errp, "'-incoming' was not specified on the command line"); 2145 return; 2146 } 2147 2148 if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) { 2149 return; 2150 } 2151 2152 qemu_start_incoming_migration(uri, &local_err); 2153 2154 if (local_err) { 2155 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 2156 error_propagate(errp, local_err); 2157 return; 2158 } 2159 2160 once = false; 2161 } 2162 2163 void qmp_migrate_recover(const char *uri, Error **errp) 2164 { 2165 MigrationIncomingState *mis = migration_incoming_get_current(); 2166 2167 /* 2168 * Don't even bother to use ERRP_GUARD() as it _must_ always be set by 2169 * callers (no one should ignore a recover failure); if there is, it's a 2170 * programming error. 2171 */ 2172 assert(errp); 2173 2174 if (mis->state != MIGRATION_STATUS_POSTCOPY_PAUSED) { 2175 error_setg(errp, "Migrate recover can only be run " 2176 "when postcopy is paused."); 2177 return; 2178 } 2179 2180 /* If there's an existing transport, release it */ 2181 migration_incoming_transport_cleanup(mis); 2182 2183 /* 2184 * Note that this call will never start a real migration; it will 2185 * only re-setup the migration stream and poke existing migration 2186 * to continue using that newly established channel. 
2187  */
2188     qemu_start_incoming_migration(uri, errp);
2189 }
2190 
2191 void qmp_migrate_pause(Error **errp)
2192 {
2193     MigrationState *ms = migrate_get_current();
2194     MigrationIncomingState *mis = migration_incoming_get_current();
2195     int ret;
2196 
2197     if (ms->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
2198         /* Source side, during postcopy */
2199         qemu_mutex_lock(&ms->qemu_file_lock);
2200         ret = qemu_file_shutdown(ms->to_dst_file);
2201         qemu_mutex_unlock(&ms->qemu_file_lock);
2202         if (ret) {
2203             error_setg(errp, "Failed to pause source migration");
2204         }
2205         return;
2206     }
2207 
2208     if (mis->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
2209         ret = qemu_file_shutdown(mis->from_src_file);
2210         if (ret) {
2211             error_setg(errp, "Failed to pause destination migration");
2212         }
2213         return;
2214     }
2215 
2216     error_setg(errp, "migrate-pause is currently only supported "
2217                "during postcopy-active state");
2218 }
2219 
2220 bool migration_is_blocked(Error **errp)
2221 {
2222     if (qemu_savevm_state_blocked(errp)) {
2223         return true;
2224     }
2225 
2226     if (migration_blockers) {
2227         error_propagate(errp, error_copy(migration_blockers->data));
2228         return true;
2229     }
2230 
2231     return false;
2232 }
2233 
2234 /* Returns true if we should continue the migration, or false if an error was detected */
2235 static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc,
2236                             bool resume, Error **errp)
2237 {
2238     Error *local_err = NULL;
2239 
2240     if (resume) {
2241         if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
2242             error_setg(errp, "Cannot resume if there is no "
2243                        "paused migration");
2244             return false;
2245         }
2246 
2247         /*
2248          * Postcopy recovery won't work well with release-ram
2249          * capability since release-ram will drop the page buffer as
2250          * soon as the page is put into the send buffer. So if a
2251          * network failure happens, any page buffers that have
2252          * not yet reached the destination VM but have already been
2253          * sent from the source VM will be lost forever. Let's refuse
2254          * to let the client resume such a postcopy migration.
2255          * Luckily release-ram was designed to only be used when src
2256          * and destination VMs are on the same host, so it should be
2257          * fine.
2258 */ 2259 if (migrate_release_ram()) { 2260 error_setg(errp, "Postcopy recovery cannot work " 2261 "when release-ram capability is set"); 2262 return false; 2263 } 2264 2265 /* This is a resume, skip init status */ 2266 return true; 2267 } 2268 2269 if (migration_is_running(s->state)) { 2270 error_setg(errp, QERR_MIGRATION_ACTIVE); 2271 return false; 2272 } 2273 2274 if (runstate_check(RUN_STATE_INMIGRATE)) { 2275 error_setg(errp, "Guest is waiting for an incoming migration"); 2276 return false; 2277 } 2278 2279 if (runstate_check(RUN_STATE_POSTMIGRATE)) { 2280 error_setg(errp, "Can't migrate the vm that was paused due to " 2281 "previous migration"); 2282 return false; 2283 } 2284 2285 if (migration_is_blocked(errp)) { 2286 return false; 2287 } 2288 2289 if (blk || blk_inc) { 2290 if (migrate_colo_enabled()) { 2291 error_setg(errp, "No disk migration is required in COLO mode"); 2292 return false; 2293 } 2294 if (migrate_use_block() || migrate_use_block_incremental()) { 2295 error_setg(errp, "Command options are incompatible with " 2296 "current migration capabilities"); 2297 return false; 2298 } 2299 migrate_set_block_enabled(true, &local_err); 2300 if (local_err) { 2301 error_propagate(errp, local_err); 2302 return false; 2303 } 2304 s->must_remove_block_options = true; 2305 } 2306 2307 if (blk_inc) { 2308 migrate_set_block_incremental(s, true); 2309 } 2310 2311 migrate_init(s); 2312 /* 2313 * set ram_counters compression_counters memory to zero for a 2314 * new migration 2315 */ 2316 memset(&ram_counters, 0, sizeof(ram_counters)); 2317 memset(&compression_counters, 0, sizeof(compression_counters)); 2318 2319 return true; 2320 } 2321 2322 void qmp_migrate(const char *uri, bool has_blk, bool blk, 2323 bool has_inc, bool inc, bool has_detach, bool detach, 2324 bool has_resume, bool resume, Error **errp) 2325 { 2326 Error *local_err = NULL; 2327 MigrationState *s = migrate_get_current(); 2328 const char *p = NULL; 2329 2330 if (!migrate_prepare(s, has_blk && blk, has_inc && inc, 2331 has_resume && resume, errp)) { 2332 /* Error detected, put into errp */ 2333 return; 2334 } 2335 2336 if (!(has_resume && resume)) { 2337 if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) { 2338 return; 2339 } 2340 } 2341 2342 migrate_protocol_allow_multi_channels(false); 2343 if (strstart(uri, "tcp:", &p) || 2344 strstart(uri, "unix:", NULL) || 2345 strstart(uri, "vsock:", NULL)) { 2346 migrate_protocol_allow_multi_channels(true); 2347 socket_start_outgoing_migration(s, p ? 
p : uri, &local_err); 2348 #ifdef CONFIG_RDMA 2349 } else if (strstart(uri, "rdma:", &p)) { 2350 rdma_start_outgoing_migration(s, p, &local_err); 2351 #endif 2352 } else if (strstart(uri, "exec:", &p)) { 2353 exec_start_outgoing_migration(s, p, &local_err); 2354 } else if (strstart(uri, "fd:", &p)) { 2355 fd_start_outgoing_migration(s, p, &local_err); 2356 } else { 2357 if (!(has_resume && resume)) { 2358 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 2359 } 2360 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri", 2361 "a valid migration protocol"); 2362 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, 2363 MIGRATION_STATUS_FAILED); 2364 block_cleanup_parameters(s); 2365 return; 2366 } 2367 2368 if (local_err) { 2369 if (!(has_resume && resume)) { 2370 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 2371 } 2372 migrate_fd_error(s, local_err); 2373 error_propagate(errp, local_err); 2374 return; 2375 } 2376 } 2377 2378 void qmp_migrate_cancel(Error **errp) 2379 { 2380 migration_cancel(NULL); 2381 } 2382 2383 void qmp_migrate_continue(MigrationStatus state, Error **errp) 2384 { 2385 MigrationState *s = migrate_get_current(); 2386 if (s->state != state) { 2387 error_setg(errp, "Migration not in expected state: %s", 2388 MigrationStatus_str(s->state)); 2389 return; 2390 } 2391 qemu_sem_post(&s->pause_sem); 2392 } 2393 2394 bool migrate_release_ram(void) 2395 { 2396 MigrationState *s; 2397 2398 s = migrate_get_current(); 2399 2400 return s->enabled_capabilities[MIGRATION_CAPABILITY_RELEASE_RAM]; 2401 } 2402 2403 bool migrate_postcopy_ram(void) 2404 { 2405 MigrationState *s; 2406 2407 s = migrate_get_current(); 2408 2409 return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM]; 2410 } 2411 2412 bool migrate_postcopy(void) 2413 { 2414 return migrate_postcopy_ram() || migrate_dirty_bitmaps(); 2415 } 2416 2417 bool migrate_auto_converge(void) 2418 { 2419 MigrationState *s; 2420 2421 s = migrate_get_current(); 2422 2423 return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE]; 2424 } 2425 2426 bool migrate_zero_blocks(void) 2427 { 2428 MigrationState *s; 2429 2430 s = migrate_get_current(); 2431 2432 return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS]; 2433 } 2434 2435 bool migrate_postcopy_blocktime(void) 2436 { 2437 MigrationState *s; 2438 2439 s = migrate_get_current(); 2440 2441 return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME]; 2442 } 2443 2444 bool migrate_use_compression(void) 2445 { 2446 MigrationState *s; 2447 2448 s = migrate_get_current(); 2449 2450 return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS]; 2451 } 2452 2453 int migrate_compress_level(void) 2454 { 2455 MigrationState *s; 2456 2457 s = migrate_get_current(); 2458 2459 return s->parameters.compress_level; 2460 } 2461 2462 int migrate_compress_threads(void) 2463 { 2464 MigrationState *s; 2465 2466 s = migrate_get_current(); 2467 2468 return s->parameters.compress_threads; 2469 } 2470 2471 int migrate_compress_wait_thread(void) 2472 { 2473 MigrationState *s; 2474 2475 s = migrate_get_current(); 2476 2477 return s->parameters.compress_wait_thread; 2478 } 2479 2480 int migrate_decompress_threads(void) 2481 { 2482 MigrationState *s; 2483 2484 s = migrate_get_current(); 2485 2486 return s->parameters.decompress_threads; 2487 } 2488 2489 bool migrate_dirty_bitmaps(void) 2490 { 2491 MigrationState *s; 2492 2493 s = migrate_get_current(); 2494 2495 return s->enabled_capabilities[MIGRATION_CAPABILITY_DIRTY_BITMAPS]; 2496 } 2497 2498 bool migrate_ignore_shared(void) 
2499 { 2500 MigrationState *s; 2501 2502 s = migrate_get_current(); 2503 2504 return s->enabled_capabilities[MIGRATION_CAPABILITY_X_IGNORE_SHARED]; 2505 } 2506 2507 bool migrate_validate_uuid(void) 2508 { 2509 MigrationState *s; 2510 2511 s = migrate_get_current(); 2512 2513 return s->enabled_capabilities[MIGRATION_CAPABILITY_VALIDATE_UUID]; 2514 } 2515 2516 bool migrate_use_events(void) 2517 { 2518 MigrationState *s; 2519 2520 s = migrate_get_current(); 2521 2522 return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS]; 2523 } 2524 2525 bool migrate_use_multifd(void) 2526 { 2527 MigrationState *s; 2528 2529 s = migrate_get_current(); 2530 2531 return s->enabled_capabilities[MIGRATION_CAPABILITY_MULTIFD]; 2532 } 2533 2534 bool migrate_pause_before_switchover(void) 2535 { 2536 MigrationState *s; 2537 2538 s = migrate_get_current(); 2539 2540 return s->enabled_capabilities[ 2541 MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER]; 2542 } 2543 2544 int migrate_multifd_channels(void) 2545 { 2546 MigrationState *s; 2547 2548 s = migrate_get_current(); 2549 2550 return s->parameters.multifd_channels; 2551 } 2552 2553 MultiFDCompression migrate_multifd_compression(void) 2554 { 2555 MigrationState *s; 2556 2557 s = migrate_get_current(); 2558 2559 return s->parameters.multifd_compression; 2560 } 2561 2562 int migrate_multifd_zlib_level(void) 2563 { 2564 MigrationState *s; 2565 2566 s = migrate_get_current(); 2567 2568 return s->parameters.multifd_zlib_level; 2569 } 2570 2571 int migrate_multifd_zstd_level(void) 2572 { 2573 MigrationState *s; 2574 2575 s = migrate_get_current(); 2576 2577 return s->parameters.multifd_zstd_level; 2578 } 2579 2580 #ifdef CONFIG_LINUX 2581 bool migrate_use_zero_copy_send(void) 2582 { 2583 MigrationState *s; 2584 2585 s = migrate_get_current(); 2586 2587 return s->parameters.zero_copy_send; 2588 } 2589 #endif 2590 2591 int migrate_use_xbzrle(void) 2592 { 2593 MigrationState *s; 2594 2595 s = migrate_get_current(); 2596 2597 return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE]; 2598 } 2599 2600 uint64_t migrate_xbzrle_cache_size(void) 2601 { 2602 MigrationState *s; 2603 2604 s = migrate_get_current(); 2605 2606 return s->parameters.xbzrle_cache_size; 2607 } 2608 2609 static int64_t migrate_max_postcopy_bandwidth(void) 2610 { 2611 MigrationState *s; 2612 2613 s = migrate_get_current(); 2614 2615 return s->parameters.max_postcopy_bandwidth; 2616 } 2617 2618 bool migrate_use_block(void) 2619 { 2620 MigrationState *s; 2621 2622 s = migrate_get_current(); 2623 2624 return s->enabled_capabilities[MIGRATION_CAPABILITY_BLOCK]; 2625 } 2626 2627 bool migrate_use_return_path(void) 2628 { 2629 MigrationState *s; 2630 2631 s = migrate_get_current(); 2632 2633 return s->enabled_capabilities[MIGRATION_CAPABILITY_RETURN_PATH]; 2634 } 2635 2636 bool migrate_use_block_incremental(void) 2637 { 2638 MigrationState *s; 2639 2640 s = migrate_get_current(); 2641 2642 return s->parameters.block_incremental; 2643 } 2644 2645 bool migrate_background_snapshot(void) 2646 { 2647 MigrationState *s; 2648 2649 s = migrate_get_current(); 2650 2651 return s->enabled_capabilities[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]; 2652 } 2653 2654 /* migration thread support */ 2655 /* 2656 * Something bad happened to the RP stream, mark an error 2657 * The caller shall print or trace something to indicate why 2658 */ 2659 static void mark_source_rp_bad(MigrationState *s) 2660 { 2661 s->rp_state.error = true; 2662 } 2663 2664 static struct rp_cmd_args { 2665 ssize_t len; /* -1 = variable */ 2666 const char 
*name; 2667 } rp_cmd_args[] = { 2668 [MIG_RP_MSG_INVALID] = { .len = -1, .name = "INVALID" }, 2669 [MIG_RP_MSG_SHUT] = { .len = 4, .name = "SHUT" }, 2670 [MIG_RP_MSG_PONG] = { .len = 4, .name = "PONG" }, 2671 [MIG_RP_MSG_REQ_PAGES] = { .len = 12, .name = "REQ_PAGES" }, 2672 [MIG_RP_MSG_REQ_PAGES_ID] = { .len = -1, .name = "REQ_PAGES_ID" }, 2673 [MIG_RP_MSG_RECV_BITMAP] = { .len = -1, .name = "RECV_BITMAP" }, 2674 [MIG_RP_MSG_RESUME_ACK] = { .len = 4, .name = "RESUME_ACK" }, 2675 [MIG_RP_MSG_MAX] = { .len = -1, .name = "MAX" }, 2676 }; 2677 2678 /* 2679 * Process a request for pages received on the return path, 2680 * We're allowed to send more than requested (e.g. to round to our page size) 2681 * and we don't need to send pages that have already been sent. 2682 */ 2683 static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname, 2684 ram_addr_t start, size_t len) 2685 { 2686 long our_host_ps = qemu_real_host_page_size(); 2687 2688 trace_migrate_handle_rp_req_pages(rbname, start, len); 2689 2690 /* 2691 * Since we currently insist on matching page sizes, just sanity check 2692 * we're being asked for whole host pages. 2693 */ 2694 if (!QEMU_IS_ALIGNED(start, our_host_ps) || 2695 !QEMU_IS_ALIGNED(len, our_host_ps)) { 2696 error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT 2697 " len: %zd", __func__, start, len); 2698 mark_source_rp_bad(ms); 2699 return; 2700 } 2701 2702 if (ram_save_queue_pages(rbname, start, len)) { 2703 mark_source_rp_bad(ms); 2704 } 2705 } 2706 2707 /* Return true to retry, false to quit */ 2708 static bool postcopy_pause_return_path_thread(MigrationState *s) 2709 { 2710 trace_postcopy_pause_return_path(); 2711 2712 qemu_sem_wait(&s->postcopy_pause_rp_sem); 2713 2714 trace_postcopy_pause_return_path_continued(); 2715 2716 return true; 2717 } 2718 2719 static int migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name) 2720 { 2721 RAMBlock *block = qemu_ram_block_by_name(block_name); 2722 2723 if (!block) { 2724 error_report("%s: invalid block name '%s'", __func__, block_name); 2725 return -EINVAL; 2726 } 2727 2728 /* Fetch the received bitmap and refresh the dirty bitmap */ 2729 return ram_dirty_bitmap_reload(s, block); 2730 } 2731 2732 static int migrate_handle_rp_resume_ack(MigrationState *s, uint32_t value) 2733 { 2734 trace_source_return_path_thread_resume_ack(value); 2735 2736 if (value != MIGRATION_RESUME_ACK_VALUE) { 2737 error_report("%s: illegal resume_ack value %"PRIu32, 2738 __func__, value); 2739 return -1; 2740 } 2741 2742 /* Now both sides are active. 
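 * Move the state back to POSTCOPY_ACTIVE; the thread waiting in postcopy_resume_handshake() is then released below.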
*/ 2743 migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_RECOVER, 2744 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2745 2746 /* Notify send thread that time to continue send pages */ 2747 qemu_sem_post(&s->rp_state.rp_sem); 2748 2749 return 0; 2750 } 2751 2752 /* Release ms->rp_state.from_dst_file in a safe way */ 2753 static void migration_release_from_dst_file(MigrationState *ms) 2754 { 2755 QEMUFile *file; 2756 2757 WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) { 2758 /* 2759 * Reset the from_dst_file pointer first before releasing it, as we 2760 * can't block within lock section 2761 */ 2762 file = ms->rp_state.from_dst_file; 2763 ms->rp_state.from_dst_file = NULL; 2764 } 2765 2766 qemu_fclose(file); 2767 } 2768 2769 /* 2770 * Handles messages sent on the return path towards the source VM 2771 * 2772 */ 2773 static void *source_return_path_thread(void *opaque) 2774 { 2775 MigrationState *ms = opaque; 2776 QEMUFile *rp = ms->rp_state.from_dst_file; 2777 uint16_t header_len, header_type; 2778 uint8_t buf[512]; 2779 uint32_t tmp32, sibling_error; 2780 ram_addr_t start = 0; /* =0 to silence warning */ 2781 size_t len = 0, expected_len; 2782 int res; 2783 2784 trace_source_return_path_thread_entry(); 2785 rcu_register_thread(); 2786 2787 retry: 2788 while (!ms->rp_state.error && !qemu_file_get_error(rp) && 2789 migration_is_setup_or_active(ms->state)) { 2790 trace_source_return_path_thread_loop_top(); 2791 header_type = qemu_get_be16(rp); 2792 header_len = qemu_get_be16(rp); 2793 2794 if (qemu_file_get_error(rp)) { 2795 mark_source_rp_bad(ms); 2796 goto out; 2797 } 2798 2799 if (header_type >= MIG_RP_MSG_MAX || 2800 header_type == MIG_RP_MSG_INVALID) { 2801 error_report("RP: Received invalid message 0x%04x length 0x%04x", 2802 header_type, header_len); 2803 mark_source_rp_bad(ms); 2804 goto out; 2805 } 2806 2807 if ((rp_cmd_args[header_type].len != -1 && 2808 header_len != rp_cmd_args[header_type].len) || 2809 header_len > sizeof(buf)) { 2810 error_report("RP: Received '%s' message (0x%04x) with" 2811 "incorrect length %d expecting %zu", 2812 rp_cmd_args[header_type].name, header_type, header_len, 2813 (size_t)rp_cmd_args[header_type].len); 2814 mark_source_rp_bad(ms); 2815 goto out; 2816 } 2817 2818 /* We know we've got a valid header by this point */ 2819 res = qemu_get_buffer(rp, buf, header_len); 2820 if (res != header_len) { 2821 error_report("RP: Failed reading data for message 0x%04x" 2822 " read %d expected %d", 2823 header_type, res, header_len); 2824 mark_source_rp_bad(ms); 2825 goto out; 2826 } 2827 2828 /* OK, we have the message and the data */ 2829 switch (header_type) { 2830 case MIG_RP_MSG_SHUT: 2831 sibling_error = ldl_be_p(buf); 2832 trace_source_return_path_thread_shut(sibling_error); 2833 if (sibling_error) { 2834 error_report("RP: Sibling indicated error %d", sibling_error); 2835 mark_source_rp_bad(ms); 2836 } 2837 /* 2838 * We'll let the main thread deal with closing the RP 2839 * we could do a shutdown(2) on it, but we're the only user 2840 * anyway, so there's nothing gained. 
2841 */ 2842 goto out; 2843 2844 case MIG_RP_MSG_PONG: 2845 tmp32 = ldl_be_p(buf); 2846 trace_source_return_path_thread_pong(tmp32); 2847 break; 2848 2849 case MIG_RP_MSG_REQ_PAGES: 2850 start = ldq_be_p(buf); 2851 len = ldl_be_p(buf + 8); 2852 migrate_handle_rp_req_pages(ms, NULL, start, len); 2853 break; 2854 2855 case MIG_RP_MSG_REQ_PAGES_ID: 2856 expected_len = 12 + 1; /* header + termination */ 2857 2858 if (header_len >= expected_len) { 2859 start = ldq_be_p(buf); 2860 len = ldl_be_p(buf + 8); 2861 /* Now we expect an idstr */ 2862 tmp32 = buf[12]; /* Length of the following idstr */ 2863 buf[13 + tmp32] = '\0'; 2864 expected_len += tmp32; 2865 } 2866 if (header_len != expected_len) { 2867 error_report("RP: Req_Page_id with length %d expecting %zd", 2868 header_len, expected_len); 2869 mark_source_rp_bad(ms); 2870 goto out; 2871 } 2872 migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len); 2873 break; 2874 2875 case MIG_RP_MSG_RECV_BITMAP: 2876 if (header_len < 1) { 2877 error_report("%s: missing block name", __func__); 2878 mark_source_rp_bad(ms); 2879 goto out; 2880 } 2881 /* Format: len (1B) + idstr (<255B). This ends the idstr. */ 2882 buf[buf[0] + 1] = '\0'; 2883 if (migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1))) { 2884 mark_source_rp_bad(ms); 2885 goto out; 2886 } 2887 break; 2888 2889 case MIG_RP_MSG_RESUME_ACK: 2890 tmp32 = ldl_be_p(buf); 2891 if (migrate_handle_rp_resume_ack(ms, tmp32)) { 2892 mark_source_rp_bad(ms); 2893 goto out; 2894 } 2895 break; 2896 2897 default: 2898 break; 2899 } 2900 } 2901 2902 out: 2903 res = qemu_file_get_error(rp); 2904 if (res) { 2905 if (res && migration_in_postcopy()) { 2906 /* 2907 * Maybe there is something we can do: it looks like a 2908 * network down issue, and we pause for a recovery. 2909 */ 2910 migration_release_from_dst_file(ms); 2911 rp = NULL; 2912 if (postcopy_pause_return_path_thread(ms)) { 2913 /* 2914 * Reload rp, reset the rest. Referencing it is safe since 2915 * it's reset only by us above, or when migration completes 2916 */ 2917 rp = ms->rp_state.from_dst_file; 2918 ms->rp_state.error = false; 2919 goto retry; 2920 } 2921 } 2922 2923 trace_source_return_path_thread_bad_end(); 2924 mark_source_rp_bad(ms); 2925 } 2926 2927 trace_source_return_path_thread_end(); 2928 migration_release_from_dst_file(ms); 2929 rcu_unregister_thread(); 2930 return NULL; 2931 } 2932 2933 static int open_return_path_on_source(MigrationState *ms, 2934 bool create_thread) 2935 { 2936 ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file); 2937 if (!ms->rp_state.from_dst_file) { 2938 return -1; 2939 } 2940 2941 trace_open_return_path_on_source(); 2942 2943 if (!create_thread) { 2944 /* We're done */ 2945 return 0; 2946 } 2947 2948 qemu_thread_create(&ms->rp_state.rp_thread, "return path", 2949 source_return_path_thread, ms, QEMU_THREAD_JOINABLE); 2950 ms->rp_state.rp_thread_created = true; 2951 2952 trace_open_return_path_on_source_continue(); 2953 2954 return 0; 2955 } 2956 2957 /* Returns 0 if the RP was ok, otherwise there was an error on the RP */ 2958 static int await_return_path_close_on_source(MigrationState *ms) 2959 { 2960 /* 2961 * If this is a normal exit then the destination will send a SHUT and the 2962 * rp_thread will exit, however if there's an error we need to cause 2963 * it to exit. 2964 */ 2965 if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) { 2966 /* 2967 * shutdown(2), if we have it, will cause it to unblock if it's stuck 2968 * waiting for the destination. 
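 * After the shutdown, the qemu_thread_join() below can complete instead of blocking forever.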
2969 */ 2970 qemu_file_shutdown(ms->rp_state.from_dst_file); 2971 mark_source_rp_bad(ms); 2972 } 2973 trace_await_return_path_close_on_source_joining(); 2974 qemu_thread_join(&ms->rp_state.rp_thread); 2975 ms->rp_state.rp_thread_created = false; 2976 trace_await_return_path_close_on_source_close(); 2977 return ms->rp_state.error; 2978 } 2979 2980 /* 2981 * Switch from normal iteration to postcopy 2982 * Returns non-0 on error 2983 */ 2984 static int postcopy_start(MigrationState *ms) 2985 { 2986 int ret; 2987 QIOChannelBuffer *bioc; 2988 QEMUFile *fb; 2989 int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 2990 int64_t bandwidth = migrate_max_postcopy_bandwidth(); 2991 bool restart_block = false; 2992 int cur_state = MIGRATION_STATUS_ACTIVE; 2993 if (!migrate_pause_before_switchover()) { 2994 migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE, 2995 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2996 } 2997 2998 trace_postcopy_start(); 2999 qemu_mutex_lock_iothread(); 3000 trace_postcopy_start_set_run(); 3001 3002 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); 3003 global_state_store(); 3004 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); 3005 if (ret < 0) { 3006 goto fail; 3007 } 3008 3009 ret = migration_maybe_pause(ms, &cur_state, 3010 MIGRATION_STATUS_POSTCOPY_ACTIVE); 3011 if (ret < 0) { 3012 goto fail; 3013 } 3014 3015 ret = bdrv_inactivate_all(); 3016 if (ret < 0) { 3017 goto fail; 3018 } 3019 restart_block = true; 3020 3021 /* 3022 * Cause any non-postcopiable, but iterative devices to 3023 * send out their final data. 3024 */ 3025 qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false); 3026 3027 /* 3028 * in Finish migrate and with the io-lock held everything should 3029 * be quiet, but we've potentially still got dirty pages and we 3030 * need to tell the destination to throw any pages it's already received 3031 * that are dirty 3032 */ 3033 if (migrate_postcopy_ram()) { 3034 ram_postcopy_send_discard_bitmap(ms); 3035 } 3036 3037 /* 3038 * send rest of state - note things that are doing postcopy 3039 * will notice we're in POSTCOPY_ACTIVE and not actually 3040 * wrap their state up here 3041 */ 3042 /* 0 max-postcopy-bandwidth means unlimited */ 3043 if (!bandwidth) { 3044 qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX); 3045 } else { 3046 qemu_file_set_rate_limit(ms->to_dst_file, bandwidth / XFER_LIMIT_RATIO); 3047 } 3048 if (migrate_postcopy_ram()) { 3049 /* Ping just for debugging, helps line traces up */ 3050 qemu_savevm_send_ping(ms->to_dst_file, 2); 3051 } 3052 3053 /* 3054 * While loading the device state we may trigger page transfer 3055 * requests and the fd must be free to process those, and thus 3056 * the destination must read the whole device state off the fd before 3057 * it starts processing it. Unfortunately the ad-hoc migration format 3058 * doesn't allow the destination to know the size to read without fully 3059 * parsing it through each devices load-state code (especially the open 3060 * coded devices that use get/put). 3061 * So we wrap the device state up in a package with a length at the start; 3062 * to do this we use a qemu_buf to hold the whole of the device state. 
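 * (the buffered blob is sent later with qemu_savevm_send_packaged(), which prefixes its length)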
3063 */ 3064 bioc = qio_channel_buffer_new(4096); 3065 qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer"); 3066 fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc)); 3067 object_unref(OBJECT(bioc)); 3068 3069 /* 3070 * Make sure the receiver can get incoming pages before we send the rest 3071 * of the state 3072 */ 3073 qemu_savevm_send_postcopy_listen(fb); 3074 3075 qemu_savevm_state_complete_precopy(fb, false, false); 3076 if (migrate_postcopy_ram()) { 3077 qemu_savevm_send_ping(fb, 3); 3078 } 3079 3080 qemu_savevm_send_postcopy_run(fb); 3081 3082 /* <><> end of stuff going into the package */ 3083 3084 /* Last point of recovery; as soon as we send the package the destination 3085 * can open devices and potentially start running. 3086 * Lets just check again we've not got any errors. 3087 */ 3088 ret = qemu_file_get_error(ms->to_dst_file); 3089 if (ret) { 3090 error_report("postcopy_start: Migration stream errored (pre package)"); 3091 goto fail_closefb; 3092 } 3093 3094 restart_block = false; 3095 3096 /* Now send that blob */ 3097 if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) { 3098 goto fail_closefb; 3099 } 3100 qemu_fclose(fb); 3101 3102 /* Send a notify to give a chance for anything that needs to happen 3103 * at the transition to postcopy and after the device state; in particular 3104 * spice needs to trigger a transition now 3105 */ 3106 ms->postcopy_after_devices = true; 3107 notifier_list_notify(&migration_state_notifiers, ms); 3108 3109 ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop; 3110 3111 qemu_mutex_unlock_iothread(); 3112 3113 if (migrate_postcopy_ram()) { 3114 /* 3115 * Although this ping is just for debug, it could potentially be 3116 * used for getting a better measurement of downtime at the source. 3117 */ 3118 qemu_savevm_send_ping(ms->to_dst_file, 4); 3119 } 3120 3121 if (migrate_release_ram()) { 3122 ram_postcopy_migrated_memory_release(ms); 3123 } 3124 3125 ret = qemu_file_get_error(ms->to_dst_file); 3126 if (ret) { 3127 error_report("postcopy_start: Migration stream errored"); 3128 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 3129 MIGRATION_STATUS_FAILED); 3130 } 3131 3132 return ret; 3133 3134 fail_closefb: 3135 qemu_fclose(fb); 3136 fail: 3137 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 3138 MIGRATION_STATUS_FAILED); 3139 if (restart_block) { 3140 /* A failure happened early enough that we know the destination hasn't 3141 * accessed block devices, so we're safe to recover. 3142 */ 3143 Error *local_err = NULL; 3144 3145 bdrv_activate_all(&local_err); 3146 if (local_err) { 3147 error_report_err(local_err); 3148 } 3149 } 3150 qemu_mutex_unlock_iothread(); 3151 return -1; 3152 } 3153 3154 /** 3155 * migration_maybe_pause: Pause if required to by 3156 * migrate_pause_before_switchover called with the iothread locked 3157 * Returns: 0 on success 3158 */ 3159 static int migration_maybe_pause(MigrationState *s, 3160 int *current_active_state, 3161 int new_state) 3162 { 3163 if (!migrate_pause_before_switchover()) { 3164 return 0; 3165 } 3166 3167 /* Since leaving this state is not atomic with posting the semaphore 3168 * it's possible that someone could have issued multiple migrate_continue 3169 * and the semaphore is incorrectly positive at this point; 3170 * the docs say it's undefined to reinit a semaphore that's already 3171 * init'd, so use timedwait to eat up any existing posts. 
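 * (each 1ms timedwait below consumes one stale post, if any)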
3172  */
3173     while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) {
3174         /* This block intentionally left blank */
3175     }
3176 
3177     /*
3178      * If the migration is cancelled when it is in the completion phase,
3179      * the migration state is set to MIGRATION_STATUS_CANCELLING.
3180      * So we don't need to wait a semaphore, otherwise we would always
3181      * wait for the 'pause_sem' semaphore.
3182      */
3183     if (s->state != MIGRATION_STATUS_CANCELLING) {
3184         qemu_mutex_unlock_iothread();
3185         migrate_set_state(&s->state, *current_active_state,
3186                           MIGRATION_STATUS_PRE_SWITCHOVER);
3187         qemu_sem_wait(&s->pause_sem);
3188         migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
3189                           new_state);
3190         *current_active_state = new_state;
3191         qemu_mutex_lock_iothread();
3192     }
3193 
3194     return s->state == new_state ? 0 : -EINVAL;
3195 }
3196 
3197 /**
3198  * migration_completion: Used by migration_thread when there's not much left.
3199  * The caller 'breaks' the loop when this returns.
3200  *
3201  * @s: Current migration state
3202  */
3203 static void migration_completion(MigrationState *s)
3204 {
3205     int ret;
3206     int current_active_state = s->state;
3207 
3208     if (s->state == MIGRATION_STATUS_ACTIVE) {
3209         qemu_mutex_lock_iothread();
3210         s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3211         qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
3212         s->vm_was_running = runstate_is_running();
3213         ret = global_state_store();
3214 
3215         if (!ret) {
3216             bool inactivate = !migrate_colo_enabled();
3217             ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
3218             trace_migration_completion_vm_stop(ret);
3219             if (ret >= 0) {
3220                 ret = migration_maybe_pause(s, &current_active_state,
3221                                             MIGRATION_STATUS_DEVICE);
3222             }
3223             if (ret >= 0) {
3224                 qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
3225                 ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
3226                                                          inactivate);
3227             }
3228             if (inactivate && ret >= 0) {
3229                 s->block_inactive = true;
3230             }
3231         }
3232         qemu_mutex_unlock_iothread();
3233 
3234         if (ret < 0) {
3235             goto fail;
3236         }
3237     } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
3238         trace_migration_completion_postcopy_end();
3239 
3240         qemu_mutex_lock_iothread();
3241         qemu_savevm_state_complete_postcopy(s->to_dst_file);
3242         qemu_mutex_unlock_iothread();
3243 
3244         trace_migration_completion_postcopy_end_after_complete();
3245     } else {
3246         goto fail;
3247     }
3248 
3249     /*
3250      * If rp was opened we must clean up the thread before
3251      * cleaning everything else up (since if there are no failures
3252      * it will wait for the destination to send its status in
3253      * a SHUT command).
3254      */
3255     if (s->rp_state.rp_thread_created) {
3256         int rp_error;
3257         trace_migration_return_path_end_before();
3258         rp_error = await_return_path_close_on_source(s);
3259         trace_migration_return_path_end_after(rp_error);
3260         if (rp_error) {
3261             goto fail_invalidate;
3262         }
3263     }
3264 
3265     if (qemu_file_get_error(s->to_dst_file)) {
3266         trace_migration_completion_file_err();
3267         goto fail_invalidate;
3268     }
3269 
3270     if (migrate_colo_enabled() && s->state == MIGRATION_STATUS_ACTIVE) {
3271         /* COLO does not support postcopy */
3272         migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
3273                           MIGRATION_STATUS_COLO);
3274     } else {
3275         migrate_set_state(&s->state, current_active_state,
3276                           MIGRATION_STATUS_COMPLETED);
3277     }
3278 
3279     return;
3280 
3281 fail_invalidate:
3282     /* If not doing postcopy, vm_start() will be called: let's regain
3283      * control on images.
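 * bdrv_activate_all() below re-activates the images on this failure path.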
3284 */ 3285 if (s->state == MIGRATION_STATUS_ACTIVE || 3286 s->state == MIGRATION_STATUS_DEVICE) { 3287 Error *local_err = NULL; 3288 3289 qemu_mutex_lock_iothread(); 3290 bdrv_activate_all(&local_err); 3291 if (local_err) { 3292 error_report_err(local_err); 3293 } else { 3294 s->block_inactive = false; 3295 } 3296 qemu_mutex_unlock_iothread(); 3297 } 3298 3299 fail: 3300 migrate_set_state(&s->state, current_active_state, 3301 MIGRATION_STATUS_FAILED); 3302 } 3303 3304 /** 3305 * bg_migration_completion: Used by bg_migration_thread when after all the 3306 * RAM has been saved. The caller 'breaks' the loop when this returns. 3307 * 3308 * @s: Current migration state 3309 */ 3310 static void bg_migration_completion(MigrationState *s) 3311 { 3312 int current_active_state = s->state; 3313 3314 /* 3315 * Stop tracking RAM writes - un-protect memory, un-register UFFD 3316 * memory ranges, flush kernel wait queues and wake up threads 3317 * waiting for write fault to be resolved. 3318 */ 3319 ram_write_tracking_stop(); 3320 3321 if (s->state == MIGRATION_STATUS_ACTIVE) { 3322 /* 3323 * By this moment we have RAM content saved into the migration stream. 3324 * The next step is to flush the non-RAM content (device state) 3325 * right after the ram content. The device state has been stored into 3326 * the temporary buffer before RAM saving started. 3327 */ 3328 qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage); 3329 qemu_fflush(s->to_dst_file); 3330 } else if (s->state == MIGRATION_STATUS_CANCELLING) { 3331 goto fail; 3332 } 3333 3334 if (qemu_file_get_error(s->to_dst_file)) { 3335 trace_migration_completion_file_err(); 3336 goto fail; 3337 } 3338 3339 migrate_set_state(&s->state, current_active_state, 3340 MIGRATION_STATUS_COMPLETED); 3341 return; 3342 3343 fail: 3344 migrate_set_state(&s->state, current_active_state, 3345 MIGRATION_STATUS_FAILED); 3346 } 3347 3348 bool migrate_colo_enabled(void) 3349 { 3350 MigrationState *s = migrate_get_current(); 3351 return s->enabled_capabilities[MIGRATION_CAPABILITY_X_COLO]; 3352 } 3353 3354 typedef enum MigThrError { 3355 /* No error detected */ 3356 MIG_THR_ERR_NONE = 0, 3357 /* Detected error, but resumed successfully */ 3358 MIG_THR_ERR_RECOVERED = 1, 3359 /* Detected fatal error, need to exit */ 3360 MIG_THR_ERR_FATAL = 2, 3361 } MigThrError; 3362 3363 static int postcopy_resume_handshake(MigrationState *s) 3364 { 3365 qemu_savevm_send_postcopy_resume(s->to_dst_file); 3366 3367 while (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { 3368 qemu_sem_wait(&s->rp_state.rp_sem); 3369 } 3370 3371 if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { 3372 return 0; 3373 } 3374 3375 return -1; 3376 } 3377 3378 /* Return zero if success, or <0 for error */ 3379 static int postcopy_do_resume(MigrationState *s) 3380 { 3381 int ret; 3382 3383 /* 3384 * Call all the resume_prepare() hooks, so that modules can be 3385 * ready for the migration resume. 3386 */ 3387 ret = qemu_savevm_state_resume_prepare(s); 3388 if (ret) { 3389 error_report("%s: resume_prepare() failure detected: %d", 3390 __func__, ret); 3391 return ret; 3392 } 3393 3394 /* 3395 * Last handshake with destination on the resume (destination will 3396 * switch to postcopy-active afterwards) 3397 */ 3398 ret = postcopy_resume_handshake(s); 3399 if (ret) { 3400 error_report("%s: handshake failed: %d", __func__, ret); 3401 return ret; 3402 } 3403 3404 return 0; 3405 } 3406 3407 /* 3408 * We don't return until we are in a safe state to continue current 3409 * postcopy migration. 
Returns MIG_THR_ERR_RECOVERED if recovered, or
3410  * MIG_THR_ERR_FATAL if an unrecoverable failure happened.
3411  */
3412 static MigThrError postcopy_pause(MigrationState *s)
3413 {
3414     assert(s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
3415 
3416     while (true) {
3417         QEMUFile *file;
3418 
3419         /*
3420          * Current channel is possibly broken. Release it. Note that this is
3421          * guaranteed even without lock because to_dst_file should only be
3422          * modified by the migration thread. That also guarantees that the
3423          * unregister of yank is safe too without the lock. It should be safe
3424          * even to be within the qemu_file_lock, but we didn't do that to avoid
3425          * taking more mutex (yank_lock) within qemu_file_lock. TL;DR: we make
3426          * the qemu_file_lock critical section as small as possible.
3427          */
3428         assert(s->to_dst_file);
3429         migration_ioc_unregister_yank_from_file(s->to_dst_file);
3430         qemu_mutex_lock(&s->qemu_file_lock);
3431         file = s->to_dst_file;
3432         s->to_dst_file = NULL;
3433         qemu_mutex_unlock(&s->qemu_file_lock);
3434 
3435         qemu_file_shutdown(file);
3436         qemu_fclose(file);
3437 
3438         migrate_set_state(&s->state, s->state,
3439                           MIGRATION_STATUS_POSTCOPY_PAUSED);
3440 
3441         error_report("Detected IO failure for postcopy. "
3442                      "Migration paused.");
3443 
3444         /*
3445          * We wait until things are fixed up. Then someone will set the
3446          * status back for us.
3447          */
3448         while (s->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
3449             qemu_sem_wait(&s->postcopy_pause_sem);
3450         }
3451 
3452         if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
3453             /* Woken up by a recover procedure. Give it a shot */
3454 
3455             /*
3456              * Firstly, let's wake up the return path now, with a new
3457              * return path channel.
3458              */
3459             qemu_sem_post(&s->postcopy_pause_rp_sem);
3460 
3461             /* Do the resume logic */
3462             if (postcopy_do_resume(s) == 0) {
3463                 /* Let's continue! */
3464                 trace_postcopy_pause_continued();
3465                 return MIG_THR_ERR_RECOVERED;
3466             } else {
3467                 /*
3468                  * Something went wrong during the recovery, let's
3469                  * pause again. Pause is always better than throwing
3470                  * data away.
3471                  */
3472                 continue;
3473             }
3474         } else {
3475             /* This is not right... Time to quit. */
3476             return MIG_THR_ERR_FATAL;
3477         }
3478     }
3479 }
3480 
3481 static MigThrError migration_detect_error(MigrationState *s)
3482 {
3483     int ret;
3484     int state = s->state;
3485     Error *local_error = NULL;
3486 
3487     if (state == MIGRATION_STATUS_CANCELLING ||
3488         state == MIGRATION_STATUS_CANCELLED) {
3489         /* End the migration, but don't set the state to failed */
3490         return MIG_THR_ERR_FATAL;
3491     }
3492 
3493     /* Try to detect any file errors */
3494     ret = qemu_file_get_error_obj(s->to_dst_file, &local_error);
3495     if (!ret) {
3496         /* Everything is fine */
3497         assert(!local_error);
3498         return MIG_THR_ERR_NONE;
3499     }
3500 
3501     if (local_error) {
3502         migrate_set_error(s, local_error);
3503         error_free(local_error);
3504     }
3505 
3506     if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE && ret) {
3507         /*
3508          * For postcopy, we allow the network to be down for a
3509          * while. After that, it can be continued by a
3510          * recovery phase.
3511          */
3512         return postcopy_pause(s);
3513     } else {
3514         /*
3515          * For precopy (or postcopy with an error outside IO), we fail
3516          * immediately.
3517          */
3518         migrate_set_state(&s->state, state, MIGRATION_STATUS_FAILED);
3519         trace_migration_thread_file_err();
3520 
3521         /* Time to stop the migration, now.
*/ 3522 return MIG_THR_ERR_FATAL; 3523 } 3524 } 3525 3526 /* How many bytes have we transferred since the beginning of the migration */ 3527 static uint64_t migration_total_bytes(MigrationState *s) 3528 { 3529 return qemu_ftell(s->to_dst_file) + ram_counters.multifd_bytes; 3530 } 3531 3532 static void migration_calculate_complete(MigrationState *s) 3533 { 3534 uint64_t bytes = migration_total_bytes(s); 3535 int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 3536 int64_t transfer_time; 3537 3538 s->total_time = end_time - s->start_time; 3539 if (!s->downtime) { 3540 /* 3541 * It's still not set, so we are precopy migration. For 3542 * postcopy, downtime is calculated during postcopy_start(). 3543 */ 3544 s->downtime = end_time - s->downtime_start; 3545 } 3546 3547 transfer_time = s->total_time - s->setup_time; 3548 if (transfer_time) { 3549 s->mbps = ((double) bytes * 8.0) / transfer_time / 1000; 3550 } 3551 } 3552 3553 static void update_iteration_initial_status(MigrationState *s) 3554 { 3555 /* 3556 * Update these three fields at the same time to avoid mismatch info lead 3557 * wrong speed calculation. 3558 */ 3559 s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 3560 s->iteration_initial_bytes = migration_total_bytes(s); 3561 s->iteration_initial_pages = ram_get_total_transferred_pages(); 3562 } 3563 3564 static void migration_update_counters(MigrationState *s, 3565 int64_t current_time) 3566 { 3567 uint64_t transferred, transferred_pages, time_spent; 3568 uint64_t current_bytes; /* bytes transferred since the beginning */ 3569 double bandwidth; 3570 3571 if (current_time < s->iteration_start_time + BUFFER_DELAY) { 3572 return; 3573 } 3574 3575 current_bytes = migration_total_bytes(s); 3576 transferred = current_bytes - s->iteration_initial_bytes; 3577 time_spent = current_time - s->iteration_start_time; 3578 bandwidth = (double)transferred / time_spent; 3579 s->threshold_size = bandwidth * s->parameters.downtime_limit; 3580 3581 s->mbps = (((double) transferred * 8.0) / 3582 ((double) time_spent / 1000.0)) / 1000.0 / 1000.0; 3583 3584 transferred_pages = ram_get_total_transferred_pages() - 3585 s->iteration_initial_pages; 3586 s->pages_per_second = (double) transferred_pages / 3587 (((double) time_spent / 1000.0)); 3588 3589 /* 3590 * if we haven't sent anything, we don't want to 3591 * recalculate. 10000 is a small enough number for our purposes 3592 */ 3593 if (ram_counters.dirty_pages_rate && transferred > 10000) { 3594 s->expected_downtime = ram_counters.remaining / bandwidth; 3595 } 3596 3597 qemu_file_reset_rate_limit(s->to_dst_file); 3598 3599 update_iteration_initial_status(s); 3600 3601 trace_migrate_transferred(transferred, time_spent, 3602 bandwidth, s->threshold_size); 3603 } 3604 3605 /* Migration thread iteration status */ 3606 typedef enum { 3607 MIG_ITERATE_RESUME, /* Resume current iteration */ 3608 MIG_ITERATE_SKIP, /* Skip current iteration */ 3609 MIG_ITERATE_BREAK, /* Break the loop */ 3610 } MigIterateState; 3611 3612 /* 3613 * Return true if continue to the next iteration directly, false 3614 * otherwise. 
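 * (the result is expressed as a MigIterateState value rather than a plain bool)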
3615 */ 3616 static MigIterateState migration_iteration_run(MigrationState *s) 3617 { 3618 uint64_t pending_size, pend_pre, pend_compat, pend_post; 3619 bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE; 3620 3621 qemu_savevm_state_pending(s->to_dst_file, s->threshold_size, &pend_pre, 3622 &pend_compat, &pend_post); 3623 pending_size = pend_pre + pend_compat + pend_post; 3624 3625 trace_migrate_pending(pending_size, s->threshold_size, 3626 pend_pre, pend_compat, pend_post); 3627 3628 if (pending_size && pending_size >= s->threshold_size) { 3629 /* Still a significant amount to transfer */ 3630 if (!in_postcopy && pend_pre <= s->threshold_size && 3631 qatomic_read(&s->start_postcopy)) { 3632 if (postcopy_start(s)) { 3633 error_report("%s: postcopy failed to start", __func__); 3634 } 3635 return MIG_ITERATE_SKIP; 3636 } 3637 /* Just another iteration step */ 3638 qemu_savevm_state_iterate(s->to_dst_file, in_postcopy); 3639 } else { 3640 trace_migration_thread_low_pending(pending_size); 3641 migration_completion(s); 3642 return MIG_ITERATE_BREAK; 3643 } 3644 3645 return MIG_ITERATE_RESUME; 3646 } 3647 3648 static void migration_iteration_finish(MigrationState *s) 3649 { 3650 /* If we enabled cpu throttling for auto-converge, turn it off. */ 3651 cpu_throttle_stop(); 3652 3653 qemu_mutex_lock_iothread(); 3654 switch (s->state) { 3655 case MIGRATION_STATUS_COMPLETED: 3656 migration_calculate_complete(s); 3657 runstate_set(RUN_STATE_POSTMIGRATE); 3658 break; 3659 case MIGRATION_STATUS_COLO: 3660 if (!migrate_colo_enabled()) { 3661 error_report("%s: critical error: calling COLO code without " 3662 "COLO enabled", __func__); 3663 } 3664 migrate_start_colo_process(s); 3665 s->vm_was_running = true; 3666 /* Fallthrough */ 3667 case MIGRATION_STATUS_FAILED: 3668 case MIGRATION_STATUS_CANCELLED: 3669 case MIGRATION_STATUS_CANCELLING: 3670 if (s->vm_was_running) { 3671 if (!runstate_check(RUN_STATE_SHUTDOWN)) { 3672 vm_start(); 3673 } 3674 } else { 3675 if (runstate_check(RUN_STATE_FINISH_MIGRATE)) { 3676 runstate_set(RUN_STATE_POSTMIGRATE); 3677 } 3678 } 3679 break; 3680 3681 default: 3682 /* Should not reach here, but if so, forgive the VM. */ 3683 error_report("%s: Unknown ending state %d", __func__, s->state); 3684 break; 3685 } 3686 migrate_fd_cleanup_schedule(s); 3687 qemu_mutex_unlock_iothread(); 3688 } 3689 3690 static void bg_migration_iteration_finish(MigrationState *s) 3691 { 3692 qemu_mutex_lock_iothread(); 3693 switch (s->state) { 3694 case MIGRATION_STATUS_COMPLETED: 3695 migration_calculate_complete(s); 3696 break; 3697 3698 case MIGRATION_STATUS_ACTIVE: 3699 case MIGRATION_STATUS_FAILED: 3700 case MIGRATION_STATUS_CANCELLED: 3701 case MIGRATION_STATUS_CANCELLING: 3702 break; 3703 3704 default: 3705 /* Should not reach here, but if so, forgive the VM. */ 3706 error_report("%s: Unknown ending state %d", __func__, s->state); 3707 break; 3708 } 3709 3710 migrate_fd_cleanup_schedule(s); 3711 qemu_mutex_unlock_iothread(); 3712 } 3713 3714 /* 3715 * Return true if continue to the next iteration directly, false 3716 * otherwise. 
3717 */ 3718 static MigIterateState bg_migration_iteration_run(MigrationState *s) 3719 { 3720 int res; 3721 3722 res = qemu_savevm_state_iterate(s->to_dst_file, false); 3723 if (res > 0) { 3724 bg_migration_completion(s); 3725 return MIG_ITERATE_BREAK; 3726 } 3727 3728 return MIG_ITERATE_RESUME; 3729 } 3730 3731 void migration_make_urgent_request(void) 3732 { 3733 qemu_sem_post(&migrate_get_current()->rate_limit_sem); 3734 } 3735 3736 void migration_consume_urgent_request(void) 3737 { 3738 qemu_sem_wait(&migrate_get_current()->rate_limit_sem); 3739 } 3740 3741 /* Returns true if the rate limiting was broken by an urgent request */ 3742 bool migration_rate_limit(void) 3743 { 3744 int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 3745 MigrationState *s = migrate_get_current(); 3746 3747 bool urgent = false; 3748 migration_update_counters(s, now); 3749 if (qemu_file_rate_limit(s->to_dst_file)) { 3750 3751 if (qemu_file_get_error(s->to_dst_file)) { 3752 return false; 3753 } 3754 /* 3755 * Wait for a delay to do rate limiting OR 3756 * something urgent to post the semaphore. 3757 */ 3758 int ms = s->iteration_start_time + BUFFER_DELAY - now; 3759 trace_migration_rate_limit_pre(ms); 3760 if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) { 3761 /* 3762 * We were woken by one or more urgent things but 3763 * the timedwait will have consumed one of them. 3764 * The service routine for the urgent wake will dec 3765 * the semaphore itself for each item it consumes, 3766 * so add this one we just eat back. 3767 */ 3768 qemu_sem_post(&s->rate_limit_sem); 3769 urgent = true; 3770 } 3771 trace_migration_rate_limit_post(urgent); 3772 } 3773 return urgent; 3774 } 3775 3776 /* 3777 * if failover devices are present, wait they are completely 3778 * unplugged 3779 */ 3780 3781 static void qemu_savevm_wait_unplug(MigrationState *s, int old_state, 3782 int new_state) 3783 { 3784 if (qemu_savevm_state_guest_unplug_pending()) { 3785 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_WAIT_UNPLUG); 3786 3787 while (s->state == MIGRATION_STATUS_WAIT_UNPLUG && 3788 qemu_savevm_state_guest_unplug_pending()) { 3789 qemu_sem_timedwait(&s->wait_unplug_sem, 250); 3790 } 3791 if (s->state != MIGRATION_STATUS_WAIT_UNPLUG) { 3792 int timeout = 120; /* 30 seconds */ 3793 /* 3794 * migration has been canceled 3795 * but as we have started an unplug we must wait the end 3796 * to be able to plug back the card 3797 */ 3798 while (timeout-- && qemu_savevm_state_guest_unplug_pending()) { 3799 qemu_sem_timedwait(&s->wait_unplug_sem, 250); 3800 } 3801 if (qemu_savevm_state_guest_unplug_pending() && 3802 !qtest_enabled()) { 3803 warn_report("migration: partially unplugged device on " 3804 "failure"); 3805 } 3806 } 3807 3808 migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG, new_state); 3809 } else { 3810 migrate_set_state(&s->state, old_state, new_state); 3811 } 3812 } 3813 3814 /* 3815 * Master migration thread on the source VM. 3816 * It drives the migration and pumps the data down the outgoing channel. 3817 */ 3818 static void *migration_thread(void *opaque) 3819 { 3820 MigrationState *s = opaque; 3821 int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST); 3822 MigThrError thr_error; 3823 bool urgent = false; 3824 3825 rcu_register_thread(); 3826 3827 object_ref(OBJECT(s)); 3828 update_iteration_initial_status(s); 3829 3830 qemu_savevm_state_header(s->to_dst_file); 3831 3832 /* 3833 * If we opened the return path, we need to make sure dst has it 3834 * opened as well. 
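 * The OPEN_RETURN_PATH command sent just below asks the destination to do that.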
/*
 * If failover devices are present, wait until they are completely
 * unplugged.
 */

static void qemu_savevm_wait_unplug(MigrationState *s, int old_state,
                                    int new_state)
{
    if (qemu_savevm_state_guest_unplug_pending()) {
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_WAIT_UNPLUG);

        while (s->state == MIGRATION_STATUS_WAIT_UNPLUG &&
               qemu_savevm_state_guest_unplug_pending()) {
            qemu_sem_timedwait(&s->wait_unplug_sem, 250);
        }
        if (s->state != MIGRATION_STATUS_WAIT_UNPLUG) {
            int timeout = 120; /* 30 seconds */
            /*
             * Migration has been cancelled, but since we have started an
             * unplug we must wait for it to finish so that the card can be
             * plugged back in.
             */
            while (timeout-- && qemu_savevm_state_guest_unplug_pending()) {
                qemu_sem_timedwait(&s->wait_unplug_sem, 250);
            }
            if (qemu_savevm_state_guest_unplug_pending() &&
                !qtest_enabled()) {
                warn_report("migration: partially unplugged device on "
                            "failure");
            }
        }

        migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG, new_state);
    } else {
        migrate_set_state(&s->state, old_state, new_state);
    }
}

/*
 * Master migration thread on the source VM.
 * It drives the migration and pumps the data down the outgoing channel.
 */
static void *migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    MigThrError thr_error;
    bool urgent = false;

    rcu_register_thread();

    object_ref(OBJECT(s));
    update_iteration_initial_status(s);

    qemu_savevm_state_header(s->to_dst_file);

    /*
     * If we opened the return path, we need to make sure dst has it
     * opened as well.
     */
    if (s->rp_state.rp_thread_created) {
        /* Now tell the dest that it should open its end so it can reply */
        qemu_savevm_send_open_return_path(s->to_dst_file);

        /* And do a ping that will make stuff easier to debug */
        qemu_savevm_send_ping(s->to_dst_file, 1);
    }

    if (migrate_postcopy()) {
        /*
         * Tell the destination that we *might* want to do postcopy later;
         * if the other end can't do postcopy it should fail now, nice and
         * early.
         */
        qemu_savevm_send_postcopy_advise(s->to_dst_file);
    }

    if (migrate_colo_enabled()) {
        /* Notify migration destination that we enable COLO */
        qemu_savevm_send_colo_enable(s->to_dst_file);
    }

    qemu_savevm_state_setup(s->to_dst_file);

    qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
                            MIGRATION_STATUS_ACTIVE);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;

    trace_migration_thread_setup_complete();

    while (migration_is_active(s)) {
        if (urgent || !qemu_file_rate_limit(s->to_dst_file)) {
            MigIterateState iter_state = migration_iteration_run(s);
            if (iter_state == MIG_ITERATE_SKIP) {
                continue;
            } else if (iter_state == MIG_ITERATE_BREAK) {
                break;
            }
        }

        /*
         * Try to detect any kind of failures, and see whether we
         * should stop the migration now.
         */
        thr_error = migration_detect_error(s);
        if (thr_error == MIG_THR_ERR_FATAL) {
            /* Stop migration */
            break;
        } else if (thr_error == MIG_THR_ERR_RECOVERED) {
            /*
             * Just recovered from, e.g., a network failure, so reset all
             * the local variables. This is important to avoid breaking the
             * transferred_bytes and bandwidth calculations.
             */
            update_iteration_initial_status(s);
        }

        urgent = migration_rate_limit();
    }

    trace_migration_thread_after_loop();
    migration_iteration_finish(s);
    object_unref(OBJECT(s));
    rcu_unregister_thread();
    return NULL;
}

static void bg_migration_vm_start_bh(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->vm_start_bh);
    s->vm_start_bh = NULL;

    vm_start();
    s->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - s->downtime_start;
}
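/*
 * Note that for a background snapshot the reported downtime is measured from
 * downtime_start, which bg_migration_thread() below records shortly before it
 * stops the vCPUs, until vm_start() completes in this bottom half, i.e. it
 * covers the window in which the guest was actually paused for state capture.
 */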
/**
 * Background snapshot thread, based on live migration code.
 * This is an alternative implementation of the live migration mechanism,
 * introduced specifically to support background snapshots.
 *
 * It takes advantage of the userfault_fd write protection mechanism
 * introduced in the v5.7 kernel. Compared to the existing dirty page logging
 * migration, much less stream traffic is produced, resulting in smaller
 * snapshot images, simply because no page duplicates can get into the stream.
 *
 * Another key point is that the generated vmstate stream reflects the machine
 * state 'frozen' at the beginning of snapshot creation, whereas with the
 * dirty page logging mechanism the saved snapshot is effectively the state of
 * the VM at the end of the process.
 */
static void *bg_migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    int64_t setup_start;
    MigThrError thr_error;
    QEMUFile *fb;
    bool early_fail = true;

    rcu_register_thread();
    object_ref(OBJECT(s));

    qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);

    setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    /*
     * We want to save vmstate for the moment when migration has been
     * initiated, but we also want to save RAM content while the VM is
     * running. The RAM content should appear first in the vmstate. So, we
     * first stash the non-RAM part of the vmstate to the temporary buffer,
     * then write the RAM part of the vmstate to the migration stream
     * with vCPUs running and, finally, write the stashed non-RAM part of
     * the vmstate from the buffer to the migration stream.
     */
    s->bioc = qio_channel_buffer_new(512 * 1024);
    qio_channel_set_name(QIO_CHANNEL(s->bioc), "vmstate-buffer");
    fb = qemu_fopen_channel_output(QIO_CHANNEL(s->bioc));
    object_unref(OBJECT(s->bioc));

    update_iteration_initial_status(s);

    /*
     * Prepare for tracking memory writes with UFFD-WP - populate
     * RAM pages before protecting.
     */
#ifdef __linux__
    ram_write_tracking_prepare();
#endif

    qemu_savevm_state_header(s->to_dst_file);
    qemu_savevm_state_setup(s->to_dst_file);

    qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
                            MIGRATION_STATUS_ACTIVE);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;

    trace_migration_thread_setup_complete();
    s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    qemu_mutex_lock_iothread();

    /*
     * If the VM is currently in a suspended state, then, to make a valid
     * runstate transition in vm_stop_force_state(), we need to wake it up.
     */
    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
    s->vm_was_running = runstate_is_running();

    if (global_state_store()) {
        goto fail;
    }
    /* Forcibly stop VM before saving state of vCPUs and devices */
    if (vm_stop_force_state(RUN_STATE_PAUSED)) {
        goto fail;
    }
    /*
     * Put vCPUs in sync with shadow context structures, then
     * save their state to channel-buffer along with devices.
     */
    cpu_synchronize_all_states();
    if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) {
        goto fail;
    }
    /*
     * Since we are going to get non-iterable state data directly
     * from s->bioc->data, explicit flush is needed here.
     */
    qemu_fflush(fb);
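    /*
     * At this point the channel-buffer (s->bioc) holds the complete
     * non-iterable device state. The resulting snapshot stream is therefore
     * laid out roughly as: stream header, per-device setup sections, RAM
     * pages written while the vCPUs run, and finally the buffered non-RAM
     * vmstate, which is written out when the snapshot completes.
     */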
    /* Now initialize UFFD context and start tracking RAM writes */
    if (ram_write_tracking_start()) {
        goto fail;
    }
    early_fail = false;

    /*
     * Start VM from BH handler to avoid write-fault lock here.
     * UFFD-WP protection for the whole RAM is already enabled so
     * calling VM state change notifiers from vm_start() would initiate
     * writes to virtio VQs memory which is in write-protected region.
     */
    s->vm_start_bh = qemu_bh_new(bg_migration_vm_start_bh, s);
    qemu_bh_schedule(s->vm_start_bh);

    qemu_mutex_unlock_iothread();

    while (migration_is_active(s)) {
        MigIterateState iter_state = bg_migration_iteration_run(s);
        if (iter_state == MIG_ITERATE_SKIP) {
            continue;
        } else if (iter_state == MIG_ITERATE_BREAK) {
            break;
        }

        /*
         * Try to detect any kind of failures, and see whether we
         * should stop the migration now.
         */
        thr_error = migration_detect_error(s);
        if (thr_error == MIG_THR_ERR_FATAL) {
            /* Stop migration */
            break;
        }

        migration_update_counters(s, qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    }

    trace_migration_thread_after_loop();

fail:
    if (early_fail) {
        migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        qemu_mutex_unlock_iothread();
    }

    bg_migration_iteration_finish(s);

    qemu_fclose(fb);
    object_unref(OBJECT(s));
    rcu_unregister_thread();

    return NULL;
}

void migrate_fd_connect(MigrationState *s, Error *error_in)
{
    Error *local_err = NULL;
    int64_t rate_limit;
    bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;

    /*
     * If there's a previous error, free it and prepare for another one.
     * Meanwhile, if migration completes successfully, there won't be an
     * error dumped when calling migrate_fd_cleanup().
     */
    migrate_error_free(s);

    s->expected_downtime = s->parameters.downtime_limit;
    if (resume) {
        assert(s->cleanup_bh);
    } else {
        assert(!s->cleanup_bh);
        s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup_bh, s);
    }
    if (error_in) {
        migrate_fd_error(s, error_in);
        if (resume) {
            /*
             * Don't do cleanup for resume if the channel is invalid, but only
             * dump the error. We wait for another channel connect from the
             * user. The error_report still gives the HMP user a hint on what
             * failed. It's normally done in migrate_fd_cleanup(), but call it
             * here explicitly.
             */
            error_report_err(error_copy(s->error));
        } else {
            migrate_fd_cleanup(s);
        }
        return;
    }

    if (resume) {
        /* This is a resumed migration */
        rate_limit = s->parameters.max_postcopy_bandwidth /
            XFER_LIMIT_RATIO;
    } else {
        /* This is a fresh new migration */
        rate_limit = s->parameters.max_bandwidth / XFER_LIMIT_RATIO;

        /* Notify before starting migration thread */
        notifier_list_notify(&migration_state_notifiers, s);
    }

    qemu_file_set_rate_limit(s->to_dst_file, rate_limit);
    qemu_file_set_blocking(s->to_dst_file, true);

    /*
     * Open the return path. For postcopy, it is used exclusively. For
     * precopy, only if the user specified the "return-path" capability
     * will QEMU use the return path.
     */
    if (migrate_postcopy_ram() || migrate_use_return_path()) {
        if (open_return_path_on_source(s, !resume)) {
            error_report("Unable to open return-path for postcopy");
            migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
            migrate_fd_cleanup(s);
            return;
        }
    }

    if (resume) {
        /* Wakeup the main migration thread to do the recovery */
        migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
                          MIGRATION_STATUS_POSTCOPY_RECOVER);
        qemu_sem_post(&s->postcopy_pause_sem);
        return;
    }

    if (multifd_save_setup(&local_err) != 0) {
        error_report_err(local_err);
        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                          MIGRATION_STATUS_FAILED);
        migrate_fd_cleanup(s);
        return;
    }

    if (migrate_background_snapshot()) {
        qemu_thread_create(&s->thread, "bg_snapshot",
                           bg_migration_thread, s, QEMU_THREAD_JOINABLE);
    } else {
        qemu_thread_create(&s->thread, "live_migration",
                           migration_thread, s, QEMU_THREAD_JOINABLE);
    }
    s->migration_thread_running = true;
}

void migration_global_dump(Monitor *mon)
{
    MigrationState *ms = migrate_get_current();

    monitor_printf(mon, "globals:\n");
    monitor_printf(mon, "store-global-state: %s\n",
                   ms->store_global_state ? "on" : "off");
    monitor_printf(mon, "only-migratable: %s\n",
                   only_migratable ? "on" : "off");
    monitor_printf(mon, "send-configuration: %s\n",
                   ms->send_configuration ? "on" : "off");
    monitor_printf(mon, "send-section-footer: %s\n",
                   ms->send_section_footer ? "on" : "off");
    monitor_printf(mon, "decompress-error-check: %s\n",
                   ms->decompress_error_check ? "on" : "off");
    monitor_printf(mon, "clear-bitmap-shift: %u\n",
                   ms->clear_bitmap_shift);
}

#define DEFINE_PROP_MIG_CAP(name, x) \
    DEFINE_PROP_BOOL(name, MigrationState, enabled_capabilities[x], false)

static Property migration_properties[] = {
    DEFINE_PROP_BOOL("store-global-state", MigrationState,
                     store_global_state, true),
    DEFINE_PROP_BOOL("send-configuration", MigrationState,
                     send_configuration, true),
    DEFINE_PROP_BOOL("send-section-footer", MigrationState,
                     send_section_footer, true),
    DEFINE_PROP_BOOL("decompress-error-check", MigrationState,
                     decompress_error_check, true),
    DEFINE_PROP_UINT8("x-clear-bitmap-shift", MigrationState,
                      clear_bitmap_shift, CLEAR_BITMAP_SHIFT_DEFAULT),

    /* Migration parameters */
    DEFINE_PROP_UINT8("x-compress-level", MigrationState,
                      parameters.compress_level,
                      DEFAULT_MIGRATE_COMPRESS_LEVEL),
    DEFINE_PROP_UINT8("x-compress-threads", MigrationState,
                      parameters.compress_threads,
                      DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT),
    DEFINE_PROP_BOOL("x-compress-wait-thread", MigrationState,
                     parameters.compress_wait_thread, true),
    DEFINE_PROP_UINT8("x-decompress-threads", MigrationState,
                      parameters.decompress_threads,
                      DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT),
    DEFINE_PROP_UINT8("x-throttle-trigger-threshold", MigrationState,
                      parameters.throttle_trigger_threshold,
                      DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD),
    DEFINE_PROP_UINT8("x-cpu-throttle-initial", MigrationState,
                      parameters.cpu_throttle_initial,
                      DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL),
    DEFINE_PROP_UINT8("x-cpu-throttle-increment", MigrationState,
                      parameters.cpu_throttle_increment,
                      DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT),
DEFINE_PROP_BOOL("x-cpu-throttle-tailslow", MigrationState, 4213 parameters.cpu_throttle_tailslow, false), 4214 DEFINE_PROP_SIZE("x-max-bandwidth", MigrationState, 4215 parameters.max_bandwidth, MAX_THROTTLE), 4216 DEFINE_PROP_UINT64("x-downtime-limit", MigrationState, 4217 parameters.downtime_limit, 4218 DEFAULT_MIGRATE_SET_DOWNTIME), 4219 DEFINE_PROP_UINT32("x-checkpoint-delay", MigrationState, 4220 parameters.x_checkpoint_delay, 4221 DEFAULT_MIGRATE_X_CHECKPOINT_DELAY), 4222 DEFINE_PROP_UINT8("multifd-channels", MigrationState, 4223 parameters.multifd_channels, 4224 DEFAULT_MIGRATE_MULTIFD_CHANNELS), 4225 DEFINE_PROP_MULTIFD_COMPRESSION("multifd-compression", MigrationState, 4226 parameters.multifd_compression, 4227 DEFAULT_MIGRATE_MULTIFD_COMPRESSION), 4228 DEFINE_PROP_UINT8("multifd-zlib-level", MigrationState, 4229 parameters.multifd_zlib_level, 4230 DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL), 4231 DEFINE_PROP_UINT8("multifd-zstd-level", MigrationState, 4232 parameters.multifd_zstd_level, 4233 DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL), 4234 #ifdef CONFIG_LINUX 4235 DEFINE_PROP_BOOL("zero_copy_send", MigrationState, 4236 parameters.zero_copy_send, false), 4237 #endif 4238 DEFINE_PROP_SIZE("xbzrle-cache-size", MigrationState, 4239 parameters.xbzrle_cache_size, 4240 DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE), 4241 DEFINE_PROP_SIZE("max-postcopy-bandwidth", MigrationState, 4242 parameters.max_postcopy_bandwidth, 4243 DEFAULT_MIGRATE_MAX_POSTCOPY_BANDWIDTH), 4244 DEFINE_PROP_UINT8("max-cpu-throttle", MigrationState, 4245 parameters.max_cpu_throttle, 4246 DEFAULT_MIGRATE_MAX_CPU_THROTTLE), 4247 DEFINE_PROP_SIZE("announce-initial", MigrationState, 4248 parameters.announce_initial, 4249 DEFAULT_MIGRATE_ANNOUNCE_INITIAL), 4250 DEFINE_PROP_SIZE("announce-max", MigrationState, 4251 parameters.announce_max, 4252 DEFAULT_MIGRATE_ANNOUNCE_MAX), 4253 DEFINE_PROP_SIZE("announce-rounds", MigrationState, 4254 parameters.announce_rounds, 4255 DEFAULT_MIGRATE_ANNOUNCE_ROUNDS), 4256 DEFINE_PROP_SIZE("announce-step", MigrationState, 4257 parameters.announce_step, 4258 DEFAULT_MIGRATE_ANNOUNCE_STEP), 4259 4260 /* Migration capabilities */ 4261 DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE), 4262 DEFINE_PROP_MIG_CAP("x-rdma-pin-all", MIGRATION_CAPABILITY_RDMA_PIN_ALL), 4263 DEFINE_PROP_MIG_CAP("x-auto-converge", MIGRATION_CAPABILITY_AUTO_CONVERGE), 4264 DEFINE_PROP_MIG_CAP("x-zero-blocks", MIGRATION_CAPABILITY_ZERO_BLOCKS), 4265 DEFINE_PROP_MIG_CAP("x-compress", MIGRATION_CAPABILITY_COMPRESS), 4266 DEFINE_PROP_MIG_CAP("x-events", MIGRATION_CAPABILITY_EVENTS), 4267 DEFINE_PROP_MIG_CAP("x-postcopy-ram", MIGRATION_CAPABILITY_POSTCOPY_RAM), 4268 DEFINE_PROP_MIG_CAP("x-colo", MIGRATION_CAPABILITY_X_COLO), 4269 DEFINE_PROP_MIG_CAP("x-release-ram", MIGRATION_CAPABILITY_RELEASE_RAM), 4270 DEFINE_PROP_MIG_CAP("x-block", MIGRATION_CAPABILITY_BLOCK), 4271 DEFINE_PROP_MIG_CAP("x-return-path", MIGRATION_CAPABILITY_RETURN_PATH), 4272 DEFINE_PROP_MIG_CAP("x-multifd", MIGRATION_CAPABILITY_MULTIFD), 4273 DEFINE_PROP_MIG_CAP("x-background-snapshot", 4274 MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT), 4275 4276 DEFINE_PROP_END_OF_LIST(), 4277 }; 4278 4279 static void migration_class_init(ObjectClass *klass, void *data) 4280 { 4281 DeviceClass *dc = DEVICE_CLASS(klass); 4282 4283 dc->user_creatable = false; 4284 device_class_set_props(dc, migration_properties); 4285 } 4286 4287 static void migration_instance_finalize(Object *obj) 4288 { 4289 MigrationState *ms = MIGRATION_OBJ(obj); 4290 MigrationParameters *params = &ms->parameters; 4291 
static void migration_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->user_creatable = false;
    device_class_set_props(dc, migration_properties);
}

static void migration_instance_finalize(Object *obj)
{
    MigrationState *ms = MIGRATION_OBJ(obj);
    MigrationParameters *params = &ms->parameters;

    qemu_mutex_destroy(&ms->error_mutex);
    qemu_mutex_destroy(&ms->qemu_file_lock);
    g_free(params->tls_hostname);
    g_free(params->tls_creds);
    qemu_sem_destroy(&ms->wait_unplug_sem);
    qemu_sem_destroy(&ms->rate_limit_sem);
    qemu_sem_destroy(&ms->pause_sem);
    qemu_sem_destroy(&ms->postcopy_pause_sem);
    qemu_sem_destroy(&ms->postcopy_pause_rp_sem);
    qemu_sem_destroy(&ms->rp_state.rp_sem);
    error_free(ms->error);
}

static void migration_instance_init(Object *obj)
{
    MigrationState *ms = MIGRATION_OBJ(obj);
    MigrationParameters *params = &ms->parameters;

    ms->state = MIGRATION_STATUS_NONE;
    ms->mbps = -1;
    ms->pages_per_second = -1;
    qemu_sem_init(&ms->pause_sem, 0);
    qemu_mutex_init(&ms->error_mutex);

    params->tls_hostname = g_strdup("");
    params->tls_creds = g_strdup("");

    /* Set has_* up only for parameter checks */
    params->has_compress_level = true;
    params->has_compress_threads = true;
    params->has_decompress_threads = true;
    params->has_throttle_trigger_threshold = true;
    params->has_cpu_throttle_initial = true;
    params->has_cpu_throttle_increment = true;
    params->has_cpu_throttle_tailslow = true;
    params->has_max_bandwidth = true;
    params->has_downtime_limit = true;
    params->has_x_checkpoint_delay = true;
    params->has_block_incremental = true;
    params->has_multifd_channels = true;
    params->has_multifd_compression = true;
    params->has_multifd_zlib_level = true;
    params->has_multifd_zstd_level = true;
#ifdef CONFIG_LINUX
    params->has_zero_copy_send = true;
#endif
    params->has_xbzrle_cache_size = true;
    params->has_max_postcopy_bandwidth = true;
    params->has_max_cpu_throttle = true;
    params->has_announce_initial = true;
    params->has_announce_max = true;
    params->has_announce_rounds = true;
    params->has_announce_step = true;

    qemu_sem_init(&ms->postcopy_pause_sem, 0);
    qemu_sem_init(&ms->postcopy_pause_rp_sem, 0);
    qemu_sem_init(&ms->rp_state.rp_sem, 0);
    qemu_sem_init(&ms->rate_limit_sem, 0);
    qemu_sem_init(&ms->wait_unplug_sem, 0);
    qemu_mutex_init(&ms->qemu_file_lock);
}

/*
 * Return true if the check passes, false otherwise. An error will be put
 * in errp if provided.
 */
static bool migration_object_check(MigrationState *ms, Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    /* Assuming all off */
    bool cap_list[MIGRATION_CAPABILITY__MAX] = { 0 }, ret;
    int i;

    if (!migrate_params_check(&ms->parameters, errp)) {
        return false;
    }

    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (ms->enabled_capabilities[i]) {
            QAPI_LIST_PREPEND(head, migrate_cap_add(i, true));
        }
    }

    ret = migrate_caps_check(cap_list, head, errp);

    /* It works with head == NULL */
    qapi_free_MigrationCapabilityStatusList(head);

    return ret;
}

static const TypeInfo migration_type = {
    .name = TYPE_MIGRATION,
    /*
     * NOTE: TYPE_MIGRATION is not really a device, as the object is
     * not created using qdev_new(), it is not attached to the qdev
     * device tree, and it is never realized.
     *
     * TODO: Make this TYPE_OBJECT once QOM provides something like
     * TYPE_DEVICE's "-global" properties.
     */
    .parent = TYPE_DEVICE,
    .class_init = migration_class_init,
    .class_size = sizeof(MigrationClass),
    .instance_size = sizeof(MigrationState),
    .instance_init = migration_instance_init,
    .instance_finalize = migration_instance_finalize,
};

static void register_migration_types(void)
{
    type_register_static(&migration_type);
}

type_init(register_migration_types);