1 /* 2 * QEMU live migration 3 * 4 * Copyright IBM, Corp. 2008 5 * 6 * Authors: 7 * Anthony Liguori <aliguori@us.ibm.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 * Contributions after 2012-01-13 are licensed under the terms of the 13 * GNU GPL, version 2 or (at your option) any later version. 14 */ 15 16 #include "qemu/osdep.h" 17 #include "qemu/cutils.h" 18 #include "qemu/error-report.h" 19 #include "qemu/main-loop.h" 20 #include "migration/migration.h" 21 #include "migration/qemu-file.h" 22 #include "sysemu/sysemu.h" 23 #include "block/block.h" 24 #include "qapi/qmp/qerror.h" 25 #include "qapi/util.h" 26 #include "qemu/sockets.h" 27 #include "qemu/rcu.h" 28 #include "migration/block.h" 29 #include "postcopy-ram.h" 30 #include "qemu/thread.h" 31 #include "qmp-commands.h" 32 #include "trace.h" 33 #include "qapi-event.h" 34 #include "qom/cpu.h" 35 #include "exec/memory.h" 36 #include "exec/address-spaces.h" 37 #include "io/channel-buffer.h" 38 #include "io/channel-tls.h" 39 #include "migration/colo.h" 40 41 #define MAX_THROTTLE (32 << 20) /* Migration transfer speed throttling */ 42 43 /* Amount of time to allocate to each "chunk" of bandwidth-throttled 44 * data. */ 45 #define BUFFER_DELAY 100 46 #define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY) 47 48 /* Time in milliseconds we are allowed to stop the source, 49 * for sending the last part */ 50 #define DEFAULT_MIGRATE_SET_DOWNTIME 300 51 52 /* Maximum migrate downtime set to 2000 seconds */ 53 #define MAX_MIGRATE_DOWNTIME_SECONDS 2000 54 #define MAX_MIGRATE_DOWNTIME (MAX_MIGRATE_DOWNTIME_SECONDS * 1000) 55 56 /* Default compression thread count */ 57 #define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8 58 /* Default decompression thread count, usually decompression is at 59 * least 4 times as fast as compression.*/ 60 #define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2 61 /*0: means nocompress, 1: best speed, ... 9: best compress ratio */ 62 #define DEFAULT_MIGRATE_COMPRESS_LEVEL 1 63 /* Define default autoconverge cpu throttle migration parameters */ 64 #define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20 65 #define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10 66 67 /* Migration XBZRLE default cache size */ 68 #define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024) 69 70 /* The delay time (in ms) between two COLO checkpoints 71 * Note: Please change this default value to 10000 when we support hybrid mode. 72 */ 73 #define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY 200 74 75 static NotifierList migration_state_notifiers = 76 NOTIFIER_LIST_INITIALIZER(migration_state_notifiers); 77 78 static bool deferred_incoming; 79 80 /* 81 * Current state of incoming postcopy; note this is not part of 82 * MigrationIncomingState since it's state is used during cleanup 83 * at the end as MIS is being freed. 84 */ 85 static PostcopyState incoming_postcopy_state; 86 87 /* When we add fault tolerance, we could have several 88 migrations at once. For now we don't need to add 89 dynamic creation of migration */ 90 91 /* For outgoing */ 92 MigrationState *migrate_get_current(void) 93 { 94 static bool once; 95 static MigrationState current_migration = { 96 .state = MIGRATION_STATUS_NONE, 97 .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE, 98 .mbps = -1, 99 .parameters = { 100 .compress_level = DEFAULT_MIGRATE_COMPRESS_LEVEL, 101 .compress_threads = DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT, 102 .decompress_threads = DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT, 103 .cpu_throttle_initial = DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL, 104 .cpu_throttle_increment = DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT, 105 .max_bandwidth = MAX_THROTTLE, 106 .downtime_limit = DEFAULT_MIGRATE_SET_DOWNTIME, 107 .x_checkpoint_delay = DEFAULT_MIGRATE_X_CHECKPOINT_DELAY, 108 }, 109 }; 110 111 if (!once) { 112 current_migration.parameters.tls_creds = g_strdup(""); 113 current_migration.parameters.tls_hostname = g_strdup(""); 114 once = true; 115 } 116 return ¤t_migration; 117 } 118 119 MigrationIncomingState *migration_incoming_get_current(void) 120 { 121 static bool once; 122 static MigrationIncomingState mis_current; 123 124 if (!once) { 125 mis_current.state = MIGRATION_STATUS_NONE; 126 memset(&mis_current, 0, sizeof(MigrationIncomingState)); 127 QLIST_INIT(&mis_current.loadvm_handlers); 128 qemu_mutex_init(&mis_current.rp_mutex); 129 qemu_event_init(&mis_current.main_thread_load_event, false); 130 once = true; 131 } 132 return &mis_current; 133 } 134 135 void migration_incoming_state_destroy(void) 136 { 137 struct MigrationIncomingState *mis = migration_incoming_get_current(); 138 139 qemu_event_destroy(&mis->main_thread_load_event); 140 loadvm_free_handlers(mis); 141 } 142 143 144 typedef struct { 145 bool optional; 146 uint32_t size; 147 uint8_t runstate[100]; 148 RunState state; 149 bool received; 150 } GlobalState; 151 152 static GlobalState global_state; 153 154 int global_state_store(void) 155 { 156 if (!runstate_store((char *)global_state.runstate, 157 sizeof(global_state.runstate))) { 158 error_report("runstate name too big: %s", global_state.runstate); 159 trace_migrate_state_too_big(); 160 return -EINVAL; 161 } 162 return 0; 163 } 164 165 void global_state_store_running(void) 166 { 167 const char *state = RunState_lookup[RUN_STATE_RUNNING]; 168 strncpy((char *)global_state.runstate, 169 state, sizeof(global_state.runstate)); 170 } 171 172 static bool global_state_received(void) 173 { 174 return global_state.received; 175 } 176 177 static RunState global_state_get_runstate(void) 178 { 179 return global_state.state; 180 } 181 182 void global_state_set_optional(void) 183 { 184 global_state.optional = true; 185 } 186 187 static bool global_state_needed(void *opaque) 188 { 189 GlobalState *s = opaque; 190 char *runstate = (char *)s->runstate; 191 192 /* If it is not optional, it is mandatory */ 193 194 if (s->optional == false) { 195 return true; 196 } 197 198 /* If state is running or paused, it is not needed */ 199 200 if (strcmp(runstate, "running") == 0 || 201 strcmp(runstate, "paused") == 0) { 202 return false; 203 } 204 205 /* for any other state it is needed */ 206 return true; 207 } 208 209 static int global_state_post_load(void *opaque, int version_id) 210 { 211 GlobalState *s = opaque; 212 Error *local_err = NULL; 213 int r; 214 char *runstate = (char *)s->runstate; 215 216 s->received = true; 217 trace_migrate_global_state_post_load(runstate); 218 219 r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE__MAX, 220 -1, &local_err); 221 222 if (r == -1) { 223 if (local_err) { 224 error_report_err(local_err); 225 } 226 return -EINVAL; 227 } 228 s->state = r; 229 230 return 0; 231 } 232 233 static void global_state_pre_save(void *opaque) 234 { 235 GlobalState *s = opaque; 236 237 trace_migrate_global_state_pre_save((char *)s->runstate); 238 s->size = strlen((char *)s->runstate) + 1; 239 } 240 241 static const VMStateDescription vmstate_globalstate = { 242 .name = "globalstate", 243 .version_id = 1, 244 .minimum_version_id = 1, 245 .post_load = global_state_post_load, 246 .pre_save = global_state_pre_save, 247 .needed = global_state_needed, 248 .fields = (VMStateField[]) { 249 VMSTATE_UINT32(size, GlobalState), 250 VMSTATE_BUFFER(runstate, GlobalState), 251 VMSTATE_END_OF_LIST() 252 }, 253 }; 254 255 void register_global_state(void) 256 { 257 /* We would use it independently that we receive it */ 258 strcpy((char *)&global_state.runstate, ""); 259 global_state.received = false; 260 vmstate_register(NULL, 0, &vmstate_globalstate, &global_state); 261 } 262 263 static void migrate_generate_event(int new_state) 264 { 265 if (migrate_use_events()) { 266 qapi_event_send_migration(new_state, &error_abort); 267 } 268 } 269 270 /* 271 * Called on -incoming with a defer: uri. 272 * The migration can be started later after any parameters have been 273 * changed. 274 */ 275 static void deferred_incoming_migration(Error **errp) 276 { 277 if (deferred_incoming) { 278 error_setg(errp, "Incoming migration already deferred"); 279 } 280 deferred_incoming = true; 281 } 282 283 /* Request a range of pages from the source VM at the given 284 * start address. 285 * rbname: Name of the RAMBlock to request the page in, if NULL it's the same 286 * as the last request (a name must have been given previously) 287 * Start: Address offset within the RB 288 * Len: Length in bytes required - must be a multiple of pagesize 289 */ 290 void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname, 291 ram_addr_t start, size_t len) 292 { 293 uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */ 294 size_t msglen = 12; /* start + len */ 295 296 *(uint64_t *)bufc = cpu_to_be64((uint64_t)start); 297 *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len); 298 299 if (rbname) { 300 int rbname_len = strlen(rbname); 301 assert(rbname_len < 256); 302 303 bufc[msglen++] = rbname_len; 304 memcpy(bufc + msglen, rbname, rbname_len); 305 msglen += rbname_len; 306 migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc); 307 } else { 308 migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc); 309 } 310 } 311 312 void qemu_start_incoming_migration(const char *uri, Error **errp) 313 { 314 const char *p; 315 316 qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort); 317 if (!strcmp(uri, "defer")) { 318 deferred_incoming_migration(errp); 319 } else if (strstart(uri, "tcp:", &p)) { 320 tcp_start_incoming_migration(p, errp); 321 #ifdef CONFIG_RDMA 322 } else if (strstart(uri, "rdma:", &p)) { 323 rdma_start_incoming_migration(p, errp); 324 #endif 325 } else if (strstart(uri, "exec:", &p)) { 326 exec_start_incoming_migration(p, errp); 327 } else if (strstart(uri, "unix:", &p)) { 328 unix_start_incoming_migration(p, errp); 329 } else if (strstart(uri, "fd:", &p)) { 330 fd_start_incoming_migration(p, errp); 331 } else { 332 error_setg(errp, "unknown migration protocol: %s", uri); 333 } 334 } 335 336 static void process_incoming_migration_bh(void *opaque) 337 { 338 Error *local_err = NULL; 339 MigrationIncomingState *mis = opaque; 340 341 /* Make sure all file formats flush their mutable metadata. 342 * If we get an error here, just don't restart the VM yet. */ 343 bdrv_invalidate_cache_all(&local_err); 344 if (local_err) { 345 error_report_err(local_err); 346 local_err = NULL; 347 autostart = false; 348 } 349 350 /* 351 * This must happen after all error conditions are dealt with and 352 * we're sure the VM is going to be running on this host. 353 */ 354 qemu_announce_self(); 355 356 /* If global state section was not received or we are in running 357 state, we need to obey autostart. Any other state is set with 358 runstate_set. */ 359 360 if (!global_state_received() || 361 global_state_get_runstate() == RUN_STATE_RUNNING) { 362 if (autostart) { 363 vm_start(); 364 } else { 365 runstate_set(RUN_STATE_PAUSED); 366 } 367 } else { 368 runstate_set(global_state_get_runstate()); 369 } 370 migrate_decompress_threads_join(); 371 /* 372 * This must happen after any state changes since as soon as an external 373 * observer sees this event they might start to prod at the VM assuming 374 * it's ready to use. 375 */ 376 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, 377 MIGRATION_STATUS_COMPLETED); 378 qemu_bh_delete(mis->bh); 379 migration_incoming_state_destroy(); 380 } 381 382 static void process_incoming_migration_co(void *opaque) 383 { 384 QEMUFile *f = opaque; 385 MigrationIncomingState *mis = migration_incoming_get_current(); 386 PostcopyState ps; 387 int ret; 388 389 mis->from_src_file = f; 390 mis->largest_page_size = qemu_ram_pagesize_largest(); 391 postcopy_state_set(POSTCOPY_INCOMING_NONE); 392 migrate_set_state(&mis->state, MIGRATION_STATUS_NONE, 393 MIGRATION_STATUS_ACTIVE); 394 ret = qemu_loadvm_state(f); 395 396 ps = postcopy_state_get(); 397 trace_process_incoming_migration_co_end(ret, ps); 398 if (ps != POSTCOPY_INCOMING_NONE) { 399 if (ps == POSTCOPY_INCOMING_ADVISE) { 400 /* 401 * Where a migration had postcopy enabled (and thus went to advise) 402 * but managed to complete within the precopy period, we can use 403 * the normal exit. 404 */ 405 postcopy_ram_incoming_cleanup(mis); 406 } else if (ret >= 0) { 407 /* 408 * Postcopy was started, cleanup should happen at the end of the 409 * postcopy thread. 410 */ 411 trace_process_incoming_migration_co_postcopy_end_main(); 412 return; 413 } 414 /* Else if something went wrong then just fall out of the normal exit */ 415 } 416 417 /* we get COLO info, and know if we are in COLO mode */ 418 if (!ret && migration_incoming_enable_colo()) { 419 mis->migration_incoming_co = qemu_coroutine_self(); 420 qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming", 421 colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE); 422 mis->have_colo_incoming_thread = true; 423 qemu_coroutine_yield(); 424 425 /* Wait checkpoint incoming thread exit before free resource */ 426 qemu_thread_join(&mis->colo_incoming_thread); 427 } 428 429 if (ret < 0) { 430 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, 431 MIGRATION_STATUS_FAILED); 432 error_report("load of migration failed: %s", strerror(-ret)); 433 migrate_decompress_threads_join(); 434 exit(EXIT_FAILURE); 435 } 436 437 qemu_fclose(f); 438 free_xbzrle_decoded_buf(); 439 440 mis->bh = qemu_bh_new(process_incoming_migration_bh, mis); 441 qemu_bh_schedule(mis->bh); 442 } 443 444 void migration_fd_process_incoming(QEMUFile *f) 445 { 446 Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, f); 447 448 migrate_decompress_threads_create(); 449 qemu_file_set_blocking(f, false); 450 qemu_coroutine_enter(co); 451 } 452 453 454 void migration_channel_process_incoming(MigrationState *s, 455 QIOChannel *ioc) 456 { 457 trace_migration_set_incoming_channel( 458 ioc, object_get_typename(OBJECT(ioc))); 459 460 if (s->parameters.tls_creds && 461 *s->parameters.tls_creds && 462 !object_dynamic_cast(OBJECT(ioc), 463 TYPE_QIO_CHANNEL_TLS)) { 464 Error *local_err = NULL; 465 migration_tls_channel_process_incoming(s, ioc, &local_err); 466 if (local_err) { 467 error_report_err(local_err); 468 } 469 } else { 470 QEMUFile *f = qemu_fopen_channel_input(ioc); 471 migration_fd_process_incoming(f); 472 } 473 } 474 475 476 void migration_channel_connect(MigrationState *s, 477 QIOChannel *ioc, 478 const char *hostname) 479 { 480 trace_migration_set_outgoing_channel( 481 ioc, object_get_typename(OBJECT(ioc)), hostname); 482 483 if (s->parameters.tls_creds && 484 *s->parameters.tls_creds && 485 !object_dynamic_cast(OBJECT(ioc), 486 TYPE_QIO_CHANNEL_TLS)) { 487 Error *local_err = NULL; 488 migration_tls_channel_connect(s, ioc, hostname, &local_err); 489 if (local_err) { 490 migrate_fd_error(s, local_err); 491 error_free(local_err); 492 } 493 } else { 494 QEMUFile *f = qemu_fopen_channel_output(ioc); 495 496 s->to_dst_file = f; 497 498 migrate_fd_connect(s); 499 } 500 } 501 502 503 /* 504 * Send a message on the return channel back to the source 505 * of the migration. 506 */ 507 void migrate_send_rp_message(MigrationIncomingState *mis, 508 enum mig_rp_message_type message_type, 509 uint16_t len, void *data) 510 { 511 trace_migrate_send_rp_message((int)message_type, len); 512 qemu_mutex_lock(&mis->rp_mutex); 513 qemu_put_be16(mis->to_src_file, (unsigned int)message_type); 514 qemu_put_be16(mis->to_src_file, len); 515 qemu_put_buffer(mis->to_src_file, data, len); 516 qemu_fflush(mis->to_src_file); 517 qemu_mutex_unlock(&mis->rp_mutex); 518 } 519 520 /* 521 * Send a 'SHUT' message on the return channel with the given value 522 * to indicate that we've finished with the RP. Non-0 value indicates 523 * error. 524 */ 525 void migrate_send_rp_shut(MigrationIncomingState *mis, 526 uint32_t value) 527 { 528 uint32_t buf; 529 530 buf = cpu_to_be32(value); 531 migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf); 532 } 533 534 /* 535 * Send a 'PONG' message on the return channel with the given value 536 * (normally in response to a 'PING') 537 */ 538 void migrate_send_rp_pong(MigrationIncomingState *mis, 539 uint32_t value) 540 { 541 uint32_t buf; 542 543 buf = cpu_to_be32(value); 544 migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf); 545 } 546 547 MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp) 548 { 549 MigrationCapabilityStatusList *head = NULL; 550 MigrationCapabilityStatusList *caps; 551 MigrationState *s = migrate_get_current(); 552 int i; 553 554 caps = NULL; /* silence compiler warning */ 555 for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) { 556 if (i == MIGRATION_CAPABILITY_X_COLO && !colo_supported()) { 557 continue; 558 } 559 if (head == NULL) { 560 head = g_malloc0(sizeof(*caps)); 561 caps = head; 562 } else { 563 caps->next = g_malloc0(sizeof(*caps)); 564 caps = caps->next; 565 } 566 caps->value = 567 g_malloc(sizeof(*caps->value)); 568 caps->value->capability = i; 569 caps->value->state = s->enabled_capabilities[i]; 570 } 571 572 return head; 573 } 574 575 MigrationParameters *qmp_query_migrate_parameters(Error **errp) 576 { 577 MigrationParameters *params; 578 MigrationState *s = migrate_get_current(); 579 580 params = g_malloc0(sizeof(*params)); 581 params->has_compress_level = true; 582 params->compress_level = s->parameters.compress_level; 583 params->has_compress_threads = true; 584 params->compress_threads = s->parameters.compress_threads; 585 params->has_decompress_threads = true; 586 params->decompress_threads = s->parameters.decompress_threads; 587 params->has_cpu_throttle_initial = true; 588 params->cpu_throttle_initial = s->parameters.cpu_throttle_initial; 589 params->has_cpu_throttle_increment = true; 590 params->cpu_throttle_increment = s->parameters.cpu_throttle_increment; 591 params->has_tls_creds = !!s->parameters.tls_creds; 592 params->tls_creds = g_strdup(s->parameters.tls_creds); 593 params->has_tls_hostname = !!s->parameters.tls_hostname; 594 params->tls_hostname = g_strdup(s->parameters.tls_hostname); 595 params->has_max_bandwidth = true; 596 params->max_bandwidth = s->parameters.max_bandwidth; 597 params->has_downtime_limit = true; 598 params->downtime_limit = s->parameters.downtime_limit; 599 params->has_x_checkpoint_delay = true; 600 params->x_checkpoint_delay = s->parameters.x_checkpoint_delay; 601 602 return params; 603 } 604 605 /* 606 * Return true if we're already in the middle of a migration 607 * (i.e. any of the active or setup states) 608 */ 609 static bool migration_is_setup_or_active(int state) 610 { 611 switch (state) { 612 case MIGRATION_STATUS_ACTIVE: 613 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 614 case MIGRATION_STATUS_SETUP: 615 return true; 616 617 default: 618 return false; 619 620 } 621 } 622 623 static void get_xbzrle_cache_stats(MigrationInfo *info) 624 { 625 if (migrate_use_xbzrle()) { 626 info->has_xbzrle_cache = true; 627 info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache)); 628 info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size(); 629 info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred(); 630 info->xbzrle_cache->pages = xbzrle_mig_pages_transferred(); 631 info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss(); 632 info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate(); 633 info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow(); 634 } 635 } 636 637 static void populate_ram_info(MigrationInfo *info, MigrationState *s) 638 { 639 info->has_ram = true; 640 info->ram = g_malloc0(sizeof(*info->ram)); 641 info->ram->transferred = ram_bytes_transferred(); 642 info->ram->total = ram_bytes_total(); 643 info->ram->duplicate = dup_mig_pages_transferred(); 644 /* legacy value. It is not used anymore */ 645 info->ram->skipped = 0; 646 info->ram->normal = norm_mig_pages_transferred(); 647 info->ram->normal_bytes = norm_mig_pages_transferred() * 648 qemu_target_page_size(); 649 info->ram->mbps = s->mbps; 650 info->ram->dirty_sync_count = ram_dirty_sync_count(); 651 info->ram->postcopy_requests = ram_postcopy_requests(); 652 info->ram->page_size = qemu_target_page_size(); 653 654 if (s->state != MIGRATION_STATUS_COMPLETED) { 655 info->ram->remaining = ram_bytes_remaining(); 656 info->ram->dirty_pages_rate = ram_dirty_pages_rate(); 657 } 658 } 659 660 MigrationInfo *qmp_query_migrate(Error **errp) 661 { 662 MigrationInfo *info = g_malloc0(sizeof(*info)); 663 MigrationState *s = migrate_get_current(); 664 665 switch (s->state) { 666 case MIGRATION_STATUS_NONE: 667 /* no migration has happened ever */ 668 break; 669 case MIGRATION_STATUS_SETUP: 670 info->has_status = true; 671 info->has_total_time = false; 672 break; 673 case MIGRATION_STATUS_ACTIVE: 674 case MIGRATION_STATUS_CANCELLING: 675 info->has_status = true; 676 info->has_total_time = true; 677 info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) 678 - s->total_time; 679 info->has_expected_downtime = true; 680 info->expected_downtime = s->expected_downtime; 681 info->has_setup_time = true; 682 info->setup_time = s->setup_time; 683 684 populate_ram_info(info, s); 685 686 if (blk_mig_active()) { 687 info->has_disk = true; 688 info->disk = g_malloc0(sizeof(*info->disk)); 689 info->disk->transferred = blk_mig_bytes_transferred(); 690 info->disk->remaining = blk_mig_bytes_remaining(); 691 info->disk->total = blk_mig_bytes_total(); 692 } 693 694 if (cpu_throttle_active()) { 695 info->has_cpu_throttle_percentage = true; 696 info->cpu_throttle_percentage = cpu_throttle_get_percentage(); 697 } 698 699 get_xbzrle_cache_stats(info); 700 break; 701 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 702 /* Mostly the same as active; TODO add some postcopy stats */ 703 info->has_status = true; 704 info->has_total_time = true; 705 info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) 706 - s->total_time; 707 info->has_expected_downtime = true; 708 info->expected_downtime = s->expected_downtime; 709 info->has_setup_time = true; 710 info->setup_time = s->setup_time; 711 712 populate_ram_info(info, s); 713 714 if (blk_mig_active()) { 715 info->has_disk = true; 716 info->disk = g_malloc0(sizeof(*info->disk)); 717 info->disk->transferred = blk_mig_bytes_transferred(); 718 info->disk->remaining = blk_mig_bytes_remaining(); 719 info->disk->total = blk_mig_bytes_total(); 720 } 721 722 get_xbzrle_cache_stats(info); 723 break; 724 case MIGRATION_STATUS_COLO: 725 info->has_status = true; 726 /* TODO: display COLO specific information (checkpoint info etc.) */ 727 break; 728 case MIGRATION_STATUS_COMPLETED: 729 get_xbzrle_cache_stats(info); 730 731 info->has_status = true; 732 info->has_total_time = true; 733 info->total_time = s->total_time; 734 info->has_downtime = true; 735 info->downtime = s->downtime; 736 info->has_setup_time = true; 737 info->setup_time = s->setup_time; 738 739 populate_ram_info(info, s); 740 break; 741 case MIGRATION_STATUS_FAILED: 742 info->has_status = true; 743 if (s->error) { 744 info->has_error_desc = true; 745 info->error_desc = g_strdup(error_get_pretty(s->error)); 746 } 747 break; 748 case MIGRATION_STATUS_CANCELLED: 749 info->has_status = true; 750 break; 751 } 752 info->status = s->state; 753 754 return info; 755 } 756 757 void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params, 758 Error **errp) 759 { 760 MigrationState *s = migrate_get_current(); 761 MigrationCapabilityStatusList *cap; 762 bool old_postcopy_cap = migrate_postcopy_ram(); 763 764 if (migration_is_setup_or_active(s->state)) { 765 error_setg(errp, QERR_MIGRATION_ACTIVE); 766 return; 767 } 768 769 for (cap = params; cap; cap = cap->next) { 770 if (cap->value->capability == MIGRATION_CAPABILITY_X_COLO) { 771 if (!colo_supported()) { 772 error_setg(errp, "COLO is not currently supported, please" 773 " configure with --enable-colo option in order to" 774 " support COLO feature"); 775 continue; 776 } 777 } 778 s->enabled_capabilities[cap->value->capability] = cap->value->state; 779 } 780 781 if (migrate_postcopy_ram()) { 782 if (migrate_use_compression()) { 783 /* The decompression threads asynchronously write into RAM 784 * rather than use the atomic copies needed to avoid 785 * userfaulting. It should be possible to fix the decompression 786 * threads for compatibility in future. 787 */ 788 error_report("Postcopy is not currently compatible with " 789 "compression"); 790 s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] = 791 false; 792 } 793 /* This check is reasonably expensive, so only when it's being 794 * set the first time, also it's only the destination that needs 795 * special support. 796 */ 797 if (!old_postcopy_cap && runstate_check(RUN_STATE_INMIGRATE) && 798 !postcopy_ram_supported_by_host()) { 799 /* postcopy_ram_supported_by_host will have emitted a more 800 * detailed message 801 */ 802 error_report("Postcopy is not supported"); 803 s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] = 804 false; 805 } 806 } 807 } 808 809 void qmp_migrate_set_parameters(MigrationParameters *params, Error **errp) 810 { 811 MigrationState *s = migrate_get_current(); 812 813 if (params->has_compress_level && 814 (params->compress_level < 0 || params->compress_level > 9)) { 815 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level", 816 "is invalid, it should be in the range of 0 to 9"); 817 return; 818 } 819 if (params->has_compress_threads && 820 (params->compress_threads < 1 || params->compress_threads > 255)) { 821 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, 822 "compress_threads", 823 "is invalid, it should be in the range of 1 to 255"); 824 return; 825 } 826 if (params->has_decompress_threads && 827 (params->decompress_threads < 1 || params->decompress_threads > 255)) { 828 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, 829 "decompress_threads", 830 "is invalid, it should be in the range of 1 to 255"); 831 return; 832 } 833 if (params->has_cpu_throttle_initial && 834 (params->cpu_throttle_initial < 1 || 835 params->cpu_throttle_initial > 99)) { 836 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, 837 "cpu_throttle_initial", 838 "an integer in the range of 1 to 99"); 839 return; 840 } 841 if (params->has_cpu_throttle_increment && 842 (params->cpu_throttle_increment < 1 || 843 params->cpu_throttle_increment > 99)) { 844 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, 845 "cpu_throttle_increment", 846 "an integer in the range of 1 to 99"); 847 return; 848 } 849 if (params->has_max_bandwidth && 850 (params->max_bandwidth < 0 || params->max_bandwidth > SIZE_MAX)) { 851 error_setg(errp, "Parameter 'max_bandwidth' expects an integer in the" 852 " range of 0 to %zu bytes/second", SIZE_MAX); 853 return; 854 } 855 if (params->has_downtime_limit && 856 (params->downtime_limit < 0 || 857 params->downtime_limit > MAX_MIGRATE_DOWNTIME)) { 858 error_setg(errp, "Parameter 'downtime_limit' expects an integer in " 859 "the range of 0 to %d milliseconds", 860 MAX_MIGRATE_DOWNTIME); 861 return; 862 } 863 if (params->has_x_checkpoint_delay && (params->x_checkpoint_delay < 0)) { 864 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, 865 "x_checkpoint_delay", 866 "is invalid, it should be positive"); 867 } 868 869 if (params->has_compress_level) { 870 s->parameters.compress_level = params->compress_level; 871 } 872 if (params->has_compress_threads) { 873 s->parameters.compress_threads = params->compress_threads; 874 } 875 if (params->has_decompress_threads) { 876 s->parameters.decompress_threads = params->decompress_threads; 877 } 878 if (params->has_cpu_throttle_initial) { 879 s->parameters.cpu_throttle_initial = params->cpu_throttle_initial; 880 } 881 if (params->has_cpu_throttle_increment) { 882 s->parameters.cpu_throttle_increment = params->cpu_throttle_increment; 883 } 884 if (params->has_tls_creds) { 885 g_free(s->parameters.tls_creds); 886 s->parameters.tls_creds = g_strdup(params->tls_creds); 887 } 888 if (params->has_tls_hostname) { 889 g_free(s->parameters.tls_hostname); 890 s->parameters.tls_hostname = g_strdup(params->tls_hostname); 891 } 892 if (params->has_max_bandwidth) { 893 s->parameters.max_bandwidth = params->max_bandwidth; 894 if (s->to_dst_file) { 895 qemu_file_set_rate_limit(s->to_dst_file, 896 s->parameters.max_bandwidth / XFER_LIMIT_RATIO); 897 } 898 } 899 if (params->has_downtime_limit) { 900 s->parameters.downtime_limit = params->downtime_limit; 901 } 902 903 if (params->has_x_checkpoint_delay) { 904 s->parameters.x_checkpoint_delay = params->x_checkpoint_delay; 905 if (migration_in_colo_state()) { 906 colo_checkpoint_notify(s); 907 } 908 } 909 } 910 911 912 void qmp_migrate_start_postcopy(Error **errp) 913 { 914 MigrationState *s = migrate_get_current(); 915 916 if (!migrate_postcopy_ram()) { 917 error_setg(errp, "Enable postcopy with migrate_set_capability before" 918 " the start of migration"); 919 return; 920 } 921 922 if (s->state == MIGRATION_STATUS_NONE) { 923 error_setg(errp, "Postcopy must be started after migration has been" 924 " started"); 925 return; 926 } 927 /* 928 * we don't error if migration has finished since that would be racy 929 * with issuing this command. 930 */ 931 atomic_set(&s->start_postcopy, true); 932 } 933 934 /* shared migration helpers */ 935 936 void migrate_set_state(int *state, int old_state, int new_state) 937 { 938 if (atomic_cmpxchg(state, old_state, new_state) == old_state) { 939 trace_migrate_set_state(new_state); 940 migrate_generate_event(new_state); 941 } 942 } 943 944 static void migrate_fd_cleanup(void *opaque) 945 { 946 MigrationState *s = opaque; 947 948 qemu_bh_delete(s->cleanup_bh); 949 s->cleanup_bh = NULL; 950 951 migration_page_queue_free(); 952 953 if (s->to_dst_file) { 954 trace_migrate_fd_cleanup(); 955 qemu_mutex_unlock_iothread(); 956 if (s->migration_thread_running) { 957 qemu_thread_join(&s->thread); 958 s->migration_thread_running = false; 959 } 960 qemu_mutex_lock_iothread(); 961 962 migrate_compress_threads_join(); 963 qemu_fclose(s->to_dst_file); 964 s->to_dst_file = NULL; 965 } 966 967 assert((s->state != MIGRATION_STATUS_ACTIVE) && 968 (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE)); 969 970 if (s->state == MIGRATION_STATUS_CANCELLING) { 971 migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING, 972 MIGRATION_STATUS_CANCELLED); 973 } 974 975 notifier_list_notify(&migration_state_notifiers, s); 976 } 977 978 void migrate_fd_error(MigrationState *s, const Error *error) 979 { 980 trace_migrate_fd_error(error_get_pretty(error)); 981 assert(s->to_dst_file == NULL); 982 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, 983 MIGRATION_STATUS_FAILED); 984 if (!s->error) { 985 s->error = error_copy(error); 986 } 987 notifier_list_notify(&migration_state_notifiers, s); 988 } 989 990 static void migrate_fd_cancel(MigrationState *s) 991 { 992 int old_state ; 993 QEMUFile *f = migrate_get_current()->to_dst_file; 994 trace_migrate_fd_cancel(); 995 996 if (s->rp_state.from_dst_file) { 997 /* shutdown the rp socket, so causing the rp thread to shutdown */ 998 qemu_file_shutdown(s->rp_state.from_dst_file); 999 } 1000 1001 do { 1002 old_state = s->state; 1003 if (!migration_is_setup_or_active(old_state)) { 1004 break; 1005 } 1006 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING); 1007 } while (s->state != MIGRATION_STATUS_CANCELLING); 1008 1009 /* 1010 * If we're unlucky the migration code might be stuck somewhere in a 1011 * send/write while the network has failed and is waiting to timeout; 1012 * if we've got shutdown(2) available then we can force it to quit. 1013 * The outgoing qemu file gets closed in migrate_fd_cleanup that is 1014 * called in a bh, so there is no race against this cancel. 1015 */ 1016 if (s->state == MIGRATION_STATUS_CANCELLING && f) { 1017 qemu_file_shutdown(f); 1018 } 1019 if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) { 1020 Error *local_err = NULL; 1021 1022 bdrv_invalidate_cache_all(&local_err); 1023 if (local_err) { 1024 error_report_err(local_err); 1025 } else { 1026 s->block_inactive = false; 1027 } 1028 } 1029 } 1030 1031 void add_migration_state_change_notifier(Notifier *notify) 1032 { 1033 notifier_list_add(&migration_state_notifiers, notify); 1034 } 1035 1036 void remove_migration_state_change_notifier(Notifier *notify) 1037 { 1038 notifier_remove(notify); 1039 } 1040 1041 bool migration_in_setup(MigrationState *s) 1042 { 1043 return s->state == MIGRATION_STATUS_SETUP; 1044 } 1045 1046 bool migration_has_finished(MigrationState *s) 1047 { 1048 return s->state == MIGRATION_STATUS_COMPLETED; 1049 } 1050 1051 bool migration_has_failed(MigrationState *s) 1052 { 1053 return (s->state == MIGRATION_STATUS_CANCELLED || 1054 s->state == MIGRATION_STATUS_FAILED); 1055 } 1056 1057 bool migration_in_postcopy(void) 1058 { 1059 MigrationState *s = migrate_get_current(); 1060 1061 return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE); 1062 } 1063 1064 bool migration_in_postcopy_after_devices(MigrationState *s) 1065 { 1066 return migration_in_postcopy() && s->postcopy_after_devices; 1067 } 1068 1069 bool migration_is_idle(void) 1070 { 1071 MigrationState *s = migrate_get_current(); 1072 1073 switch (s->state) { 1074 case MIGRATION_STATUS_NONE: 1075 case MIGRATION_STATUS_CANCELLED: 1076 case MIGRATION_STATUS_COMPLETED: 1077 case MIGRATION_STATUS_FAILED: 1078 return true; 1079 case MIGRATION_STATUS_SETUP: 1080 case MIGRATION_STATUS_CANCELLING: 1081 case MIGRATION_STATUS_ACTIVE: 1082 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1083 case MIGRATION_STATUS_COLO: 1084 return false; 1085 case MIGRATION_STATUS__MAX: 1086 g_assert_not_reached(); 1087 } 1088 1089 return false; 1090 } 1091 1092 MigrationState *migrate_init(const MigrationParams *params) 1093 { 1094 MigrationState *s = migrate_get_current(); 1095 1096 /* 1097 * Reinitialise all migration state, except 1098 * parameters/capabilities that the user set, and 1099 * locks. 1100 */ 1101 s->bytes_xfer = 0; 1102 s->xfer_limit = 0; 1103 s->cleanup_bh = 0; 1104 s->to_dst_file = NULL; 1105 s->state = MIGRATION_STATUS_NONE; 1106 s->params = *params; 1107 s->rp_state.from_dst_file = NULL; 1108 s->rp_state.error = false; 1109 s->mbps = 0.0; 1110 s->downtime = 0; 1111 s->expected_downtime = 0; 1112 s->setup_time = 0; 1113 s->start_postcopy = false; 1114 s->postcopy_after_devices = false; 1115 s->migration_thread_running = false; 1116 error_free(s->error); 1117 s->error = NULL; 1118 1119 migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP); 1120 1121 s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 1122 return s; 1123 } 1124 1125 static GSList *migration_blockers; 1126 1127 int migrate_add_blocker(Error *reason, Error **errp) 1128 { 1129 if (only_migratable) { 1130 error_propagate(errp, error_copy(reason)); 1131 error_prepend(errp, "disallowing migration blocker " 1132 "(--only_migratable) for: "); 1133 return -EACCES; 1134 } 1135 1136 if (migration_is_idle()) { 1137 migration_blockers = g_slist_prepend(migration_blockers, reason); 1138 return 0; 1139 } 1140 1141 error_propagate(errp, error_copy(reason)); 1142 error_prepend(errp, "disallowing migration blocker (migration in " 1143 "progress) for: "); 1144 return -EBUSY; 1145 } 1146 1147 void migrate_del_blocker(Error *reason) 1148 { 1149 migration_blockers = g_slist_remove(migration_blockers, reason); 1150 } 1151 1152 int check_migratable(Object *obj, Error **err) 1153 { 1154 DeviceClass *dc = DEVICE_GET_CLASS(obj); 1155 if (only_migratable && dc->vmsd) { 1156 if (dc->vmsd->unmigratable) { 1157 error_setg(err, "Device %s is not migratable, but " 1158 "--only-migratable was specified", 1159 object_get_typename(obj)); 1160 return -1; 1161 } 1162 } 1163 1164 return 0; 1165 } 1166 1167 void qmp_migrate_incoming(const char *uri, Error **errp) 1168 { 1169 Error *local_err = NULL; 1170 static bool once = true; 1171 1172 if (!deferred_incoming) { 1173 error_setg(errp, "For use with '-incoming defer'"); 1174 return; 1175 } 1176 if (!once) { 1177 error_setg(errp, "The incoming migration has already been started"); 1178 } 1179 1180 qemu_start_incoming_migration(uri, &local_err); 1181 1182 if (local_err) { 1183 error_propagate(errp, local_err); 1184 return; 1185 } 1186 1187 once = false; 1188 } 1189 1190 bool migration_is_blocked(Error **errp) 1191 { 1192 if (qemu_savevm_state_blocked(errp)) { 1193 return true; 1194 } 1195 1196 if (migration_blockers) { 1197 *errp = error_copy(migration_blockers->data); 1198 return true; 1199 } 1200 1201 return false; 1202 } 1203 1204 void qmp_migrate(const char *uri, bool has_blk, bool blk, 1205 bool has_inc, bool inc, bool has_detach, bool detach, 1206 Error **errp) 1207 { 1208 Error *local_err = NULL; 1209 MigrationState *s = migrate_get_current(); 1210 MigrationParams params; 1211 const char *p; 1212 1213 params.blk = has_blk && blk; 1214 params.shared = has_inc && inc; 1215 1216 if (migration_is_setup_or_active(s->state) || 1217 s->state == MIGRATION_STATUS_CANCELLING || 1218 s->state == MIGRATION_STATUS_COLO) { 1219 error_setg(errp, QERR_MIGRATION_ACTIVE); 1220 return; 1221 } 1222 if (runstate_check(RUN_STATE_INMIGRATE)) { 1223 error_setg(errp, "Guest is waiting for an incoming migration"); 1224 return; 1225 } 1226 1227 if (migration_is_blocked(errp)) { 1228 return; 1229 } 1230 1231 s = migrate_init(¶ms); 1232 1233 if (strstart(uri, "tcp:", &p)) { 1234 tcp_start_outgoing_migration(s, p, &local_err); 1235 #ifdef CONFIG_RDMA 1236 } else if (strstart(uri, "rdma:", &p)) { 1237 rdma_start_outgoing_migration(s, p, &local_err); 1238 #endif 1239 } else if (strstart(uri, "exec:", &p)) { 1240 exec_start_outgoing_migration(s, p, &local_err); 1241 } else if (strstart(uri, "unix:", &p)) { 1242 unix_start_outgoing_migration(s, p, &local_err); 1243 } else if (strstart(uri, "fd:", &p)) { 1244 fd_start_outgoing_migration(s, p, &local_err); 1245 } else { 1246 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri", 1247 "a valid migration protocol"); 1248 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, 1249 MIGRATION_STATUS_FAILED); 1250 return; 1251 } 1252 1253 if (local_err) { 1254 migrate_fd_error(s, local_err); 1255 error_propagate(errp, local_err); 1256 return; 1257 } 1258 } 1259 1260 void qmp_migrate_cancel(Error **errp) 1261 { 1262 migrate_fd_cancel(migrate_get_current()); 1263 } 1264 1265 void qmp_migrate_set_cache_size(int64_t value, Error **errp) 1266 { 1267 MigrationState *s = migrate_get_current(); 1268 int64_t new_size; 1269 1270 /* Check for truncation */ 1271 if (value != (size_t)value) { 1272 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size", 1273 "exceeding address space"); 1274 return; 1275 } 1276 1277 /* Cache should not be larger than guest ram size */ 1278 if (value > ram_bytes_total()) { 1279 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size", 1280 "exceeds guest ram size "); 1281 return; 1282 } 1283 1284 new_size = xbzrle_cache_resize(value); 1285 if (new_size < 0) { 1286 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size", 1287 "is smaller than page size"); 1288 return; 1289 } 1290 1291 s->xbzrle_cache_size = new_size; 1292 } 1293 1294 int64_t qmp_query_migrate_cache_size(Error **errp) 1295 { 1296 return migrate_xbzrle_cache_size(); 1297 } 1298 1299 void qmp_migrate_set_speed(int64_t value, Error **errp) 1300 { 1301 MigrationParameters p = { 1302 .has_max_bandwidth = true, 1303 .max_bandwidth = value, 1304 }; 1305 1306 qmp_migrate_set_parameters(&p, errp); 1307 } 1308 1309 void qmp_migrate_set_downtime(double value, Error **errp) 1310 { 1311 if (value < 0 || value > MAX_MIGRATE_DOWNTIME_SECONDS) { 1312 error_setg(errp, "Parameter 'downtime_limit' expects an integer in " 1313 "the range of 0 to %d seconds", 1314 MAX_MIGRATE_DOWNTIME_SECONDS); 1315 return; 1316 } 1317 1318 value *= 1000; /* Convert to milliseconds */ 1319 value = MAX(0, MIN(INT64_MAX, value)); 1320 1321 MigrationParameters p = { 1322 .has_downtime_limit = true, 1323 .downtime_limit = value, 1324 }; 1325 1326 qmp_migrate_set_parameters(&p, errp); 1327 } 1328 1329 bool migrate_release_ram(void) 1330 { 1331 MigrationState *s; 1332 1333 s = migrate_get_current(); 1334 1335 return s->enabled_capabilities[MIGRATION_CAPABILITY_RELEASE_RAM]; 1336 } 1337 1338 bool migrate_postcopy_ram(void) 1339 { 1340 MigrationState *s; 1341 1342 s = migrate_get_current(); 1343 1344 return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM]; 1345 } 1346 1347 bool migrate_auto_converge(void) 1348 { 1349 MigrationState *s; 1350 1351 s = migrate_get_current(); 1352 1353 return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE]; 1354 } 1355 1356 bool migrate_zero_blocks(void) 1357 { 1358 MigrationState *s; 1359 1360 s = migrate_get_current(); 1361 1362 return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS]; 1363 } 1364 1365 bool migrate_use_compression(void) 1366 { 1367 MigrationState *s; 1368 1369 s = migrate_get_current(); 1370 1371 return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS]; 1372 } 1373 1374 int migrate_compress_level(void) 1375 { 1376 MigrationState *s; 1377 1378 s = migrate_get_current(); 1379 1380 return s->parameters.compress_level; 1381 } 1382 1383 int migrate_compress_threads(void) 1384 { 1385 MigrationState *s; 1386 1387 s = migrate_get_current(); 1388 1389 return s->parameters.compress_threads; 1390 } 1391 1392 int migrate_decompress_threads(void) 1393 { 1394 MigrationState *s; 1395 1396 s = migrate_get_current(); 1397 1398 return s->parameters.decompress_threads; 1399 } 1400 1401 bool migrate_use_events(void) 1402 { 1403 MigrationState *s; 1404 1405 s = migrate_get_current(); 1406 1407 return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS]; 1408 } 1409 1410 int migrate_use_xbzrle(void) 1411 { 1412 MigrationState *s; 1413 1414 s = migrate_get_current(); 1415 1416 return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE]; 1417 } 1418 1419 int64_t migrate_xbzrle_cache_size(void) 1420 { 1421 MigrationState *s; 1422 1423 s = migrate_get_current(); 1424 1425 return s->xbzrle_cache_size; 1426 } 1427 1428 /* migration thread support */ 1429 /* 1430 * Something bad happened to the RP stream, mark an error 1431 * The caller shall print or trace something to indicate why 1432 */ 1433 static void mark_source_rp_bad(MigrationState *s) 1434 { 1435 s->rp_state.error = true; 1436 } 1437 1438 static struct rp_cmd_args { 1439 ssize_t len; /* -1 = variable */ 1440 const char *name; 1441 } rp_cmd_args[] = { 1442 [MIG_RP_MSG_INVALID] = { .len = -1, .name = "INVALID" }, 1443 [MIG_RP_MSG_SHUT] = { .len = 4, .name = "SHUT" }, 1444 [MIG_RP_MSG_PONG] = { .len = 4, .name = "PONG" }, 1445 [MIG_RP_MSG_REQ_PAGES] = { .len = 12, .name = "REQ_PAGES" }, 1446 [MIG_RP_MSG_REQ_PAGES_ID] = { .len = -1, .name = "REQ_PAGES_ID" }, 1447 [MIG_RP_MSG_MAX] = { .len = -1, .name = "MAX" }, 1448 }; 1449 1450 /* 1451 * Process a request for pages received on the return path, 1452 * We're allowed to send more than requested (e.g. to round to our page size) 1453 * and we don't need to send pages that have already been sent. 1454 */ 1455 static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname, 1456 ram_addr_t start, size_t len) 1457 { 1458 long our_host_ps = getpagesize(); 1459 1460 trace_migrate_handle_rp_req_pages(rbname, start, len); 1461 1462 /* 1463 * Since we currently insist on matching page sizes, just sanity check 1464 * we're being asked for whole host pages. 1465 */ 1466 if (start & (our_host_ps-1) || 1467 (len & (our_host_ps-1))) { 1468 error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT 1469 " len: %zd", __func__, start, len); 1470 mark_source_rp_bad(ms); 1471 return; 1472 } 1473 1474 if (ram_save_queue_pages(rbname, start, len)) { 1475 mark_source_rp_bad(ms); 1476 } 1477 } 1478 1479 /* 1480 * Handles messages sent on the return path towards the source VM 1481 * 1482 */ 1483 static void *source_return_path_thread(void *opaque) 1484 { 1485 MigrationState *ms = opaque; 1486 QEMUFile *rp = ms->rp_state.from_dst_file; 1487 uint16_t header_len, header_type; 1488 uint8_t buf[512]; 1489 uint32_t tmp32, sibling_error; 1490 ram_addr_t start = 0; /* =0 to silence warning */ 1491 size_t len = 0, expected_len; 1492 int res; 1493 1494 trace_source_return_path_thread_entry(); 1495 while (!ms->rp_state.error && !qemu_file_get_error(rp) && 1496 migration_is_setup_or_active(ms->state)) { 1497 trace_source_return_path_thread_loop_top(); 1498 header_type = qemu_get_be16(rp); 1499 header_len = qemu_get_be16(rp); 1500 1501 if (header_type >= MIG_RP_MSG_MAX || 1502 header_type == MIG_RP_MSG_INVALID) { 1503 error_report("RP: Received invalid message 0x%04x length 0x%04x", 1504 header_type, header_len); 1505 mark_source_rp_bad(ms); 1506 goto out; 1507 } 1508 1509 if ((rp_cmd_args[header_type].len != -1 && 1510 header_len != rp_cmd_args[header_type].len) || 1511 header_len > sizeof(buf)) { 1512 error_report("RP: Received '%s' message (0x%04x) with" 1513 "incorrect length %d expecting %zu", 1514 rp_cmd_args[header_type].name, header_type, header_len, 1515 (size_t)rp_cmd_args[header_type].len); 1516 mark_source_rp_bad(ms); 1517 goto out; 1518 } 1519 1520 /* We know we've got a valid header by this point */ 1521 res = qemu_get_buffer(rp, buf, header_len); 1522 if (res != header_len) { 1523 error_report("RP: Failed reading data for message 0x%04x" 1524 " read %d expected %d", 1525 header_type, res, header_len); 1526 mark_source_rp_bad(ms); 1527 goto out; 1528 } 1529 1530 /* OK, we have the message and the data */ 1531 switch (header_type) { 1532 case MIG_RP_MSG_SHUT: 1533 sibling_error = ldl_be_p(buf); 1534 trace_source_return_path_thread_shut(sibling_error); 1535 if (sibling_error) { 1536 error_report("RP: Sibling indicated error %d", sibling_error); 1537 mark_source_rp_bad(ms); 1538 } 1539 /* 1540 * We'll let the main thread deal with closing the RP 1541 * we could do a shutdown(2) on it, but we're the only user 1542 * anyway, so there's nothing gained. 1543 */ 1544 goto out; 1545 1546 case MIG_RP_MSG_PONG: 1547 tmp32 = ldl_be_p(buf); 1548 trace_source_return_path_thread_pong(tmp32); 1549 break; 1550 1551 case MIG_RP_MSG_REQ_PAGES: 1552 start = ldq_be_p(buf); 1553 len = ldl_be_p(buf + 8); 1554 migrate_handle_rp_req_pages(ms, NULL, start, len); 1555 break; 1556 1557 case MIG_RP_MSG_REQ_PAGES_ID: 1558 expected_len = 12 + 1; /* header + termination */ 1559 1560 if (header_len >= expected_len) { 1561 start = ldq_be_p(buf); 1562 len = ldl_be_p(buf + 8); 1563 /* Now we expect an idstr */ 1564 tmp32 = buf[12]; /* Length of the following idstr */ 1565 buf[13 + tmp32] = '\0'; 1566 expected_len += tmp32; 1567 } 1568 if (header_len != expected_len) { 1569 error_report("RP: Req_Page_id with length %d expecting %zd", 1570 header_len, expected_len); 1571 mark_source_rp_bad(ms); 1572 goto out; 1573 } 1574 migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len); 1575 break; 1576 1577 default: 1578 break; 1579 } 1580 } 1581 if (qemu_file_get_error(rp)) { 1582 trace_source_return_path_thread_bad_end(); 1583 mark_source_rp_bad(ms); 1584 } 1585 1586 trace_source_return_path_thread_end(); 1587 out: 1588 ms->rp_state.from_dst_file = NULL; 1589 qemu_fclose(rp); 1590 return NULL; 1591 } 1592 1593 static int open_return_path_on_source(MigrationState *ms) 1594 { 1595 1596 ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file); 1597 if (!ms->rp_state.from_dst_file) { 1598 return -1; 1599 } 1600 1601 trace_open_return_path_on_source(); 1602 qemu_thread_create(&ms->rp_state.rp_thread, "return path", 1603 source_return_path_thread, ms, QEMU_THREAD_JOINABLE); 1604 1605 trace_open_return_path_on_source_continue(); 1606 1607 return 0; 1608 } 1609 1610 /* Returns 0 if the RP was ok, otherwise there was an error on the RP */ 1611 static int await_return_path_close_on_source(MigrationState *ms) 1612 { 1613 /* 1614 * If this is a normal exit then the destination will send a SHUT and the 1615 * rp_thread will exit, however if there's an error we need to cause 1616 * it to exit. 1617 */ 1618 if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) { 1619 /* 1620 * shutdown(2), if we have it, will cause it to unblock if it's stuck 1621 * waiting for the destination. 1622 */ 1623 qemu_file_shutdown(ms->rp_state.from_dst_file); 1624 mark_source_rp_bad(ms); 1625 } 1626 trace_await_return_path_close_on_source_joining(); 1627 qemu_thread_join(&ms->rp_state.rp_thread); 1628 trace_await_return_path_close_on_source_close(); 1629 return ms->rp_state.error; 1630 } 1631 1632 /* 1633 * Switch from normal iteration to postcopy 1634 * Returns non-0 on error 1635 */ 1636 static int postcopy_start(MigrationState *ms, bool *old_vm_running) 1637 { 1638 int ret; 1639 QIOChannelBuffer *bioc; 1640 QEMUFile *fb; 1641 int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 1642 bool restart_block = false; 1643 migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE, 1644 MIGRATION_STATUS_POSTCOPY_ACTIVE); 1645 1646 trace_postcopy_start(); 1647 qemu_mutex_lock_iothread(); 1648 trace_postcopy_start_set_run(); 1649 1650 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER); 1651 *old_vm_running = runstate_is_running(); 1652 global_state_store(); 1653 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); 1654 if (ret < 0) { 1655 goto fail; 1656 } 1657 1658 ret = bdrv_inactivate_all(); 1659 if (ret < 0) { 1660 goto fail; 1661 } 1662 restart_block = true; 1663 1664 /* 1665 * Cause any non-postcopiable, but iterative devices to 1666 * send out their final data. 1667 */ 1668 qemu_savevm_state_complete_precopy(ms->to_dst_file, true); 1669 1670 /* 1671 * in Finish migrate and with the io-lock held everything should 1672 * be quiet, but we've potentially still got dirty pages and we 1673 * need to tell the destination to throw any pages it's already received 1674 * that are dirty 1675 */ 1676 if (ram_postcopy_send_discard_bitmap(ms)) { 1677 error_report("postcopy send discard bitmap failed"); 1678 goto fail; 1679 } 1680 1681 /* 1682 * send rest of state - note things that are doing postcopy 1683 * will notice we're in POSTCOPY_ACTIVE and not actually 1684 * wrap their state up here 1685 */ 1686 qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX); 1687 /* Ping just for debugging, helps line traces up */ 1688 qemu_savevm_send_ping(ms->to_dst_file, 2); 1689 1690 /* 1691 * While loading the device state we may trigger page transfer 1692 * requests and the fd must be free to process those, and thus 1693 * the destination must read the whole device state off the fd before 1694 * it starts processing it. Unfortunately the ad-hoc migration format 1695 * doesn't allow the destination to know the size to read without fully 1696 * parsing it through each devices load-state code (especially the open 1697 * coded devices that use get/put). 1698 * So we wrap the device state up in a package with a length at the start; 1699 * to do this we use a qemu_buf to hold the whole of the device state. 1700 */ 1701 bioc = qio_channel_buffer_new(4096); 1702 qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer"); 1703 fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc)); 1704 object_unref(OBJECT(bioc)); 1705 1706 /* 1707 * Make sure the receiver can get incoming pages before we send the rest 1708 * of the state 1709 */ 1710 qemu_savevm_send_postcopy_listen(fb); 1711 1712 qemu_savevm_state_complete_precopy(fb, false); 1713 qemu_savevm_send_ping(fb, 3); 1714 1715 qemu_savevm_send_postcopy_run(fb); 1716 1717 /* <><> end of stuff going into the package */ 1718 1719 /* Last point of recovery; as soon as we send the package the destination 1720 * can open devices and potentially start running. 1721 * Lets just check again we've not got any errors. 1722 */ 1723 ret = qemu_file_get_error(ms->to_dst_file); 1724 if (ret) { 1725 error_report("postcopy_start: Migration stream errored (pre package)"); 1726 goto fail_closefb; 1727 } 1728 1729 restart_block = false; 1730 1731 /* Now send that blob */ 1732 if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) { 1733 goto fail_closefb; 1734 } 1735 qemu_fclose(fb); 1736 1737 /* Send a notify to give a chance for anything that needs to happen 1738 * at the transition to postcopy and after the device state; in particular 1739 * spice needs to trigger a transition now 1740 */ 1741 ms->postcopy_after_devices = true; 1742 notifier_list_notify(&migration_state_notifiers, ms); 1743 1744 ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop; 1745 1746 qemu_mutex_unlock_iothread(); 1747 1748 /* 1749 * Although this ping is just for debug, it could potentially be 1750 * used for getting a better measurement of downtime at the source. 1751 */ 1752 qemu_savevm_send_ping(ms->to_dst_file, 4); 1753 1754 if (migrate_release_ram()) { 1755 ram_postcopy_migrated_memory_release(ms); 1756 } 1757 1758 ret = qemu_file_get_error(ms->to_dst_file); 1759 if (ret) { 1760 error_report("postcopy_start: Migration stream errored"); 1761 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 1762 MIGRATION_STATUS_FAILED); 1763 } 1764 1765 return ret; 1766 1767 fail_closefb: 1768 qemu_fclose(fb); 1769 fail: 1770 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 1771 MIGRATION_STATUS_FAILED); 1772 if (restart_block) { 1773 /* A failure happened early enough that we know the destination hasn't 1774 * accessed block devices, so we're safe to recover. 1775 */ 1776 Error *local_err = NULL; 1777 1778 bdrv_invalidate_cache_all(&local_err); 1779 if (local_err) { 1780 error_report_err(local_err); 1781 } 1782 } 1783 qemu_mutex_unlock_iothread(); 1784 return -1; 1785 } 1786 1787 /** 1788 * migration_completion: Used by migration_thread when there's not much left. 1789 * The caller 'breaks' the loop when this returns. 1790 * 1791 * @s: Current migration state 1792 * @current_active_state: The migration state we expect to be in 1793 * @*old_vm_running: Pointer to old_vm_running flag 1794 * @*start_time: Pointer to time to update 1795 */ 1796 static void migration_completion(MigrationState *s, int current_active_state, 1797 bool *old_vm_running, 1798 int64_t *start_time) 1799 { 1800 int ret; 1801 1802 if (s->state == MIGRATION_STATUS_ACTIVE) { 1803 qemu_mutex_lock_iothread(); 1804 *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 1805 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER); 1806 *old_vm_running = runstate_is_running(); 1807 ret = global_state_store(); 1808 1809 if (!ret) { 1810 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); 1811 /* 1812 * Don't mark the image with BDRV_O_INACTIVE flag if 1813 * we will go into COLO stage later. 1814 */ 1815 if (ret >= 0 && !migrate_colo_enabled()) { 1816 ret = bdrv_inactivate_all(); 1817 } 1818 if (ret >= 0) { 1819 qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX); 1820 qemu_savevm_state_complete_precopy(s->to_dst_file, false); 1821 s->block_inactive = true; 1822 } 1823 } 1824 qemu_mutex_unlock_iothread(); 1825 1826 if (ret < 0) { 1827 goto fail; 1828 } 1829 } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { 1830 trace_migration_completion_postcopy_end(); 1831 1832 qemu_savevm_state_complete_postcopy(s->to_dst_file); 1833 trace_migration_completion_postcopy_end_after_complete(); 1834 } 1835 1836 /* 1837 * If rp was opened we must clean up the thread before 1838 * cleaning everything else up (since if there are no failures 1839 * it will wait for the destination to send it's status in 1840 * a SHUT command). 1841 * Postcopy opens rp if enabled (even if it's not avtivated) 1842 */ 1843 if (migrate_postcopy_ram()) { 1844 int rp_error; 1845 trace_migration_completion_postcopy_end_before_rp(); 1846 rp_error = await_return_path_close_on_source(s); 1847 trace_migration_completion_postcopy_end_after_rp(rp_error); 1848 if (rp_error) { 1849 goto fail_invalidate; 1850 } 1851 } 1852 1853 if (qemu_file_get_error(s->to_dst_file)) { 1854 trace_migration_completion_file_err(); 1855 goto fail_invalidate; 1856 } 1857 1858 if (!migrate_colo_enabled()) { 1859 migrate_set_state(&s->state, current_active_state, 1860 MIGRATION_STATUS_COMPLETED); 1861 } 1862 1863 return; 1864 1865 fail_invalidate: 1866 /* If not doing postcopy, vm_start() will be called: let's regain 1867 * control on images. 1868 */ 1869 if (s->state == MIGRATION_STATUS_ACTIVE) { 1870 Error *local_err = NULL; 1871 1872 qemu_mutex_lock_iothread(); 1873 bdrv_invalidate_cache_all(&local_err); 1874 if (local_err) { 1875 error_report_err(local_err); 1876 } else { 1877 s->block_inactive = false; 1878 } 1879 qemu_mutex_unlock_iothread(); 1880 } 1881 1882 fail: 1883 migrate_set_state(&s->state, current_active_state, 1884 MIGRATION_STATUS_FAILED); 1885 } 1886 1887 bool migrate_colo_enabled(void) 1888 { 1889 MigrationState *s = migrate_get_current(); 1890 return s->enabled_capabilities[MIGRATION_CAPABILITY_X_COLO]; 1891 } 1892 1893 /* 1894 * Master migration thread on the source VM. 1895 * It drives the migration and pumps the data down the outgoing channel. 1896 */ 1897 static void *migration_thread(void *opaque) 1898 { 1899 MigrationState *s = opaque; 1900 /* Used by the bandwidth calcs, updated later */ 1901 int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 1902 int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST); 1903 int64_t initial_bytes = 0; 1904 /* 1905 * The final stage happens when the remaining data is smaller than 1906 * this threshold; it's calculated from the requested downtime and 1907 * measured bandwidth 1908 */ 1909 int64_t threshold_size = 0; 1910 int64_t start_time = initial_time; 1911 int64_t end_time; 1912 bool old_vm_running = false; 1913 bool entered_postcopy = false; 1914 /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */ 1915 enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE; 1916 bool enable_colo = migrate_colo_enabled(); 1917 1918 rcu_register_thread(); 1919 1920 qemu_savevm_state_header(s->to_dst_file); 1921 1922 if (migrate_postcopy_ram()) { 1923 /* Now tell the dest that it should open its end so it can reply */ 1924 qemu_savevm_send_open_return_path(s->to_dst_file); 1925 1926 /* And do a ping that will make stuff easier to debug */ 1927 qemu_savevm_send_ping(s->to_dst_file, 1); 1928 1929 /* 1930 * Tell the destination that we *might* want to do postcopy later; 1931 * if the other end can't do postcopy it should fail now, nice and 1932 * early. 1933 */ 1934 qemu_savevm_send_postcopy_advise(s->to_dst_file); 1935 } 1936 1937 qemu_savevm_state_begin(s->to_dst_file, &s->params); 1938 1939 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; 1940 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, 1941 MIGRATION_STATUS_ACTIVE); 1942 1943 trace_migration_thread_setup_complete(); 1944 1945 while (s->state == MIGRATION_STATUS_ACTIVE || 1946 s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { 1947 int64_t current_time; 1948 uint64_t pending_size; 1949 1950 if (!qemu_file_rate_limit(s->to_dst_file)) { 1951 uint64_t pend_post, pend_nonpost; 1952 1953 qemu_savevm_state_pending(s->to_dst_file, threshold_size, 1954 &pend_nonpost, &pend_post); 1955 pending_size = pend_nonpost + pend_post; 1956 trace_migrate_pending(pending_size, threshold_size, 1957 pend_post, pend_nonpost); 1958 if (pending_size && pending_size >= threshold_size) { 1959 /* Still a significant amount to transfer */ 1960 1961 if (migrate_postcopy_ram() && 1962 s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE && 1963 pend_nonpost <= threshold_size && 1964 atomic_read(&s->start_postcopy)) { 1965 1966 if (!postcopy_start(s, &old_vm_running)) { 1967 current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE; 1968 entered_postcopy = true; 1969 } 1970 1971 continue; 1972 } 1973 /* Just another iteration step */ 1974 qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy); 1975 } else { 1976 trace_migration_thread_low_pending(pending_size); 1977 migration_completion(s, current_active_state, 1978 &old_vm_running, &start_time); 1979 break; 1980 } 1981 } 1982 1983 if (qemu_file_get_error(s->to_dst_file)) { 1984 migrate_set_state(&s->state, current_active_state, 1985 MIGRATION_STATUS_FAILED); 1986 trace_migration_thread_file_err(); 1987 break; 1988 } 1989 current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 1990 if (current_time >= initial_time + BUFFER_DELAY) { 1991 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) - 1992 initial_bytes; 1993 uint64_t time_spent = current_time - initial_time; 1994 double bandwidth = (double)transferred_bytes / time_spent; 1995 threshold_size = bandwidth * s->parameters.downtime_limit; 1996 1997 s->mbps = (((double) transferred_bytes * 8.0) / 1998 ((double) time_spent / 1000.0)) / 1000.0 / 1000.0; 1999 2000 trace_migrate_transferred(transferred_bytes, time_spent, 2001 bandwidth, threshold_size); 2002 /* if we haven't sent anything, we don't want to recalculate 2003 10000 is a small enough number for our purposes */ 2004 if (ram_dirty_pages_rate() && transferred_bytes > 10000) { 2005 s->expected_downtime = ram_dirty_pages_rate() * 2006 qemu_target_page_size() / bandwidth; 2007 } 2008 2009 qemu_file_reset_rate_limit(s->to_dst_file); 2010 initial_time = current_time; 2011 initial_bytes = qemu_ftell(s->to_dst_file); 2012 } 2013 if (qemu_file_rate_limit(s->to_dst_file)) { 2014 /* usleep expects microseconds */ 2015 g_usleep((initial_time + BUFFER_DELAY - current_time)*1000); 2016 } 2017 } 2018 2019 trace_migration_thread_after_loop(); 2020 /* If we enabled cpu throttling for auto-converge, turn it off. */ 2021 cpu_throttle_stop(); 2022 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 2023 2024 qemu_mutex_lock_iothread(); 2025 /* 2026 * The resource has been allocated by migration will be reused in COLO 2027 * process, so don't release them. 2028 */ 2029 if (!enable_colo) { 2030 qemu_savevm_state_cleanup(); 2031 } 2032 if (s->state == MIGRATION_STATUS_COMPLETED) { 2033 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file); 2034 s->total_time = end_time - s->total_time; 2035 if (!entered_postcopy) { 2036 s->downtime = end_time - start_time; 2037 } 2038 if (s->total_time) { 2039 s->mbps = (((double) transferred_bytes * 8.0) / 2040 ((double) s->total_time)) / 1000; 2041 } 2042 runstate_set(RUN_STATE_POSTMIGRATE); 2043 } else { 2044 if (s->state == MIGRATION_STATUS_ACTIVE && enable_colo) { 2045 migrate_start_colo_process(s); 2046 qemu_savevm_state_cleanup(); 2047 /* 2048 * Fixme: we will run VM in COLO no matter its old running state. 2049 * After exited COLO, we will keep running. 2050 */ 2051 old_vm_running = true; 2052 } 2053 if (old_vm_running && !entered_postcopy) { 2054 vm_start(); 2055 } else { 2056 if (runstate_check(RUN_STATE_FINISH_MIGRATE)) { 2057 runstate_set(RUN_STATE_POSTMIGRATE); 2058 } 2059 } 2060 } 2061 qemu_bh_schedule(s->cleanup_bh); 2062 qemu_mutex_unlock_iothread(); 2063 2064 rcu_unregister_thread(); 2065 return NULL; 2066 } 2067 2068 void migrate_fd_connect(MigrationState *s) 2069 { 2070 s->expected_downtime = s->parameters.downtime_limit; 2071 s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s); 2072 2073 qemu_file_set_blocking(s->to_dst_file, true); 2074 qemu_file_set_rate_limit(s->to_dst_file, 2075 s->parameters.max_bandwidth / XFER_LIMIT_RATIO); 2076 2077 /* Notify before starting migration thread */ 2078 notifier_list_notify(&migration_state_notifiers, s); 2079 2080 /* 2081 * Open the return path; currently for postcopy but other things might 2082 * also want it. 2083 */ 2084 if (migrate_postcopy_ram()) { 2085 if (open_return_path_on_source(s)) { 2086 error_report("Unable to open return-path for postcopy"); 2087 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, 2088 MIGRATION_STATUS_FAILED); 2089 migrate_fd_cleanup(s); 2090 return; 2091 } 2092 } 2093 2094 migrate_compress_threads_create(); 2095 qemu_thread_create(&s->thread, "live_migration", migration_thread, s, 2096 QEMU_THREAD_JOINABLE); 2097 s->migration_thread_running = true; 2098 } 2099 2100 PostcopyState postcopy_state_get(void) 2101 { 2102 return atomic_mb_read(&incoming_postcopy_state); 2103 } 2104 2105 /* Set the state and return the old state */ 2106 PostcopyState postcopy_state_set(PostcopyState new_state) 2107 { 2108 return atomic_xchg(&incoming_postcopy_state, new_state); 2109 } 2110 2111