/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/blocker.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "sysemu/sysemu.h"
#include "block/block.h"
#include "qapi/qmp/qerror.h"
#include "qapi/util.h"
#include "qemu/sockets.h"
#include "qemu/rcu.h"
#include "migration/block.h"
#include "postcopy-ram.h"
#include "qemu/thread.h"
#include "qmp-commands.h"
#include "trace.h"
#include "qapi-event.h"
#include "qom/cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "io/channel-buffer.h"
#include "io/channel-tls.h"
#include "migration/colo.h"

#define MAX_THROTTLE  (32 << 20)      /* Migration transfer speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)

/* Time in milliseconds we are allowed to stop the source,
 * for sending the last part */
#define DEFAULT_MIGRATE_SET_DOWNTIME 300

/* Maximum migrate downtime set to 2000 seconds */
#define MAX_MIGRATE_DOWNTIME_SECONDS 2000
#define MAX_MIGRATE_DOWNTIME (MAX_MIGRATE_DOWNTIME_SECONDS * 1000)

/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count, usually decompression is at
 * least 4 times as fast as compression. */
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/* 0: no compression, 1: best speed, ..., 9: best compression ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
/* Define default autoconverge cpu throttle migration parameters */
#define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
#define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)

/* The delay time (in ms) between two COLO checkpoints
 * Note: Please change this default value to 10000 when we support hybrid mode.
 */
#define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY 200
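
/*
 * Worked example of the throttling arithmetic above: with the default
 * MAX_THROTTLE of 32 MiB/s and BUFFER_DELAY of 100 ms, XFER_LIMIT_RATIO
 * is 1000 / 100 = 10, so the limit handed to qemu_file_set_rate_limit()
 * in migrate_fd_connect() is max_bandwidth / 10, i.e. roughly 3.2 MiB
 * may be written per 100 ms buffering window.
 */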

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

static bool deferred_incoming;

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */

/* For outgoing */
MigrationState *migrate_get_current(void)
{
    static bool once;
    static MigrationState current_migration = {
        .state = MIGRATION_STATUS_NONE,
        .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
        .mbps = -1,
        .parameters = {
            .compress_level = DEFAULT_MIGRATE_COMPRESS_LEVEL,
            .compress_threads = DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
            .decompress_threads = DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
            .cpu_throttle_initial = DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL,
            .cpu_throttle_increment = DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT,
            .max_bandwidth = MAX_THROTTLE,
            .downtime_limit = DEFAULT_MIGRATE_SET_DOWNTIME,
            .x_checkpoint_delay = DEFAULT_MIGRATE_X_CHECKPOINT_DELAY,
        },
    };

    if (!once) {
        current_migration.parameters.tls_creds = g_strdup("");
        current_migration.parameters.tls_hostname = g_strdup("");
        once = true;
    }
    return &current_migration;
}

MigrationIncomingState *migration_incoming_get_current(void)
{
    static bool once;
    static MigrationIncomingState mis_current;

    if (!once) {
        /* Zero the whole state first; MIGRATION_STATUS_NONE is 0, so the
         * memset also leaves 'state' correctly initialized. */
        memset(&mis_current, 0, sizeof(MigrationIncomingState));
        mis_current.state = MIGRATION_STATUS_NONE;
        QLIST_INIT(&mis_current.loadvm_handlers);
        qemu_mutex_init(&mis_current.rp_mutex);
        qemu_event_init(&mis_current.main_thread_load_event, false);
        once = true;
    }
    return &mis_current;
}

void migration_incoming_state_destroy(void)
{
    struct MigrationIncomingState *mis = migration_incoming_get_current();

    qemu_event_destroy(&mis->main_thread_load_event);
    loadvm_free_handlers(mis);
}

typedef struct {
    bool optional;
    uint32_t size;
    uint8_t runstate[100];
    RunState state;
    bool received;
} GlobalState;

static GlobalState global_state;

int global_state_store(void)
{
    if (!runstate_store((char *)global_state.runstate,
                        sizeof(global_state.runstate))) {
        error_report("runstate name too big: %s", global_state.runstate);
        trace_migrate_state_too_big();
        return -EINVAL;
    }
    return 0;
}

void global_state_store_running(void)
{
    const char *state = RunState_lookup[RUN_STATE_RUNNING];
    strncpy((char *)global_state.runstate,
            state, sizeof(global_state.runstate));
}

static bool global_state_received(void)
{
    return global_state.received;
}

static RunState global_state_get_runstate(void)
{
    return global_state.state;
}

void global_state_set_optional(void)
{
    global_state.optional = true;
}

static bool global_state_needed(void *opaque)
{
    GlobalState *s = opaque;
    char *runstate = (char *)s->runstate;

    /* If it is not optional, it is mandatory */

    if (s->optional == false) {
        return true;
    }

    /* If state is running or paused, it is not needed */

    if (strcmp(runstate, "running") == 0 ||
        strcmp(runstate, "paused") == 0) {
        return false;
    }

    /* for any other state it is needed */
    return true;
}

static int global_state_post_load(void *opaque, int version_id)
{
    GlobalState *s = opaque;
    Error *local_err = NULL;
    int r;
    char *runstate = (char *)s->runstate;

    s->received = true;
    trace_migrate_global_state_post_load(runstate);

    r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE__MAX,
                        -1, &local_err);

    if (r == -1) {
        if (local_err) {
            error_report_err(local_err);
        }
        return -EINVAL;
    }
    s->state = r;

    return 0;
}

static void global_state_pre_save(void *opaque)
{
    GlobalState *s = opaque;

    trace_migrate_global_state_pre_save((char *)s->runstate);
    s->size = strlen((char *)s->runstate) + 1;
}

static const VMStateDescription vmstate_globalstate = {
    .name = "globalstate",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = global_state_post_load,
    .pre_save = global_state_pre_save,
    .needed = global_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(size, GlobalState),
        VMSTATE_BUFFER(runstate, GlobalState),
        VMSTATE_END_OF_LIST()
    },
};

void register_global_state(void)
{
    /* We use it regardless of whether we receive it */
    strcpy((char *)global_state.runstate, "");
    global_state.received = false;
    vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
}
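
/*
 * On the wire (after the generic vmstate section header), the body of the
 * "globalstate" section is simply the two fields declared above in order:
 * a big-endian uint32 'size' followed by the full 100-byte 'runstate'
 * buffer; VMSTATE_BUFFER always sends the whole array, and 'size' records
 * how much of it is the NUL-terminated runstate name.
 */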

static void migrate_generate_event(int new_state)
{
    if (migrate_use_events()) {
        qapi_event_send_migration(new_state, &error_abort);
    }
}

/*
 * Called on -incoming with a defer: uri.
 * The migration can be started later after any parameters have been
 * changed.
 */
static void deferred_incoming_migration(Error **errp)
{
    if (deferred_incoming) {
        error_setg(errp, "Incoming migration already deferred");
    }
    deferred_incoming = true;
}

/* Request a range of pages from the source VM at the given
 * start address.
 *   rbname: Name of the RAMBlock to request the page in, if NULL it's the same
 *           as the last request (a name must have been given previously)
 *   start: Address offset within the RB
 *   len: Length in bytes required - must be a multiple of pagesize
 */
void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
                               ram_addr_t start, size_t len)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 255 */
    size_t msglen = 12; /* start + len */

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    if (rbname) {
        int rbname_len = strlen(rbname);
        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc);
    } else {
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc);
    }
}
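
/*
 * Payload layout built by migrate_send_rp_req_pages() above (the two-byte
 * type/len framing is added by migrate_send_rp_message()):
 *
 *   MIG_RP_MSG_REQ_PAGES:     | be64 start | be32 len |
 *   MIG_RP_MSG_REQ_PAGES_ID:  | be64 start | be32 len | u8 namelen | rbname |
 *
 * The rbname is not NUL-terminated on the wire; the receiver in
 * source_return_path_thread() terminates it using namelen.
 */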

void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}

static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    /* Make sure all file formats flush their mutable metadata.
     * If we get an error here, just don't restart the VM yet. */
    bdrv_invalidate_cache_all(&local_err);
    if (local_err) {
        error_report_err(local_err);
        local_err = NULL;
        autostart = false;
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self();

    /* If global state section was not received or we are in running
       state, we need to obey autostart. Any other state is set with
       runstate_set. */

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else {
        runstate_set(global_state_get_runstate());
    }
    migrate_decompress_threads_join();
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    qemu_bh_delete(mis->bh);
    migration_incoming_state_destroy();
}

static void process_incoming_migration_co(void *opaque)
{
    QEMUFile *f = opaque;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyState ps;
    int ret;

    mis->from_src_file = f;
    mis->largest_page_size = qemu_ram_pagesize_largest();
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_ACTIVE);
    ret = qemu_loadvm_state(f);

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    /* We now have the COLO info, so we know whether we are in COLO mode */
    if (!ret && migration_incoming_enable_colo()) {
        mis->migration_incoming_co = qemu_coroutine_self();
        qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
             colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE);
        mis->have_colo_incoming_thread = true;
        qemu_coroutine_yield();

        /* Wait for the COLO incoming checkpoint thread to exit before
         * freeing resources */
        qemu_thread_join(&mis->colo_incoming_thread);
    }

    if (ret < 0) {
        migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        error_report("load of migration failed: %s", strerror(-ret));
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    qemu_fclose(f);
    free_xbzrle_decoded_buf();

    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
    qemu_bh_schedule(mis->bh);
}

void migration_fd_process_incoming(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, f);

    migrate_decompress_threads_create();
    qemu_file_set_blocking(f, false);
    qemu_coroutine_enter(co);
}

void migration_channel_process_incoming(MigrationState *s,
                                        QIOChannel *ioc)
{
    trace_migration_set_incoming_channel(
        ioc, object_get_typename(OBJECT(ioc)));

    if (s->parameters.tls_creds &&
        *s->parameters.tls_creds &&
        !object_dynamic_cast(OBJECT(ioc),
                             TYPE_QIO_CHANNEL_TLS)) {
        Error *local_err = NULL;
        migration_tls_channel_process_incoming(s, ioc, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    } else {
        QEMUFile *f = qemu_fopen_channel_input(ioc);
        migration_fd_process_incoming(f);
    }
}

void migration_channel_connect(MigrationState *s,
                               QIOChannel *ioc,
                               const char *hostname)
{
    trace_migration_set_outgoing_channel(
        ioc, object_get_typename(OBJECT(ioc)), hostname);

    if (s->parameters.tls_creds &&
        *s->parameters.tls_creds &&
        !object_dynamic_cast(OBJECT(ioc),
                             TYPE_QIO_CHANNEL_TLS)) {
        Error *local_err = NULL;
        migration_tls_channel_connect(s, ioc, hostname, &local_err);
        if (local_err) {
            migrate_fd_error(s, local_err);
            error_free(local_err);
        }
    } else {
        QEMUFile *f = qemu_fopen_channel_output(ioc);

        s->to_dst_file = f;

        migrate_fd_connect(s);
    }
}

/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
void migrate_send_rp_message(MigrationIncomingState *mis,
                             enum mig_rp_message_type message_type,
                             uint16_t len, void *data)
{
    trace_migrate_send_rp_message((int)message_type, len);
    qemu_mutex_lock(&mis->rp_mutex);
    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);
    qemu_mutex_unlock(&mis->rp_mutex);
}
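
/*
 * Every return-path message is framed by migrate_send_rp_message() as:
 *
 *   | be16 message_type | be16 len | len bytes of payload |
 *
 * For example, migrate_send_rp_pong(mis, 42) below produces eight bytes:
 * MIG_RP_MSG_PONG (be16), 0x0004 (be16), then 0x0000002a (be32).
 */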

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  Non-0 value indicates
 * error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (i == MIGRATION_CAPABILITY_X_COLO && !colo_supported()) {
            continue;
        }
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value = g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}

MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    params = g_malloc0(sizeof(*params));
    params->has_compress_level = true;
    params->compress_level = s->parameters.compress_level;
    params->has_compress_threads = true;
    params->compress_threads = s->parameters.compress_threads;
    params->has_decompress_threads = true;
    params->decompress_threads = s->parameters.decompress_threads;
    params->has_cpu_throttle_initial = true;
    params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
    params->has_cpu_throttle_increment = true;
    params->cpu_throttle_increment = s->parameters.cpu_throttle_increment;
    params->has_tls_creds = !!s->parameters.tls_creds;
    params->tls_creds = g_strdup(s->parameters.tls_creds);
    params->has_tls_hostname = !!s->parameters.tls_hostname;
    params->tls_hostname = g_strdup(s->parameters.tls_hostname);
    params->has_max_bandwidth = true;
    params->max_bandwidth = s->parameters.max_bandwidth;
    params->has_downtime_limit = true;
    params->downtime_limit = s->parameters.downtime_limit;
    params->has_x_checkpoint_delay = true;
    params->x_checkpoint_delay = s->parameters.x_checkpoint_delay;

    return params;
}

/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
static bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_SETUP:
        return true;

    default:
        return false;

    }
}

static void get_xbzrle_cache_stats(MigrationInfo *info)
{
    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
        info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
        info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
        info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
        info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
    }
}

static void populate_ram_info(MigrationInfo *info, MigrationState *s)
{
    info->has_ram = true;
    info->ram = g_malloc0(sizeof(*info->ram));
    info->ram->transferred = ram_bytes_transferred();
    info->ram->total = ram_bytes_total();
    info->ram->duplicate = dup_mig_pages_transferred();
    /* legacy value.  It is not used anymore */
    info->ram->skipped = 0;
    info->ram->normal = norm_mig_pages_transferred();
    info->ram->normal_bytes = norm_mig_pages_transferred() *
        qemu_target_page_size();
    info->ram->mbps = s->mbps;
    info->ram->dirty_sync_count = ram_dirty_sync_count();
    info->ram->postcopy_requests = ram_postcopy_requests();
    info->ram->page_size = qemu_target_page_size();

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        info->ram->remaining = ram_bytes_remaining();
        info->ram->dirty_pages_rate = ram_dirty_pages_rate();
    }
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        break;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        if (cpu_throttle_active()) {
            info->has_cpu_throttle_percentage = true;
            info->cpu_throttle_percentage = cpu_throttle_get_percentage();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
        /* Mostly the same as active; TODO add some postcopy stats */
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        /* TODO: display COLO specific information (checkpoint info etc.) */
        break;
    case MIGRATION_STATUS_COMPLETED:
        get_xbzrle_cache_stats(info);

        info->has_status = true;
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        if (s->error) {
            info->has_error_desc = true;
            info->error_desc = g_strdup(error_get_pretty(s->error));
        }
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    }
    info->status = s->state;

    return info;
}
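
/*
 * Illustrative (abridged) QMP exchange during an active migration; the
 * exact set of fields depends on the state and enabled capabilities:
 *
 *   -> { "execute": "query-migrate" }
 *   <- { "return": { "status": "active",
 *                    "total-time": 12345, "setup-time": 12,
 *                    "expected-downtime": 300,
 *                    "ram": { "transferred": 123456, "remaining": 123456,
 *                             "total": 1057024,
 *                             "dirty-pages-rate": 1000 } } }
 */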

void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;
    bool old_postcopy_cap = migrate_postcopy_ram();

    if (migration_is_setup_or_active(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    for (cap = params; cap; cap = cap->next) {
        if (cap->value->capability == MIGRATION_CAPABILITY_X_COLO) {
            if (!colo_supported()) {
                error_setg(errp, "COLO is not currently supported, please"
                             " configure with --enable-colo option in order to"
                             " support COLO feature");
                continue;
            }
        }
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }

    if (migrate_postcopy_ram()) {
        if (migrate_use_compression()) {
            /* The decompression threads asynchronously write into RAM
             * rather than use the atomic copies needed to avoid
             * userfaulting.  It should be possible to fix the decompression
             * threads for compatibility in future.
             */
            error_report("Postcopy is not currently compatible with "
                         "compression");
            s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
                false;
        }
        /* This check is reasonably expensive, so do it only when the
         * capability is first set; also, it's only the destination that
         * needs the special support.
         */
        if (!old_postcopy_cap && runstate_check(RUN_STATE_INMIGRATE) &&
            !postcopy_ram_supported_by_host()) {
            /* postcopy_ram_supported_by_host will have emitted a more
             * detailed message
             */
            error_report("Postcopy is not supported");
            s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
                false;
        }
    }
}
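
/*
 * Typical QMP usage (illustrative):
 *
 *   -> { "execute": "migrate-set-capabilities",
 *        "arguments": { "capabilities": [
 *            { "capability": "postcopy-ram", "state": true } ] } }
 *   <- { "return": {} }
 */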

void qmp_migrate_set_parameters(MigrationParameters *params, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (params->has_compress_level &&
        (params->compress_level < 0 || params->compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "is invalid, it should be in the range of 0 to 9");
        return;
    }
    if (params->has_compress_threads &&
        (params->compress_threads < 1 || params->compress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "compress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (params->has_decompress_threads &&
        (params->decompress_threads < 1 || params->decompress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (params->has_cpu_throttle_initial &&
        (params->cpu_throttle_initial < 1 ||
         params->cpu_throttle_initial > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_initial",
                   "an integer in the range of 1 to 99");
        return;
    }
    if (params->has_cpu_throttle_increment &&
        (params->cpu_throttle_increment < 1 ||
         params->cpu_throttle_increment > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_increment",
                   "an integer in the range of 1 to 99");
        return;
    }
    if (params->has_max_bandwidth &&
        (params->max_bandwidth < 0 || params->max_bandwidth > SIZE_MAX)) {
        error_setg(errp, "Parameter 'max_bandwidth' expects an integer in the"
                         " range of 0 to %zu bytes/second", SIZE_MAX);
        return;
    }
    if (params->has_downtime_limit &&
        (params->downtime_limit < 0 ||
         params->downtime_limit > MAX_MIGRATE_DOWNTIME)) {
        error_setg(errp, "Parameter 'downtime_limit' expects an integer in "
                         "the range of 0 to %d milliseconds",
                         MAX_MIGRATE_DOWNTIME);
        return;
    }
    if (params->has_x_checkpoint_delay && (params->x_checkpoint_delay < 0)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "x_checkpoint_delay",
                   "is invalid, it should be positive");
        return;
    }

    if (params->has_compress_level) {
        s->parameters.compress_level = params->compress_level;
    }
    if (params->has_compress_threads) {
        s->parameters.compress_threads = params->compress_threads;
    }
    if (params->has_decompress_threads) {
        s->parameters.decompress_threads = params->decompress_threads;
    }
    if (params->has_cpu_throttle_initial) {
        s->parameters.cpu_throttle_initial = params->cpu_throttle_initial;
    }
    if (params->has_cpu_throttle_increment) {
        s->parameters.cpu_throttle_increment = params->cpu_throttle_increment;
    }
    if (params->has_tls_creds) {
        g_free(s->parameters.tls_creds);
        s->parameters.tls_creds = g_strdup(params->tls_creds);
    }
    if (params->has_tls_hostname) {
        g_free(s->parameters.tls_hostname);
        s->parameters.tls_hostname = g_strdup(params->tls_hostname);
    }
    if (params->has_max_bandwidth) {
        s->parameters.max_bandwidth = params->max_bandwidth;
        if (s->to_dst_file) {
            qemu_file_set_rate_limit(s->to_dst_file,
                                s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
        }
    }
    if (params->has_downtime_limit) {
        s->parameters.downtime_limit = params->downtime_limit;
    }

    if (params->has_x_checkpoint_delay) {
        s->parameters.x_checkpoint_delay = params->x_checkpoint_delay;
        if (migration_in_colo_state()) {
            colo_checkpoint_notify(s);
        }
    }
}
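
/*
 * Typical QMP usage (illustrative): cap bandwidth at 100 MiB/s and allow
 * up to 500 ms of downtime for the final stop-and-copy phase:
 *
 *   -> { "execute": "migrate-set-parameters",
 *        "arguments": { "max-bandwidth": 104857600,
 *                       "downtime-limit": 500 } }
 *   <- { "return": {} }
 */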

void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy_ram()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                         " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                         " started");
        return;
    }
    /*
     * we don't error if migration has finished since that would be racy
     * with issuing this command.
     */
    atomic_set(&s->start_postcopy, true);
}

/* shared migration helpers */

void migrate_set_state(int *state, int old_state, int new_state)
{
    if (atomic_cmpxchg(state, old_state, new_state) == old_state) {
        trace_migrate_set_state(new_state);
        migrate_generate_event(new_state);
    }
}

static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    migration_page_queue_free();

    if (s->to_dst_file) {
        trace_migrate_fd_cleanup();
        qemu_mutex_unlock_iothread();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        qemu_mutex_lock_iothread();

        migrate_compress_threads_join();
        qemu_fclose(s->to_dst_file);
        s->to_dst_file = NULL;
    }

    assert((s->state != MIGRATION_STATUS_ACTIVE) &&
           (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE));

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    notifier_list_notify(&migration_state_notifiers, s);
}

void migrate_fd_error(MigrationState *s, const Error *error)
{
    trace_migrate_fd_error(error_get_pretty(error));
    assert(s->to_dst_file == NULL);
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_FAILED);
    if (!s->error) {
        s->error = error_copy(error);
    }
    notifier_list_notify(&migration_state_notifiers, s);
}

static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->to_dst_file;
    trace_migrate_fd_cancel();

    if (s->rp_state.from_dst_file) {
        /* shutdown the rp socket, causing the rp thread to shut down */
        qemu_file_shutdown(s->rp_state.from_dst_file);
    }

    do {
        old_state = s->state;
        if (!migration_is_setup_or_active(old_state)) {
            break;
        }
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
        Error *local_err = NULL;

        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
    }
}

void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}

bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}

bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}

bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}

bool migration_in_postcopy(void)
{
    MigrationState *s = migrate_get_current();

    return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

bool migration_in_postcopy_after_devices(MigrationState *s)
{
    return migration_in_postcopy() && s->postcopy_after_devices;
}

bool migration_is_idle(void)
{
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_COMPLETED:
    case MIGRATION_STATUS_FAILED:
        return true;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_COLO:
        return false;
    case MIGRATION_STATUS__MAX:
        g_assert_not_reached();
    }

    return false;
}

MigrationState *migrate_init(const MigrationParams *params)
{
    MigrationState *s = migrate_get_current();

    /*
     * Reinitialise all migration state, except
     * parameters/capabilities that the user set, and
     * locks.
     */
    s->bytes_xfer = 0;
    s->xfer_limit = 0;
    s->cleanup_bh = 0;
    s->to_dst_file = NULL;
    s->state = MIGRATION_STATUS_NONE;
    s->params = *params;
    s->rp_state.from_dst_file = NULL;
    s->rp_state.error = false;
    s->mbps = 0.0;
    s->downtime = 0;
    s->expected_downtime = 0;
    s->setup_time = 0;
    s->start_postcopy = false;
    s->postcopy_after_devices = false;
    s->migration_thread_running = false;
    error_free(s->error);
    s->error = NULL;

    migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    return s;
}

static GSList *migration_blockers;

int migrate_add_blocker(Error *reason, Error **errp)
{
    if (only_migratable) {
        error_propagate(errp, error_copy(reason));
        error_prepend(errp, "disallowing migration blocker "
                      "(--only_migratable) for: ");
        return -EACCES;
    }

    if (migration_is_idle()) {
        migration_blockers = g_slist_prepend(migration_blockers, reason);
        return 0;
    }

    error_propagate(errp, error_copy(reason));
    error_prepend(errp, "disallowing migration blocker (migration in "
                  "progress) for: ");
    return -EBUSY;
}

void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}
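
/*
 * Usage sketch for the blocker API (hypothetical device code; the Error
 * is owned by the caller and must stay alive until migrate_del_blocker()):
 *
 *   Error *blocker = NULL;
 *   error_setg(&blocker, "Device 'foo' does not support migration");
 *   if (migrate_add_blocker(blocker, errp) < 0) {
 *       error_free(blocker);    // rejected: -EACCES or -EBUSY
 *       return;
 *   }
 *   ...device lifetime...
 *   migrate_del_blocker(blocker);
 *   error_free(blocker);
 */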

void qmp_migrate_incoming(const char *uri, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;

    if (!deferred_incoming) {
        error_setg(errp, "For use with '-incoming defer'");
        return;
    }
    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
        return;
    }

    qemu_start_incoming_migration(uri, &local_err);

    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}

bool migration_is_blocked(Error **errp)
{
    if (qemu_savevm_state_blocked(errp)) {
        return true;
    }

    if (migration_blockers) {
        *errp = error_copy(migration_blockers->data);
        return true;
    }

    return false;
}

void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 Error **errp)
{
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    MigrationParams params;
    const char *p;

    params.blk = has_blk && blk;
    params.shared = has_inc && inc;

    if (migration_is_setup_or_active(s->state) ||
        s->state == MIGRATION_STATUS_CANCELLING ||
        s->state == MIGRATION_STATUS_COLO) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return;
    }

    if (migration_is_blocked(errp)) {
        return;
    }

    s = migrate_init(&params);

    if (strstart(uri, "tcp:", &p)) {
        tcp_start_outgoing_migration(s, p, &local_err);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
    } else {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                          MIGRATION_STATUS_FAILED);
        return;
    }

    if (local_err) {
        migrate_fd_error(s, local_err);
        error_propagate(errp, local_err);
        return;
    }
}

void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}

void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();
    int64_t new_size;

    /* Check for truncation */
    if (value != (size_t)value) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return;
    }

    /* Cache should not be larger than guest ram size */
    if (value > ram_bytes_total()) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeds guest ram size");
        return;
    }

    new_size = xbzrle_cache_resize(value);
    if (new_size < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is smaller than page size");
        return;
    }

    s->xbzrle_cache_size = new_size;
}

int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}

void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrationParameters p = {
        .has_max_bandwidth = true,
        .max_bandwidth = value,
    };

    qmp_migrate_set_parameters(&p, errp);
}

void qmp_migrate_set_downtime(double value, Error **errp)
{
    if (value < 0 || value > MAX_MIGRATE_DOWNTIME_SECONDS) {
        error_setg(errp, "Parameter 'downtime_limit' expects an integer in "
                         "the range of 0 to %d seconds",
                         MAX_MIGRATE_DOWNTIME_SECONDS);
        return;
    }

    value *= 1000; /* Convert to milliseconds */
    value = MAX(0, MIN(INT64_MAX, value));

    MigrationParameters p = {
        .has_downtime_limit = true,
        .downtime_limit = value,
    };

    qmp_migrate_set_parameters(&p, errp);
}

bool migrate_release_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_RELEASE_RAM];
}

bool migrate_postcopy_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
}

bool migrate_auto_converge(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
}

bool migrate_zero_blocks(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
}

bool migrate_use_compression(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
}

int migrate_compress_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.compress_level;
}

int migrate_compress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.compress_threads;
}

int migrate_decompress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.decompress_threads;
}

bool migrate_use_events(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
}

int migrate_use_xbzrle(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}

int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->xbzrle_cache_size;
}

/* migration thread support */
/*
 * Something bad happened to the RP stream, mark an error
 * The caller shall print or trace something to indicate why
 */
static void mark_source_rp_bad(MigrationState *s)
{
    s->rp_state.error = true;
}

static struct rp_cmd_args {
    ssize_t len; /* -1 = variable */
    const char *name;
} rp_cmd_args[] = {
    [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
    [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
    [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
    [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
    [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
    [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
};

/*
 * Process a request for pages received on the return path,
 * We're allowed to send more than requested (e.g. to round to our page size)
 * and we don't need to send pages that have already been sent.
 */
static void migrate_handle_rp_req_pages(MigrationState *ms, const char *rbname,
                                        ram_addr_t start, size_t len)
{
    long our_host_ps = getpagesize();

    trace_migrate_handle_rp_req_pages(rbname, start, len);

    /*
     * Since we currently insist on matching page sizes, just sanity check
     * we're being asked for whole host pages.
     */
    if (start & (our_host_ps - 1) ||
        (len & (our_host_ps - 1))) {
        error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
                     " len: %zd", __func__, start, len);
        mark_source_rp_bad(ms);
        return;
    }

    if (ram_save_queue_pages(rbname, start, len)) {
        mark_source_rp_bad(ms);
    }
}
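
/*
 * Alignment example for the check above: with 4 KiB host pages,
 * our_host_ps - 1 == 0xfff, so start == 0x2000 with len == 0x1000 passes,
 * while start == 0x2800 (mid-page) trips the misalignment error.
 */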

/*
 * Handles messages sent on the return path towards the source VM
 *
 */
static void *source_return_path_thread(void *opaque)
{
    MigrationState *ms = opaque;
    QEMUFile *rp = ms->rp_state.from_dst_file;
    uint16_t header_len, header_type;
    uint8_t buf[512];
    uint32_t tmp32, sibling_error;
    ram_addr_t start = 0; /* =0 to silence warning */
    size_t len = 0, expected_len;
    int res;

    trace_source_return_path_thread_entry();
    while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
           migration_is_setup_or_active(ms->state)) {
        trace_source_return_path_thread_loop_top();
        header_type = qemu_get_be16(rp);
        header_len = qemu_get_be16(rp);

        if (header_type >= MIG_RP_MSG_MAX ||
            header_type == MIG_RP_MSG_INVALID) {
            error_report("RP: Received invalid message 0x%04x length 0x%04x",
                         header_type, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        if ((rp_cmd_args[header_type].len != -1 &&
             header_len != rp_cmd_args[header_type].len) ||
            header_len > sizeof(buf)) {
            error_report("RP: Received '%s' message (0x%04x) with"
                         " incorrect length %d expecting %zu",
                         rp_cmd_args[header_type].name, header_type,
                         header_len, (size_t)rp_cmd_args[header_type].len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* We know we've got a valid header by this point */
        res = qemu_get_buffer(rp, buf, header_len);
        if (res != header_len) {
            error_report("RP: Failed reading data for message 0x%04x"
                         " read %d expected %d",
                         header_type, res, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* OK, we have the message and the data */
        switch (header_type) {
        case MIG_RP_MSG_SHUT:
            sibling_error = ldl_be_p(buf);
            trace_source_return_path_thread_shut(sibling_error);
            if (sibling_error) {
                error_report("RP: Sibling indicated error %d", sibling_error);
                mark_source_rp_bad(ms);
            }
            /*
             * We'll let the main thread deal with closing the RP
             * we could do a shutdown(2) on it, but we're the only user
             * anyway, so there's nothing gained.
             */
            goto out;

        case MIG_RP_MSG_PONG:
            tmp32 = ldl_be_p(buf);
            trace_source_return_path_thread_pong(tmp32);
            break;

        case MIG_RP_MSG_REQ_PAGES:
            start = ldq_be_p(buf);
            len = ldl_be_p(buf + 8);
            migrate_handle_rp_req_pages(ms, NULL, start, len);
            break;

        case MIG_RP_MSG_REQ_PAGES_ID:
            expected_len = 12 + 1; /* header + termination */

            if (header_len >= expected_len) {
                start = ldq_be_p(buf);
                len = ldl_be_p(buf + 8);
                /* Now we expect an idstr */
                tmp32 = buf[12]; /* Length of the following idstr */
                buf[13 + tmp32] = '\0';
                expected_len += tmp32;
            }
            if (header_len != expected_len) {
                error_report("RP: Req_Page_id with length %d expecting %zd",
                             header_len, expected_len);
                mark_source_rp_bad(ms);
                goto out;
            }
            migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
            break;

        default:
            break;
        }
    }
    if (qemu_file_get_error(rp)) {
        trace_source_return_path_thread_bad_end();
        mark_source_rp_bad(ms);
    }

    trace_source_return_path_thread_end();
out:
    ms->rp_state.from_dst_file = NULL;
    qemu_fclose(rp);
    return NULL;
}

static int open_return_path_on_source(MigrationState *ms)
{
    ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
    if (!ms->rp_state.from_dst_file) {
        return -1;
    }

    trace_open_return_path_on_source();
    qemu_thread_create(&ms->rp_state.rp_thread, "return path",
                       source_return_path_thread, ms, QEMU_THREAD_JOINABLE);

    trace_open_return_path_on_source_continue();

    return 0;
}

/* Returns 0 if the RP was ok, otherwise there was an error on the RP */
static int await_return_path_close_on_source(MigrationState *ms)
{
    /*
     * If this is a normal exit then the destination will send a SHUT and the
     * rp_thread will exit, however if there's an error we need to cause
     * it to exit.
     */
    if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
        /*
         * shutdown(2), if we have it, will cause it to unblock if it's stuck
         * waiting for the destination.
         */
        qemu_file_shutdown(ms->rp_state.from_dst_file);
        mark_source_rp_bad(ms);
    }
    trace_await_return_path_close_on_source_joining();
    qemu_thread_join(&ms->rp_state.rp_thread);
    trace_await_return_path_close_on_source_close();
    return ms->rp_state.error;
}

/*
 * Switch from normal iteration to postcopy
 * Returns non-0 on error
 */
static int postcopy_start(MigrationState *ms, bool *old_vm_running)
{
    int ret;
    QIOChannelBuffer *bioc;
    QEMUFile *fb;
    int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    bool restart_block = false;
    migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    trace_postcopy_start();
    qemu_mutex_lock_iothread();
    trace_postcopy_start_set_run();

    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
    *old_vm_running = runstate_is_running();
    global_state_store();
    ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_inactivate_all();
    if (ret < 0) {
        goto fail;
    }
    restart_block = true;

    /*
     * Cause any non-postcopiable, but iterative devices to
     * send out their final data.
     */
    qemu_savevm_state_complete_precopy(ms->to_dst_file, true);

    /*
     * In 'finish migrate' state and with the io-lock held everything should
     * be quiet, but we've potentially still got dirty pages and we
     * need to tell the destination to throw any pages it's already received
     * that are dirty
     */
    if (ram_postcopy_send_discard_bitmap(ms)) {
        error_report("postcopy send discard bitmap failed");
        goto fail;
    }

    /*
     * send rest of state - note things that are doing postcopy
     * will notice we're in POSTCOPY_ACTIVE and not actually
     * wrap their state up here
     */
    qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
    /* Ping just for debugging, helps line traces up */
    qemu_savevm_send_ping(ms->to_dst_file, 2);

    /*
     * While loading the device state we may trigger page transfer
     * requests and the fd must be free to process those, and thus
     * the destination must read the whole device state off the fd before
     * it starts processing it.  Unfortunately the ad-hoc migration format
     * doesn't allow the destination to know the size to read without fully
     * parsing it through each device's load-state code (especially the open
     * coded devices that use get/put).
     * So we wrap the device state up in a package with a length at the start;
     * to do this we use a qemu_buf to hold the whole of the device state.
     */
    bioc = qio_channel_buffer_new(4096);
    qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
    fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc));
    object_unref(OBJECT(bioc));

    /*
     * Make sure the receiver can get incoming pages before we send the rest
     * of the state
     */
    qemu_savevm_send_postcopy_listen(fb);

    qemu_savevm_state_complete_precopy(fb, false);
    qemu_savevm_send_ping(fb, 3);

    qemu_savevm_send_postcopy_run(fb);

    /* <><> end of stuff going into the package */

    /* Last point of recovery; as soon as we send the package the destination
     * can open devices and potentially start running.
     * Let's just check again we've not got any errors.
     */
    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_report("postcopy_start: Migration stream errored (pre package)");
        goto fail_closefb;
    }

    restart_block = false;

    /* Now send that blob */
    if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
        goto fail_closefb;
    }
    qemu_fclose(fb);

    /* Send a notify to give a chance for anything that needs to happen
     * at the transition to postcopy and after the device state; in particular
     * spice needs to trigger a transition now
     */
    ms->postcopy_after_devices = true;
    notifier_list_notify(&migration_state_notifiers, ms);

    ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;

    qemu_mutex_unlock_iothread();

    /*
     * Although this ping is just for debug, it could potentially be
     * used for getting a better measurement of downtime at the source.
     */
    qemu_savevm_send_ping(ms->to_dst_file, 4);

    if (migrate_release_ram()) {
        ram_postcopy_migrated_memory_release(ms);
    }

    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_report("postcopy_start: Migration stream errored");
        migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                          MIGRATION_STATUS_FAILED);
    }

    return ret;

fail_closefb:
    qemu_fclose(fb);
fail:
    migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    if (restart_block) {
        /* A failure happened early enough that we know the destination hasn't
         * accessed block devices, so we're safe to recover.
         */
        Error *local_err = NULL;

        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
    qemu_mutex_unlock_iothread();
    return -1;
}
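
/*
 * Shape of the postcopy package built above (a sketch; the actual command
 * encodings live in the savevm code):
 *
 *   main stream:  ... | CMD_PACKAGED | be32 length | <package bytes> | ...
 *   package:      LISTEN cmd | remaining device state | ping(3) | RUN cmd
 *
 * The destination reads the whole package off the wire first and then
 * replays it, so page requests triggered while it loads device state can
 * still be serviced on the main channel.
 */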

/**
 * migration_completion: Used by migration_thread when there's not much left.
 *   The caller 'breaks' the loop when this returns.
 *
 * @s: Current migration state
 * @current_active_state: The migration state we expect to be in
 * @*old_vm_running: Pointer to old_vm_running flag
 * @*start_time: Pointer to time to update
 */
static void migration_completion(MigrationState *s, int current_active_state,
                                 bool *old_vm_running,
                                 int64_t *start_time)
{
    int ret;

    if (s->state == MIGRATION_STATUS_ACTIVE) {
        qemu_mutex_lock_iothread();
        *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
        *old_vm_running = runstate_is_running();
        ret = global_state_store();

        if (!ret) {
            ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
            /*
             * Don't mark the image with BDRV_O_INACTIVE flag if
             * we will go into COLO stage later.
             */
            if (ret >= 0 && !migrate_colo_enabled()) {
                ret = bdrv_inactivate_all();
            }
            if (ret >= 0) {
                qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
                qemu_savevm_state_complete_precopy(s->to_dst_file, false);
                s->block_inactive = true;
            }
        }
        qemu_mutex_unlock_iothread();

        if (ret < 0) {
            goto fail;
        }
    } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        trace_migration_completion_postcopy_end();

        qemu_savevm_state_complete_postcopy(s->to_dst_file);
        trace_migration_completion_postcopy_end_after_complete();
    }

    /*
     * If rp was opened we must clean up the thread before
     * cleaning everything else up (since if there are no failures
     * it will wait for the destination to send its status in
     * a SHUT command).
     * Postcopy opens rp if enabled (even if it's not activated).
     */
    if (migrate_postcopy_ram()) {
        int rp_error;
        trace_migration_completion_postcopy_end_before_rp();
        rp_error = await_return_path_close_on_source(s);
        trace_migration_completion_postcopy_end_after_rp(rp_error);
        if (rp_error) {
            goto fail_invalidate;
        }
    }

    if (qemu_file_get_error(s->to_dst_file)) {
        trace_migration_completion_file_err();
        goto fail_invalidate;
    }

    if (!migrate_colo_enabled()) {
        migrate_set_state(&s->state, current_active_state,
                          MIGRATION_STATUS_COMPLETED);
    }

    return;

fail_invalidate:
    /* If not doing postcopy, vm_start() will be called: let's regain
     * control on images.
     */
    if (s->state == MIGRATION_STATUS_ACTIVE) {
        Error *local_err = NULL;

        qemu_mutex_lock_iothread();
        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
        qemu_mutex_unlock_iothread();
    }

fail:
    migrate_set_state(&s->state, current_active_state,
                      MIGRATION_STATUS_FAILED);
}

bool migrate_colo_enabled(void)
{
    MigrationState *s = migrate_get_current();
    return s->enabled_capabilities[MIGRATION_CAPABILITY_X_COLO];
}

/*
 * Master migration thread on the source VM.
 * It drives the migration and pumps the data down the outgoing channel.
 */
static void *migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    /* Used by the bandwidth calcs, updated later */
    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    int64_t initial_bytes = 0;
    /*
     * The final stage happens when the remaining data is smaller than
     * this threshold; it's calculated from the requested downtime and
     * measured bandwidth
     */
    int64_t threshold_size = 0;
    int64_t start_time = initial_time;
    int64_t end_time;
    bool old_vm_running = false;
    bool entered_postcopy = false;
    /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
    enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;
    bool enable_colo = migrate_colo_enabled();

    rcu_register_thread();

    qemu_savevm_state_header(s->to_dst_file);

    if (migrate_postcopy_ram()) {
        /* Now tell the dest that it should open its end so it can reply */
        qemu_savevm_send_open_return_path(s->to_dst_file);

        /* And do a ping that will make stuff easier to debug */
        qemu_savevm_send_ping(s->to_dst_file, 1);

        /*
         * Tell the destination that we *might* want to do postcopy later;
         * if the other end can't do postcopy it should fail now, nice and
         * early.
         */
        qemu_savevm_send_postcopy_advise(s->to_dst_file);
    }

    qemu_savevm_state_begin(s->to_dst_file, &s->params);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_ACTIVE);

    trace_migration_thread_setup_complete();

    while (s->state == MIGRATION_STATUS_ACTIVE ||
           s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        int64_t current_time;
        uint64_t pending_size;

        if (!qemu_file_rate_limit(s->to_dst_file)) {
            uint64_t pend_post, pend_nonpost;

            qemu_savevm_state_pending(s->to_dst_file, threshold_size,
                                      &pend_nonpost, &pend_post);
            pending_size = pend_nonpost + pend_post;
            trace_migrate_pending(pending_size, threshold_size,
                                  pend_post, pend_nonpost);
            if (pending_size && pending_size >= threshold_size) {
                /* Still a significant amount to transfer */

                if (migrate_postcopy_ram() &&
                    s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
                    pend_nonpost <= threshold_size &&
                    atomic_read(&s->start_postcopy)) {

                    if (!postcopy_start(s, &old_vm_running)) {
                        current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE;
                        entered_postcopy = true;
                    }

                    continue;
                }
                /* Just another iteration step */
                qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy);
            } else {
                trace_migration_thread_low_pending(pending_size);
                migration_completion(s, current_active_state,
                                     &old_vm_running, &start_time);
                break;
            }
        }

        if (qemu_file_get_error(s->to_dst_file)) {
            migrate_set_state(&s->state, current_active_state,
                              MIGRATION_STATUS_FAILED);
            trace_migration_thread_file_err();
            break;
        }
        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        if (current_time >= initial_time + BUFFER_DELAY) {
            uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) -
                                         initial_bytes;
            uint64_t time_spent = current_time - initial_time;
            double bandwidth = (double)transferred_bytes / time_spent;
            threshold_size = bandwidth * s->parameters.downtime_limit;

            s->mbps = (((double) transferred_bytes * 8.0) /
                    ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;

            trace_migrate_transferred(transferred_bytes, time_spent,
                                      bandwidth, threshold_size);
            /* if we haven't sent anything, we don't want to recalculate
               10000 is a small enough number for our purposes */
            if (ram_dirty_pages_rate() && transferred_bytes > 10000) {
                s->expected_downtime = ram_dirty_pages_rate() *
                    qemu_target_page_size() / bandwidth;
            }

            qemu_file_reset_rate_limit(s->to_dst_file);
            initial_time = current_time;
            initial_bytes = qemu_ftell(s->to_dst_file);
        }
        if (qemu_file_rate_limit(s->to_dst_file)) {
            /* usleep expects microseconds */
            g_usleep((initial_time + BUFFER_DELAY - current_time) * 1000);
        }
    }

    trace_migration_thread_after_loop();
    /* If we enabled cpu throttling for auto-converge, turn it off. */
    cpu_throttle_stop();
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    qemu_mutex_lock_iothread();
    /*
     * Resources allocated by migration will be reused in the COLO process,
     * so don't release them here.
     */
    if (!enable_colo) {
        qemu_savevm_state_cleanup();
    }
    if (s->state == MIGRATION_STATUS_COMPLETED) {
        uint64_t transferred_bytes = qemu_ftell(s->to_dst_file);
        s->total_time = end_time - s->total_time;
        if (!entered_postcopy) {
            s->downtime = end_time - start_time;
        }
        if (s->total_time) {
            s->mbps = (((double) transferred_bytes * 8.0) /
                       ((double) s->total_time)) / 1000;
        }
        runstate_set(RUN_STATE_POSTMIGRATE);
    } else {
        if (s->state == MIGRATION_STATUS_ACTIVE && enable_colo) {
            migrate_start_colo_process(s);
            qemu_savevm_state_cleanup();
            /*
             * FIXME: we run the VM in COLO regardless of its previous
             * running state; after exiting COLO it keeps running.
             */
            old_vm_running = true;
        }
        if (old_vm_running && !entered_postcopy) {
            vm_start();
        } else {
            if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
                runstate_set(RUN_STATE_POSTMIGRATE);
            }
        }
    }
    qemu_bh_schedule(s->cleanup_bh);
    qemu_mutex_unlock_iothread();

    rcu_unregister_thread();
    return NULL;
}
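
/*
 * Worked example of the convergence arithmetic in the loop above: if the
 * last 100 ms window moved 10 MiB, bandwidth is 10 MiB / 100 ms, i.e.
 * ~104,858 bytes/ms.  With the default 300 ms downtime_limit,
 * threshold_size is ~31 MB, so the RAM/device state must report less than
 * that much pending data before migration_completion() is attempted.
 */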

void migrate_fd_connect(MigrationState *s)
{
    s->expected_downtime = s->parameters.downtime_limit;
    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);

    qemu_file_set_blocking(s->to_dst_file, true);
    qemu_file_set_rate_limit(s->to_dst_file,
                             s->parameters.max_bandwidth / XFER_LIMIT_RATIO);

    /* Notify before starting migration thread */
    notifier_list_notify(&migration_state_notifiers, s);

    /*
     * Open the return path; currently for postcopy but other things might
     * also want it.
     */
    if (migrate_postcopy_ram()) {
        if (open_return_path_on_source(s)) {
            error_report("Unable to open return-path for postcopy");
            migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                              MIGRATION_STATUS_FAILED);
            migrate_fd_cleanup(s);
            return;
        }
    }

    migrate_compress_threads_create();
    qemu_thread_create(&s->thread, "live_migration", migration_thread, s,
                       QEMU_THREAD_JOINABLE);
    s->migration_thread_running = true;
}