/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "sysemu/sysemu.h"
#include "block/block.h"
#include "qapi/qmp/qerror.h"
#include "qapi/util.h"
#include "qemu/sockets.h"
#include "qemu/rcu.h"
#include "migration/block.h"
#include "migration/postcopy-ram.h"
#include "qemu/thread.h"
#include "qmp-commands.h"
#include "trace.h"
#include "qapi-event.h"
#include "qom/cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

#define MAX_THROTTLE  (32 << 20)      /* Migration transfer speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)

/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count, usually decompression is at
 * least 4 times as fast as compression.*/
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/* 0: no compression, 1: best speed, ... 9: best compression ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
/* Define default autoconverge cpu throttle migration parameters */
#define DEFAULT_MIGRATE_X_CPU_THROTTLE_INITIAL 20
#define DEFAULT_MIGRATE_X_CPU_THROTTLE_INCREMENT 10

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

static bool deferred_incoming;

/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since its state is used during cleanup
 * at the end as MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */
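/*
 * Both the QMP commands and the migration thread operate on the single
 * MigrationState returned by migrate_get_current(); the defaults below
 * are what query-migrate-parameters reports until the user changes them.
 * The src_page_req_mutex is initialised lazily on the first call.
 */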
/* For outgoing */
MigrationState *migrate_get_current(void)
{
    static bool once;
    static MigrationState current_migration = {
        .state = MIGRATION_STATUS_NONE,
        .bandwidth_limit = MAX_THROTTLE,
        .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
        .mbps = -1,
        .parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] =
                DEFAULT_MIGRATE_COMPRESS_LEVEL,
        .parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
                DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
        .parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
                DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
        .parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] =
                DEFAULT_MIGRATE_X_CPU_THROTTLE_INITIAL,
        .parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] =
                DEFAULT_MIGRATE_X_CPU_THROTTLE_INCREMENT,
    };

    if (!once) {
        qemu_mutex_init(&current_migration.src_page_req_mutex);
        once = true;
    }
    return &current_migration;
}

/* For incoming */
static MigrationIncomingState *mis_current;

MigrationIncomingState *migration_incoming_get_current(void)
{
    return mis_current;
}

MigrationIncomingState *migration_incoming_state_new(QEMUFile* f)
{
    mis_current = g_new0(MigrationIncomingState, 1);
    mis_current->from_src_file = f;
    mis_current->state = MIGRATION_STATUS_NONE;
    QLIST_INIT(&mis_current->loadvm_handlers);
    qemu_mutex_init(&mis_current->rp_mutex);
    qemu_event_init(&mis_current->main_thread_load_event, false);

    return mis_current;
}

void migration_incoming_state_destroy(void)
{
    qemu_event_destroy(&mis_current->main_thread_load_event);
    loadvm_free_handlers(mis_current);
    g_free(mis_current);
    mis_current = NULL;
}


typedef struct {
    bool optional;
    uint32_t size;
    uint8_t runstate[100];
    RunState state;
    bool received;
} GlobalState;

static GlobalState global_state;

int global_state_store(void)
{
    if (!runstate_store((char *)global_state.runstate,
                        sizeof(global_state.runstate))) {
        error_report("runstate name too big: %s", global_state.runstate);
        trace_migrate_state_too_big();
        return -EINVAL;
    }
    return 0;
}

void global_state_store_running(void)
{
    const char *state = RunState_lookup[RUN_STATE_RUNNING];
    strncpy((char *)global_state.runstate,
            state, sizeof(global_state.runstate));
}

static bool global_state_received(void)
{
    return global_state.received;
}

static RunState global_state_get_runstate(void)
{
    return global_state.state;
}

void global_state_set_optional(void)
{
    global_state.optional = true;
}

static bool global_state_needed(void *opaque)
{
    GlobalState *s = opaque;
    char *runstate = (char *)s->runstate;

    /* If it is not optional, it is mandatory */

    if (s->optional == false) {
        return true;
    }

    /* If state is running or paused, it is not needed */

    if (strcmp(runstate, "running") == 0 ||
        strcmp(runstate, "paused") == 0) {
        return false;
    }

    /* for any other state it is needed */
    return true;
}
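/*
 * The "globalstate" section sends the runstate name as a string; on the
 * destination, post_load parses it back into a RunState and marks it as
 * received so that process_incoming_migration_co() can restore the
 * source's runstate instead of falling back to autostart.
 */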
static int global_state_post_load(void *opaque, int version_id)
{
    GlobalState *s = opaque;
    Error *local_err = NULL;
    int r;
    char *runstate = (char *)s->runstate;

    s->received = true;
    trace_migrate_global_state_post_load(runstate);

    r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE__MAX,
                        -1, &local_err);

    if (r == -1) {
        if (local_err) {
            error_report_err(local_err);
        }
        return -EINVAL;
    }
    s->state = r;

    return 0;
}

static void global_state_pre_save(void *opaque)
{
    GlobalState *s = opaque;

    trace_migrate_global_state_pre_save((char *)s->runstate);
    s->size = strlen((char *)s->runstate) + 1;
}

static const VMStateDescription vmstate_globalstate = {
    .name = "globalstate",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = global_state_post_load,
    .pre_save = global_state_pre_save,
    .needed = global_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(size, GlobalState),
        VMSTATE_BUFFER(runstate, GlobalState),
        VMSTATE_END_OF_LIST()
    },
};

void register_global_state(void)
{
    /* We use this regardless of whether the remote end sends it */
    strcpy((char *)&global_state.runstate, "");
    global_state.received = false;
    vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_use_events()) {
        qapi_event_send_migration(new_state, &error_abort);
    }
}

/*
 * Called on -incoming with a defer: uri.
 * The migration can be started later after any parameters have been
 * changed.
 */
static void deferred_incoming_migration(Error **errp)
{
    if (deferred_incoming) {
        error_setg(errp, "Incoming migration already deferred");
    }
    deferred_incoming = true;
}

/* Request a range of pages from the source VM at the given
 * start address.
 *   rbname: Name of the RAMBlock to request the page in, if NULL it's the same
 *           as the last request (a name must have been given previously)
 *   Start: Address offset within the RB
 *   Len: Length in bytes required - must be a multiple of pagesize
 */
void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
                               ram_addr_t start, size_t len)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
    size_t msglen = 12; /* start + len */

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    if (rbname) {
        int rbname_len = strlen(rbname);
        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc);
    } else {
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc);
    }
}

void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
#if !defined(WIN32)
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
#endif
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}
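/*
 * Whichever incoming transport is chosen above eventually hands its
 * QEMUFile to process_incoming_migration(), which runs the coroutine
 * below to load the stream; any postcopy-specific cleanup is decided
 * once qemu_loadvm_state() has returned.
 */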
static void process_incoming_migration_co(void *opaque)
{
    QEMUFile *f = opaque;
    Error *local_err = NULL;
    MigrationIncomingState *mis;
    PostcopyState ps;
    int ret;

    mis = migration_incoming_state_new(f);
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_ACTIVE);
    ret = qemu_loadvm_state(f);

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    qemu_fclose(f);
    free_xbzrle_decoded_buf();

    if (ret < 0) {
        migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        error_report("load of migration failed: %s", strerror(-ret));
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    /* Make sure all file formats flush their mutable metadata */
    bdrv_invalidate_cache_all(&local_err);
    if (local_err) {
        migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        error_report_err(local_err);
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self();

    /* If global state section was not received or we are in running
       state, we need to obey autostart.  Any other state is set with
       runstate_set. */

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else {
        runstate_set(global_state_get_runstate());
    }
    migrate_decompress_threads_join();
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    migration_incoming_state_destroy();
}

void process_incoming_migration(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
    int fd = qemu_get_fd(f);

    assert(fd != -1);
    migrate_decompress_threads_create();
    qemu_set_nonblock(fd);
    qemu_coroutine_enter(co, f);
}
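/*
 * Return-path wire format, as written by migrate_send_rp_message() and
 * parsed by source_return_path_thread() on the source side: a 2-byte
 * big-endian message type, a 2-byte big-endian payload length, then the
 * payload itself.  For example, REQ_PAGES carries an 8-byte start address
 * plus a 4-byte length, and REQ_PAGES_ID appends a 1-byte RAMBlock-name
 * length followed by the name (see migrate_send_rp_req_pages() above).
 */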
/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
void migrate_send_rp_message(MigrationIncomingState *mis,
                             enum mig_rp_message_type message_type,
                             uint16_t len, void *data)
{
    trace_migrate_send_rp_message((int)message_type, len);
    qemu_mutex_lock(&mis->rp_mutex);
    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);
    qemu_mutex_unlock(&mis->rp_mutex);
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  Non-0 value indicates
 * error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

/* Amount of time, in nanoseconds, that we are willing to have the guest
 * "down" (paused) while migration completes.  Nanoseconds are used because
 * that is the maximum resolution that get_clock() can achieve; this is an
 * internal measure.  All user-visible units must be in seconds. */
static uint64_t max_downtime = 300000000;

uint64_t migrate_max_downtime(void)
{
    return max_downtime;
}

MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value =
            g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}

MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    params = g_malloc0(sizeof(*params));
    params->compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
    params->compress_threads =
            s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
    params->decompress_threads =
            s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
    params->x_cpu_throttle_initial =
            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];
    params->x_cpu_throttle_increment =
            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];

    return params;
}
/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
static bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_SETUP:
        return true;

    default:
        return false;

    }
}

static void get_xbzrle_cache_stats(MigrationInfo *info)
{
    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
        info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
        info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
        info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
        info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
    }
}
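/*
 * What qmp_query_migrate() reports depends on the state: while a migration
 * is ACTIVE or POSTCOPY_ACTIVE the time and RAM counters are live running
 * totals (total_time is "time so far"), whereas once it is COMPLETED the
 * stored totals and the measured downtime are returned instead.
 */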
MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        break;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = ram_bytes_remaining();
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->dirty_pages_rate = s->dirty_pages_rate;
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        if (cpu_throttle_active()) {
            info->has_x_cpu_throttle_percentage = true;
            info->x_cpu_throttle_percentage = cpu_throttle_get_percentage();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
        /* Mostly the same as active; TODO add some postcopy stats */
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = ram_bytes_remaining();
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->dirty_pages_rate = s->dirty_pages_rate;
        info->ram->mbps = s->mbps;

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_COMPLETED:
        get_xbzrle_cache_stats(info);

        info->has_status = true;
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = 0;
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    }
    info->status = s->state;

    return info;
}

void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;

    if (migration_is_setup_or_active(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    for (cap = params; cap; cap = cap->next) {
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }

    if (migrate_postcopy_ram()) {
        if (migrate_use_compression()) {
            /* The decompression threads asynchronously write into RAM
             * rather than use the atomic copies needed to avoid
             * userfaulting.  It should be possible to fix the decompression
             * threads for compatibility in future.
             */
            error_report("Postcopy is not currently compatible with "
                         "compression");
            s->enabled_capabilities[MIGRATION_CAPABILITY_X_POSTCOPY_RAM] =
                false;
        }
    }
}

void qmp_migrate_set_parameters(bool has_compress_level,
                                int64_t compress_level,
                                bool has_compress_threads,
                                int64_t compress_threads,
                                bool has_decompress_threads,
                                int64_t decompress_threads,
                                bool has_x_cpu_throttle_initial,
                                int64_t x_cpu_throttle_initial,
                                bool has_x_cpu_throttle_increment,
                                int64_t x_cpu_throttle_increment, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (has_compress_level && (compress_level < 0 || compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "is invalid, it should be in the range of 0 to 9");
        return;
    }
    if (has_compress_threads &&
            (compress_threads < 1 || compress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "compress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (has_decompress_threads &&
            (decompress_threads < 1 || decompress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (has_x_cpu_throttle_initial &&
            (x_cpu_throttle_initial < 1 || x_cpu_throttle_initial > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "x_cpu_throttle_initial",
                   "an integer in the range of 1 to 99");
    }
    if (has_x_cpu_throttle_increment &&
            (x_cpu_throttle_increment < 1 || x_cpu_throttle_increment > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "x_cpu_throttle_increment",
                   "an integer in the range of 1 to 99");
    }

    if (has_compress_level) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
    }
    if (has_compress_threads) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = compress_threads;
    }
    if (has_decompress_threads) {
        s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
            decompress_threads;
    }
    if (has_x_cpu_throttle_initial) {
        s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] =
            x_cpu_throttle_initial;
    }

    if (has_x_cpu_throttle_increment) {
        s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] =
            x_cpu_throttle_increment;
    }
}

void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy_ram()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                         " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                         " started");
        return;
    }
    /*
     * we don't error if migration has finished since that would be racy
     * with issuing this command.
     */
    atomic_set(&s->start_postcopy, true);
}

/* shared migration helpers */

void migrate_set_state(int *state, int old_state, int new_state)
{
    if (atomic_cmpxchg(state, old_state, new_state) == old_state) {
        trace_migrate_set_state(new_state);
        migrate_generate_event(new_state);
    }
}

static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    flush_page_queue(s);

    if (s->to_dst_file) {
        trace_migrate_fd_cleanup();
        qemu_mutex_unlock_iothread();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        qemu_mutex_lock_iothread();

        migrate_compress_threads_join();
        qemu_fclose(s->to_dst_file);
        s->to_dst_file = NULL;
    }

    assert((s->state != MIGRATION_STATUS_ACTIVE) &&
           (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE));

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    notifier_list_notify(&migration_state_notifiers, s);
}

void migrate_fd_error(MigrationState *s)
{
    trace_migrate_fd_error();
    assert(s->to_dst_file == NULL);
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_FAILED);
    notifier_list_notify(&migration_state_notifiers, s);
}

static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->to_dst_file;
    trace_migrate_fd_cancel();

    if (s->rp_state.from_dst_file) {
        /* shut down the rp socket, causing the rp thread to shut down too */
        qemu_file_shutdown(s->rp_state.from_dst_file);
    }

    do {
        old_state = s->state;
        if (!migration_is_setup_or_active(old_state)) {
            break;
        }
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
}

void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}

bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}

bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}

bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}

bool migration_in_postcopy(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

MigrationState *migrate_init(const MigrationParams *params)
{
    MigrationState *s = migrate_get_current();

    /*
     * Reinitialise all migration state, except
     * parameters/capabilities that the user set, and
     * locks.
     */
    s->bytes_xfer = 0;
    s->xfer_limit = 0;
    s->cleanup_bh = 0;
    s->to_dst_file = NULL;
    s->state = MIGRATION_STATUS_NONE;
    s->params = *params;
    s->rp_state.from_dst_file = NULL;
    s->rp_state.error = false;
    s->mbps = 0.0;
    s->downtime = 0;
    s->expected_downtime = 0;
    s->dirty_pages_rate = 0;
    s->dirty_bytes_rate = 0;
    s->setup_time = 0;
    s->dirty_sync_count = 0;
    s->start_postcopy = false;
    s->migration_thread_running = false;
    s->last_req_rb = NULL;

    migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    QSIMPLEQ_INIT(&s->src_page_requests);

    s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    return s;
}

static GSList *migration_blockers;

void migrate_add_blocker(Error *reason)
{
    migration_blockers = g_slist_prepend(migration_blockers, reason);
}

void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}

void qmp_migrate_incoming(const char *uri, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;

    if (!deferred_incoming) {
        error_setg(errp, "For use with '-incoming defer'");
        return;
    }
    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
    }

    qemu_start_incoming_migration(uri, &local_err);

    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}

void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 Error **errp)
{
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    MigrationParams params;
    const char *p;

    params.blk = has_blk && blk;
    params.shared = has_inc && inc;

    if (migration_is_setup_or_active(s->state) ||
        s->state == MIGRATION_STATUS_CANCELLING) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return;
    }

    if (qemu_savevm_state_blocked(errp)) {
        return;
    }

    if (migration_blockers) {
        *errp = error_copy(migration_blockers->data);
        return;
    }

    s = migrate_init(&params);

    if (strstart(uri, "tcp:", &p)) {
        tcp_start_outgoing_migration(s, p, &local_err);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
#endif
#if !defined(WIN32)
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
#endif
    } else {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                          MIGRATION_STATUS_FAILED);
        return;
    }

    if (local_err) {
        migrate_fd_error(s);
        error_propagate(errp, local_err);
        return;
    }
}

void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}

void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();
    int64_t new_size;

    /* Check for truncation */
    if (value != (size_t)value) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return;
    }

    /* Cache should not be larger than guest ram size */
    if (value > ram_bytes_total()) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeds guest ram size ");
        return;
    }

    new_size = xbzrle_cache_resize(value);
    if (new_size < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is smaller than page size");
        return;
    }

    s->xbzrle_cache_size = new_size;
}

int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}

void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrationState *s;

    if (value < 0) {
        value = 0;
    }
    if (value > SIZE_MAX) {
        value = SIZE_MAX;
    }

    s = migrate_get_current();
    s->bandwidth_limit = value;
    if (s->to_dst_file) {
        qemu_file_set_rate_limit(s->to_dst_file,
                                 s->bandwidth_limit / XFER_LIMIT_RATIO);
    }
}

void qmp_migrate_set_downtime(double value, Error **errp)
{
    value *= 1e9;
    value = MAX(0, MIN(UINT64_MAX, value));
    max_downtime = (uint64_t)value;
}

bool migrate_postcopy_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_X_POSTCOPY_RAM];
}

bool migrate_auto_converge(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
}

bool migrate_zero_blocks(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
}

bool migrate_use_compression(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
}

int migrate_compress_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
}

int migrate_compress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
}

int migrate_decompress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
}

bool migrate_use_events(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
}

int migrate_use_xbzrle(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}

int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->xbzrle_cache_size;
}

/* migration thread support */
/*
 * Something bad happened to the RP stream, mark an error
 * The caller shall print or trace something to indicate why
 */
static void mark_source_rp_bad(MigrationState *s)
{
    s->rp_state.error = true;
}
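/*
 * Expected payload length for each return-path message, used by
 * source_return_path_thread() to validate the header it just read;
 * -1 means the payload is variable-length (still bounded by that
 * thread's max_len).
 */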
static struct rp_cmd_args {
    ssize_t     len; /* -1 = variable */
    const char *name;
} rp_cmd_args[] = {
    [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
    [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
    [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
    [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
    [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
    [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
};

/*
 * Process a request for pages received on the return path,
 * We're allowed to send more than requested (e.g. to round to our page size)
 * and we don't need to send pages that have already been sent.
 */
static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
                                        ram_addr_t start, size_t len)
{
    long our_host_ps = getpagesize();

    trace_migrate_handle_rp_req_pages(rbname, start, len);

    /*
     * Since we currently insist on matching page sizes, just sanity check
     * we're being asked for whole host pages.
     */
    if (start & (our_host_ps-1) ||
        (len & (our_host_ps-1))) {
        error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
                     " len: %zd", __func__, start, len);
        mark_source_rp_bad(ms);
        return;
    }

    if (ram_save_queue_pages(ms, rbname, start, len)) {
        mark_source_rp_bad(ms);
    }
}

/*
 * Handles messages sent on the return path towards the source VM
 *
 */
static void *source_return_path_thread(void *opaque)
{
    MigrationState *ms = opaque;
    QEMUFile *rp = ms->rp_state.from_dst_file;
    uint16_t header_len, header_type;
    const int max_len = 512;
    uint8_t buf[max_len];
    uint32_t tmp32, sibling_error;
    ram_addr_t start = 0; /* =0 to silence warning */
    size_t len = 0, expected_len;
    int res;

    trace_source_return_path_thread_entry();
    while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
           migration_is_setup_or_active(ms->state)) {
        trace_source_return_path_thread_loop_top();
        header_type = qemu_get_be16(rp);
        header_len = qemu_get_be16(rp);

        if (header_type >= MIG_RP_MSG_MAX ||
            header_type == MIG_RP_MSG_INVALID) {
            error_report("RP: Received invalid message 0x%04x length 0x%04x",
                         header_type, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        if ((rp_cmd_args[header_type].len != -1 &&
             header_len != rp_cmd_args[header_type].len) ||
            header_len > max_len) {
            error_report("RP: Received '%s' message (0x%04x) with "
                         "incorrect length %d expecting %zu",
                         rp_cmd_args[header_type].name, header_type, header_len,
                         (size_t)rp_cmd_args[header_type].len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* We know we've got a valid header by this point */
        res = qemu_get_buffer(rp, buf, header_len);
        if (res != header_len) {
            error_report("RP: Failed reading data for message 0x%04x"
                         " read %d expected %d",
                         header_type, res, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* OK, we have the message and the data */
        switch (header_type) {
        case MIG_RP_MSG_SHUT:
            sibling_error = be32_to_cpup((uint32_t *)buf);
            trace_source_return_path_thread_shut(sibling_error);
            if (sibling_error) {
                error_report("RP: Sibling indicated error %d", sibling_error);
                mark_source_rp_bad(ms);
            }
            /*
             * We'll let the main thread deal with closing the RP;
             * we could do a shutdown(2) on it, but we're the only user
             * anyway, so there's nothing gained.
             */
            goto out;

        case MIG_RP_MSG_PONG:
            tmp32 = be32_to_cpup((uint32_t *)buf);
            trace_source_return_path_thread_pong(tmp32);
            break;

        case MIG_RP_MSG_REQ_PAGES:
            start = be64_to_cpup((uint64_t *)buf);
            len = be32_to_cpup((uint32_t *)(buf + 8));
            migrate_handle_rp_req_pages(ms, NULL, start, len);
            break;

        case MIG_RP_MSG_REQ_PAGES_ID:
            expected_len = 12 + 1; /* header + termination */

            if (header_len >= expected_len) {
                start = be64_to_cpup((uint64_t *)buf);
                len = be32_to_cpup((uint32_t *)(buf + 8));
                /* Now we expect an idstr */
                tmp32 = buf[12]; /* Length of the following idstr */
                buf[13 + tmp32] = '\0';
                expected_len += tmp32;
            }
            if (header_len != expected_len) {
                error_report("RP: Req_Page_id with length %d expecting %zd",
                             header_len, expected_len);
                mark_source_rp_bad(ms);
                goto out;
            }
            migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
            break;

        default:
            break;
        }
    }
    if (qemu_file_get_error(rp)) {
        trace_source_return_path_thread_bad_end();
        mark_source_rp_bad(ms);
    }

    trace_source_return_path_thread_end();
out:
    ms->rp_state.from_dst_file = NULL;
    qemu_fclose(rp);
    return NULL;
}

static int open_return_path_on_source(MigrationState *ms)
{

    ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
    if (!ms->rp_state.from_dst_file) {
        return -1;
    }

    trace_open_return_path_on_source();
    qemu_thread_create(&ms->rp_state.rp_thread, "return path",
                       source_return_path_thread, ms, QEMU_THREAD_JOINABLE);

    trace_open_return_path_on_source_continue();

    return 0;
}
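/*
 * The return path is only opened when postcopy is enabled (see
 * migrate_fd_connect() below); migration_completion() later uses
 * await_return_path_close_on_source() to join the thread, so the
 * destination's SHUT status has been collected before the migration is
 * declared complete.
 */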
/* Returns 0 if the RP was ok, otherwise there was an error on the RP */
static int await_return_path_close_on_source(MigrationState *ms)
{
    /*
     * If this is a normal exit then the destination will send a SHUT and the
     * rp_thread will exit, however if there's an error we need to cause
     * it to exit.
     */
    if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
        /*
         * shutdown(2), if we have it, will cause it to unblock if it's stuck
         * waiting for the destination.
         */
        qemu_file_shutdown(ms->rp_state.from_dst_file);
        mark_source_rp_bad(ms);
    }
    trace_await_return_path_close_on_source_joining();
    qemu_thread_join(&ms->rp_state.rp_thread);
    trace_await_return_path_close_on_source_close();
    return ms->rp_state.error;
}

/*
 * Switch from normal iteration to postcopy
 * Returns non-0 on error
 */
static int postcopy_start(MigrationState *ms, bool *old_vm_running)
{
    int ret;
    const QEMUSizedBuffer *qsb;
    int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    trace_postcopy_start();
    qemu_mutex_lock_iothread();
    trace_postcopy_start_set_run();

    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
    *old_vm_running = runstate_is_running();
    global_state_store();
    ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_inactivate_all();
    if (ret < 0) {
        goto fail;
    }

    /*
     * Cause any non-postcopiable, but iterative devices to
     * send out their final data.
     */
    qemu_savevm_state_complete_precopy(ms->to_dst_file, true);

    /*
     * In the "finish-migrate" runstate, with the io-lock held, everything
     * should be quiet, but we've potentially still got dirty pages and we
     * need to tell the destination to throw any pages it's already received
     * that are dirty
     */
    if (ram_postcopy_send_discard_bitmap(ms)) {
        error_report("postcopy send discard bitmap failed");
        goto fail;
    }

    /*
     * send rest of state - note things that are doing postcopy
     * will notice we're in POSTCOPY_ACTIVE and not actually
     * wrap their state up here
     */
    qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
    /* Ping just for debugging, helps line traces up */
    qemu_savevm_send_ping(ms->to_dst_file, 2);

    /*
     * While loading the device state we may trigger page transfer
     * requests and the fd must be free to process those, and thus
     * the destination must read the whole device state off the fd before
     * it starts processing it.  Unfortunately the ad-hoc migration format
     * doesn't allow the destination to know the size to read without fully
     * parsing it through each devices load-state code (especially the open
     * coded devices that use get/put).
     * So we wrap the device state up in a package with a length at the start;
     * to do this we use a qemu_buf to hold the whole of the device state.
     */
    QEMUFile *fb = qemu_bufopen("w", NULL);
    if (!fb) {
        error_report("Failed to create buffered file");
        goto fail;
    }

    /*
     * Make sure the receiver can get incoming pages before we send the rest
     * of the state
     */
    qemu_savevm_send_postcopy_listen(fb);

    qemu_savevm_state_complete_precopy(fb, false);
    qemu_savevm_send_ping(fb, 3);

    qemu_savevm_send_postcopy_run(fb);

    /* <><> end of stuff going into the package */
    qsb = qemu_buf_get(fb);

    /* Now send that blob */
    if (qemu_savevm_send_packaged(ms->to_dst_file, qsb)) {
        goto fail_closefb;
    }
    qemu_fclose(fb);
    ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;

    qemu_mutex_unlock_iothread();

    /*
     * Although this ping is just for debug, it could potentially be
     * used for getting a better measurement of downtime at the source.
     */
    qemu_savevm_send_ping(ms->to_dst_file, 4);

    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_report("postcopy_start: Migration stream errored");
        migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                          MIGRATION_STATUS_FAILED);
    }

    return ret;

fail_closefb:
    qemu_fclose(fb);
fail:
    migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    qemu_mutex_unlock_iothread();
    return -1;
}

/**
 * migration_completion: Used by migration_thread when there's not much left.
 *   The caller 'breaks' the loop when this returns.
 *
 * @s: Current migration state
 * @current_active_state: The migration state we expect to be in
 * @*old_vm_running: Pointer to old_vm_running flag
 * @*start_time: Pointer to time to update
 */
static void migration_completion(MigrationState *s, int current_active_state,
                                 bool *old_vm_running,
                                 int64_t *start_time)
{
    int ret;

    if (s->state == MIGRATION_STATUS_ACTIVE) {
        qemu_mutex_lock_iothread();
        *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
        *old_vm_running = runstate_is_running();
        ret = global_state_store();

        if (!ret) {
            ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
            if (ret >= 0) {
                ret = bdrv_inactivate_all();
            }
            if (ret >= 0) {
                qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
                qemu_savevm_state_complete_precopy(s->to_dst_file, false);
            }
        }
        qemu_mutex_unlock_iothread();

        if (ret < 0) {
            goto fail;
        }
    } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        trace_migration_completion_postcopy_end();

        qemu_savevm_state_complete_postcopy(s->to_dst_file);
        trace_migration_completion_postcopy_end_after_complete();
    }

    /*
     * If rp was opened we must clean up the thread before
     * cleaning everything else up (since if there are no failures
     * it will wait for the destination to send its status in
     * a SHUT command).
     * Postcopy opens rp if enabled (even if it's not activated)
     */
    if (migrate_postcopy_ram()) {
        int rp_error;
        trace_migration_completion_postcopy_end_before_rp();
        rp_error = await_return_path_close_on_source(s);
        trace_migration_completion_postcopy_end_after_rp(rp_error);
        if (rp_error) {
            goto fail;
        }
    }

    if (qemu_file_get_error(s->to_dst_file)) {
        trace_migration_completion_file_err();
        goto fail;
    }

    migrate_set_state(&s->state, current_active_state,
                      MIGRATION_STATUS_COMPLETED);
    return;

fail:
    migrate_set_state(&s->state, current_active_state,
                      MIGRATION_STATUS_FAILED);
}

/*
 * Master migration thread on the source VM.
 * It drives the migration and pumps the data down the outgoing channel.
 */
static void *migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    /* Used by the bandwidth calcs, updated later */
    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    int64_t initial_bytes = 0;
    int64_t max_size = 0;
    int64_t start_time = initial_time;
    int64_t end_time;
    bool old_vm_running = false;
    bool entered_postcopy = false;
    /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
    enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;

    rcu_register_thread();

    qemu_savevm_state_header(s->to_dst_file);

    if (migrate_postcopy_ram()) {
        /* Now tell the dest that it should open its end so it can reply */
        qemu_savevm_send_open_return_path(s->to_dst_file);

        /* And do a ping that will make stuff easier to debug */
        qemu_savevm_send_ping(s->to_dst_file, 1);

        /*
         * Tell the destination that we *might* want to do postcopy later;
         * if the other end can't do postcopy it should fail now, nice and
         * early.
         */
        qemu_savevm_send_postcopy_advise(s->to_dst_file);
    }

    qemu_savevm_state_begin(s->to_dst_file, &s->params);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
    current_active_state = MIGRATION_STATUS_ACTIVE;
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_ACTIVE);

    trace_migration_thread_setup_complete();

    while (s->state == MIGRATION_STATUS_ACTIVE ||
           s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        int64_t current_time;
        uint64_t pending_size;

        if (!qemu_file_rate_limit(s->to_dst_file)) {
            uint64_t pend_post, pend_nonpost;

            qemu_savevm_state_pending(s->to_dst_file, max_size, &pend_nonpost,
                                      &pend_post);
            pending_size = pend_nonpost + pend_post;
            trace_migrate_pending(pending_size, max_size,
                                  pend_post, pend_nonpost);
            if (pending_size && pending_size >= max_size) {
                /* Still a significant amount to transfer */

                if (migrate_postcopy_ram() &&
                    s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
                    pend_nonpost <= max_size &&
                    atomic_read(&s->start_postcopy)) {

                    if (!postcopy_start(s, &old_vm_running)) {
                        current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE;
                        entered_postcopy = true;
                    }

                    continue;
                }
                /* Just another iteration step */
                qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy);
            } else {
                trace_migration_thread_low_pending(pending_size);
                migration_completion(s, current_active_state,
                                     &old_vm_running, &start_time);
                break;
            }
        }

        if (qemu_file_get_error(s->to_dst_file)) {
            migrate_set_state(&s->state, current_active_state,
                              MIGRATION_STATUS_FAILED);
            trace_migration_thread_file_err();
            break;
        }
        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        if (current_time >= initial_time + BUFFER_DELAY) {
            uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) -
                                         initial_bytes;
            uint64_t time_spent = current_time - initial_time;
            double bandwidth = (double)transferred_bytes / time_spent;
            max_size = bandwidth * migrate_max_downtime() / 1000000;

            s->mbps = (((double) transferred_bytes * 8.0) /
                       ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;

            trace_migrate_transferred(transferred_bytes, time_spent,
                                      bandwidth, max_size);
            /* If we haven't sent anything, we don't want to recalculate;
               10000 is a small enough number for our purposes */
            if (s->dirty_bytes_rate && transferred_bytes > 10000) {
                s->expected_downtime = s->dirty_bytes_rate / bandwidth;
            }

            qemu_file_reset_rate_limit(s->to_dst_file);
            initial_time = current_time;
            initial_bytes = qemu_ftell(s->to_dst_file);
        }
        if (qemu_file_rate_limit(s->to_dst_file)) {
            /* usleep expects microseconds */
            g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
        }
    }

    trace_migration_thread_after_loop();
    /* If we enabled cpu throttling for auto-converge, turn it off. */
    cpu_throttle_stop();
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    qemu_mutex_lock_iothread();
    qemu_savevm_state_cleanup();
    if (s->state == MIGRATION_STATUS_COMPLETED) {
        uint64_t transferred_bytes = qemu_ftell(s->to_dst_file);
        s->total_time = end_time - s->total_time;
        if (!entered_postcopy) {
            s->downtime = end_time - start_time;
        }
        if (s->total_time) {
            s->mbps = (((double) transferred_bytes * 8.0) /
                       ((double) s->total_time)) / 1000;
        }
        runstate_set(RUN_STATE_POSTMIGRATE);
    } else {
        if (old_vm_running && !entered_postcopy) {
            vm_start();
        }
    }
    qemu_bh_schedule(s->cleanup_bh);
    qemu_mutex_unlock_iothread();

    rcu_unregister_thread();
    return NULL;
}

void migrate_fd_connect(MigrationState *s)
{
    /* This is a rough first approximation; convert ns to ms */
    s->expected_downtime = max_downtime/1000000;
    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);

    qemu_file_set_rate_limit(s->to_dst_file,
                             s->bandwidth_limit / XFER_LIMIT_RATIO);

    /* Notify before starting migration thread */
    notifier_list_notify(&migration_state_notifiers, s);

    /*
     * Open the return path; currently for postcopy but other things might
     * also want it.
     */
    if (migrate_postcopy_ram()) {
        if (open_return_path_on_source(s)) {
            error_report("Unable to open return-path for postcopy");
            migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                              MIGRATION_STATUS_FAILED);
            migrate_fd_cleanup(s);
            return;
        }
    }

    migrate_compress_threads_create();
    qemu_thread_create(&s->thread, "migration", migration_thread, s,
                       QEMU_THREAD_JOINABLE);
    s->migration_thread_running = true;
}

PostcopyState postcopy_state_get(void)
{
    return atomic_mb_read(&incoming_postcopy_state);
}

/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state)
{
    return atomic_xchg(&incoming_postcopy_state, new_state);
}