/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "sysemu/sysemu.h"
#include "block/block.h"
#include "qapi/qmp/qerror.h"
#include "qapi/util.h"
#include "qemu/sockets.h"
#include "qemu/rcu.h"
#include "migration/block.h"
#include "migration/postcopy-ram.h"
#include "qemu/thread.h"
#include "qmp-commands.h"
#include "trace.h"
#include "qapi-event.h"
#include "qom/cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

#define MAX_THROTTLE  (32 << 20)      /* Migration transfer speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)

/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count, usually decompression is at
 * least 4 times as fast as compression. */
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/* 0: no compression, 1: best speed, ... 9: best compression ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
/* Define default autoconverge cpu throttle migration parameters */
#define DEFAULT_MIGRATE_X_CPU_THROTTLE_INITIAL 20
#define DEFAULT_MIGRATE_X_CPU_THROTTLE_INCREMENT 10

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

static bool deferred_incoming;

/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since its state is used during cleanup
 * at the end as MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

/* When we add fault tolerance, we could have several
   migrations at once.
   For now we don't need to add
   dynamic creation of migration */

/* For outgoing */
MigrationState *migrate_get_current(void)
{
    static bool once;
    static MigrationState current_migration = {
        .state = MIGRATION_STATUS_NONE,
        .bandwidth_limit = MAX_THROTTLE,
        .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
        .mbps = -1,
        .parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] =
                DEFAULT_MIGRATE_COMPRESS_LEVEL,
        .parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
                DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
        .parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
                DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
        .parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] =
                DEFAULT_MIGRATE_X_CPU_THROTTLE_INITIAL,
        .parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] =
                DEFAULT_MIGRATE_X_CPU_THROTTLE_INCREMENT,
    };

    if (!once) {
        qemu_mutex_init(&current_migration.src_page_req_mutex);
        once = true;
    }
    return &current_migration;
}

/* For incoming */
static MigrationIncomingState *mis_current;

MigrationIncomingState *migration_incoming_get_current(void)
{
    return mis_current;
}

MigrationIncomingState *migration_incoming_state_new(QEMUFile* f)
{
    mis_current = g_new0(MigrationIncomingState, 1);
    mis_current->from_src_file = f;
    QLIST_INIT(&mis_current->loadvm_handlers);
    qemu_mutex_init(&mis_current->rp_mutex);
    qemu_event_init(&mis_current->main_thread_load_event, false);

    return mis_current;
}

void migration_incoming_state_destroy(void)
{
    qemu_event_destroy(&mis_current->main_thread_load_event);
    loadvm_free_handlers(mis_current);
    g_free(mis_current);
    mis_current = NULL;
}


typedef struct {
    bool optional;
    uint32_t size;
    uint8_t runstate[100];
    RunState state;
    bool received;
} GlobalState;

static GlobalState global_state;

int global_state_store(void)
{
    if (!runstate_store((char *)global_state.runstate,
                        sizeof(global_state.runstate))) {
        error_report("runstate name too big: %s", global_state.runstate);
        trace_migrate_state_too_big();
        return -EINVAL;
    }
    return 0;
}

void global_state_store_running(void)
{
    const char *state = RunState_lookup[RUN_STATE_RUNNING];
    strncpy((char *)global_state.runstate,
            state, sizeof(global_state.runstate));
}

static bool global_state_received(void)
{
    return global_state.received;
}

static RunState global_state_get_runstate(void)
{
    return global_state.state;
}

void global_state_set_optional(void)
{
    global_state.optional = true;
}

static bool global_state_needed(void *opaque)
{
    GlobalState *s = opaque;
    char *runstate = (char *)s->runstate;

    /* If it is not optional, it is mandatory */

    if (s->optional == false) {
        return true;
    }

    /* If state is running or paused, it is not needed */

    if (strcmp(runstate, "running") == 0 ||
        strcmp(runstate, "paused") == 0) {
        return false;
    }

    /* for any other state it is needed */
    return true;
}

static int global_state_post_load(void *opaque, int version_id)
{
    GlobalState *s = opaque;
    Error *local_err = NULL;
    int r;
    char *runstate = (char *)s->runstate;

    s->received = true;
    trace_migrate_global_state_post_load(runstate);

    r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE_MAX,
                        -1, &local_err);

    if (r == -1) {
        if (local_err) {
            error_report_err(local_err);
        }
        return -EINVAL;
    }
    s->state = r;

    return 0;
}

static void global_state_pre_save(void *opaque)
{
    GlobalState *s = opaque;

    trace_migrate_global_state_pre_save((char *)s->runstate);
    s->size = strlen((char *)s->runstate) + 1;
}

static const VMStateDescription vmstate_globalstate = {
    .name = "globalstate",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = global_state_post_load,
    .pre_save = global_state_pre_save,
    .needed = global_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(size, GlobalState),
        VMSTATE_BUFFER(runstate, GlobalState),
        VMSTATE_END_OF_LIST()
    },
};

void register_global_state(void)
{
    /* We would use it independently of whether we receive it */
    strcpy((char *)&global_state.runstate, "");
    global_state.received = false;
    vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_use_events()) {
        qapi_event_send_migration(new_state, &error_abort);
    }
}

/*
 * Called on -incoming with a defer: uri.
 * The migration can be started later after any parameters have been
 * changed.
 */
static void deferred_incoming_migration(Error **errp)
{
    if (deferred_incoming) {
        error_setg(errp, "Incoming migration already deferred");
    }
    deferred_incoming = true;
}

/* Request a range of pages from the source VM at the given
 * start address.
 *   rbname: Name of the RAMBlock to request the page in, if NULL it's the same
 *           as the last request (a name must have been given previously)
 *   Start: Address offset within the RB
 *   Len: Length in bytes required - must be a multiple of pagesize
 */
void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
                               ram_addr_t start, size_t len)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
    size_t msglen = 12; /* start + len */

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    if (rbname) {
        int rbname_len = strlen(rbname);
        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc);
    } else {
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc);
    }
}

void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
#if !defined(WIN32)
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
#endif
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}

static void process_incoming_migration_co(void *opaque)
{
    QEMUFile *f = opaque;
    Error *local_err = NULL;
    MigrationIncomingState *mis;
    PostcopyState ps;
    int ret;

    mis = migration_incoming_state_new(f);
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_generate_event(MIGRATION_STATUS_ACTIVE);

    ret = qemu_loadvm_state(f);

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    qemu_fclose(f);
    free_xbzrle_decoded_buf();
    migration_incoming_state_destroy();

    if (ret < 0) {
        migrate_generate_event(MIGRATION_STATUS_FAILED);
        error_report("load of migration failed: %s", strerror(-ret));
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    /* Make sure all file formats flush their mutable metadata */
    bdrv_invalidate_cache_all(&local_err);
    if (local_err) {
        migrate_generate_event(MIGRATION_STATUS_FAILED);
        error_report_err(local_err);
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self();

    /* If global state section was not received or we are in running
       state, we need to obey autostart.  Any other state is set with
       runstate_set. */

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else {
        runstate_set(global_state_get_runstate());
    }
    migrate_decompress_threads_join();
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_generate_event(MIGRATION_STATUS_COMPLETED);
}

void process_incoming_migration(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
    int fd = qemu_get_fd(f);

    assert(fd != -1);
    migrate_decompress_threads_create();
    qemu_set_nonblock(fd);
    qemu_coroutine_enter(co, f);
}

/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
void migrate_send_rp_message(MigrationIncomingState *mis,
                             enum mig_rp_message_type message_type,
                             uint16_t len, void *data)
{
    trace_migrate_send_rp_message((int)message_type, len);
    qemu_mutex_lock(&mis->rp_mutex);
    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);
    qemu_mutex_unlock(&mis->rp_mutex);
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  Non-0 value indicates
 * error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

/* amount of nanoseconds we are willing to wait for migration to be down.
 * the choice of nanoseconds is because it is the maximum resolution that
 * get_clock() can achieve.  It is an internal measure.  All user-visible
 * units must be in seconds */
static uint64_t max_downtime = 300000000;

uint64_t migrate_max_downtime(void)
{
    return max_downtime;
}

MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY_MAX; i++) {
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value =
            g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}

MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    params = g_malloc0(sizeof(*params));
    params->compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
    params->compress_threads =
            s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
    params->decompress_threads =
            s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
    params->x_cpu_throttle_initial =
            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];
    params->x_cpu_throttle_increment =
            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];

    return params;
}

/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
static bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_SETUP:
        return true;

    default:
        return false;

    }
}

static void get_xbzrle_cache_stats(MigrationInfo *info)
{
    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
        info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
        info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
        info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
        info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
    }
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        break;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = ram_bytes_remaining();
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->dirty_pages_rate = s->dirty_pages_rate;
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        if (cpu_throttle_active()) {
            info->has_x_cpu_throttle_percentage = true;
            info->x_cpu_throttle_percentage = cpu_throttle_get_percentage();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
        /* Mostly the same as active; TODO add some postcopy stats */
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = ram_bytes_remaining();
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->dirty_pages_rate = s->dirty_pages_rate;
        info->ram->mbps = s->mbps;

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_COMPLETED:
        get_xbzrle_cache_stats(info);

        info->has_status = true;
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = 0;
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    }
    info->status = s->state;

    return info;
}

void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;

    if (migration_is_setup_or_active(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    for (cap = params; cap; cap = cap->next) {
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }

    if (migrate_postcopy_ram()) {
        if (migrate_use_compression()) {
            /* The decompression threads asynchronously write into RAM
             * rather than use the atomic copies needed to avoid
             * userfaulting.  It should be possible to fix the decompression
             * threads for compatibility in future.
             */
            error_report("Postcopy is not currently compatible with "
                         "compression");
            s->enabled_capabilities[MIGRATION_CAPABILITY_X_POSTCOPY_RAM] =
                false;
        }
    }
}

void qmp_migrate_set_parameters(bool has_compress_level,
                                int64_t compress_level,
                                bool has_compress_threads,
                                int64_t compress_threads,
                                bool has_decompress_threads,
                                int64_t decompress_threads,
                                bool has_x_cpu_throttle_initial,
                                int64_t x_cpu_throttle_initial,
                                bool has_x_cpu_throttle_increment,
                                int64_t x_cpu_throttle_increment, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (has_compress_level && (compress_level < 0 || compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "is invalid, it should be in the range of 0 to 9");
        return;
    }
    if (has_compress_threads &&
            (compress_threads < 1 || compress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "compress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (has_decompress_threads &&
            (decompress_threads < 1 || decompress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (has_x_cpu_throttle_initial &&
            (x_cpu_throttle_initial < 1 || x_cpu_throttle_initial > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "x_cpu_throttle_initial",
                   "an integer in the range of 1 to 99");
    }
    if (has_x_cpu_throttle_increment &&
            (x_cpu_throttle_increment < 1 || x_cpu_throttle_increment > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "x_cpu_throttle_increment",
                   "an integer in the range of 1 to 99");
    }

    if (has_compress_level) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
    }
    if (has_compress_threads) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = compress_threads;
    }
    if (has_decompress_threads) {
        s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
                                                    decompress_threads;
    }
    if (has_x_cpu_throttle_initial) {
        s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] =
                                                    x_cpu_throttle_initial;
    }

    if (has_x_cpu_throttle_increment) {
        s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] =
                                                    x_cpu_throttle_increment;
    }
}

void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy_ram()) {
        error_setg(errp, "Enable postcopy with migration_set_capability before"
                         " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                         " started");
        return;
    }
    /*
     * we don't error if migration has finished since that would be racy
     * with issuing this command.
     */
    atomic_set(&s->start_postcopy, true);
}

/* shared migration helpers */

static void migrate_set_state(MigrationState *s, int old_state, int new_state)
{
    if (atomic_cmpxchg(&s->state, old_state, new_state) == old_state) {
        trace_migrate_set_state(new_state);
        migrate_generate_event(new_state);
    }
}

static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    flush_page_queue(s);

    if (s->file) {
        trace_migrate_fd_cleanup();
        qemu_mutex_unlock_iothread();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        qemu_mutex_lock_iothread();

        migrate_compress_threads_join();
        qemu_fclose(s->file);
        s->file = NULL;
    }

    assert((s->state != MIGRATION_STATUS_ACTIVE) &&
           (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE));

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(s, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    notifier_list_notify(&migration_state_notifiers, s);
}

void migrate_fd_error(MigrationState *s)
{
    trace_migrate_fd_error();
    assert(s->file == NULL);
    migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_FAILED);
    notifier_list_notify(&migration_state_notifiers, s);
}

static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->file;
    trace_migrate_fd_cancel();

    if (s->rp_state.from_dst_file) {
        /* shutdown the rp socket, so causing the rp thread to shutdown */
        qemu_file_shutdown(s->rp_state.from_dst_file);
    }

    do {
        old_state = s->state;
        if (!migration_is_setup_or_active(old_state)) {
            break;
        }
        migrate_set_state(s, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
}

void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}

bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}

bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}

bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}

bool migration_in_postcopy(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

MigrationState *migrate_init(const MigrationParams *params)
{
    MigrationState *s = migrate_get_current();
    int64_t bandwidth_limit = s->bandwidth_limit;
    bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
    int64_t xbzrle_cache_size = s->xbzrle_cache_size;
    int compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
    int compress_thread_count =
            s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
    int decompress_thread_count =
            s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
    int x_cpu_throttle_initial =
            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];
    int x_cpu_throttle_increment =
            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];

    memcpy(enabled_capabilities, s->enabled_capabilities,
           sizeof(enabled_capabilities));

    memset(s, 0, sizeof(*s));
    s->params = *params;
    memcpy(s->enabled_capabilities, enabled_capabilities,
           sizeof(enabled_capabilities));
    s->xbzrle_cache_size = xbzrle_cache_size;

    s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
    s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
               compress_thread_count;
    s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
               decompress_thread_count;
    s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] =
                x_cpu_throttle_initial;
    s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] =
                x_cpu_throttle_increment;
    s->bandwidth_limit = bandwidth_limit;
    migrate_set_state(s, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    QSIMPLEQ_INIT(&s->src_page_requests);

    s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    return s;
}

static GSList *migration_blockers;

void migrate_add_blocker(Error *reason)
{
    migration_blockers = g_slist_prepend(migration_blockers, reason);
}

void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}

void qmp_migrate_incoming(const char *uri, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;

    if (!deferred_incoming) {
        error_setg(errp, "For use with '-incoming defer'");
        return;
    }
    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
        return;
    }

    qemu_start_incoming_migration(uri, &local_err);

    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}

void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 Error **errp)
{
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    MigrationParams params;
    const char *p;

    params.blk = has_blk && blk;
    params.shared = has_inc && inc;

    if (migration_is_setup_or_active(s->state) ||
        s->state == MIGRATION_STATUS_CANCELLING) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return;
    }

    if (qemu_savevm_state_blocked(errp)) {
        return;
    }

    if (migration_blockers) {
        *errp = error_copy(migration_blockers->data);
        return;
    }

    /* We are starting a new migration, so we want to start in a clean
       state.  This change is only needed if previous migration
       failed/was cancelled.  We don't use migrate_set_state() because
       we are setting the initial state, not changing it. */
    s->state = MIGRATION_STATUS_NONE;

    s = migrate_init(&params);

    if (strstart(uri, "tcp:", &p)) {
        tcp_start_outgoing_migration(s, p, &local_err);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
#endif
#if !defined(WIN32)
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
#endif
    } else {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_FAILED);
        return;
    }

    if (local_err) {
        migrate_fd_error(s);
        error_propagate(errp, local_err);
        return;
    }
}

void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}

void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();
    int64_t new_size;

    /* Check for truncation */
    if (value != (size_t)value) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return;
    }

    /* Cache should not be larger than guest ram size */
    if (value > ram_bytes_total()) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeds guest ram size");
        return;
    }

    new_size = xbzrle_cache_resize(value);
    if (new_size < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is smaller than page size");
        return;
    }

    s->xbzrle_cache_size = new_size;
}

int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}

void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrationState *s;

    if (value < 0) {
        value = 0;
    }
    if (value > SIZE_MAX) {
        value = SIZE_MAX;
    }

    s = migrate_get_current();
    s->bandwidth_limit = value;
    if (s->file) {
        qemu_file_set_rate_limit(s->file,
                                 s->bandwidth_limit / XFER_LIMIT_RATIO);
    }
}

void qmp_migrate_set_downtime(double value, Error **errp)
{
    value *= 1e9;
    value = MAX(0, MIN(UINT64_MAX, value));
    max_downtime = (uint64_t)value;
}

bool migrate_postcopy_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_X_POSTCOPY_RAM];
}

bool migrate_auto_converge(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
}

bool migrate_zero_blocks(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
}

bool migrate_use_compression(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
}

int migrate_compress_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
}

int migrate_compress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
}

int migrate_decompress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
}

bool migrate_use_events(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
}

int migrate_use_xbzrle(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}

int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->xbzrle_cache_size;
}

/* migration thread support */
/*
 * Something bad happened to the RP stream, mark an error
 * The caller shall print or trace something to indicate why
 */
static void mark_source_rp_bad(MigrationState *s)
{
    s->rp_state.error = true;
}

static struct rp_cmd_args {
    ssize_t     len; /* -1 = variable */
    const char *name;
} rp_cmd_args[] = {
    [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
    [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
    [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
    [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
    [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
    [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
};

/*
 * Process a request for pages received on the return path,
 * We're allowed to send more than requested (e.g. to round to our page size)
 * and we don't need to send pages that have already been sent.
 */
static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
                                        ram_addr_t start, size_t len)
{
    long our_host_ps = getpagesize();

    trace_migrate_handle_rp_req_pages(rbname, start, len);

    /*
     * Since we currently insist on matching page sizes, just sanity check
     * we're being asked for whole host pages.
     */
    if (start & (our_host_ps - 1) ||
       (len & (our_host_ps - 1))) {
        error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
                     " len: %zd", __func__, start, len);
        mark_source_rp_bad(ms);
        return;
    }

    if (ram_save_queue_pages(ms, rbname, start, len)) {
        mark_source_rp_bad(ms);
    }
}

/*
 * Handles messages sent on the return path towards the source VM
 *
 */
static void *source_return_path_thread(void *opaque)
{
    MigrationState *ms = opaque;
    QEMUFile *rp = ms->rp_state.from_dst_file;
    uint16_t header_len, header_type;
    const int max_len = 512;
    uint8_t buf[max_len];
    uint32_t tmp32, sibling_error;
    ram_addr_t start = 0; /* =0 to silence warning */
    size_t len = 0, expected_len;
    int res;

    trace_source_return_path_thread_entry();
    while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
           migration_is_setup_or_active(ms->state)) {
        trace_source_return_path_thread_loop_top();
        header_type = qemu_get_be16(rp);
        header_len = qemu_get_be16(rp);

        if (header_type >= MIG_RP_MSG_MAX ||
            header_type == MIG_RP_MSG_INVALID) {
            error_report("RP: Received invalid message 0x%04x length 0x%04x",
                         header_type, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        if ((rp_cmd_args[header_type].len != -1 &&
            header_len != rp_cmd_args[header_type].len) ||
            header_len > max_len) {
            error_report("RP: Received '%s' message (0x%04x) with "
                         "incorrect length %d expecting %zu",
                         rp_cmd_args[header_type].name, header_type, header_len,
                         (size_t)rp_cmd_args[header_type].len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* We know we've got a valid header by this point */
        res = qemu_get_buffer(rp, buf, header_len);
        if (res != header_len) {
            error_report("RP: Failed reading data for message 0x%04x"
                         " read %d expected %d",
                         header_type, res, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* OK, we have the message and the data */
        switch (header_type) {
        case MIG_RP_MSG_SHUT:
            sibling_error = be32_to_cpup((uint32_t *)buf);
            trace_source_return_path_thread_shut(sibling_error);
            if (sibling_error) {
                error_report("RP: Sibling indicated error %d", sibling_error);
                mark_source_rp_bad(ms);
            }
            /*
             * We'll let the main thread deal with closing the RP
             * we could do a shutdown(2) on it, but we're the only user
             * anyway, so there's nothing gained.
             */
            goto out;

        case MIG_RP_MSG_PONG:
            tmp32 = be32_to_cpup((uint32_t *)buf);
            trace_source_return_path_thread_pong(tmp32);
            break;

        case MIG_RP_MSG_REQ_PAGES:
            start = be64_to_cpup((uint64_t *)buf);
            len = be32_to_cpup((uint32_t *)(buf + 8));
            migrate_handle_rp_req_pages(ms, NULL, start, len);
            break;

        case MIG_RP_MSG_REQ_PAGES_ID:
            expected_len = 12 + 1; /* header + termination */

            if (header_len >= expected_len) {
                start = be64_to_cpup((uint64_t *)buf);
                len = be32_to_cpup((uint32_t *)(buf + 8));
                /* Now we expect an idstr */
                tmp32 = buf[12]; /* Length of the following idstr */
                buf[13 + tmp32] = '\0';
                expected_len += tmp32;
            }
            if (header_len != expected_len) {
                error_report("RP: Req_Page_id with length %d expecting %zd",
                             header_len, expected_len);
                mark_source_rp_bad(ms);
                goto out;
            }
            migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
            break;

        default:
            break;
        }
    }
    if (rp && qemu_file_get_error(rp)) {
        trace_source_return_path_thread_bad_end();
        mark_source_rp_bad(ms);
    }

    trace_source_return_path_thread_end();
out:
    ms->rp_state.from_dst_file = NULL;
    qemu_fclose(rp);
    return NULL;
}

static int open_return_path_on_source(MigrationState *ms)
{

    ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->file);
    if (!ms->rp_state.from_dst_file) {
        return -1;
    }

    trace_open_return_path_on_source();
    qemu_thread_create(&ms->rp_state.rp_thread, "return path",
                       source_return_path_thread, ms, QEMU_THREAD_JOINABLE);

    trace_open_return_path_on_source_continue();

    return 0;
}

/* Returns 0 if the RP was ok, otherwise there was an error on the RP */
static int await_return_path_close_on_source(MigrationState *ms)
{
    /*
     * If this is a normal exit then the destination will send a SHUT and the
     * rp_thread will exit, however if there's an error we need to cause
     * it to exit.
     */
    if (qemu_file_get_error(ms->file) && ms->rp_state.from_dst_file) {
        /*
         * shutdown(2), if we have it, will cause it to unblock if it's stuck
         * waiting for the destination.
         */
        qemu_file_shutdown(ms->rp_state.from_dst_file);
        mark_source_rp_bad(ms);
    }
    trace_await_return_path_close_on_source_joining();
    qemu_thread_join(&ms->rp_state.rp_thread);
    trace_await_return_path_close_on_source_close();
    return ms->rp_state.error;
}

/*
 * Switch from normal iteration to postcopy
 * Returns non-0 on error
 */
static int postcopy_start(MigrationState *ms, bool *old_vm_running)
{
    int ret;
    const QEMUSizedBuffer *qsb;
    int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    migrate_set_state(ms, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    trace_postcopy_start();
    qemu_mutex_lock_iothread();
    trace_postcopy_start_set_run();

    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
    *old_vm_running = runstate_is_running();
    global_state_store();
    ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);

    if (ret < 0) {
        goto fail;
    }

    /*
     * in Finish migrate and with the io-lock held everything should
     * be quiet, but we've potentially still got dirty pages and we
     * need to tell the destination to throw any pages it's already received
     * that are dirty
     */
    if (ram_postcopy_send_discard_bitmap(ms)) {
        error_report("postcopy send discard bitmap failed");
        goto fail;
    }

    /*
     * send rest of state - note things that are doing postcopy
     * will notice we're in POSTCOPY_ACTIVE and not actually
     * wrap their state up here
     */
    qemu_file_set_rate_limit(ms->file, INT64_MAX);
    /* Ping just for debugging, helps line traces up */
    qemu_savevm_send_ping(ms->file, 2);

    /*
     * While loading the device state we may trigger page transfer
     * requests and the fd must be free to process those, and thus
     * the destination must read the whole device state off the fd before
     * it starts processing it.  Unfortunately the ad-hoc migration format
     * doesn't allow the destination to know the size to read without fully
     * parsing it through each devices load-state code (especially the open
     * coded devices that use get/put).
     * So we wrap the device state up in a package with a length at the start;
     * to do this we use a qemu_buf to hold the whole of the device state.
     */
    QEMUFile *fb = qemu_bufopen("w", NULL);
    if (!fb) {
        error_report("Failed to create buffered file");
        goto fail;
    }

    /*
     * Make sure the receiver can get incoming pages before we send the rest
     * of the state
     */
    qemu_savevm_send_postcopy_listen(fb);

    qemu_savevm_state_complete_precopy(fb);
    qemu_savevm_send_ping(fb, 3);

    qemu_savevm_send_postcopy_run(fb);

    /* <><> end of stuff going into the package */
    qsb = qemu_buf_get(fb);

    /* Now send that blob */
    if (qemu_savevm_send_packaged(ms->file, qsb)) {
        goto fail_closefb;
    }
    qemu_fclose(fb);
    ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;

    qemu_mutex_unlock_iothread();

    /*
     * Although this ping is just for debug, it could potentially be
     * used for getting a better measurement of downtime at the source.
     */
    qemu_savevm_send_ping(ms->file, 4);

    ret = qemu_file_get_error(ms->file);
    if (ret) {
        error_report("postcopy_start: Migration stream errored");
        migrate_set_state(ms, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                              MIGRATION_STATUS_FAILED);
    }

    return ret;

fail_closefb:
    qemu_fclose(fb);
fail:
    migrate_set_state(ms, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                          MIGRATION_STATUS_FAILED);
    qemu_mutex_unlock_iothread();
    return -1;
}

/**
 * migration_completion: Used by migration_thread when there's not much left.
 *   The caller 'breaks' the loop when this returns.
 *
 * @s: Current migration state
 * @current_active_state: The migration state we expect to be in
 * @*old_vm_running: Pointer to old_vm_running flag
 * @*start_time: Pointer to time to update
 */
static void migration_completion(MigrationState *s, int current_active_state,
                                 bool *old_vm_running,
                                 int64_t *start_time)
{
    int ret;

    if (s->state == MIGRATION_STATUS_ACTIVE) {
        qemu_mutex_lock_iothread();
        *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
        *old_vm_running = runstate_is_running();
        ret = global_state_store();

        if (!ret) {
            ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
            if (ret >= 0) {
                qemu_file_set_rate_limit(s->file, INT64_MAX);
                qemu_savevm_state_complete_precopy(s->file);
            }
        }
        qemu_mutex_unlock_iothread();

        if (ret < 0) {
            goto fail;
        }
    } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        trace_migration_completion_postcopy_end();

        qemu_savevm_state_complete_postcopy(s->file);
        trace_migration_completion_postcopy_end_after_complete();
    }

    /*
     * If rp was opened we must clean up the thread before
     * cleaning everything else up (since if there are no failures
     * it will wait for the destination to send its status in
     * a SHUT command).
     * Postcopy opens rp if enabled (even if it's not activated)
     */
    if (migrate_postcopy_ram()) {
        int rp_error;
        trace_migration_completion_postcopy_end_before_rp();
        rp_error = await_return_path_close_on_source(s);
        trace_migration_completion_postcopy_end_after_rp(rp_error);
        if (rp_error) {
            goto fail;
        }
    }

    if (qemu_file_get_error(s->file)) {
        trace_migration_completion_file_err();
        goto fail;
    }

    migrate_set_state(s, current_active_state, MIGRATION_STATUS_COMPLETED);
    return;

fail:
    migrate_set_state(s, current_active_state, MIGRATION_STATUS_FAILED);
}

/*
 * Master migration thread on the source VM.
 * It drives the migration and pumps the data down the outgoing channel.
 */
static void *migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    /* Used by the bandwidth calcs, updated later */
    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    int64_t initial_bytes = 0;
    int64_t max_size = 0;
    int64_t start_time = initial_time;
    int64_t end_time;
    bool old_vm_running = false;
    bool entered_postcopy = false;
    /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
    enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;

    rcu_register_thread();

    qemu_savevm_state_header(s->file);

    if (migrate_postcopy_ram()) {
        /* Now tell the dest that it should open its end so it can reply */
        qemu_savevm_send_open_return_path(s->file);

        /* And do a ping that will make stuff easier to debug */
        qemu_savevm_send_ping(s->file, 1);

        /*
         * Tell the destination that we *might* want to do postcopy later;
         * if the other end can't do postcopy it should fail now, nice and
         * early.
         */
        qemu_savevm_send_postcopy_advise(s->file);
    }

    qemu_savevm_state_begin(s->file, &s->params);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
    current_active_state = MIGRATION_STATUS_ACTIVE;
    migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_ACTIVE);

    trace_migration_thread_setup_complete();

    while (s->state == MIGRATION_STATUS_ACTIVE ||
           s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        int64_t current_time;
        uint64_t pending_size;

        if (!qemu_file_rate_limit(s->file)) {
            uint64_t pend_post, pend_nonpost;

            qemu_savevm_state_pending(s->file, max_size, &pend_nonpost,
                                      &pend_post);
            pending_size = pend_nonpost + pend_post;
            trace_migrate_pending(pending_size, max_size,
                                  pend_post, pend_nonpost);
            if (pending_size && pending_size >= max_size) {
                /* Still a significant amount to transfer */

                current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
                if (migrate_postcopy_ram() &&
                    s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
                    pend_nonpost <= max_size &&
                    atomic_read(&s->start_postcopy)) {

                    if (!postcopy_start(s, &old_vm_running)) {
                        current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE;
                        entered_postcopy = true;
                    }

                    continue;
                }
                /* Just another iteration step */
                qemu_savevm_state_iterate(s->file, entered_postcopy);
            } else {
                trace_migration_thread_low_pending(pending_size);
                migration_completion(s, current_active_state,
                                     &old_vm_running, &start_time);
                break;
            }
        }

        if (qemu_file_get_error(s->file)) {
            migrate_set_state(s, current_active_state, MIGRATION_STATUS_FAILED);
            trace_migration_thread_file_err();
            break;
        }
        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        if (current_time >= initial_time + BUFFER_DELAY) {
            uint64_t transferred_bytes = qemu_ftell(s->file) - initial_bytes;
            uint64_t time_spent = current_time - initial_time;
            double bandwidth = transferred_bytes / time_spent;
            max_size = bandwidth * migrate_max_downtime() / 1000000;

            s->mbps = time_spent ? (((double) transferred_bytes * 8.0) /
                    ((double) time_spent / 1000.0)) / 1000.0 / 1000.0 : -1;

            trace_migrate_transferred(transferred_bytes, time_spent,
                                      bandwidth, max_size);
            /* if we haven't sent anything, we don't want to recalculate
               10000 is a small enough number for our purposes */
            if (s->dirty_bytes_rate && transferred_bytes > 10000) {
                s->expected_downtime = s->dirty_bytes_rate / bandwidth;
            }

            qemu_file_reset_rate_limit(s->file);
            initial_time = current_time;
            initial_bytes = qemu_ftell(s->file);
        }
        if (qemu_file_rate_limit(s->file)) {
            /* usleep expects microseconds */
            g_usleep((initial_time + BUFFER_DELAY - current_time) * 1000);
        }
    }

    trace_migration_thread_after_loop();
    /* If we enabled cpu throttling for auto-converge, turn it off. */
    cpu_throttle_stop();
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    qemu_mutex_lock_iothread();
    qemu_savevm_state_cleanup();
    if (s->state == MIGRATION_STATUS_COMPLETED) {
        uint64_t transferred_bytes = qemu_ftell(s->file);
        s->total_time = end_time - s->total_time;
        if (!entered_postcopy) {
            s->downtime = end_time - start_time;
        }
        if (s->total_time) {
            s->mbps = (((double) transferred_bytes * 8.0) /
                       ((double) s->total_time)) / 1000;
        }
        runstate_set(RUN_STATE_POSTMIGRATE);
    } else {
        if (old_vm_running && !entered_postcopy) {
            vm_start();
        }
    }
    qemu_bh_schedule(s->cleanup_bh);
    qemu_mutex_unlock_iothread();

    rcu_unregister_thread();
    return NULL;
}

void migrate_fd_connect(MigrationState *s)
{
    /* This is a best first approximation. ns to ms */
    s->expected_downtime = max_downtime / 1000000;
    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);

    qemu_file_set_rate_limit(s->file,
                             s->bandwidth_limit / XFER_LIMIT_RATIO);

    /* Notify before starting migration thread */
    notifier_list_notify(&migration_state_notifiers, s);

    /*
     * Open the return path; currently for postcopy but other things might
     * also want it.
     */
    if (migrate_postcopy_ram()) {
        if (open_return_path_on_source(s)) {
            error_report("Unable to open return-path for postcopy");
            migrate_set_state(s, MIGRATION_STATUS_SETUP,
                              MIGRATION_STATUS_FAILED);
            migrate_fd_cleanup(s);
            return;
        }
    }

    migrate_compress_threads_create();
    qemu_thread_create(&s->thread, "migration", migration_thread, s,
                       QEMU_THREAD_JOINABLE);
    s->migration_thread_running = true;
}

PostcopyState postcopy_state_get(void)
{
    return atomic_mb_read(&incoming_postcopy_state);
}

/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state)
{
    return atomic_xchg(&incoming_postcopy_state, new_state);
}