1 /* 2 * QEMU live migration 3 * 4 * Copyright IBM, Corp. 2008 5 * 6 * Authors: 7 * Anthony Liguori <aliguori@us.ibm.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 * Contributions after 2012-01-13 are licensed under the terms of the 13 * GNU GPL, version 2 or (at your option) any later version. 14 */ 15 16 #include "qemu-common.h" 17 #include "qemu/main-loop.h" 18 #include "migration/migration.h" 19 #include "monitor/monitor.h" 20 #include "migration/qemu-file.h" 21 #include "sysemu/sysemu.h" 22 #include "block/block.h" 23 #include "qemu/sockets.h" 24 #include "migration/block.h" 25 #include "qemu/thread.h" 26 #include "qmp-commands.h" 27 #include "trace.h" 28 29 #define MAX_THROTTLE (32 << 20) /* Migration speed throttling */ 30 31 /* Amount of time to allocate to each "chunk" of bandwidth-throttled 32 * data. */ 33 #define BUFFER_DELAY 100 34 #define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY) 35 36 /* Default compression thread count */ 37 #define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8 38 /* Default decompression thread count, usually decompression is at 39 * least 4 times as fast as compression.*/ 40 #define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2 41 /*0: means nocompress, 1: best speed, ... 9: best compress ratio */ 42 #define DEFAULT_MIGRATE_COMPRESS_LEVEL 1 43 44 /* Migration XBZRLE default cache size */ 45 #define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024) 46 47 static NotifierList migration_state_notifiers = 48 NOTIFIER_LIST_INITIALIZER(migration_state_notifiers); 49 50 static bool deferred_incoming; 51 52 /* When we add fault tolerance, we could have several 53 migrations at once. 
For now we don't need to add 54 dynamic creation of migration */ 55 56 /* For outgoing */ 57 MigrationState *migrate_get_current(void) 58 { 59 static MigrationState current_migration = { 60 .state = MIGRATION_STATUS_NONE, 61 .bandwidth_limit = MAX_THROTTLE, 62 .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE, 63 .mbps = -1, 64 .parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = 65 DEFAULT_MIGRATE_COMPRESS_LEVEL, 66 .parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = 67 DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT, 68 .parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] = 69 DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT, 70 }; 71 72 return ¤t_migration; 73 } 74 75 /* For incoming */ 76 static MigrationIncomingState *mis_current; 77 78 MigrationIncomingState *migration_incoming_get_current(void) 79 { 80 return mis_current; 81 } 82 83 MigrationIncomingState *migration_incoming_state_new(QEMUFile* f) 84 { 85 mis_current = g_malloc0(sizeof(MigrationIncomingState)); 86 mis_current->file = f; 87 QLIST_INIT(&mis_current->loadvm_handlers); 88 89 return mis_current; 90 } 91 92 void migration_incoming_state_destroy(void) 93 { 94 loadvm_free_handlers(mis_current); 95 g_free(mis_current); 96 mis_current = NULL; 97 } 98 99 /* 100 * Called on -incoming with a defer: uri. 101 * The migration can be started later after any parameters have been 102 * changed. 
 */
static void deferred_incoming_migration(Error **errp)
{
    if (deferred_incoming) {
        error_setg(errp, "Incoming migration already deferred");
    }
    /* NOTE(review): the flag is (re)set even when the error above was
     * reported; re-setting an already-true flag is harmless. */
    deferred_incoming = true;
}

/*
 * Start an incoming migration for @uri.  The transport is selected by
 * the URI prefix; "defer" merely records that migration will be started
 * later via qmp_migrate_incoming().  Errors are reported through @errp.
 */
void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
#if !defined(WIN32)
    /* exec/unix/fd transports rely on POSIX facilities */
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
#endif
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}

/*
 * Coroutine body for the incoming side: load the whole VM state from
 * @opaque (a QEMUFile), then either resume the guest or leave it paused.
 * Exits the process on load/invalidate failure.
 */
static void process_incoming_migration_co(void *opaque)
{
    QEMUFile *f = opaque;
    Error *local_err = NULL;
    int ret;

    migration_incoming_state_new(f);

    ret = qemu_loadvm_state(f);

    /* The file and incoming state are torn down before the result of the
     * load is examined. */
    qemu_fclose(f);
    free_xbzrle_decoded_buf();
    migration_incoming_state_destroy();

    if (ret < 0) {
        error_report("load of migration failed: %s", strerror(-ret));
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }
    qemu_announce_self();

    /* Make sure all file formats flush their mutable metadata */
    bdrv_invalidate_cache_all(&local_err);
    if (local_err) {
        error_report_err(local_err);
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    /* Honour -S: only auto-run the guest when autostart is set. */
    if (autostart) {
        vm_start();
    } else {
        runstate_set(RUN_STATE_PAUSED);
    }
    migrate_decompress_threads_join();
}

/* Entry point for the incoming side: spawn the load coroutine on a
 * non-blocking fd. */
void process_incoming_migration(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
    int fd =
             qemu_get_fd(f);

    assert(fd != -1);
    migrate_decompress_threads_create();
    qemu_set_nonblock(fd);
    qemu_coroutine_enter(co, f);
}

/* amount of nanoseconds we are willing to wait for migration to be down.
 * the choice of nanoseconds is because it is the maximum resolution that
 * get_clock() can achieve. It is an internal measure. All user-visible
 * units must be in seconds */
static uint64_t max_downtime = 300000000;

/* Maximum tolerated downtime, in nanoseconds (see comment above). */
uint64_t migrate_max_downtime(void)
{
    return max_downtime;
}

/*
 * QMP: build a linked list with the current on/off state of every
 * migration capability.  Caller owns the returned list.
 */
MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY_MAX; i++) {
        /* Append a new node; "head" tracks the first node, "caps" the tail. */
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value =
            g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}

/* QMP: snapshot the current tunable migration parameters.
 * Caller owns the returned struct. */
MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    params = g_malloc0(sizeof(*params));
    params->compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
    params->compress_threads =
            s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
    params->decompress_threads =
            s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];

    return params;
}

/* Fill @info with XBZRLE cache statistics, but only when the XBZRLE
 * capability is enabled. */
static void get_xbzrle_cache_stats(MigrationInfo *info)
{
    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
        info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
        info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
        info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
        info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
    }
}

/*
 * QMP: report the status of the (outgoing) migration.  Which optional
 * fields are filled in depends on the current migration state.
 * Caller owns the returned struct.
 */
MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        break;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
        info->has_status = true;
        info->has_total_time = true;
        /* s->total_time holds the start timestamp while active, so the
         * elapsed time is "now - start". */
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = ram_bytes_remaining();
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->dirty_pages_rate = s->dirty_pages_rate;
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;

        /* Block-migration stats only when block migration is running. */
        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_COMPLETED:
        get_xbzrle_cache_stats(info);

        info->has_status = true;
        info->has_total_time = true;
        /* After completion s->total_time has been converted to a duration
         * (see migration_thread), so it is reported directly. */
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = 0;
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    }
    info->status = s->state;

    return info;
}

/*
 * QMP: enable/disable migration capabilities.  Rejected while a
 * migration is being set up or is active.
 */
void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;

    if (s->state == MIGRATION_STATUS_ACTIVE ||
        s->state == MIGRATION_STATUS_SETUP) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    for (cap = params; cap; cap = cap->next) {
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }
}

/*
 * QMP: set migration parameters.  Each has_* flag says whether the
 * corresponding value was supplied; all supplied values are validated
 * before any of them is applied.
 */
void qmp_migrate_set_parameters(bool has_compress_level,
                                int64_t compress_level,
                                bool has_compress_threads,
                                int64_t compress_threads,
                                bool has_decompress_threads,
                                int64_t decompress_threads, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (has_compress_level && (compress_level < 0 || compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
"compress_level", 360 "is invalid, it should be in the range of 0 to 9"); 361 return; 362 } 363 if (has_compress_threads && 364 (compress_threads < 1 || compress_threads > 255)) { 365 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, 366 "compress_threads", 367 "is invalid, it should be in the range of 1 to 255"); 368 return; 369 } 370 if (has_decompress_threads && 371 (decompress_threads < 1 || decompress_threads > 255)) { 372 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, 373 "decompress_threads", 374 "is invalid, it should be in the range of 1 to 255"); 375 return; 376 } 377 378 if (has_compress_level) { 379 s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level; 380 } 381 if (has_compress_threads) { 382 s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = compress_threads; 383 } 384 if (has_decompress_threads) { 385 s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] = 386 decompress_threads; 387 } 388 } 389 390 /* shared migration helpers */ 391 392 static void migrate_set_state(MigrationState *s, int old_state, int new_state) 393 { 394 if (atomic_cmpxchg(&s->state, old_state, new_state) == new_state) { 395 trace_migrate_set_state(new_state); 396 } 397 } 398 399 static void migrate_fd_cleanup(void *opaque) 400 { 401 MigrationState *s = opaque; 402 403 qemu_bh_delete(s->cleanup_bh); 404 s->cleanup_bh = NULL; 405 406 if (s->file) { 407 trace_migrate_fd_cleanup(); 408 qemu_mutex_unlock_iothread(); 409 qemu_thread_join(&s->thread); 410 qemu_mutex_lock_iothread(); 411 412 migrate_compress_threads_join(); 413 qemu_fclose(s->file); 414 s->file = NULL; 415 } 416 417 assert(s->state != MIGRATION_STATUS_ACTIVE); 418 419 if (s->state != MIGRATION_STATUS_COMPLETED) { 420 qemu_savevm_state_cancel(); 421 if (s->state == MIGRATION_STATUS_CANCELLING) { 422 migrate_set_state(s, MIGRATION_STATUS_CANCELLING, 423 MIGRATION_STATUS_CANCELLED); 424 } 425 } 426 427 notifier_list_notify(&migration_state_notifiers, s); 428 } 429 430 void migrate_fd_error(MigrationState *s) 
{
    /* Mark the migration as failed before it ever got a file/thread;
     * only valid while s->file is still NULL. */
    trace_migrate_fd_error();
    assert(s->file == NULL);
    s->state = MIGRATION_STATUS_FAILED;
    trace_migrate_set_state(MIGRATION_STATUS_FAILED);
    notifier_list_notify(&migration_state_notifiers, s);
}

/* Request cancellation of the running (or starting) migration. */
static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->file;
    trace_migrate_fd_cancel();

    /* CAS loop: keep retrying while the state is SETUP/ACTIVE, since the
     * migration thread may be changing it concurrently. */
    do {
        old_state = s->state;
        if (old_state != MIGRATION_STATUS_SETUP &&
            old_state != MIGRATION_STATUS_ACTIVE) {
            break;
        }
        migrate_set_state(s, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
}

/* Register a notifier fired on every migration state change. */
void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

/* Unregister a previously added state-change notifier. */
void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}

/* True while the migration is still in its setup phase. */
bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}

/* True once the migration completed successfully. */
bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}

/* True when the migration ended without success (cancelled or failed). */
bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}

/*
 * Reset the singleton MigrationState for a new outgoing migration,
 * preserving user-configured settings (capabilities, cache size,
 * parameters, bandwidth limit) across the memset.
 */
static MigrationState *migrate_init(const MigrationParams *params)
{
    MigrationState *s = migrate_get_current();
    int64_t bandwidth_limit = s->bandwidth_limit;
    bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
    int64_t xbzrle_cache_size = s->xbzrle_cache_size;
    int compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
    int compress_thread_count =
            s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
    int decompress_thread_count =
            s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];

    /* Save settings that must survive the wipe below. */
    memcpy(enabled_capabilities, s->enabled_capabilities,
           sizeof(enabled_capabilities));

    memset(s, 0, sizeof(*s));
    s->params = *params;
    memcpy(s->enabled_capabilities, enabled_capabilities,
           sizeof(enabled_capabilities));
    s->xbzrle_cache_size = xbzrle_cache_size;

    s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
    s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
               compress_thread_count;
    s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
               decompress_thread_count;
    s->bandwidth_limit = bandwidth_limit;
    s->state = MIGRATION_STATUS_SETUP;
    trace_migrate_set_state(MIGRATION_STATUS_SETUP);

    /* While active, total_time holds the start timestamp; it becomes a
     * duration when the migration completes (see migration_thread). */
    s->total_time =
qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 523 return s; 524 } 525 526 static GSList *migration_blockers; 527 528 void migrate_add_blocker(Error *reason) 529 { 530 migration_blockers = g_slist_prepend(migration_blockers, reason); 531 } 532 533 void migrate_del_blocker(Error *reason) 534 { 535 migration_blockers = g_slist_remove(migration_blockers, reason); 536 } 537 538 void qmp_migrate_incoming(const char *uri, Error **errp) 539 { 540 Error *local_err = NULL; 541 static bool once = true; 542 543 if (!deferred_incoming) { 544 error_setg(errp, "For use with '-incoming defer'"); 545 return; 546 } 547 if (!once) { 548 error_setg(errp, "The incoming migration has already been started"); 549 } 550 551 qemu_start_incoming_migration(uri, &local_err); 552 553 if (local_err) { 554 error_propagate(errp, local_err); 555 return; 556 } 557 558 once = false; 559 } 560 561 void qmp_migrate(const char *uri, bool has_blk, bool blk, 562 bool has_inc, bool inc, bool has_detach, bool detach, 563 Error **errp) 564 { 565 Error *local_err = NULL; 566 MigrationState *s = migrate_get_current(); 567 MigrationParams params; 568 const char *p; 569 570 params.blk = has_blk && blk; 571 params.shared = has_inc && inc; 572 573 if (s->state == MIGRATION_STATUS_ACTIVE || 574 s->state == MIGRATION_STATUS_SETUP || 575 s->state == MIGRATION_STATUS_CANCELLING) { 576 error_setg(errp, QERR_MIGRATION_ACTIVE); 577 return; 578 } 579 580 if (runstate_check(RUN_STATE_INMIGRATE)) { 581 error_setg(errp, "Guest is waiting for an incoming migration"); 582 return; 583 } 584 585 if (qemu_savevm_state_blocked(errp)) { 586 return; 587 } 588 589 if (migration_blockers) { 590 *errp = error_copy(migration_blockers->data); 591 return; 592 } 593 594 s = migrate_init(¶ms); 595 596 if (strstart(uri, "tcp:", &p)) { 597 tcp_start_outgoing_migration(s, p, &local_err); 598 #ifdef CONFIG_RDMA 599 } else if (strstart(uri, "rdma:", &p)) { 600 rdma_start_outgoing_migration(s, p, &local_err); 601 #endif 602 #if !defined(WIN32) 603 } 
 else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
#endif
    } else {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        s->state = MIGRATION_STATUS_FAILED;
        return;
    }

    if (local_err) {
        migrate_fd_error(s);
        error_propagate(errp, local_err);
        return;
    }
}

/* QMP: cancel the current outgoing migration, if any. */
void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}

/*
 * QMP: resize the XBZRLE page cache.  The value is validated against the
 * host address space and guest RAM size before being applied.
 */
void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();
    int64_t new_size;

    /* Check for truncation */
    if (value != (size_t)value) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return;
    }

    /* Cache should not be larger than guest ram size */
    if (value > ram_bytes_total()) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeds guest ram size ");
        return;
    }

    new_size = xbzrle_cache_resize(value);
    if (new_size < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is smaller than page size");
        return;
    }

    s->xbzrle_cache_size = new_size;
}

/* QMP: report the configured XBZRLE cache size in bytes. */
int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}

/*
 * QMP: set the migration bandwidth limit (bytes/second), clamped to
 * [0, SIZE_MAX].  Applied immediately when a migration file is open.
 */
void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrationState *s;

    if (value < 0) {
        value = 0;
    }
    if (value > SIZE_MAX) {
        value = SIZE_MAX;
    }

    s = migrate_get_current();
    s->bandwidth_limit = value;
    if (s->file) {
        /* The limit is applied per BUFFER_DELAY chunk, hence the ratio. */
        qemu_file_set_rate_limit(s->file, s->bandwidth_limit / XFER_LIMIT_RATIO);
    }
}

void qmp_migrate_set_downtime(double value, Error **errp)
{
    /* QMP: user supplies seconds; store nanoseconds, clamped to the
     * representable uint64_t range. */
    value *= 1e9;
    value = MAX(0, MIN(UINT64_MAX, value));
    max_downtime = (uint64_t)value;
}

/* True when the auto-converge capability is enabled. */
bool migrate_auto_converge(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
}

/* True when the zero-blocks capability is enabled. */
bool migrate_zero_blocks(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
}

/* True when the compression capability is enabled. */
bool migrate_use_compression(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
}

/* Configured compression level (0 = none ... 9 = best ratio). */
int migrate_compress_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
}

/* Configured number of compression worker threads. */
int migrate_compress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
}

/* Configured number of decompression worker threads. */
int migrate_decompress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
}

/* Non-zero when the XBZRLE capability is enabled. */
int migrate_use_xbzrle(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}

/* Configured XBZRLE cache size in bytes. */
int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->xbzrle_cache_size;
}

/* migration thread support */

/*
 * Body of the outgoing migration thread: iterate the savevm state until
 * the remaining dirty data fits in the downtime budget, then stop the VM
 * and send the final state.
 */
static void *migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    int64_t initial_bytes = 0;
    /* max_size: how many bytes we believe we can send within the allowed
     * downtime at the measured bandwidth. */
    int64_t max_size = 0;
    int64_t start_time = initial_time;
    bool old_vm_running = false;

    qemu_savevm_state_header(s->file);
    qemu_savevm_state_begin(s->file, &s->params);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
    migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_ACTIVE);

    while (s->state == MIGRATION_STATUS_ACTIVE) {
        int64_t current_time;
        uint64_t pending_size;

        if (!qemu_file_rate_limit(s->file)) {
            pending_size = qemu_savevm_state_pending(s->file, max_size);
            trace_migrate_pending(pending_size, max_size);
            if (pending_size && pending_size >= max_size) {
                /* Too much left to fit in the downtime budget: keep
                 * iterating live. */
                qemu_savevm_state_iterate(s->file);
            } else {
                int ret;

                /* Final phase: stop the guest and send the rest. */
                qemu_mutex_lock_iothread();
                start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
                qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
                old_vm_running = runstate_is_running();

                ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
                if (ret >= 0) {
                    /* No point throttling the final blast. */
                    qemu_file_set_rate_limit(s->file, INT64_MAX);
                    qemu_savevm_state_complete(s->file);
                }
                qemu_mutex_unlock_iothread();

                if (ret < 0) {
                    migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
                                      MIGRATION_STATUS_FAILED);
                    break;
                }

                if (!qemu_file_get_error(s->file)) {
                    migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
                                      MIGRATION_STATUS_COMPLETED);
                    break;
                }
            }
        }

        if (qemu_file_get_error(s->file)) {
            migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
                              MIGRATION_STATUS_FAILED);
            break;
        }
        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        if (current_time >= initial_time + BUFFER_DELAY) {
            uint64_t transferred_bytes = qemu_ftell(s->file) - initial_bytes;
            uint64_t time_spent = current_time - initial_time;
            /* NOTE(review): uint64/uint64 division truncates before the
             * result is widened to double (bytes per millisecond) —
             * confirm the loss of sub-integer precision is intended. */
            double bandwidth = transferred_bytes / time_spent;
            /* Bytes sendable within max_downtime ns at this bandwidth
             * (bandwidth is per-ms, downtime in ns, hence / 1000000). */
            max_size = bandwidth * migrate_max_downtime() / 1000000;

            s->mbps = time_spent ?
                      (((double) transferred_bytes * 8.0) /
                      ((double) time_spent / 1000.0)) / 1000.0 / 1000.0 : -1;

            trace_migrate_transferred(transferred_bytes, time_spent,
                                      bandwidth, max_size);
            /* if we haven't sent anything, we don't want to recalculate
               10000 is a small enough number for our purposes */
            if (s->dirty_bytes_rate && transferred_bytes > 10000) {
                s->expected_downtime = s->dirty_bytes_rate / bandwidth;
            }

            qemu_file_reset_rate_limit(s->file);
            initial_time = current_time;
            initial_bytes = qemu_ftell(s->file);
        }
        if (qemu_file_rate_limit(s->file)) {
            /* usleep expects microseconds */
            g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
        }
    }

    qemu_mutex_lock_iothread();
    if (s->state == MIGRATION_STATUS_COMPLETED) {
        int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        uint64_t transferred_bytes = qemu_ftell(s->file);
        /* Convert total_time from a start timestamp into a duration. */
        s->total_time = end_time - s->total_time;
        s->downtime = end_time - start_time;
        if (s->total_time) {
            s->mbps = (((double) transferred_bytes * 8.0) /
                       ((double) s->total_time)) / 1000;
        }
        runstate_set(RUN_STATE_POSTMIGRATE);
    } else {
        /* Failed or cancelled: resume the guest if it was running. */
        if (old_vm_running) {
            vm_start();
        }
    }
    qemu_bh_schedule(s->cleanup_bh);
    qemu_mutex_unlock_iothread();

    return NULL;
}

/*
 * Called once the outgoing transport is connected: set up rate limiting
 * and the cleanup bottom-half, then launch the migration thread.
 */
void migrate_fd_connect(MigrationState *s)
{
    /* This is a best 1st approximation. ns to ms */
    s->expected_downtime = max_downtime/1000000;
    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);

    qemu_file_set_rate_limit(s->file,
                             s->bandwidth_limit / XFER_LIMIT_RATIO);

    /* Notify before starting migration thread */
    notifier_list_notify(&migration_state_notifiers, s);

    migrate_compress_threads_create();
    qemu_thread_create(&s->thread, "migration", migration_thread, s,
                       QEMU_THREAD_JOINABLE);
}