1 /* 2 * QEMU live migration 3 * 4 * Copyright IBM, Corp. 2008 5 * 6 * Authors: 7 * Anthony Liguori <aliguori@us.ibm.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 * Contributions after 2012-01-13 are licensed under the terms of the 13 * GNU GPL, version 2 or (at your option) any later version. 14 */ 15 16 #include "qemu/osdep.h" 17 #include "qemu/cutils.h" 18 #include "qemu/error-report.h" 19 #include "migration/blocker.h" 20 #include "exec.h" 21 #include "fd.h" 22 #include "socket.h" 23 #include "rdma.h" 24 #include "ram.h" 25 #include "migration/global_state.h" 26 #include "migration/misc.h" 27 #include "migration.h" 28 #include "savevm.h" 29 #include "qemu-file-channel.h" 30 #include "qemu-file.h" 31 #include "migration/vmstate.h" 32 #include "block/block.h" 33 #include "qapi/qmp/qerror.h" 34 #include "qapi/util.h" 35 #include "qemu/rcu.h" 36 #include "block.h" 37 #include "postcopy-ram.h" 38 #include "qemu/thread.h" 39 #include "qmp-commands.h" 40 #include "trace.h" 41 #include "qapi-event.h" 42 #include "exec/target_page.h" 43 #include "io/channel-buffer.h" 44 #include "migration/colo.h" 45 #include "hw/boards.h" 46 #include "monitor/monitor.h" 47 48 #define MAX_THROTTLE (32 << 20) /* Migration transfer speed throttling */ 49 50 /* Amount of time to allocate to each "chunk" of bandwidth-throttled 51 * data. */ 52 #define BUFFER_DELAY 100 53 #define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY) 54 55 /* Time in milliseconds we are allowed to stop the source, 56 * for sending the last part */ 57 #define DEFAULT_MIGRATE_SET_DOWNTIME 300 58 59 /* Maximum migrate downtime set to 2000 seconds */ 60 #define MAX_MIGRATE_DOWNTIME_SECONDS 2000 61 #define MAX_MIGRATE_DOWNTIME (MAX_MIGRATE_DOWNTIME_SECONDS * 1000) 62 63 /* Default compression thread count */ 64 #define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8 65 /* Default decompression thread count, usually decompression is at 66 * least 4 times as fast as compression.*/ 67 #define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2 68 /*0: means nocompress, 1: best speed, ... 9: best compress ratio */ 69 #define DEFAULT_MIGRATE_COMPRESS_LEVEL 1 70 /* Define default autoconverge cpu throttle migration parameters */ 71 #define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20 72 #define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10 73 74 /* Migration XBZRLE default cache size */ 75 #define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024) 76 77 /* The delay time (in ms) between two COLO checkpoints 78 * Note: Please change this default value to 10000 when we support hybrid mode. 79 */ 80 #define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY 200 81 82 static NotifierList migration_state_notifiers = 83 NOTIFIER_LIST_INITIALIZER(migration_state_notifiers); 84 85 static bool deferred_incoming; 86 87 /* Messages sent on the return path from destination to source */ 88 enum mig_rp_message_type { 89 MIG_RP_MSG_INVALID = 0, /* Must be 0 */ 90 MIG_RP_MSG_SHUT, /* sibling will not send any more RP messages */ 91 MIG_RP_MSG_PONG, /* Response to a PING; data (seq: be32 ) */ 92 93 MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */ 94 MIG_RP_MSG_REQ_PAGES, /* data (start: be64, len: be32) */ 95 96 MIG_RP_MSG_MAX 97 }; 98 99 /* When we add fault tolerance, we could have several 100 migrations at once. For now we don't need to add 101 dynamic creation of migration */ 102 103 static MigrationState *current_migration; 104 105 void migration_object_init(void) 106 { 107 MachineState *ms = MACHINE(qdev_get_machine()); 108 109 /* This can only be called once. */ 110 assert(!current_migration); 111 current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION)); 112 113 /* 114 * We cannot really do this in migration_instance_init() since at 115 * that time global properties are not yet applied, then this 116 * value will be definitely replaced by something else. 117 */ 118 if (ms->enforce_config_section) { 119 current_migration->send_configuration = true; 120 } 121 } 122 123 /* For outgoing */ 124 MigrationState *migrate_get_current(void) 125 { 126 /* This can only be called after the object created. */ 127 assert(current_migration); 128 return current_migration; 129 } 130 131 void migration_only_migratable_set(void) 132 { 133 migrate_get_current()->only_migratable = true; 134 } 135 136 MigrationIncomingState *migration_incoming_get_current(void) 137 { 138 static bool once; 139 static MigrationIncomingState mis_current; 140 141 if (!once) { 142 mis_current.state = MIGRATION_STATUS_NONE; 143 memset(&mis_current, 0, sizeof(MigrationIncomingState)); 144 qemu_mutex_init(&mis_current.rp_mutex); 145 qemu_event_init(&mis_current.main_thread_load_event, false); 146 once = true; 147 } 148 return &mis_current; 149 } 150 151 void migration_incoming_state_destroy(void) 152 { 153 struct MigrationIncomingState *mis = migration_incoming_get_current(); 154 155 if (mis->to_src_file) { 156 /* Tell source that we are done */ 157 migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0); 158 qemu_fclose(mis->to_src_file); 159 mis->to_src_file = NULL; 160 } 161 162 if (mis->from_src_file) { 163 qemu_fclose(mis->from_src_file); 164 mis->from_src_file = NULL; 165 } 166 167 qemu_event_destroy(&mis->main_thread_load_event); 168 } 169 170 static void migrate_generate_event(int new_state) 171 { 172 if (migrate_use_events()) { 173 qapi_event_send_migration(new_state, &error_abort); 174 } 175 } 176 177 /* 178 * Called on -incoming with a defer: uri. 179 * The migration can be started later after any parameters have been 180 * changed. 181 */ 182 static void deferred_incoming_migration(Error **errp) 183 { 184 if (deferred_incoming) { 185 error_setg(errp, "Incoming migration already deferred"); 186 } 187 deferred_incoming = true; 188 } 189 190 /* 191 * Send a message on the return channel back to the source 192 * of the migration. 193 */ 194 static void migrate_send_rp_message(MigrationIncomingState *mis, 195 enum mig_rp_message_type message_type, 196 uint16_t len, void *data) 197 { 198 trace_migrate_send_rp_message((int)message_type, len); 199 qemu_mutex_lock(&mis->rp_mutex); 200 qemu_put_be16(mis->to_src_file, (unsigned int)message_type); 201 qemu_put_be16(mis->to_src_file, len); 202 qemu_put_buffer(mis->to_src_file, data, len); 203 qemu_fflush(mis->to_src_file); 204 qemu_mutex_unlock(&mis->rp_mutex); 205 } 206 207 /* Request a range of pages from the source VM at the given 208 * start address. 209 * rbname: Name of the RAMBlock to request the page in, if NULL it's the same 210 * as the last request (a name must have been given previously) 211 * Start: Address offset within the RB 212 * Len: Length in bytes required - must be a multiple of pagesize 213 */ 214 void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname, 215 ram_addr_t start, size_t len) 216 { 217 uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */ 218 size_t msglen = 12; /* start + len */ 219 220 *(uint64_t *)bufc = cpu_to_be64((uint64_t)start); 221 *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len); 222 223 if (rbname) { 224 int rbname_len = strlen(rbname); 225 assert(rbname_len < 256); 226 227 bufc[msglen++] = rbname_len; 228 memcpy(bufc + msglen, rbname, rbname_len); 229 msglen += rbname_len; 230 migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc); 231 } else { 232 migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc); 233 } 234 } 235 236 void qemu_start_incoming_migration(const char *uri, Error **errp) 237 { 238 const char *p; 239 240 qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort); 241 if (!strcmp(uri, "defer")) { 242 deferred_incoming_migration(errp); 243 } else if (strstart(uri, "tcp:", &p)) { 244 tcp_start_incoming_migration(p, errp); 245 #ifdef CONFIG_RDMA 246 } else if (strstart(uri, "rdma:", &p)) { 247 rdma_start_incoming_migration(p, errp); 248 #endif 249 } else if (strstart(uri, "exec:", &p)) { 250 exec_start_incoming_migration(p, errp); 251 } else if (strstart(uri, "unix:", &p)) { 252 unix_start_incoming_migration(p, errp); 253 } else if (strstart(uri, "fd:", &p)) { 254 fd_start_incoming_migration(p, errp); 255 } else { 256 error_setg(errp, "unknown migration protocol: %s", uri); 257 } 258 } 259 260 static void process_incoming_migration_bh(void *opaque) 261 { 262 Error *local_err = NULL; 263 MigrationIncomingState *mis = opaque; 264 265 /* Make sure all file formats flush their mutable metadata. 266 * If we get an error here, just don't restart the VM yet. */ 267 bdrv_invalidate_cache_all(&local_err); 268 if (local_err) { 269 error_report_err(local_err); 270 local_err = NULL; 271 autostart = false; 272 } 273 274 /* 275 * This must happen after all error conditions are dealt with and 276 * we're sure the VM is going to be running on this host. 277 */ 278 qemu_announce_self(); 279 280 /* If global state section was not received or we are in running 281 state, we need to obey autostart. Any other state is set with 282 runstate_set. */ 283 284 if (!global_state_received() || 285 global_state_get_runstate() == RUN_STATE_RUNNING) { 286 if (autostart) { 287 vm_start(); 288 } else { 289 runstate_set(RUN_STATE_PAUSED); 290 } 291 } else { 292 runstate_set(global_state_get_runstate()); 293 } 294 migrate_decompress_threads_join(); 295 /* 296 * This must happen after any state changes since as soon as an external 297 * observer sees this event they might start to prod at the VM assuming 298 * it's ready to use. 299 */ 300 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, 301 MIGRATION_STATUS_COMPLETED); 302 qemu_bh_delete(mis->bh); 303 migration_incoming_state_destroy(); 304 } 305 306 static void process_incoming_migration_co(void *opaque) 307 { 308 QEMUFile *f = opaque; 309 MigrationIncomingState *mis = migration_incoming_get_current(); 310 PostcopyState ps; 311 int ret; 312 313 mis->from_src_file = f; 314 mis->largest_page_size = qemu_ram_pagesize_largest(); 315 postcopy_state_set(POSTCOPY_INCOMING_NONE); 316 migrate_set_state(&mis->state, MIGRATION_STATUS_NONE, 317 MIGRATION_STATUS_ACTIVE); 318 ret = qemu_loadvm_state(f); 319 320 ps = postcopy_state_get(); 321 trace_process_incoming_migration_co_end(ret, ps); 322 if (ps != POSTCOPY_INCOMING_NONE) { 323 if (ps == POSTCOPY_INCOMING_ADVISE) { 324 /* 325 * Where a migration had postcopy enabled (and thus went to advise) 326 * but managed to complete within the precopy period, we can use 327 * the normal exit. 328 */ 329 postcopy_ram_incoming_cleanup(mis); 330 } else if (ret >= 0) { 331 /* 332 * Postcopy was started, cleanup should happen at the end of the 333 * postcopy thread. 334 */ 335 trace_process_incoming_migration_co_postcopy_end_main(); 336 return; 337 } 338 /* Else if something went wrong then just fall out of the normal exit */ 339 } 340 341 /* we get COLO info, and know if we are in COLO mode */ 342 if (!ret && migration_incoming_enable_colo()) { 343 mis->migration_incoming_co = qemu_coroutine_self(); 344 qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming", 345 colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE); 346 mis->have_colo_incoming_thread = true; 347 qemu_coroutine_yield(); 348 349 /* Wait checkpoint incoming thread exit before free resource */ 350 qemu_thread_join(&mis->colo_incoming_thread); 351 } 352 353 if (ret < 0) { 354 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, 355 MIGRATION_STATUS_FAILED); 356 error_report("load of migration failed: %s", strerror(-ret)); 357 migrate_decompress_threads_join(); 358 exit(EXIT_FAILURE); 359 } 360 361 free_xbzrle_decoded_buf(); 362 363 mis->bh = qemu_bh_new(process_incoming_migration_bh, mis); 364 qemu_bh_schedule(mis->bh); 365 } 366 367 void migration_fd_process_incoming(QEMUFile *f) 368 { 369 Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, f); 370 371 migrate_decompress_threads_create(); 372 qemu_file_set_blocking(f, false); 373 qemu_coroutine_enter(co); 374 } 375 376 /* 377 * Send a 'SHUT' message on the return channel with the given value 378 * to indicate that we've finished with the RP. Non-0 value indicates 379 * error. 380 */ 381 void migrate_send_rp_shut(MigrationIncomingState *mis, 382 uint32_t value) 383 { 384 uint32_t buf; 385 386 buf = cpu_to_be32(value); 387 migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf); 388 } 389 390 /* 391 * Send a 'PONG' message on the return channel with the given value 392 * (normally in response to a 'PING') 393 */ 394 void migrate_send_rp_pong(MigrationIncomingState *mis, 395 uint32_t value) 396 { 397 uint32_t buf; 398 399 buf = cpu_to_be32(value); 400 migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf); 401 } 402 403 MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp) 404 { 405 MigrationCapabilityStatusList *head = NULL; 406 MigrationCapabilityStatusList *caps; 407 MigrationState *s = migrate_get_current(); 408 int i; 409 410 caps = NULL; /* silence compiler warning */ 411 for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) { 412 #ifndef CONFIG_LIVE_BLOCK_MIGRATION 413 if (i == MIGRATION_CAPABILITY_BLOCK) { 414 continue; 415 } 416 #endif 417 if (i == MIGRATION_CAPABILITY_X_COLO && !colo_supported()) { 418 continue; 419 } 420 if (head == NULL) { 421 head = g_malloc0(sizeof(*caps)); 422 caps = head; 423 } else { 424 caps->next = g_malloc0(sizeof(*caps)); 425 caps = caps->next; 426 } 427 caps->value = 428 g_malloc(sizeof(*caps->value)); 429 caps->value->capability = i; 430 caps->value->state = s->enabled_capabilities[i]; 431 } 432 433 return head; 434 } 435 436 MigrationParameters *qmp_query_migrate_parameters(Error **errp) 437 { 438 MigrationParameters *params; 439 MigrationState *s = migrate_get_current(); 440 441 params = g_malloc0(sizeof(*params)); 442 params->has_compress_level = true; 443 params->compress_level = s->parameters.compress_level; 444 params->has_compress_threads = true; 445 params->compress_threads = s->parameters.compress_threads; 446 params->has_decompress_threads = true; 447 params->decompress_threads = s->parameters.decompress_threads; 448 params->has_cpu_throttle_initial = true; 449 params->cpu_throttle_initial = s->parameters.cpu_throttle_initial; 450 params->has_cpu_throttle_increment = true; 451 params->cpu_throttle_increment = s->parameters.cpu_throttle_increment; 452 params->has_tls_creds = !!s->parameters.tls_creds; 453 params->tls_creds = g_strdup(s->parameters.tls_creds); 454 params->has_tls_hostname = !!s->parameters.tls_hostname; 455 params->tls_hostname = g_strdup(s->parameters.tls_hostname); 456 params->has_max_bandwidth = true; 457 params->max_bandwidth = s->parameters.max_bandwidth; 458 params->has_downtime_limit = true; 459 params->downtime_limit = s->parameters.downtime_limit; 460 params->has_x_checkpoint_delay = true; 461 params->x_checkpoint_delay = s->parameters.x_checkpoint_delay; 462 params->has_block_incremental = true; 463 params->block_incremental = s->parameters.block_incremental; 464 465 return params; 466 } 467 468 /* 469 * Return true if we're already in the middle of a migration 470 * (i.e. any of the active or setup states) 471 */ 472 static bool migration_is_setup_or_active(int state) 473 { 474 switch (state) { 475 case MIGRATION_STATUS_ACTIVE: 476 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 477 case MIGRATION_STATUS_SETUP: 478 return true; 479 480 default: 481 return false; 482 483 } 484 } 485 486 static void populate_ram_info(MigrationInfo *info, MigrationState *s) 487 { 488 info->has_ram = true; 489 info->ram = g_malloc0(sizeof(*info->ram)); 490 info->ram->transferred = ram_counters.transferred; 491 info->ram->total = ram_bytes_total(); 492 info->ram->duplicate = ram_counters.duplicate; 493 /* legacy value. It is not used anymore */ 494 info->ram->skipped = 0; 495 info->ram->normal = ram_counters.normal; 496 info->ram->normal_bytes = ram_counters.normal * 497 qemu_target_page_size(); 498 info->ram->mbps = s->mbps; 499 info->ram->dirty_sync_count = ram_counters.dirty_sync_count; 500 info->ram->postcopy_requests = ram_counters.postcopy_requests; 501 info->ram->page_size = qemu_target_page_size(); 502 503 if (migrate_use_xbzrle()) { 504 info->has_xbzrle_cache = true; 505 info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache)); 506 info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size(); 507 info->xbzrle_cache->bytes = xbzrle_counters.bytes; 508 info->xbzrle_cache->pages = xbzrle_counters.pages; 509 info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss; 510 info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate; 511 info->xbzrle_cache->overflow = xbzrle_counters.overflow; 512 } 513 514 if (cpu_throttle_active()) { 515 info->has_cpu_throttle_percentage = true; 516 info->cpu_throttle_percentage = cpu_throttle_get_percentage(); 517 } 518 519 if (s->state != MIGRATION_STATUS_COMPLETED) { 520 info->ram->remaining = ram_bytes_remaining(); 521 info->ram->dirty_pages_rate = ram_counters.dirty_pages_rate; 522 } 523 } 524 525 static void populate_disk_info(MigrationInfo *info) 526 { 527 if (blk_mig_active()) { 528 info->has_disk = true; 529 info->disk = g_malloc0(sizeof(*info->disk)); 530 info->disk->transferred = blk_mig_bytes_transferred(); 531 info->disk->remaining = blk_mig_bytes_remaining(); 532 info->disk->total = blk_mig_bytes_total(); 533 } 534 } 535 536 MigrationInfo *qmp_query_migrate(Error **errp) 537 { 538 MigrationInfo *info = g_malloc0(sizeof(*info)); 539 MigrationState *s = migrate_get_current(); 540 541 switch (s->state) { 542 case MIGRATION_STATUS_NONE: 543 /* no migration has happened ever */ 544 break; 545 case MIGRATION_STATUS_SETUP: 546 info->has_status = true; 547 info->has_total_time = false; 548 break; 549 case MIGRATION_STATUS_ACTIVE: 550 case MIGRATION_STATUS_CANCELLING: 551 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 552 /* TODO add some postcopy stats */ 553 info->has_status = true; 554 info->has_total_time = true; 555 info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) 556 - s->total_time; 557 info->has_expected_downtime = true; 558 info->expected_downtime = s->expected_downtime; 559 info->has_setup_time = true; 560 info->setup_time = s->setup_time; 561 562 populate_ram_info(info, s); 563 populate_disk_info(info); 564 break; 565 case MIGRATION_STATUS_COLO: 566 info->has_status = true; 567 /* TODO: display COLO specific information (checkpoint info etc.) */ 568 break; 569 case MIGRATION_STATUS_COMPLETED: 570 info->has_status = true; 571 info->has_total_time = true; 572 info->total_time = s->total_time; 573 info->has_downtime = true; 574 info->downtime = s->downtime; 575 info->has_setup_time = true; 576 info->setup_time = s->setup_time; 577 578 populate_ram_info(info, s); 579 break; 580 case MIGRATION_STATUS_FAILED: 581 info->has_status = true; 582 if (s->error) { 583 info->has_error_desc = true; 584 info->error_desc = g_strdup(error_get_pretty(s->error)); 585 } 586 break; 587 case MIGRATION_STATUS_CANCELLED: 588 info->has_status = true; 589 break; 590 } 591 info->status = s->state; 592 593 return info; 594 } 595 596 void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params, 597 Error **errp) 598 { 599 MigrationState *s = migrate_get_current(); 600 MigrationCapabilityStatusList *cap; 601 bool old_postcopy_cap = migrate_postcopy_ram(); 602 603 if (migration_is_setup_or_active(s->state)) { 604 error_setg(errp, QERR_MIGRATION_ACTIVE); 605 return; 606 } 607 608 for (cap = params; cap; cap = cap->next) { 609 #ifndef CONFIG_LIVE_BLOCK_MIGRATION 610 if (cap->value->capability == MIGRATION_CAPABILITY_BLOCK 611 && cap->value->state) { 612 error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) " 613 "block migration"); 614 error_append_hint(errp, "Use drive_mirror+NBD instead.\n"); 615 continue; 616 } 617 #endif 618 if (cap->value->capability == MIGRATION_CAPABILITY_X_COLO) { 619 if (!colo_supported()) { 620 error_setg(errp, "COLO is not currently supported, please" 621 " configure with --enable-colo option in order to" 622 " support COLO feature"); 623 continue; 624 } 625 } 626 s->enabled_capabilities[cap->value->capability] = cap->value->state; 627 } 628 629 if (migrate_postcopy_ram()) { 630 if (migrate_use_compression()) { 631 /* The decompression threads asynchronously write into RAM 632 * rather than use the atomic copies needed to avoid 633 * userfaulting. It should be possible to fix the decompression 634 * threads for compatibility in future. 635 */ 636 error_report("Postcopy is not currently compatible with " 637 "compression"); 638 s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] = 639 false; 640 } 641 /* This check is reasonably expensive, so only when it's being 642 * set the first time, also it's only the destination that needs 643 * special support. 644 */ 645 if (!old_postcopy_cap && runstate_check(RUN_STATE_INMIGRATE) && 646 !postcopy_ram_supported_by_host()) { 647 /* postcopy_ram_supported_by_host will have emitted a more 648 * detailed message 649 */ 650 error_report("Postcopy is not supported"); 651 s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] = 652 false; 653 } 654 } 655 } 656 657 void qmp_migrate_set_parameters(MigrationParameters *params, Error **errp) 658 { 659 MigrationState *s = migrate_get_current(); 660 661 if (params->has_compress_level && 662 (params->compress_level < 0 || params->compress_level > 9)) { 663 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level", 664 "is invalid, it should be in the range of 0 to 9"); 665 return; 666 } 667 if (params->has_compress_threads && 668 (params->compress_threads < 1 || params->compress_threads > 255)) { 669 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, 670 "compress_threads", 671 "is invalid, it should be in the range of 1 to 255"); 672 return; 673 } 674 if (params->has_decompress_threads && 675 (params->decompress_threads < 1 || params->decompress_threads > 255)) { 676 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, 677 "decompress_threads", 678 "is invalid, it should be in the range of 1 to 255"); 679 return; 680 } 681 if (params->has_cpu_throttle_initial && 682 (params->cpu_throttle_initial < 1 || 683 params->cpu_throttle_initial > 99)) { 684 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, 685 "cpu_throttle_initial", 686 "an integer in the range of 1 to 99"); 687 return; 688 } 689 if (params->has_cpu_throttle_increment && 690 (params->cpu_throttle_increment < 1 || 691 params->cpu_throttle_increment > 99)) { 692 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, 693 "cpu_throttle_increment", 694 "an integer in the range of 1 to 99"); 695 return; 696 } 697 if (params->has_max_bandwidth && 698 (params->max_bandwidth < 0 || params->max_bandwidth > SIZE_MAX)) { 699 error_setg(errp, "Parameter 'max_bandwidth' expects an integer in the" 700 " range of 0 to %zu bytes/second", SIZE_MAX); 701 return; 702 } 703 if (params->has_downtime_limit && 704 (params->downtime_limit < 0 || 705 params->downtime_limit > MAX_MIGRATE_DOWNTIME)) { 706 error_setg(errp, "Parameter 'downtime_limit' expects an integer in " 707 "the range of 0 to %d milliseconds", 708 MAX_MIGRATE_DOWNTIME); 709 return; 710 } 711 if (params->has_x_checkpoint_delay && (params->x_checkpoint_delay < 0)) { 712 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, 713 "x_checkpoint_delay", 714 "is invalid, it should be positive"); 715 } 716 717 if (params->has_compress_level) { 718 s->parameters.compress_level = params->compress_level; 719 } 720 if (params->has_compress_threads) { 721 s->parameters.compress_threads = params->compress_threads; 722 } 723 if (params->has_decompress_threads) { 724 s->parameters.decompress_threads = params->decompress_threads; 725 } 726 if (params->has_cpu_throttle_initial) { 727 s->parameters.cpu_throttle_initial = params->cpu_throttle_initial; 728 } 729 if (params->has_cpu_throttle_increment) { 730 s->parameters.cpu_throttle_increment = params->cpu_throttle_increment; 731 } 732 if (params->has_tls_creds) { 733 g_free(s->parameters.tls_creds); 734 s->parameters.tls_creds = g_strdup(params->tls_creds); 735 } 736 if (params->has_tls_hostname) { 737 g_free(s->parameters.tls_hostname); 738 s->parameters.tls_hostname = g_strdup(params->tls_hostname); 739 } 740 if (params->has_max_bandwidth) { 741 s->parameters.max_bandwidth = params->max_bandwidth; 742 if (s->to_dst_file) { 743 qemu_file_set_rate_limit(s->to_dst_file, 744 s->parameters.max_bandwidth / XFER_LIMIT_RATIO); 745 } 746 } 747 if (params->has_downtime_limit) { 748 s->parameters.downtime_limit = params->downtime_limit; 749 } 750 751 if (params->has_x_checkpoint_delay) { 752 s->parameters.x_checkpoint_delay = params->x_checkpoint_delay; 753 if (migration_in_colo_state()) { 754 colo_checkpoint_notify(s); 755 } 756 } 757 if (params->has_block_incremental) { 758 s->parameters.block_incremental = params->block_incremental; 759 } 760 } 761 762 763 void qmp_migrate_start_postcopy(Error **errp) 764 { 765 MigrationState *s = migrate_get_current(); 766 767 if (!migrate_postcopy_ram()) { 768 error_setg(errp, "Enable postcopy with migrate_set_capability before" 769 " the start of migration"); 770 return; 771 } 772 773 if (s->state == MIGRATION_STATUS_NONE) { 774 error_setg(errp, "Postcopy must be started after migration has been" 775 " started"); 776 return; 777 } 778 /* 779 * we don't error if migration has finished since that would be racy 780 * with issuing this command. 781 */ 782 atomic_set(&s->start_postcopy, true); 783 } 784 785 /* shared migration helpers */ 786 787 void migrate_set_state(int *state, int old_state, int new_state) 788 { 789 if (atomic_cmpxchg(state, old_state, new_state) == old_state) { 790 trace_migrate_set_state(new_state); 791 migrate_generate_event(new_state); 792 } 793 } 794 795 void migrate_set_block_enabled(bool value, Error **errp) 796 { 797 MigrationCapabilityStatusList *cap; 798 799 cap = g_new0(MigrationCapabilityStatusList, 1); 800 cap->value = g_new0(MigrationCapabilityStatus, 1); 801 cap->value->capability = MIGRATION_CAPABILITY_BLOCK; 802 cap->value->state = value; 803 qmp_migrate_set_capabilities(cap, errp); 804 qapi_free_MigrationCapabilityStatusList(cap); 805 } 806 807 static void migrate_set_block_incremental(MigrationState *s, bool value) 808 { 809 s->parameters.block_incremental = value; 810 } 811 812 static void block_cleanup_parameters(MigrationState *s) 813 { 814 if (s->must_remove_block_options) { 815 /* setting to false can never fail */ 816 migrate_set_block_enabled(false, &error_abort); 817 migrate_set_block_incremental(s, false); 818 s->must_remove_block_options = false; 819 } 820 } 821 822 static void migrate_fd_cleanup(void *opaque) 823 { 824 MigrationState *s = opaque; 825 826 qemu_bh_delete(s->cleanup_bh); 827 s->cleanup_bh = NULL; 828 829 if (s->to_dst_file) { 830 trace_migrate_fd_cleanup(); 831 qemu_mutex_unlock_iothread(); 832 if (s->migration_thread_running) { 833 qemu_thread_join(&s->thread); 834 s->migration_thread_running = false; 835 } 836 qemu_mutex_lock_iothread(); 837 838 migrate_compress_threads_join(); 839 qemu_fclose(s->to_dst_file); 840 s->to_dst_file = NULL; 841 } 842 843 assert((s->state != MIGRATION_STATUS_ACTIVE) && 844 (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE)); 845 846 if (s->state == MIGRATION_STATUS_CANCELLING) { 847 migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING, 848 MIGRATION_STATUS_CANCELLED); 849 } 850 851 notifier_list_notify(&migration_state_notifiers, s); 852 block_cleanup_parameters(s); 853 } 854 855 void migrate_fd_error(MigrationState *s, const Error *error) 856 { 857 trace_migrate_fd_error(error_get_pretty(error)); 858 assert(s->to_dst_file == NULL); 859 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, 860 MIGRATION_STATUS_FAILED); 861 if (!s->error) { 862 s->error = error_copy(error); 863 } 864 notifier_list_notify(&migration_state_notifiers, s); 865 block_cleanup_parameters(s); 866 } 867 868 static void migrate_fd_cancel(MigrationState *s) 869 { 870 int old_state ; 871 QEMUFile *f = migrate_get_current()->to_dst_file; 872 trace_migrate_fd_cancel(); 873 874 if (s->rp_state.from_dst_file) { 875 /* shutdown the rp socket, so causing the rp thread to shutdown */ 876 qemu_file_shutdown(s->rp_state.from_dst_file); 877 } 878 879 do { 880 old_state = s->state; 881 if (!migration_is_setup_or_active(old_state)) { 882 break; 883 } 884 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING); 885 } while (s->state != MIGRATION_STATUS_CANCELLING); 886 887 /* 888 * If we're unlucky the migration code might be stuck somewhere in a 889 * send/write while the network has failed and is waiting to timeout; 890 * if we've got shutdown(2) available then we can force it to quit. 891 * The outgoing qemu file gets closed in migrate_fd_cleanup that is 892 * called in a bh, so there is no race against this cancel. 893 */ 894 if (s->state == MIGRATION_STATUS_CANCELLING && f) { 895 qemu_file_shutdown(f); 896 } 897 if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) { 898 Error *local_err = NULL; 899 900 bdrv_invalidate_cache_all(&local_err); 901 if (local_err) { 902 error_report_err(local_err); 903 } else { 904 s->block_inactive = false; 905 } 906 } 907 block_cleanup_parameters(s); 908 } 909 910 void add_migration_state_change_notifier(Notifier *notify) 911 { 912 notifier_list_add(&migration_state_notifiers, notify); 913 } 914 915 void remove_migration_state_change_notifier(Notifier *notify) 916 { 917 notifier_remove(notify); 918 } 919 920 bool migration_in_setup(MigrationState *s) 921 { 922 return s->state == MIGRATION_STATUS_SETUP; 923 } 924 925 bool migration_has_finished(MigrationState *s) 926 { 927 return s->state == MIGRATION_STATUS_COMPLETED; 928 } 929 930 bool migration_has_failed(MigrationState *s) 931 { 932 return (s->state == MIGRATION_STATUS_CANCELLED || 933 s->state == MIGRATION_STATUS_FAILED); 934 } 935 936 bool migration_in_postcopy(void) 937 { 938 MigrationState *s = migrate_get_current(); 939 940 return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE); 941 } 942 943 bool migration_in_postcopy_after_devices(MigrationState *s) 944 { 945 return migration_in_postcopy() && s->postcopy_after_devices; 946 } 947 948 bool migration_is_idle(void) 949 { 950 MigrationState *s = migrate_get_current(); 951 952 switch (s->state) { 953 case MIGRATION_STATUS_NONE: 954 case MIGRATION_STATUS_CANCELLED: 955 case MIGRATION_STATUS_COMPLETED: 956 case MIGRATION_STATUS_FAILED: 957 return true; 958 case MIGRATION_STATUS_SETUP: 959 case MIGRATION_STATUS_CANCELLING: 960 case MIGRATION_STATUS_ACTIVE: 961 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 962 case MIGRATION_STATUS_COLO: 963 return false; 964 case MIGRATION_STATUS__MAX: 965 g_assert_not_reached(); 966 } 967 968 return false; 969 } 970 971 MigrationState *migrate_init(void) 972 { 973 MigrationState *s = migrate_get_current(); 974 975 /* 976 * Reinitialise all migration state, except 977 * parameters/capabilities that the user set, and 978 * locks. 979 */ 980 s->bytes_xfer = 0; 981 s->xfer_limit = 0; 982 s->cleanup_bh = 0; 983 s->to_dst_file = NULL; 984 s->state = MIGRATION_STATUS_NONE; 985 s->rp_state.from_dst_file = NULL; 986 s->rp_state.error = false; 987 s->mbps = 0.0; 988 s->downtime = 0; 989 s->expected_downtime = 0; 990 s->setup_time = 0; 991 s->start_postcopy = false; 992 s->postcopy_after_devices = false; 993 s->migration_thread_running = false; 994 error_free(s->error); 995 s->error = NULL; 996 997 migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP); 998 999 s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 1000 return s; 1001 } 1002 1003 static GSList *migration_blockers; 1004 1005 int migrate_add_blocker(Error *reason, Error **errp) 1006 { 1007 if (migrate_get_current()->only_migratable) { 1008 error_propagate(errp, error_copy(reason)); 1009 error_prepend(errp, "disallowing migration blocker " 1010 "(--only_migratable) for: "); 1011 return -EACCES; 1012 } 1013 1014 if (migration_is_idle()) { 1015 migration_blockers = g_slist_prepend(migration_blockers, reason); 1016 return 0; 1017 } 1018 1019 error_propagate(errp, error_copy(reason)); 1020 error_prepend(errp, "disallowing migration blocker (migration in " 1021 "progress) for: "); 1022 return -EBUSY; 1023 } 1024 1025 void migrate_del_blocker(Error *reason) 1026 { 1027 migration_blockers = g_slist_remove(migration_blockers, reason); 1028 } 1029 1030 void qmp_migrate_incoming(const char *uri, Error **errp) 1031 { 1032 Error *local_err = NULL; 1033 static bool once = true; 1034 1035 if (!deferred_incoming) { 1036 error_setg(errp, "For use with '-incoming defer'"); 1037 return; 1038 } 1039 if (!once) { 1040 error_setg(errp, "The incoming migration has already been started"); 1041 } 1042 1043 qemu_start_incoming_migration(uri, &local_err); 1044 1045 if (local_err) { 1046 error_propagate(errp, local_err); 1047 return; 1048 } 1049 1050 once = false; 1051 } 1052 1053 bool migration_is_blocked(Error **errp) 1054 { 1055 if (qemu_savevm_state_blocked(errp)) { 1056 return true; 1057 } 1058 1059 if (migration_blockers) { 1060 error_propagate(errp, error_copy(migration_blockers->data)); 1061 return true; 1062 } 1063 1064 return false; 1065 } 1066 1067 void qmp_migrate(const char *uri, bool has_blk, bool blk, 1068 bool has_inc, bool inc, bool has_detach, bool detach, 1069 Error **errp) 1070 { 1071 Error *local_err = NULL; 1072 MigrationState *s = migrate_get_current(); 1073 const char *p; 1074 1075 if (migration_is_setup_or_active(s->state) || 1076 s->state == MIGRATION_STATUS_CANCELLING || 1077 s->state == MIGRATION_STATUS_COLO) { 1078 error_setg(errp, QERR_MIGRATION_ACTIVE); 1079 return; 1080 } 1081 if (runstate_check(RUN_STATE_INMIGRATE)) { 1082 error_setg(errp, "Guest is waiting for an incoming migration"); 1083 return; 1084 } 1085 1086 if (migration_is_blocked(errp)) { 1087 return; 1088 } 1089 1090 if ((has_blk && blk) || (has_inc && inc)) { 1091 if (migrate_use_block() || migrate_use_block_incremental()) { 1092 error_setg(errp, "Command options are incompatible with " 1093 "current migration capabilities"); 1094 return; 1095 } 1096 migrate_set_block_enabled(true, &local_err); 1097 if (local_err) { 1098 error_propagate(errp, local_err); 1099 return; 1100 } 1101 s->must_remove_block_options = true; 1102 } 1103 1104 if (has_inc && inc) { 1105 migrate_set_block_incremental(s, true); 1106 } 1107 1108 s = migrate_init(); 1109 1110 if (strstart(uri, "tcp:", &p)) { 1111 tcp_start_outgoing_migration(s, p, &local_err); 1112 #ifdef CONFIG_RDMA 1113 } else if (strstart(uri, "rdma:", &p)) { 1114 rdma_start_outgoing_migration(s, p, &local_err); 1115 #endif 1116 } else if (strstart(uri, "exec:", &p)) { 1117 exec_start_outgoing_migration(s, p, &local_err); 1118 } else if (strstart(uri, "unix:", &p)) { 1119 unix_start_outgoing_migration(s, p, &local_err); 1120 } else if (strstart(uri, "fd:", &p)) { 1121 fd_start_outgoing_migration(s, p, &local_err); 1122 } else { 1123 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri", 1124 "a valid migration protocol"); 1125 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, 1126 MIGRATION_STATUS_FAILED); 1127 return; 1128 } 1129 1130 if (local_err) { 1131 migrate_fd_error(s, local_err); 1132 error_propagate(errp, local_err); 1133 return; 1134 } 1135 } 1136 1137 void qmp_migrate_cancel(Error **errp) 1138 { 1139 migrate_fd_cancel(migrate_get_current()); 1140 } 1141 1142 void qmp_migrate_set_cache_size(int64_t value, Error **errp) 1143 { 1144 MigrationState *s = migrate_get_current(); 1145 int64_t new_size; 1146 1147 /* Check for truncation */ 1148 if (value != (size_t)value) { 1149 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size", 1150 "exceeding address space"); 1151 return; 1152 } 1153 1154 /* Cache should not be larger than guest ram size */ 1155 if (value > ram_bytes_total()) { 1156 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size", 1157 "exceeds guest ram size "); 1158 return; 1159 } 1160 1161 new_size = xbzrle_cache_resize(value); 1162 if (new_size < 0) { 1163 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size", 1164 "is smaller than page size"); 1165 return; 1166 } 1167 1168 s->xbzrle_cache_size = new_size; 1169 } 1170 1171 int64_t qmp_query_migrate_cache_size(Error **errp) 1172 { 1173 return migrate_xbzrle_cache_size(); 1174 } 1175 1176 void qmp_migrate_set_speed(int64_t value, Error **errp) 1177 { 1178 MigrationParameters p = { 1179 .has_max_bandwidth = true, 1180 .max_bandwidth = value, 1181 }; 1182 1183 qmp_migrate_set_parameters(&p, errp); 1184 } 1185 1186 void qmp_migrate_set_downtime(double value, Error **errp) 1187 { 1188 if (value < 0 || value > MAX_MIGRATE_DOWNTIME_SECONDS) { 1189 error_setg(errp, "Parameter 'downtime_limit' expects an integer in " 1190 "the range of 0 to %d seconds", 1191 MAX_MIGRATE_DOWNTIME_SECONDS); 1192 return; 1193 } 1194 1195 value *= 1000; /* Convert to milliseconds */ 1196 value = MAX(0, MIN(INT64_MAX, value)); 1197 1198 MigrationParameters p = { 1199 .has_downtime_limit = true, 1200 .downtime_limit = value, 1201 }; 1202 1203 qmp_migrate_set_parameters(&p, errp); 1204 } 1205 1206 bool migrate_release_ram(void) 1207 { 1208 MigrationState *s; 1209 1210 s = migrate_get_current(); 1211 1212 return s->enabled_capabilities[MIGRATION_CAPABILITY_RELEASE_RAM]; 1213 } 1214 1215 bool migrate_postcopy_ram(void) 1216 { 1217 MigrationState *s; 1218 1219 s = migrate_get_current(); 1220 1221 return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM]; 1222 } 1223 1224 bool migrate_auto_converge(void) 1225 { 1226 MigrationState *s; 1227 1228 s = migrate_get_current(); 1229 1230 return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE]; 1231 } 1232 1233 bool migrate_zero_blocks(void) 1234 { 1235 MigrationState *s; 1236 1237 s = migrate_get_current(); 1238 1239 return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS]; 1240 } 1241 1242 bool migrate_use_compression(void) 1243 { 1244 MigrationState *s; 1245 1246 s = migrate_get_current(); 1247 1248 return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS]; 1249 } 1250 1251 int migrate_compress_level(void) 1252 { 1253 MigrationState *s; 1254 1255 s = migrate_get_current(); 1256 1257 return s->parameters.compress_level; 1258 } 1259 1260 int migrate_compress_threads(void) 1261 { 1262 MigrationState *s; 1263 1264 s = migrate_get_current(); 1265 1266 return s->parameters.compress_threads; 1267 } 1268 1269 int migrate_decompress_threads(void) 1270 { 1271 MigrationState *s; 1272 1273 s = migrate_get_current(); 1274 1275 return s->parameters.decompress_threads; 1276 } 1277 1278 bool migrate_use_events(void) 1279 { 1280 MigrationState *s; 1281 1282 s = migrate_get_current(); 1283 1284 return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS]; 1285 } 1286 1287 int migrate_use_xbzrle(void) 1288 { 1289 MigrationState *s; 1290 1291 s = migrate_get_current(); 1292 1293 return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE]; 1294 } 1295 1296 int64_t migrate_xbzrle_cache_size(void) 1297 { 1298 MigrationState *s; 1299 1300 s = migrate_get_current(); 1301 1302 return s->xbzrle_cache_size; 1303 } 1304 1305 bool migrate_use_block(void) 1306 { 1307 MigrationState *s; 1308 1309 s = migrate_get_current(); 1310 1311 return s->enabled_capabilities[MIGRATION_CAPABILITY_BLOCK]; 1312 } 1313 1314 bool migrate_use_return_path(void) 1315 { 1316 MigrationState *s; 1317 1318 s = migrate_get_current(); 1319 1320 return s->enabled_capabilities[MIGRATION_CAPABILITY_RETURN_PATH]; 1321 } 1322 1323 bool migrate_use_block_incremental(void) 1324 { 1325 MigrationState *s; 1326 1327 s = migrate_get_current(); 1328 1329 return s->parameters.block_incremental; 1330 } 1331 1332 /* migration thread support */ 1333 /* 1334 * Something bad happened to the RP stream, mark an error 1335 * The caller shall print or trace something to indicate why 1336 */ 1337 static void mark_source_rp_bad(MigrationState *s) 1338 { 1339 s->rp_state.error = true; 1340 } 1341 1342 static struct rp_cmd_args { 1343 ssize_t len; /* -1 = variable */ 1344 const char *name; 1345 } rp_cmd_args[] = { 1346 [MIG_RP_MSG_INVALID] = { .len = -1, .name = "INVALID" }, 1347 [MIG_RP_MSG_SHUT] = { .len = 4, .name = "SHUT" }, 1348 [MIG_RP_MSG_PONG] = { .len = 4, .name = "PONG" }, 1349 [MIG_RP_MSG_REQ_PAGES] = { .len = 12, .name = "REQ_PAGES" }, 1350 [MIG_RP_MSG_REQ_PAGES_ID] = { .len = -1, .name = "REQ_PAGES_ID" }, 1351 [MIG_RP_MSG_MAX] = { .len = -1, .name = "MAX" }, 1352 }; 1353 1354 /* 1355 * Process a request for pages received on the return path, 1356 * We're allowed to send more than requested (e.g. to round to our page size) 1357 * and we don't need to send pages that have already been sent. 1358 */ 1359 static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname, 1360 ram_addr_t start, size_t len) 1361 { 1362 long our_host_ps = getpagesize(); 1363 1364 trace_migrate_handle_rp_req_pages(rbname, start, len); 1365 1366 /* 1367 * Since we currently insist on matching page sizes, just sanity check 1368 * we're being asked for whole host pages. 1369 */ 1370 if (start & (our_host_ps-1) || 1371 (len & (our_host_ps-1))) { 1372 error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT 1373 " len: %zd", __func__, start, len); 1374 mark_source_rp_bad(ms); 1375 return; 1376 } 1377 1378 if (ram_save_queue_pages(rbname, start, len)) { 1379 mark_source_rp_bad(ms); 1380 } 1381 } 1382 1383 /* 1384 * Handles messages sent on the return path towards the source VM 1385 * 1386 */ 1387 static void *source_return_path_thread(void *opaque) 1388 { 1389 MigrationState *ms = opaque; 1390 QEMUFile *rp = ms->rp_state.from_dst_file; 1391 uint16_t header_len, header_type; 1392 uint8_t buf[512]; 1393 uint32_t tmp32, sibling_error; 1394 ram_addr_t start = 0; /* =0 to silence warning */ 1395 size_t len = 0, expected_len; 1396 int res; 1397 1398 trace_source_return_path_thread_entry(); 1399 while (!ms->rp_state.error && !qemu_file_get_error(rp) && 1400 migration_is_setup_or_active(ms->state)) { 1401 trace_source_return_path_thread_loop_top(); 1402 header_type = qemu_get_be16(rp); 1403 header_len = qemu_get_be16(rp); 1404 1405 if (header_type >= MIG_RP_MSG_MAX || 1406 header_type == MIG_RP_MSG_INVALID) { 1407 error_report("RP: Received invalid message 0x%04x length 0x%04x", 1408 header_type, header_len); 1409 mark_source_rp_bad(ms); 1410 goto out; 1411 } 1412 1413 if ((rp_cmd_args[header_type].len != -1 && 1414 header_len != rp_cmd_args[header_type].len) || 1415 header_len > sizeof(buf)) { 1416 error_report("RP: Received '%s' message (0x%04x) with" 1417 "incorrect length %d expecting %zu", 1418 rp_cmd_args[header_type].name, header_type, header_len, 1419 (size_t)rp_cmd_args[header_type].len); 1420 mark_source_rp_bad(ms); 1421 goto out; 1422 } 1423 1424 /* We know we've got a valid header by this point */ 1425 res = qemu_get_buffer(rp, buf, header_len); 1426 if (res != header_len) { 1427 error_report("RP: Failed reading data for message 0x%04x" 1428 " read %d expected %d", 1429 header_type, res, header_len); 1430 mark_source_rp_bad(ms); 1431 goto out; 1432 } 1433 1434 /* OK, we have the message and the data */ 1435 switch (header_type) { 1436 case MIG_RP_MSG_SHUT: 1437 sibling_error = ldl_be_p(buf); 1438 trace_source_return_path_thread_shut(sibling_error); 1439 if (sibling_error) { 1440 error_report("RP: Sibling indicated error %d", sibling_error); 1441 mark_source_rp_bad(ms); 1442 } 1443 /* 1444 * We'll let the main thread deal with closing the RP 1445 * we could do a shutdown(2) on it, but we're the only user 1446 * anyway, so there's nothing gained. 1447 */ 1448 goto out; 1449 1450 case MIG_RP_MSG_PONG: 1451 tmp32 = ldl_be_p(buf); 1452 trace_source_return_path_thread_pong(tmp32); 1453 break; 1454 1455 case MIG_RP_MSG_REQ_PAGES: 1456 start = ldq_be_p(buf); 1457 len = ldl_be_p(buf + 8); 1458 migrate_handle_rp_req_pages(ms, NULL, start, len); 1459 break; 1460 1461 case MIG_RP_MSG_REQ_PAGES_ID: 1462 expected_len = 12 + 1; /* header + termination */ 1463 1464 if (header_len >= expected_len) { 1465 start = ldq_be_p(buf); 1466 len = ldl_be_p(buf + 8); 1467 /* Now we expect an idstr */ 1468 tmp32 = buf[12]; /* Length of the following idstr */ 1469 buf[13 + tmp32] = '\0'; 1470 expected_len += tmp32; 1471 } 1472 if (header_len != expected_len) { 1473 error_report("RP: Req_Page_id with length %d expecting %zd", 1474 header_len, expected_len); 1475 mark_source_rp_bad(ms); 1476 goto out; 1477 } 1478 migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len); 1479 break; 1480 1481 default: 1482 break; 1483 } 1484 } 1485 if (qemu_file_get_error(rp)) { 1486 trace_source_return_path_thread_bad_end(); 1487 mark_source_rp_bad(ms); 1488 } 1489 1490 trace_source_return_path_thread_end(); 1491 out: 1492 ms->rp_state.from_dst_file = NULL; 1493 qemu_fclose(rp); 1494 return NULL; 1495 } 1496 1497 static int open_return_path_on_source(MigrationState *ms) 1498 { 1499 1500 ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file); 1501 if (!ms->rp_state.from_dst_file) { 1502 return -1; 1503 } 1504 1505 trace_open_return_path_on_source(); 1506 qemu_thread_create(&ms->rp_state.rp_thread, "return path", 1507 source_return_path_thread, ms, QEMU_THREAD_JOINABLE); 1508 1509 trace_open_return_path_on_source_continue(); 1510 1511 return 0; 1512 } 1513 1514 /* Returns 0 if the RP was ok, otherwise there was an error on the RP */ 1515 static int await_return_path_close_on_source(MigrationState *ms) 1516 { 1517 /* 1518 * If this is a normal exit then the destination will send a SHUT and the 1519 * rp_thread will exit, however if there's an error we need to cause 1520 * it to exit. 1521 */ 1522 if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) { 1523 /* 1524 * shutdown(2), if we have it, will cause it to unblock if it's stuck 1525 * waiting for the destination. 1526 */ 1527 qemu_file_shutdown(ms->rp_state.from_dst_file); 1528 mark_source_rp_bad(ms); 1529 } 1530 trace_await_return_path_close_on_source_joining(); 1531 qemu_thread_join(&ms->rp_state.rp_thread); 1532 trace_await_return_path_close_on_source_close(); 1533 return ms->rp_state.error; 1534 } 1535 1536 /* 1537 * Switch from normal iteration to postcopy 1538 * Returns non-0 on error 1539 */ 1540 static int postcopy_start(MigrationState *ms, bool *old_vm_running) 1541 { 1542 int ret; 1543 QIOChannelBuffer *bioc; 1544 QEMUFile *fb; 1545 int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 1546 bool restart_block = false; 1547 migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE, 1548 MIGRATION_STATUS_POSTCOPY_ACTIVE); 1549 1550 trace_postcopy_start(); 1551 qemu_mutex_lock_iothread(); 1552 trace_postcopy_start_set_run(); 1553 1554 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER); 1555 *old_vm_running = runstate_is_running(); 1556 global_state_store(); 1557 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); 1558 if (ret < 0) { 1559 goto fail; 1560 } 1561 1562 ret = bdrv_inactivate_all(); 1563 if (ret < 0) { 1564 goto fail; 1565 } 1566 restart_block = true; 1567 1568 /* 1569 * Cause any non-postcopiable, but iterative devices to 1570 * send out their final data. 1571 */ 1572 qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false); 1573 1574 /* 1575 * in Finish migrate and with the io-lock held everything should 1576 * be quiet, but we've potentially still got dirty pages and we 1577 * need to tell the destination to throw any pages it's already received 1578 * that are dirty 1579 */ 1580 if (ram_postcopy_send_discard_bitmap(ms)) { 1581 error_report("postcopy send discard bitmap failed"); 1582 goto fail; 1583 } 1584 1585 /* 1586 * send rest of state - note things that are doing postcopy 1587 * will notice we're in POSTCOPY_ACTIVE and not actually 1588 * wrap their state up here 1589 */ 1590 qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX); 1591 /* Ping just for debugging, helps line traces up */ 1592 qemu_savevm_send_ping(ms->to_dst_file, 2); 1593 1594 /* 1595 * While loading the device state we may trigger page transfer 1596 * requests and the fd must be free to process those, and thus 1597 * the destination must read the whole device state off the fd before 1598 * it starts processing it. Unfortunately the ad-hoc migration format 1599 * doesn't allow the destination to know the size to read without fully 1600 * parsing it through each devices load-state code (especially the open 1601 * coded devices that use get/put). 1602 * So we wrap the device state up in a package with a length at the start; 1603 * to do this we use a qemu_buf to hold the whole of the device state. 1604 */ 1605 bioc = qio_channel_buffer_new(4096); 1606 qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer"); 1607 fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc)); 1608 object_unref(OBJECT(bioc)); 1609 1610 /* 1611 * Make sure the receiver can get incoming pages before we send the rest 1612 * of the state 1613 */ 1614 qemu_savevm_send_postcopy_listen(fb); 1615 1616 qemu_savevm_state_complete_precopy(fb, false, false); 1617 qemu_savevm_send_ping(fb, 3); 1618 1619 qemu_savevm_send_postcopy_run(fb); 1620 1621 /* <><> end of stuff going into the package */ 1622 1623 /* Last point of recovery; as soon as we send the package the destination 1624 * can open devices and potentially start running. 1625 * Lets just check again we've not got any errors. 1626 */ 1627 ret = qemu_file_get_error(ms->to_dst_file); 1628 if (ret) { 1629 error_report("postcopy_start: Migration stream errored (pre package)"); 1630 goto fail_closefb; 1631 } 1632 1633 restart_block = false; 1634 1635 /* Now send that blob */ 1636 if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) { 1637 goto fail_closefb; 1638 } 1639 qemu_fclose(fb); 1640 1641 /* Send a notify to give a chance for anything that needs to happen 1642 * at the transition to postcopy and after the device state; in particular 1643 * spice needs to trigger a transition now 1644 */ 1645 ms->postcopy_after_devices = true; 1646 notifier_list_notify(&migration_state_notifiers, ms); 1647 1648 ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop; 1649 1650 qemu_mutex_unlock_iothread(); 1651 1652 /* 1653 * Although this ping is just for debug, it could potentially be 1654 * used for getting a better measurement of downtime at the source. 1655 */ 1656 qemu_savevm_send_ping(ms->to_dst_file, 4); 1657 1658 if (migrate_release_ram()) { 1659 ram_postcopy_migrated_memory_release(ms); 1660 } 1661 1662 ret = qemu_file_get_error(ms->to_dst_file); 1663 if (ret) { 1664 error_report("postcopy_start: Migration stream errored"); 1665 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 1666 MIGRATION_STATUS_FAILED); 1667 } 1668 1669 return ret; 1670 1671 fail_closefb: 1672 qemu_fclose(fb); 1673 fail: 1674 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 1675 MIGRATION_STATUS_FAILED); 1676 if (restart_block) { 1677 /* A failure happened early enough that we know the destination hasn't 1678 * accessed block devices, so we're safe to recover. 1679 */ 1680 Error *local_err = NULL; 1681 1682 bdrv_invalidate_cache_all(&local_err); 1683 if (local_err) { 1684 error_report_err(local_err); 1685 } 1686 } 1687 qemu_mutex_unlock_iothread(); 1688 return -1; 1689 } 1690 1691 /** 1692 * migration_completion: Used by migration_thread when there's not much left. 1693 * The caller 'breaks' the loop when this returns. 1694 * 1695 * @s: Current migration state 1696 * @current_active_state: The migration state we expect to be in 1697 * @*old_vm_running: Pointer to old_vm_running flag 1698 * @*start_time: Pointer to time to update 1699 */ 1700 static void migration_completion(MigrationState *s, int current_active_state, 1701 bool *old_vm_running, 1702 int64_t *start_time) 1703 { 1704 int ret; 1705 1706 if (s->state == MIGRATION_STATUS_ACTIVE) { 1707 qemu_mutex_lock_iothread(); 1708 *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 1709 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER); 1710 *old_vm_running = runstate_is_running(); 1711 ret = global_state_store(); 1712 1713 if (!ret) { 1714 bool inactivate = !migrate_colo_enabled(); 1715 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); 1716 if (ret >= 0) { 1717 qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX); 1718 ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false, 1719 inactivate); 1720 } 1721 if (inactivate && ret >= 0) { 1722 s->block_inactive = true; 1723 } 1724 } 1725 qemu_mutex_unlock_iothread(); 1726 1727 if (ret < 0) { 1728 goto fail; 1729 } 1730 } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { 1731 trace_migration_completion_postcopy_end(); 1732 1733 qemu_savevm_state_complete_postcopy(s->to_dst_file); 1734 trace_migration_completion_postcopy_end_after_complete(); 1735 } 1736 1737 /* 1738 * If rp was opened we must clean up the thread before 1739 * cleaning everything else up (since if there are no failures 1740 * it will wait for the destination to send it's status in 1741 * a SHUT command). 1742 */ 1743 if (s->rp_state.from_dst_file) { 1744 int rp_error; 1745 trace_migration_return_path_end_before(); 1746 rp_error = await_return_path_close_on_source(s); 1747 trace_migration_return_path_end_after(rp_error); 1748 if (rp_error) { 1749 goto fail_invalidate; 1750 } 1751 } 1752 1753 if (qemu_file_get_error(s->to_dst_file)) { 1754 trace_migration_completion_file_err(); 1755 goto fail_invalidate; 1756 } 1757 1758 if (!migrate_colo_enabled()) { 1759 migrate_set_state(&s->state, current_active_state, 1760 MIGRATION_STATUS_COMPLETED); 1761 } 1762 1763 return; 1764 1765 fail_invalidate: 1766 /* If not doing postcopy, vm_start() will be called: let's regain 1767 * control on images. 1768 */ 1769 if (s->state == MIGRATION_STATUS_ACTIVE) { 1770 Error *local_err = NULL; 1771 1772 qemu_mutex_lock_iothread(); 1773 bdrv_invalidate_cache_all(&local_err); 1774 if (local_err) { 1775 error_report_err(local_err); 1776 } else { 1777 s->block_inactive = false; 1778 } 1779 qemu_mutex_unlock_iothread(); 1780 } 1781 1782 fail: 1783 migrate_set_state(&s->state, current_active_state, 1784 MIGRATION_STATUS_FAILED); 1785 } 1786 1787 bool migrate_colo_enabled(void) 1788 { 1789 MigrationState *s = migrate_get_current(); 1790 return s->enabled_capabilities[MIGRATION_CAPABILITY_X_COLO]; 1791 } 1792 1793 /* 1794 * Master migration thread on the source VM. 1795 * It drives the migration and pumps the data down the outgoing channel. 1796 */ 1797 static void *migration_thread(void *opaque) 1798 { 1799 MigrationState *s = opaque; 1800 /* Used by the bandwidth calcs, updated later */ 1801 int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 1802 int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST); 1803 int64_t initial_bytes = 0; 1804 /* 1805 * The final stage happens when the remaining data is smaller than 1806 * this threshold; it's calculated from the requested downtime and 1807 * measured bandwidth 1808 */ 1809 int64_t threshold_size = 0; 1810 int64_t start_time = initial_time; 1811 int64_t end_time; 1812 bool old_vm_running = false; 1813 bool entered_postcopy = false; 1814 /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */ 1815 enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE; 1816 bool enable_colo = migrate_colo_enabled(); 1817 1818 rcu_register_thread(); 1819 1820 qemu_savevm_state_header(s->to_dst_file); 1821 1822 /* 1823 * If we opened the return path, we need to make sure dst has it 1824 * opened as well. 1825 */ 1826 if (s->rp_state.from_dst_file) { 1827 /* Now tell the dest that it should open its end so it can reply */ 1828 qemu_savevm_send_open_return_path(s->to_dst_file); 1829 1830 /* And do a ping that will make stuff easier to debug */ 1831 qemu_savevm_send_ping(s->to_dst_file, 1); 1832 } 1833 1834 if (migrate_postcopy_ram()) { 1835 /* 1836 * Tell the destination that we *might* want to do postcopy later; 1837 * if the other end can't do postcopy it should fail now, nice and 1838 * early. 1839 */ 1840 qemu_savevm_send_postcopy_advise(s->to_dst_file); 1841 } 1842 1843 qemu_savevm_state_begin(s->to_dst_file); 1844 1845 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; 1846 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, 1847 MIGRATION_STATUS_ACTIVE); 1848 1849 trace_migration_thread_setup_complete(); 1850 1851 while (s->state == MIGRATION_STATUS_ACTIVE || 1852 s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { 1853 int64_t current_time; 1854 uint64_t pending_size; 1855 1856 if (!qemu_file_rate_limit(s->to_dst_file)) { 1857 uint64_t pend_post, pend_nonpost; 1858 1859 qemu_savevm_state_pending(s->to_dst_file, threshold_size, 1860 &pend_nonpost, &pend_post); 1861 pending_size = pend_nonpost + pend_post; 1862 trace_migrate_pending(pending_size, threshold_size, 1863 pend_post, pend_nonpost); 1864 if (pending_size && pending_size >= threshold_size) { 1865 /* Still a significant amount to transfer */ 1866 1867 if (migrate_postcopy_ram() && 1868 s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE && 1869 pend_nonpost <= threshold_size && 1870 atomic_read(&s->start_postcopy)) { 1871 1872 if (!postcopy_start(s, &old_vm_running)) { 1873 current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE; 1874 entered_postcopy = true; 1875 } 1876 1877 continue; 1878 } 1879 /* Just another iteration step */ 1880 qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy); 1881 } else { 1882 trace_migration_thread_low_pending(pending_size); 1883 migration_completion(s, current_active_state, 1884 &old_vm_running, &start_time); 1885 break; 1886 } 1887 } 1888 1889 if (qemu_file_get_error(s->to_dst_file)) { 1890 migrate_set_state(&s->state, current_active_state, 1891 MIGRATION_STATUS_FAILED); 1892 trace_migration_thread_file_err(); 1893 break; 1894 } 1895 current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 1896 if (current_time >= initial_time + BUFFER_DELAY) { 1897 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) - 1898 initial_bytes; 1899 uint64_t time_spent = current_time - initial_time; 1900 double bandwidth = (double)transferred_bytes / time_spent; 1901 threshold_size = bandwidth * s->parameters.downtime_limit; 1902 1903 s->mbps = (((double) transferred_bytes * 8.0) / 1904 ((double) time_spent / 1000.0)) / 1000.0 / 1000.0; 1905 1906 trace_migrate_transferred(transferred_bytes, time_spent, 1907 bandwidth, threshold_size); 1908 /* if we haven't sent anything, we don't want to recalculate 1909 10000 is a small enough number for our purposes */ 1910 if (ram_counters.dirty_pages_rate && transferred_bytes > 10000) { 1911 s->expected_downtime = ram_counters.dirty_pages_rate * 1912 qemu_target_page_size() / bandwidth; 1913 } 1914 1915 qemu_file_reset_rate_limit(s->to_dst_file); 1916 initial_time = current_time; 1917 initial_bytes = qemu_ftell(s->to_dst_file); 1918 } 1919 if (qemu_file_rate_limit(s->to_dst_file)) { 1920 /* usleep expects microseconds */ 1921 g_usleep((initial_time + BUFFER_DELAY - current_time)*1000); 1922 } 1923 } 1924 1925 trace_migration_thread_after_loop(); 1926 /* If we enabled cpu throttling for auto-converge, turn it off. */ 1927 cpu_throttle_stop(); 1928 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 1929 1930 qemu_mutex_lock_iothread(); 1931 /* 1932 * The resource has been allocated by migration will be reused in COLO 1933 * process, so don't release them. 1934 */ 1935 if (!enable_colo) { 1936 qemu_savevm_state_cleanup(); 1937 } 1938 if (s->state == MIGRATION_STATUS_COMPLETED) { 1939 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file); 1940 s->total_time = end_time - s->total_time; 1941 if (!entered_postcopy) { 1942 s->downtime = end_time - start_time; 1943 } 1944 if (s->total_time) { 1945 s->mbps = (((double) transferred_bytes * 8.0) / 1946 ((double) s->total_time)) / 1000; 1947 } 1948 runstate_set(RUN_STATE_POSTMIGRATE); 1949 } else { 1950 if (s->state == MIGRATION_STATUS_ACTIVE && enable_colo) { 1951 migrate_start_colo_process(s); 1952 qemu_savevm_state_cleanup(); 1953 /* 1954 * Fixme: we will run VM in COLO no matter its old running state. 1955 * After exited COLO, we will keep running. 1956 */ 1957 old_vm_running = true; 1958 } 1959 if (old_vm_running && !entered_postcopy) { 1960 vm_start(); 1961 } else { 1962 if (runstate_check(RUN_STATE_FINISH_MIGRATE)) { 1963 runstate_set(RUN_STATE_POSTMIGRATE); 1964 } 1965 } 1966 } 1967 qemu_bh_schedule(s->cleanup_bh); 1968 qemu_mutex_unlock_iothread(); 1969 1970 rcu_unregister_thread(); 1971 return NULL; 1972 } 1973 1974 void migrate_fd_connect(MigrationState *s) 1975 { 1976 s->expected_downtime = s->parameters.downtime_limit; 1977 s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s); 1978 1979 qemu_file_set_blocking(s->to_dst_file, true); 1980 qemu_file_set_rate_limit(s->to_dst_file, 1981 s->parameters.max_bandwidth / XFER_LIMIT_RATIO); 1982 1983 /* Notify before starting migration thread */ 1984 notifier_list_notify(&migration_state_notifiers, s); 1985 1986 /* 1987 * Open the return path. For postcopy, it is used exclusively. For 1988 * precopy, only if user specified "return-path" capability would 1989 * QEMU uses the return path. 1990 */ 1991 if (migrate_postcopy_ram() || migrate_use_return_path()) { 1992 if (open_return_path_on_source(s)) { 1993 error_report("Unable to open return-path for postcopy"); 1994 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, 1995 MIGRATION_STATUS_FAILED); 1996 migrate_fd_cleanup(s); 1997 return; 1998 } 1999 } 2000 2001 migrate_compress_threads_create(); 2002 qemu_thread_create(&s->thread, "live_migration", migration_thread, s, 2003 QEMU_THREAD_JOINABLE); 2004 s->migration_thread_running = true; 2005 } 2006 2007 void migration_global_dump(Monitor *mon) 2008 { 2009 MigrationState *ms = migrate_get_current(); 2010 2011 monitor_printf(mon, "globals: store-global-state=%d, only_migratable=%d, " 2012 "send-configuration=%d, send-section-footer=%d\n", 2013 ms->store_global_state, ms->only_migratable, 2014 ms->send_configuration, ms->send_section_footer); 2015 } 2016 2017 static Property migration_properties[] = { 2018 DEFINE_PROP_BOOL("store-global-state", MigrationState, 2019 store_global_state, true), 2020 DEFINE_PROP_BOOL("only-migratable", MigrationState, only_migratable, false), 2021 DEFINE_PROP_BOOL("send-configuration", MigrationState, 2022 send_configuration, true), 2023 DEFINE_PROP_BOOL("send-section-footer", MigrationState, 2024 send_section_footer, true), 2025 DEFINE_PROP_END_OF_LIST(), 2026 }; 2027 2028 static void migration_class_init(ObjectClass *klass, void *data) 2029 { 2030 DeviceClass *dc = DEVICE_CLASS(klass); 2031 2032 dc->user_creatable = false; 2033 dc->props = migration_properties; 2034 } 2035 2036 static void migration_instance_init(Object *obj) 2037 { 2038 MigrationState *ms = MIGRATION_OBJ(obj); 2039 2040 ms->state = MIGRATION_STATUS_NONE; 2041 ms->xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE; 2042 ms->mbps = -1; 2043 ms->parameters = (MigrationParameters) { 2044 .compress_level = DEFAULT_MIGRATE_COMPRESS_LEVEL, 2045 .compress_threads = DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT, 2046 .decompress_threads = DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT, 2047 .cpu_throttle_initial = DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL, 2048 .cpu_throttle_increment = DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT, 2049 .max_bandwidth = MAX_THROTTLE, 2050 .downtime_limit = DEFAULT_MIGRATE_SET_DOWNTIME, 2051 .x_checkpoint_delay = DEFAULT_MIGRATE_X_CHECKPOINT_DELAY, 2052 }; 2053 ms->parameters.tls_creds = g_strdup(""); 2054 ms->parameters.tls_hostname = g_strdup(""); 2055 } 2056 2057 static const TypeInfo migration_type = { 2058 .name = TYPE_MIGRATION, 2059 /* 2060 * NOTE: "migration" itself is not really a device. We used 2061 * TYPE_DEVICE here only to leverage some existing QDev features 2062 * like "-global" properties, and HW_COMPAT_* fields (which are 2063 * finally applied as global properties as well). If one day the 2064 * global property feature can be migrated from QDev to QObject in 2065 * general, then we can switch to QObject as well. 2066 */ 2067 .parent = TYPE_DEVICE, 2068 .class_init = migration_class_init, 2069 .class_size = sizeof(MigrationClass), 2070 .instance_size = sizeof(MigrationState), 2071 .instance_init = migration_instance_init, 2072 }; 2073 2074 static void register_migration_types(void) 2075 { 2076 type_register_static(&migration_type); 2077 } 2078 2079 type_init(register_migration_types); 2080