1 /* 2 * QEMU System Emulator 3 * 4 * Copyright (c) 2003-2008 Fabrice Bellard 5 * Copyright (c) 2011-2015 Red Hat Inc 6 * 7 * Authors: 8 * Juan Quintela <quintela@redhat.com> 9 * 10 * Permission is hereby granted, free of charge, to any person obtaining a copy 11 * of this software and associated documentation files (the "Software"), to deal 12 * in the Software without restriction, including without limitation the rights 13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 14 * copies of the Software, and to permit persons to whom the Software is 15 * furnished to do so, subject to the following conditions: 16 * 17 * The above copyright notice and this permission notice shall be included in 18 * all copies or substantial portions of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 26 * THE SOFTWARE. 27 */ 28 #include "qemu/osdep.h" 29 #include <zlib.h> 30 #include "qapi-event.h" 31 #include "qemu/bitops.h" 32 #include "qemu/bitmap.h" 33 #include "qemu/timer.h" 34 #include "qemu/main-loop.h" 35 #include "migration/migration.h" 36 #include "migration/postcopy-ram.h" 37 #include "exec/address-spaces.h" 38 #include "migration/page_cache.h" 39 #include "qemu/error-report.h" 40 #include "trace.h" 41 #include "exec/ram_addr.h" 42 #include "qemu/rcu_queue.h" 43 44 #ifdef DEBUG_MIGRATION_RAM 45 #define DPRINTF(fmt, ...) \ 46 do { fprintf(stdout, "migration_ram: " fmt, ## __VA_ARGS__); } while (0) 47 #else 48 #define DPRINTF(fmt, ...) \ 49 do { } while (0) 50 #endif 51 52 static int dirty_rate_high_cnt; 53 54 static uint64_t bitmap_sync_count; 55 56 /***********************************************************/ 57 /* ram save/restore */ 58 59 #define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */ 60 #define RAM_SAVE_FLAG_COMPRESS 0x02 61 #define RAM_SAVE_FLAG_MEM_SIZE 0x04 62 #define RAM_SAVE_FLAG_PAGE 0x08 63 #define RAM_SAVE_FLAG_EOS 0x10 64 #define RAM_SAVE_FLAG_CONTINUE 0x20 65 #define RAM_SAVE_FLAG_XBZRLE 0x40 66 /* 0x80 is reserved in migration.h start with 0x100 next */ 67 #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100 68 69 static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE]; 70 71 static inline bool is_zero_range(uint8_t *p, uint64_t size) 72 { 73 return buffer_find_nonzero_offset(p, size) == size; 74 } 75 76 /* struct contains XBZRLE cache and a static page 77 used by the compression */ 78 static struct { 79 /* buffer used for XBZRLE encoding */ 80 uint8_t *encoded_buf; 81 /* buffer for storing page content */ 82 uint8_t *current_buf; 83 /* Cache for XBZRLE, Protected by lock. */ 84 PageCache *cache; 85 QemuMutex lock; 86 } XBZRLE; 87 88 /* buffer used for XBZRLE decoding */ 89 static uint8_t *xbzrle_decoded_buf; 90 91 static void XBZRLE_cache_lock(void) 92 { 93 if (migrate_use_xbzrle()) 94 qemu_mutex_lock(&XBZRLE.lock); 95 } 96 97 static void XBZRLE_cache_unlock(void) 98 { 99 if (migrate_use_xbzrle()) 100 qemu_mutex_unlock(&XBZRLE.lock); 101 } 102 103 /* 104 * called from qmp_migrate_set_cache_size in main thread, possibly while 105 * a migration is in progress. 106 * A running migration maybe using the cache and might finish during this 107 * call, hence changes to the cache are protected by XBZRLE.lock(). 108 */ 109 int64_t xbzrle_cache_resize(int64_t new_size) 110 { 111 PageCache *new_cache; 112 int64_t ret; 113 114 if (new_size < TARGET_PAGE_SIZE) { 115 return -1; 116 } 117 118 XBZRLE_cache_lock(); 119 120 if (XBZRLE.cache != NULL) { 121 if (pow2floor(new_size) == migrate_xbzrle_cache_size()) { 122 goto out_new_size; 123 } 124 new_cache = cache_init(new_size / TARGET_PAGE_SIZE, 125 TARGET_PAGE_SIZE); 126 if (!new_cache) { 127 error_report("Error creating cache"); 128 ret = -1; 129 goto out; 130 } 131 132 cache_fini(XBZRLE.cache); 133 XBZRLE.cache = new_cache; 134 } 135 136 out_new_size: 137 ret = pow2floor(new_size); 138 out: 139 XBZRLE_cache_unlock(); 140 return ret; 141 } 142 143 /* accounting for migration statistics */ 144 typedef struct AccountingInfo { 145 uint64_t dup_pages; 146 uint64_t skipped_pages; 147 uint64_t norm_pages; 148 uint64_t iterations; 149 uint64_t xbzrle_bytes; 150 uint64_t xbzrle_pages; 151 uint64_t xbzrle_cache_miss; 152 double xbzrle_cache_miss_rate; 153 uint64_t xbzrle_overflows; 154 } AccountingInfo; 155 156 static AccountingInfo acct_info; 157 158 static void acct_clear(void) 159 { 160 memset(&acct_info, 0, sizeof(acct_info)); 161 } 162 163 uint64_t dup_mig_bytes_transferred(void) 164 { 165 return acct_info.dup_pages * TARGET_PAGE_SIZE; 166 } 167 168 uint64_t dup_mig_pages_transferred(void) 169 { 170 return acct_info.dup_pages; 171 } 172 173 uint64_t skipped_mig_bytes_transferred(void) 174 { 175 return acct_info.skipped_pages * TARGET_PAGE_SIZE; 176 } 177 178 uint64_t skipped_mig_pages_transferred(void) 179 { 180 return acct_info.skipped_pages; 181 } 182 183 uint64_t norm_mig_bytes_transferred(void) 184 { 185 return acct_info.norm_pages * TARGET_PAGE_SIZE; 186 } 187 188 uint64_t norm_mig_pages_transferred(void) 189 { 190 return acct_info.norm_pages; 191 } 192 193 uint64_t xbzrle_mig_bytes_transferred(void) 194 { 195 return acct_info.xbzrle_bytes; 196 } 197 198 uint64_t xbzrle_mig_pages_transferred(void) 199 { 200 return acct_info.xbzrle_pages; 201 } 202 203 uint64_t xbzrle_mig_pages_cache_miss(void) 204 { 205 return acct_info.xbzrle_cache_miss; 206 } 207 208 double xbzrle_mig_cache_miss_rate(void) 209 { 210 return acct_info.xbzrle_cache_miss_rate; 211 } 212 213 uint64_t xbzrle_mig_pages_overflow(void) 214 { 215 return acct_info.xbzrle_overflows; 216 } 217 218 /* This is the last block that we have visited serching for dirty pages 219 */ 220 static RAMBlock *last_seen_block; 221 /* This is the last block from where we have sent data */ 222 static RAMBlock *last_sent_block; 223 static ram_addr_t last_offset; 224 static QemuMutex migration_bitmap_mutex; 225 static uint64_t migration_dirty_pages; 226 static uint32_t last_version; 227 static bool ram_bulk_stage; 228 229 /* used by the search for pages to send */ 230 struct PageSearchStatus { 231 /* Current block being searched */ 232 RAMBlock *block; 233 /* Current offset to search from */ 234 ram_addr_t offset; 235 /* Set once we wrap around */ 236 bool complete_round; 237 }; 238 typedef struct PageSearchStatus PageSearchStatus; 239 240 static struct BitmapRcu { 241 struct rcu_head rcu; 242 /* Main migration bitmap */ 243 unsigned long *bmap; 244 /* bitmap of pages that haven't been sent even once 245 * only maintained and used in postcopy at the moment 246 * where it's used to send the dirtymap at the start 247 * of the postcopy phase 248 */ 249 unsigned long *unsentmap; 250 } *migration_bitmap_rcu; 251 252 struct CompressParam { 253 bool start; 254 bool done; 255 QEMUFile *file; 256 QemuMutex mutex; 257 QemuCond cond; 258 RAMBlock *block; 259 ram_addr_t offset; 260 }; 261 typedef struct CompressParam CompressParam; 262 263 struct DecompressParam { 264 bool start; 265 QemuMutex mutex; 266 QemuCond cond; 267 void *des; 268 uint8_t *compbuf; 269 int len; 270 }; 271 typedef struct DecompressParam DecompressParam; 272 273 static CompressParam *comp_param; 274 static QemuThread *compress_threads; 275 /* comp_done_cond is used to wake up the migration thread when 276 * one of the compression threads has finished the compression. 277 * comp_done_lock is used to co-work with comp_done_cond. 278 */ 279 static QemuMutex *comp_done_lock; 280 static QemuCond *comp_done_cond; 281 /* The empty QEMUFileOps will be used by file in CompressParam */ 282 static const QEMUFileOps empty_ops = { }; 283 284 static bool compression_switch; 285 static bool quit_comp_thread; 286 static bool quit_decomp_thread; 287 static DecompressParam *decomp_param; 288 static QemuThread *decompress_threads; 289 290 static int do_compress_ram_page(CompressParam *param); 291 292 static void *do_data_compress(void *opaque) 293 { 294 CompressParam *param = opaque; 295 296 while (!quit_comp_thread) { 297 qemu_mutex_lock(¶m->mutex); 298 /* Re-check the quit_comp_thread in case of 299 * terminate_compression_threads is called just before 300 * qemu_mutex_lock(¶m->mutex) and after 301 * while(!quit_comp_thread), re-check it here can make 302 * sure the compression thread terminate as expected. 303 */ 304 while (!param->start && !quit_comp_thread) { 305 qemu_cond_wait(¶m->cond, ¶m->mutex); 306 } 307 if (!quit_comp_thread) { 308 do_compress_ram_page(param); 309 } 310 param->start = false; 311 qemu_mutex_unlock(¶m->mutex); 312 313 qemu_mutex_lock(comp_done_lock); 314 param->done = true; 315 qemu_cond_signal(comp_done_cond); 316 qemu_mutex_unlock(comp_done_lock); 317 } 318 319 return NULL; 320 } 321 322 static inline void terminate_compression_threads(void) 323 { 324 int idx, thread_count; 325 326 thread_count = migrate_compress_threads(); 327 quit_comp_thread = true; 328 for (idx = 0; idx < thread_count; idx++) { 329 qemu_mutex_lock(&comp_param[idx].mutex); 330 qemu_cond_signal(&comp_param[idx].cond); 331 qemu_mutex_unlock(&comp_param[idx].mutex); 332 } 333 } 334 335 void migrate_compress_threads_join(void) 336 { 337 int i, thread_count; 338 339 if (!migrate_use_compression()) { 340 return; 341 } 342 terminate_compression_threads(); 343 thread_count = migrate_compress_threads(); 344 for (i = 0; i < thread_count; i++) { 345 qemu_thread_join(compress_threads + i); 346 qemu_fclose(comp_param[i].file); 347 qemu_mutex_destroy(&comp_param[i].mutex); 348 qemu_cond_destroy(&comp_param[i].cond); 349 } 350 qemu_mutex_destroy(comp_done_lock); 351 qemu_cond_destroy(comp_done_cond); 352 g_free(compress_threads); 353 g_free(comp_param); 354 g_free(comp_done_cond); 355 g_free(comp_done_lock); 356 compress_threads = NULL; 357 comp_param = NULL; 358 comp_done_cond = NULL; 359 comp_done_lock = NULL; 360 } 361 362 void migrate_compress_threads_create(void) 363 { 364 int i, thread_count; 365 366 if (!migrate_use_compression()) { 367 return; 368 } 369 quit_comp_thread = false; 370 compression_switch = true; 371 thread_count = migrate_compress_threads(); 372 compress_threads = g_new0(QemuThread, thread_count); 373 comp_param = g_new0(CompressParam, thread_count); 374 comp_done_cond = g_new0(QemuCond, 1); 375 comp_done_lock = g_new0(QemuMutex, 1); 376 qemu_cond_init(comp_done_cond); 377 qemu_mutex_init(comp_done_lock); 378 for (i = 0; i < thread_count; i++) { 379 /* com_param[i].file is just used as a dummy buffer to save data, set 380 * it's ops to empty. 381 */ 382 comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops); 383 comp_param[i].done = true; 384 qemu_mutex_init(&comp_param[i].mutex); 385 qemu_cond_init(&comp_param[i].cond); 386 qemu_thread_create(compress_threads + i, "compress", 387 do_data_compress, comp_param + i, 388 QEMU_THREAD_JOINABLE); 389 } 390 } 391 392 /** 393 * save_page_header: Write page header to wire 394 * 395 * If this is the 1st block, it also writes the block identification 396 * 397 * Returns: Number of bytes written 398 * 399 * @f: QEMUFile where to send the data 400 * @block: block that contains the page we want to send 401 * @offset: offset inside the block for the page 402 * in the lower bits, it contains flags 403 */ 404 static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset) 405 { 406 size_t size, len; 407 408 qemu_put_be64(f, offset); 409 size = 8; 410 411 if (!(offset & RAM_SAVE_FLAG_CONTINUE)) { 412 len = strlen(block->idstr); 413 qemu_put_byte(f, len); 414 qemu_put_buffer(f, (uint8_t *)block->idstr, len); 415 size += 1 + len; 416 } 417 return size; 418 } 419 420 /* Reduce amount of guest cpu execution to hopefully slow down memory writes. 421 * If guest dirty memory rate is reduced below the rate at which we can 422 * transfer pages to the destination then we should be able to complete 423 * migration. Some workloads dirty memory way too fast and will not effectively 424 * converge, even with auto-converge. 425 */ 426 static void mig_throttle_guest_down(void) 427 { 428 MigrationState *s = migrate_get_current(); 429 uint64_t pct_initial = 430 s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL]; 431 uint64_t pct_icrement = 432 s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT]; 433 434 /* We have not started throttling yet. Let's start it. */ 435 if (!cpu_throttle_active()) { 436 cpu_throttle_set(pct_initial); 437 } else { 438 /* Throttling already on, just increase the rate */ 439 cpu_throttle_set(cpu_throttle_get_percentage() + pct_icrement); 440 } 441 } 442 443 /* Update the xbzrle cache to reflect a page that's been sent as all 0. 444 * The important thing is that a stale (not-yet-0'd) page be replaced 445 * by the new data. 446 * As a bonus, if the page wasn't in the cache it gets added so that 447 * when a small write is made into the 0'd page it gets XBZRLE sent 448 */ 449 static void xbzrle_cache_zero_page(ram_addr_t current_addr) 450 { 451 if (ram_bulk_stage || !migrate_use_xbzrle()) { 452 return; 453 } 454 455 /* We don't care if this fails to allocate a new cache page 456 * as long as it updated an old one */ 457 cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE, 458 bitmap_sync_count); 459 } 460 461 #define ENCODING_FLAG_XBZRLE 0x1 462 463 /** 464 * save_xbzrle_page: compress and send current page 465 * 466 * Returns: 1 means that we wrote the page 467 * 0 means that page is identical to the one already sent 468 * -1 means that xbzrle would be longer than normal 469 * 470 * @f: QEMUFile where to send the data 471 * @current_data: 472 * @current_addr: 473 * @block: block that contains the page we want to send 474 * @offset: offset inside the block for the page 475 * @last_stage: if we are at the completion stage 476 * @bytes_transferred: increase it with the number of transferred bytes 477 */ 478 static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data, 479 ram_addr_t current_addr, RAMBlock *block, 480 ram_addr_t offset, bool last_stage, 481 uint64_t *bytes_transferred) 482 { 483 int encoded_len = 0, bytes_xbzrle; 484 uint8_t *prev_cached_page; 485 486 if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) { 487 acct_info.xbzrle_cache_miss++; 488 if (!last_stage) { 489 if (cache_insert(XBZRLE.cache, current_addr, *current_data, 490 bitmap_sync_count) == -1) { 491 return -1; 492 } else { 493 /* update *current_data when the page has been 494 inserted into cache */ 495 *current_data = get_cached_data(XBZRLE.cache, current_addr); 496 } 497 } 498 return -1; 499 } 500 501 prev_cached_page = get_cached_data(XBZRLE.cache, current_addr); 502 503 /* save current buffer into memory */ 504 memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE); 505 506 /* XBZRLE encoding (if there is no overflow) */ 507 encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf, 508 TARGET_PAGE_SIZE, XBZRLE.encoded_buf, 509 TARGET_PAGE_SIZE); 510 if (encoded_len == 0) { 511 DPRINTF("Skipping unmodified page\n"); 512 return 0; 513 } else if (encoded_len == -1) { 514 DPRINTF("Overflow\n"); 515 acct_info.xbzrle_overflows++; 516 /* update data in the cache */ 517 if (!last_stage) { 518 memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE); 519 *current_data = prev_cached_page; 520 } 521 return -1; 522 } 523 524 /* we need to update the data in the cache, in order to get the same data */ 525 if (!last_stage) { 526 memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE); 527 } 528 529 /* Send XBZRLE based compressed page */ 530 bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE); 531 qemu_put_byte(f, ENCODING_FLAG_XBZRLE); 532 qemu_put_be16(f, encoded_len); 533 qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len); 534 bytes_xbzrle += encoded_len + 1 + 2; 535 acct_info.xbzrle_pages++; 536 acct_info.xbzrle_bytes += bytes_xbzrle; 537 *bytes_transferred += bytes_xbzrle; 538 539 return 1; 540 } 541 542 /* Called with rcu_read_lock() to protect migration_bitmap 543 * rb: The RAMBlock to search for dirty pages in 544 * start: Start address (typically so we can continue from previous page) 545 * ram_addr_abs: Pointer into which to store the address of the dirty page 546 * within the global ram_addr space 547 * 548 * Returns: byte offset within memory region of the start of a dirty page 549 */ 550 static inline 551 ram_addr_t migration_bitmap_find_dirty(RAMBlock *rb, 552 ram_addr_t start, 553 ram_addr_t *ram_addr_abs) 554 { 555 unsigned long base = rb->offset >> TARGET_PAGE_BITS; 556 unsigned long nr = base + (start >> TARGET_PAGE_BITS); 557 uint64_t rb_size = rb->used_length; 558 unsigned long size = base + (rb_size >> TARGET_PAGE_BITS); 559 unsigned long *bitmap; 560 561 unsigned long next; 562 563 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; 564 if (ram_bulk_stage && nr > base) { 565 next = nr + 1; 566 } else { 567 next = find_next_bit(bitmap, size, nr); 568 } 569 570 *ram_addr_abs = next << TARGET_PAGE_BITS; 571 return (next - base) << TARGET_PAGE_BITS; 572 } 573 574 static inline bool migration_bitmap_clear_dirty(ram_addr_t addr) 575 { 576 bool ret; 577 int nr = addr >> TARGET_PAGE_BITS; 578 unsigned long *bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; 579 580 ret = test_and_clear_bit(nr, bitmap); 581 582 if (ret) { 583 migration_dirty_pages--; 584 } 585 return ret; 586 } 587 588 static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length) 589 { 590 unsigned long *bitmap; 591 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; 592 migration_dirty_pages += 593 cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length); 594 } 595 596 /* Fix me: there are too many global variables used in migration process. */ 597 static int64_t start_time; 598 static int64_t bytes_xfer_prev; 599 static int64_t num_dirty_pages_period; 600 static uint64_t xbzrle_cache_miss_prev; 601 static uint64_t iterations_prev; 602 603 static void migration_bitmap_sync_init(void) 604 { 605 start_time = 0; 606 bytes_xfer_prev = 0; 607 num_dirty_pages_period = 0; 608 xbzrle_cache_miss_prev = 0; 609 iterations_prev = 0; 610 } 611 612 static void migration_bitmap_sync(void) 613 { 614 RAMBlock *block; 615 uint64_t num_dirty_pages_init = migration_dirty_pages; 616 MigrationState *s = migrate_get_current(); 617 int64_t end_time; 618 int64_t bytes_xfer_now; 619 620 bitmap_sync_count++; 621 622 if (!bytes_xfer_prev) { 623 bytes_xfer_prev = ram_bytes_transferred(); 624 } 625 626 if (!start_time) { 627 start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 628 } 629 630 trace_migration_bitmap_sync_start(); 631 address_space_sync_dirty_bitmap(&address_space_memory); 632 633 qemu_mutex_lock(&migration_bitmap_mutex); 634 rcu_read_lock(); 635 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 636 migration_bitmap_sync_range(block->offset, block->used_length); 637 } 638 rcu_read_unlock(); 639 qemu_mutex_unlock(&migration_bitmap_mutex); 640 641 trace_migration_bitmap_sync_end(migration_dirty_pages 642 - num_dirty_pages_init); 643 num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init; 644 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 645 646 /* more than 1 second = 1000 millisecons */ 647 if (end_time > start_time + 1000) { 648 if (migrate_auto_converge()) { 649 /* The following detection logic can be refined later. For now: 650 Check to see if the dirtied bytes is 50% more than the approx. 651 amount of bytes that just got transferred since the last time we 652 were in this routine. If that happens twice, start or increase 653 throttling */ 654 bytes_xfer_now = ram_bytes_transferred(); 655 656 if (s->dirty_pages_rate && 657 (num_dirty_pages_period * TARGET_PAGE_SIZE > 658 (bytes_xfer_now - bytes_xfer_prev)/2) && 659 (dirty_rate_high_cnt++ >= 2)) { 660 trace_migration_throttle(); 661 dirty_rate_high_cnt = 0; 662 mig_throttle_guest_down(); 663 } 664 bytes_xfer_prev = bytes_xfer_now; 665 } 666 667 if (migrate_use_xbzrle()) { 668 if (iterations_prev != acct_info.iterations) { 669 acct_info.xbzrle_cache_miss_rate = 670 (double)(acct_info.xbzrle_cache_miss - 671 xbzrle_cache_miss_prev) / 672 (acct_info.iterations - iterations_prev); 673 } 674 iterations_prev = acct_info.iterations; 675 xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss; 676 } 677 s->dirty_pages_rate = num_dirty_pages_period * 1000 678 / (end_time - start_time); 679 s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE; 680 start_time = end_time; 681 num_dirty_pages_period = 0; 682 } 683 s->dirty_sync_count = bitmap_sync_count; 684 if (migrate_use_events()) { 685 qapi_event_send_migration_pass(bitmap_sync_count, NULL); 686 } 687 } 688 689 /** 690 * save_zero_page: Send the zero page to the stream 691 * 692 * Returns: Number of pages written. 693 * 694 * @f: QEMUFile where to send the data 695 * @block: block that contains the page we want to send 696 * @offset: offset inside the block for the page 697 * @p: pointer to the page 698 * @bytes_transferred: increase it with the number of transferred bytes 699 */ 700 static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset, 701 uint8_t *p, uint64_t *bytes_transferred) 702 { 703 int pages = -1; 704 705 if (is_zero_range(p, TARGET_PAGE_SIZE)) { 706 acct_info.dup_pages++; 707 *bytes_transferred += save_page_header(f, block, 708 offset | RAM_SAVE_FLAG_COMPRESS); 709 qemu_put_byte(f, 0); 710 *bytes_transferred += 1; 711 pages = 1; 712 } 713 714 return pages; 715 } 716 717 /** 718 * ram_save_page: Send the given page to the stream 719 * 720 * Returns: Number of pages written. 721 * < 0 - error 722 * >=0 - Number of pages written - this might legally be 0 723 * if xbzrle noticed the page was the same. 724 * 725 * @f: QEMUFile where to send the data 726 * @block: block that contains the page we want to send 727 * @offset: offset inside the block for the page 728 * @last_stage: if we are at the completion stage 729 * @bytes_transferred: increase it with the number of transferred bytes 730 */ 731 static int ram_save_page(QEMUFile *f, PageSearchStatus *pss, 732 bool last_stage, uint64_t *bytes_transferred) 733 { 734 int pages = -1; 735 uint64_t bytes_xmit; 736 ram_addr_t current_addr; 737 uint8_t *p; 738 int ret; 739 bool send_async = true; 740 RAMBlock *block = pss->block; 741 ram_addr_t offset = pss->offset; 742 743 p = block->host + offset; 744 745 /* In doubt sent page as normal */ 746 bytes_xmit = 0; 747 ret = ram_control_save_page(f, block->offset, 748 offset, TARGET_PAGE_SIZE, &bytes_xmit); 749 if (bytes_xmit) { 750 *bytes_transferred += bytes_xmit; 751 pages = 1; 752 } 753 754 XBZRLE_cache_lock(); 755 756 current_addr = block->offset + offset; 757 758 if (block == last_sent_block) { 759 offset |= RAM_SAVE_FLAG_CONTINUE; 760 } 761 if (ret != RAM_SAVE_CONTROL_NOT_SUPP) { 762 if (ret != RAM_SAVE_CONTROL_DELAYED) { 763 if (bytes_xmit > 0) { 764 acct_info.norm_pages++; 765 } else if (bytes_xmit == 0) { 766 acct_info.dup_pages++; 767 } 768 } 769 } else { 770 pages = save_zero_page(f, block, offset, p, bytes_transferred); 771 if (pages > 0) { 772 /* Must let xbzrle know, otherwise a previous (now 0'd) cached 773 * page would be stale 774 */ 775 xbzrle_cache_zero_page(current_addr); 776 } else if (!ram_bulk_stage && migrate_use_xbzrle()) { 777 pages = save_xbzrle_page(f, &p, current_addr, block, 778 offset, last_stage, bytes_transferred); 779 if (!last_stage) { 780 /* Can't send this cached data async, since the cache page 781 * might get updated before it gets to the wire 782 */ 783 send_async = false; 784 } 785 } 786 } 787 788 /* XBZRLE overflow or normal page */ 789 if (pages == -1) { 790 *bytes_transferred += save_page_header(f, block, 791 offset | RAM_SAVE_FLAG_PAGE); 792 if (send_async) { 793 qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE); 794 } else { 795 qemu_put_buffer(f, p, TARGET_PAGE_SIZE); 796 } 797 *bytes_transferred += TARGET_PAGE_SIZE; 798 pages = 1; 799 acct_info.norm_pages++; 800 } 801 802 XBZRLE_cache_unlock(); 803 804 return pages; 805 } 806 807 static int do_compress_ram_page(CompressParam *param) 808 { 809 int bytes_sent, blen; 810 uint8_t *p; 811 RAMBlock *block = param->block; 812 ram_addr_t offset = param->offset; 813 814 p = block->host + (offset & TARGET_PAGE_MASK); 815 816 bytes_sent = save_page_header(param->file, block, offset | 817 RAM_SAVE_FLAG_COMPRESS_PAGE); 818 blen = qemu_put_compression_data(param->file, p, TARGET_PAGE_SIZE, 819 migrate_compress_level()); 820 bytes_sent += blen; 821 822 return bytes_sent; 823 } 824 825 static inline void start_compression(CompressParam *param) 826 { 827 param->done = false; 828 qemu_mutex_lock(¶m->mutex); 829 param->start = true; 830 qemu_cond_signal(¶m->cond); 831 qemu_mutex_unlock(¶m->mutex); 832 } 833 834 static inline void start_decompression(DecompressParam *param) 835 { 836 qemu_mutex_lock(¶m->mutex); 837 param->start = true; 838 qemu_cond_signal(¶m->cond); 839 qemu_mutex_unlock(¶m->mutex); 840 } 841 842 static uint64_t bytes_transferred; 843 844 static void flush_compressed_data(QEMUFile *f) 845 { 846 int idx, len, thread_count; 847 848 if (!migrate_use_compression()) { 849 return; 850 } 851 thread_count = migrate_compress_threads(); 852 for (idx = 0; idx < thread_count; idx++) { 853 if (!comp_param[idx].done) { 854 qemu_mutex_lock(comp_done_lock); 855 while (!comp_param[idx].done && !quit_comp_thread) { 856 qemu_cond_wait(comp_done_cond, comp_done_lock); 857 } 858 qemu_mutex_unlock(comp_done_lock); 859 } 860 if (!quit_comp_thread) { 861 len = qemu_put_qemu_file(f, comp_param[idx].file); 862 bytes_transferred += len; 863 } 864 } 865 } 866 867 static inline void set_compress_params(CompressParam *param, RAMBlock *block, 868 ram_addr_t offset) 869 { 870 param->block = block; 871 param->offset = offset; 872 } 873 874 static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block, 875 ram_addr_t offset, 876 uint64_t *bytes_transferred) 877 { 878 int idx, thread_count, bytes_xmit = -1, pages = -1; 879 880 thread_count = migrate_compress_threads(); 881 qemu_mutex_lock(comp_done_lock); 882 while (true) { 883 for (idx = 0; idx < thread_count; idx++) { 884 if (comp_param[idx].done) { 885 bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file); 886 set_compress_params(&comp_param[idx], block, offset); 887 start_compression(&comp_param[idx]); 888 pages = 1; 889 acct_info.norm_pages++; 890 *bytes_transferred += bytes_xmit; 891 break; 892 } 893 } 894 if (pages > 0) { 895 break; 896 } else { 897 qemu_cond_wait(comp_done_cond, comp_done_lock); 898 } 899 } 900 qemu_mutex_unlock(comp_done_lock); 901 902 return pages; 903 } 904 905 /** 906 * ram_save_compressed_page: compress the given page and send it to the stream 907 * 908 * Returns: Number of pages written. 909 * 910 * @f: QEMUFile where to send the data 911 * @block: block that contains the page we want to send 912 * @offset: offset inside the block for the page 913 * @last_stage: if we are at the completion stage 914 * @bytes_transferred: increase it with the number of transferred bytes 915 */ 916 static int ram_save_compressed_page(QEMUFile *f, PageSearchStatus *pss, 917 bool last_stage, 918 uint64_t *bytes_transferred) 919 { 920 int pages = -1; 921 uint64_t bytes_xmit; 922 uint8_t *p; 923 int ret; 924 RAMBlock *block = pss->block; 925 ram_addr_t offset = pss->offset; 926 927 p = block->host + offset; 928 929 bytes_xmit = 0; 930 ret = ram_control_save_page(f, block->offset, 931 offset, TARGET_PAGE_SIZE, &bytes_xmit); 932 if (bytes_xmit) { 933 *bytes_transferred += bytes_xmit; 934 pages = 1; 935 } 936 if (block == last_sent_block) { 937 offset |= RAM_SAVE_FLAG_CONTINUE; 938 } 939 if (ret != RAM_SAVE_CONTROL_NOT_SUPP) { 940 if (ret != RAM_SAVE_CONTROL_DELAYED) { 941 if (bytes_xmit > 0) { 942 acct_info.norm_pages++; 943 } else if (bytes_xmit == 0) { 944 acct_info.dup_pages++; 945 } 946 } 947 } else { 948 /* When starting the process of a new block, the first page of 949 * the block should be sent out before other pages in the same 950 * block, and all the pages in last block should have been sent 951 * out, keeping this order is important, because the 'cont' flag 952 * is used to avoid resending the block name. 953 */ 954 if (block != last_sent_block) { 955 flush_compressed_data(f); 956 pages = save_zero_page(f, block, offset, p, bytes_transferred); 957 if (pages == -1) { 958 set_compress_params(&comp_param[0], block, offset); 959 /* Use the qemu thread to compress the data to make sure the 960 * first page is sent out before other pages 961 */ 962 bytes_xmit = do_compress_ram_page(&comp_param[0]); 963 acct_info.norm_pages++; 964 qemu_put_qemu_file(f, comp_param[0].file); 965 *bytes_transferred += bytes_xmit; 966 pages = 1; 967 } 968 } else { 969 pages = save_zero_page(f, block, offset, p, bytes_transferred); 970 if (pages == -1) { 971 pages = compress_page_with_multi_thread(f, block, offset, 972 bytes_transferred); 973 } 974 } 975 } 976 977 return pages; 978 } 979 980 /* 981 * Find the next dirty page and update any state associated with 982 * the search process. 983 * 984 * Returns: True if a page is found 985 * 986 * @f: Current migration stream. 987 * @pss: Data about the state of the current dirty page scan. 988 * @*again: Set to false if the search has scanned the whole of RAM 989 * *ram_addr_abs: Pointer into which to store the address of the dirty page 990 * within the global ram_addr space 991 */ 992 static bool find_dirty_block(QEMUFile *f, PageSearchStatus *pss, 993 bool *again, ram_addr_t *ram_addr_abs) 994 { 995 pss->offset = migration_bitmap_find_dirty(pss->block, pss->offset, 996 ram_addr_abs); 997 if (pss->complete_round && pss->block == last_seen_block && 998 pss->offset >= last_offset) { 999 /* 1000 * We've been once around the RAM and haven't found anything. 1001 * Give up. 1002 */ 1003 *again = false; 1004 return false; 1005 } 1006 if (pss->offset >= pss->block->used_length) { 1007 /* Didn't find anything in this RAM Block */ 1008 pss->offset = 0; 1009 pss->block = QLIST_NEXT_RCU(pss->block, next); 1010 if (!pss->block) { 1011 /* Hit the end of the list */ 1012 pss->block = QLIST_FIRST_RCU(&ram_list.blocks); 1013 /* Flag that we've looped */ 1014 pss->complete_round = true; 1015 ram_bulk_stage = false; 1016 if (migrate_use_xbzrle()) { 1017 /* If xbzrle is on, stop using the data compression at this 1018 * point. In theory, xbzrle can do better than compression. 1019 */ 1020 flush_compressed_data(f); 1021 compression_switch = false; 1022 } 1023 } 1024 /* Didn't find anything this time, but try again on the new block */ 1025 *again = true; 1026 return false; 1027 } else { 1028 /* Can go around again, but... */ 1029 *again = true; 1030 /* We've found something so probably don't need to */ 1031 return true; 1032 } 1033 } 1034 1035 /* 1036 * Helper for 'get_queued_page' - gets a page off the queue 1037 * ms: MigrationState in 1038 * *offset: Used to return the offset within the RAMBlock 1039 * ram_addr_abs: global offset in the dirty/sent bitmaps 1040 * 1041 * Returns: block (or NULL if none available) 1042 */ 1043 static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset, 1044 ram_addr_t *ram_addr_abs) 1045 { 1046 RAMBlock *block = NULL; 1047 1048 qemu_mutex_lock(&ms->src_page_req_mutex); 1049 if (!QSIMPLEQ_EMPTY(&ms->src_page_requests)) { 1050 struct MigrationSrcPageRequest *entry = 1051 QSIMPLEQ_FIRST(&ms->src_page_requests); 1052 block = entry->rb; 1053 *offset = entry->offset; 1054 *ram_addr_abs = (entry->offset + entry->rb->offset) & 1055 TARGET_PAGE_MASK; 1056 1057 if (entry->len > TARGET_PAGE_SIZE) { 1058 entry->len -= TARGET_PAGE_SIZE; 1059 entry->offset += TARGET_PAGE_SIZE; 1060 } else { 1061 memory_region_unref(block->mr); 1062 QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req); 1063 g_free(entry); 1064 } 1065 } 1066 qemu_mutex_unlock(&ms->src_page_req_mutex); 1067 1068 return block; 1069 } 1070 1071 /* 1072 * Unqueue a page from the queue fed by postcopy page requests; skips pages 1073 * that are already sent (!dirty) 1074 * 1075 * ms: MigrationState in 1076 * pss: PageSearchStatus structure updated with found block/offset 1077 * ram_addr_abs: global offset in the dirty/sent bitmaps 1078 * 1079 * Returns: true if a queued page is found 1080 */ 1081 static bool get_queued_page(MigrationState *ms, PageSearchStatus *pss, 1082 ram_addr_t *ram_addr_abs) 1083 { 1084 RAMBlock *block; 1085 ram_addr_t offset; 1086 bool dirty; 1087 1088 do { 1089 block = unqueue_page(ms, &offset, ram_addr_abs); 1090 /* 1091 * We're sending this page, and since it's postcopy nothing else 1092 * will dirty it, and we must make sure it doesn't get sent again 1093 * even if this queue request was received after the background 1094 * search already sent it. 1095 */ 1096 if (block) { 1097 unsigned long *bitmap; 1098 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; 1099 dirty = test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, bitmap); 1100 if (!dirty) { 1101 trace_get_queued_page_not_dirty( 1102 block->idstr, (uint64_t)offset, 1103 (uint64_t)*ram_addr_abs, 1104 test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, 1105 atomic_rcu_read(&migration_bitmap_rcu)->unsentmap)); 1106 } else { 1107 trace_get_queued_page(block->idstr, 1108 (uint64_t)offset, 1109 (uint64_t)*ram_addr_abs); 1110 } 1111 } 1112 1113 } while (block && !dirty); 1114 1115 if (block) { 1116 /* 1117 * As soon as we start servicing pages out of order, then we have 1118 * to kill the bulk stage, since the bulk stage assumes 1119 * in (migration_bitmap_find_and_reset_dirty) that every page is 1120 * dirty, that's no longer true. 1121 */ 1122 ram_bulk_stage = false; 1123 1124 /* 1125 * We want the background search to continue from the queued page 1126 * since the guest is likely to want other pages near to the page 1127 * it just requested. 1128 */ 1129 pss->block = block; 1130 pss->offset = offset; 1131 } 1132 1133 return !!block; 1134 } 1135 1136 /** 1137 * flush_page_queue: Flush any remaining pages in the ram request queue 1138 * it should be empty at the end anyway, but in error cases there may be 1139 * some left. 1140 * 1141 * ms: MigrationState 1142 */ 1143 void flush_page_queue(MigrationState *ms) 1144 { 1145 struct MigrationSrcPageRequest *mspr, *next_mspr; 1146 /* This queue generally should be empty - but in the case of a failed 1147 * migration might have some droppings in. 1148 */ 1149 rcu_read_lock(); 1150 QSIMPLEQ_FOREACH_SAFE(mspr, &ms->src_page_requests, next_req, next_mspr) { 1151 memory_region_unref(mspr->rb->mr); 1152 QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req); 1153 g_free(mspr); 1154 } 1155 rcu_read_unlock(); 1156 } 1157 1158 /** 1159 * Queue the pages for transmission, e.g. a request from postcopy destination 1160 * ms: MigrationStatus in which the queue is held 1161 * rbname: The RAMBlock the request is for - may be NULL (to mean reuse last) 1162 * start: Offset from the start of the RAMBlock 1163 * len: Length (in bytes) to send 1164 * Return: 0 on success 1165 */ 1166 int ram_save_queue_pages(MigrationState *ms, const char *rbname, 1167 ram_addr_t start, ram_addr_t len) 1168 { 1169 RAMBlock *ramblock; 1170 1171 rcu_read_lock(); 1172 if (!rbname) { 1173 /* Reuse last RAMBlock */ 1174 ramblock = ms->last_req_rb; 1175 1176 if (!ramblock) { 1177 /* 1178 * Shouldn't happen, we can't reuse the last RAMBlock if 1179 * it's the 1st request. 1180 */ 1181 error_report("ram_save_queue_pages no previous block"); 1182 goto err; 1183 } 1184 } else { 1185 ramblock = qemu_ram_block_by_name(rbname); 1186 1187 if (!ramblock) { 1188 /* We shouldn't be asked for a non-existent RAMBlock */ 1189 error_report("ram_save_queue_pages no block '%s'", rbname); 1190 goto err; 1191 } 1192 ms->last_req_rb = ramblock; 1193 } 1194 trace_ram_save_queue_pages(ramblock->idstr, start, len); 1195 if (start+len > ramblock->used_length) { 1196 error_report("%s request overrun start=" RAM_ADDR_FMT " len=" 1197 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT, 1198 __func__, start, len, ramblock->used_length); 1199 goto err; 1200 } 1201 1202 struct MigrationSrcPageRequest *new_entry = 1203 g_malloc0(sizeof(struct MigrationSrcPageRequest)); 1204 new_entry->rb = ramblock; 1205 new_entry->offset = start; 1206 new_entry->len = len; 1207 1208 memory_region_ref(ramblock->mr); 1209 qemu_mutex_lock(&ms->src_page_req_mutex); 1210 QSIMPLEQ_INSERT_TAIL(&ms->src_page_requests, new_entry, next_req); 1211 qemu_mutex_unlock(&ms->src_page_req_mutex); 1212 rcu_read_unlock(); 1213 1214 return 0; 1215 1216 err: 1217 rcu_read_unlock(); 1218 return -1; 1219 } 1220 1221 /** 1222 * ram_save_target_page: Save one target page 1223 * 1224 * 1225 * @f: QEMUFile where to send the data 1226 * @block: pointer to block that contains the page we want to send 1227 * @offset: offset inside the block for the page; 1228 * @last_stage: if we are at the completion stage 1229 * @bytes_transferred: increase it with the number of transferred bytes 1230 * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space 1231 * 1232 * Returns: Number of pages written. 1233 */ 1234 static int ram_save_target_page(MigrationState *ms, QEMUFile *f, 1235 PageSearchStatus *pss, 1236 bool last_stage, 1237 uint64_t *bytes_transferred, 1238 ram_addr_t dirty_ram_abs) 1239 { 1240 int res = 0; 1241 1242 /* Check the pages is dirty and if it is send it */ 1243 if (migration_bitmap_clear_dirty(dirty_ram_abs)) { 1244 unsigned long *unsentmap; 1245 if (compression_switch && migrate_use_compression()) { 1246 res = ram_save_compressed_page(f, pss, 1247 last_stage, 1248 bytes_transferred); 1249 } else { 1250 res = ram_save_page(f, pss, last_stage, 1251 bytes_transferred); 1252 } 1253 1254 if (res < 0) { 1255 return res; 1256 } 1257 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap; 1258 if (unsentmap) { 1259 clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap); 1260 } 1261 /* Only update last_sent_block if a block was actually sent; xbzrle 1262 * might have decided the page was identical so didn't bother writing 1263 * to the stream. 1264 */ 1265 if (res > 0) { 1266 last_sent_block = pss->block; 1267 } 1268 } 1269 1270 return res; 1271 } 1272 1273 /** 1274 * ram_save_host_page: Starting at *offset send pages upto the end 1275 * of the current host page. It's valid for the initial 1276 * offset to point into the middle of a host page 1277 * in which case the remainder of the hostpage is sent. 1278 * Only dirty target pages are sent. 1279 * 1280 * Returns: Number of pages written. 1281 * 1282 * @f: QEMUFile where to send the data 1283 * @block: pointer to block that contains the page we want to send 1284 * @offset: offset inside the block for the page; updated to last target page 1285 * sent 1286 * @last_stage: if we are at the completion stage 1287 * @bytes_transferred: increase it with the number of transferred bytes 1288 * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space 1289 */ 1290 static int ram_save_host_page(MigrationState *ms, QEMUFile *f, 1291 PageSearchStatus *pss, 1292 bool last_stage, 1293 uint64_t *bytes_transferred, 1294 ram_addr_t dirty_ram_abs) 1295 { 1296 int tmppages, pages = 0; 1297 do { 1298 tmppages = ram_save_target_page(ms, f, pss, last_stage, 1299 bytes_transferred, dirty_ram_abs); 1300 if (tmppages < 0) { 1301 return tmppages; 1302 } 1303 1304 pages += tmppages; 1305 pss->offset += TARGET_PAGE_SIZE; 1306 dirty_ram_abs += TARGET_PAGE_SIZE; 1307 } while (pss->offset & (qemu_host_page_size - 1)); 1308 1309 /* The offset we leave with is the last one we looked at */ 1310 pss->offset -= TARGET_PAGE_SIZE; 1311 return pages; 1312 } 1313 1314 /** 1315 * ram_find_and_save_block: Finds a dirty page and sends it to f 1316 * 1317 * Called within an RCU critical section. 1318 * 1319 * Returns: The number of pages written 1320 * 0 means no dirty pages 1321 * 1322 * @f: QEMUFile where to send the data 1323 * @last_stage: if we are at the completion stage 1324 * @bytes_transferred: increase it with the number of transferred bytes 1325 * 1326 * On systems where host-page-size > target-page-size it will send all the 1327 * pages in a host page that are dirty. 1328 */ 1329 1330 static int ram_find_and_save_block(QEMUFile *f, bool last_stage, 1331 uint64_t *bytes_transferred) 1332 { 1333 PageSearchStatus pss; 1334 MigrationState *ms = migrate_get_current(); 1335 int pages = 0; 1336 bool again, found; 1337 ram_addr_t dirty_ram_abs; /* Address of the start of the dirty page in 1338 ram_addr_t space */ 1339 1340 pss.block = last_seen_block; 1341 pss.offset = last_offset; 1342 pss.complete_round = false; 1343 1344 if (!pss.block) { 1345 pss.block = QLIST_FIRST_RCU(&ram_list.blocks); 1346 } 1347 1348 do { 1349 again = true; 1350 found = get_queued_page(ms, &pss, &dirty_ram_abs); 1351 1352 if (!found) { 1353 /* priority queue empty, so just search for something dirty */ 1354 found = find_dirty_block(f, &pss, &again, &dirty_ram_abs); 1355 } 1356 1357 if (found) { 1358 pages = ram_save_host_page(ms, f, &pss, 1359 last_stage, bytes_transferred, 1360 dirty_ram_abs); 1361 } 1362 } while (!pages && again); 1363 1364 last_seen_block = pss.block; 1365 last_offset = pss.offset; 1366 1367 return pages; 1368 } 1369 1370 void acct_update_position(QEMUFile *f, size_t size, bool zero) 1371 { 1372 uint64_t pages = size / TARGET_PAGE_SIZE; 1373 if (zero) { 1374 acct_info.dup_pages += pages; 1375 } else { 1376 acct_info.norm_pages += pages; 1377 bytes_transferred += size; 1378 qemu_update_position(f, size); 1379 } 1380 } 1381 1382 static ram_addr_t ram_save_remaining(void) 1383 { 1384 return migration_dirty_pages; 1385 } 1386 1387 uint64_t ram_bytes_remaining(void) 1388 { 1389 return ram_save_remaining() * TARGET_PAGE_SIZE; 1390 } 1391 1392 uint64_t ram_bytes_transferred(void) 1393 { 1394 return bytes_transferred; 1395 } 1396 1397 uint64_t ram_bytes_total(void) 1398 { 1399 RAMBlock *block; 1400 uint64_t total = 0; 1401 1402 rcu_read_lock(); 1403 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) 1404 total += block->used_length; 1405 rcu_read_unlock(); 1406 return total; 1407 } 1408 1409 void free_xbzrle_decoded_buf(void) 1410 { 1411 g_free(xbzrle_decoded_buf); 1412 xbzrle_decoded_buf = NULL; 1413 } 1414 1415 static void migration_bitmap_free(struct BitmapRcu *bmap) 1416 { 1417 g_free(bmap->bmap); 1418 g_free(bmap->unsentmap); 1419 g_free(bmap); 1420 } 1421 1422 static void ram_migration_cleanup(void *opaque) 1423 { 1424 /* caller have hold iothread lock or is in a bh, so there is 1425 * no writing race against this migration_bitmap 1426 */ 1427 struct BitmapRcu *bitmap = migration_bitmap_rcu; 1428 atomic_rcu_set(&migration_bitmap_rcu, NULL); 1429 if (bitmap) { 1430 memory_global_dirty_log_stop(); 1431 call_rcu(bitmap, migration_bitmap_free, rcu); 1432 } 1433 1434 XBZRLE_cache_lock(); 1435 if (XBZRLE.cache) { 1436 cache_fini(XBZRLE.cache); 1437 g_free(XBZRLE.encoded_buf); 1438 g_free(XBZRLE.current_buf); 1439 XBZRLE.cache = NULL; 1440 XBZRLE.encoded_buf = NULL; 1441 XBZRLE.current_buf = NULL; 1442 } 1443 XBZRLE_cache_unlock(); 1444 } 1445 1446 static void reset_ram_globals(void) 1447 { 1448 last_seen_block = NULL; 1449 last_sent_block = NULL; 1450 last_offset = 0; 1451 last_version = ram_list.version; 1452 ram_bulk_stage = true; 1453 } 1454 1455 #define MAX_WAIT 50 /* ms, half buffered_file limit */ 1456 1457 void migration_bitmap_extend(ram_addr_t old, ram_addr_t new) 1458 { 1459 /* called in qemu main thread, so there is 1460 * no writing race against this migration_bitmap 1461 */ 1462 if (migration_bitmap_rcu) { 1463 struct BitmapRcu *old_bitmap = migration_bitmap_rcu, *bitmap; 1464 bitmap = g_new(struct BitmapRcu, 1); 1465 bitmap->bmap = bitmap_new(new); 1466 1467 /* prevent migration_bitmap content from being set bit 1468 * by migration_bitmap_sync_range() at the same time. 1469 * it is safe to migration if migration_bitmap is cleared bit 1470 * at the same time. 1471 */ 1472 qemu_mutex_lock(&migration_bitmap_mutex); 1473 bitmap_copy(bitmap->bmap, old_bitmap->bmap, old); 1474 bitmap_set(bitmap->bmap, old, new - old); 1475 1476 /* We don't have a way to safely extend the sentmap 1477 * with RCU; so mark it as missing, entry to postcopy 1478 * will fail. 1479 */ 1480 bitmap->unsentmap = NULL; 1481 1482 atomic_rcu_set(&migration_bitmap_rcu, bitmap); 1483 qemu_mutex_unlock(&migration_bitmap_mutex); 1484 migration_dirty_pages += new - old; 1485 call_rcu(old_bitmap, migration_bitmap_free, rcu); 1486 } 1487 } 1488 1489 /* 1490 * 'expected' is the value you expect the bitmap mostly to be full 1491 * of; it won't bother printing lines that are all this value. 1492 * If 'todump' is null the migration bitmap is dumped. 1493 */ 1494 void ram_debug_dump_bitmap(unsigned long *todump, bool expected) 1495 { 1496 int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS; 1497 1498 int64_t cur; 1499 int64_t linelen = 128; 1500 char linebuf[129]; 1501 1502 if (!todump) { 1503 todump = atomic_rcu_read(&migration_bitmap_rcu)->bmap; 1504 } 1505 1506 for (cur = 0; cur < ram_pages; cur += linelen) { 1507 int64_t curb; 1508 bool found = false; 1509 /* 1510 * Last line; catch the case where the line length 1511 * is longer than remaining ram 1512 */ 1513 if (cur + linelen > ram_pages) { 1514 linelen = ram_pages - cur; 1515 } 1516 for (curb = 0; curb < linelen; curb++) { 1517 bool thisbit = test_bit(cur + curb, todump); 1518 linebuf[curb] = thisbit ? '1' : '.'; 1519 found = found || (thisbit != expected); 1520 } 1521 if (found) { 1522 linebuf[curb] = '\0'; 1523 fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf); 1524 } 1525 } 1526 } 1527 1528 /* **** functions for postcopy ***** */ 1529 1530 /* 1531 * Callback from postcopy_each_ram_send_discard for each RAMBlock 1532 * Note: At this point the 'unsentmap' is the processed bitmap combined 1533 * with the dirtymap; so a '1' means it's either dirty or unsent. 1534 * start,length: Indexes into the bitmap for the first bit 1535 * representing the named block and length in target-pages 1536 */ 1537 static int postcopy_send_discard_bm_ram(MigrationState *ms, 1538 PostcopyDiscardState *pds, 1539 unsigned long start, 1540 unsigned long length) 1541 { 1542 unsigned long end = start + length; /* one after the end */ 1543 unsigned long current; 1544 unsigned long *unsentmap; 1545 1546 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap; 1547 for (current = start; current < end; ) { 1548 unsigned long one = find_next_bit(unsentmap, end, current); 1549 1550 if (one <= end) { 1551 unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1); 1552 unsigned long discard_length; 1553 1554 if (zero >= end) { 1555 discard_length = end - one; 1556 } else { 1557 discard_length = zero - one; 1558 } 1559 postcopy_discard_send_range(ms, pds, one, discard_length); 1560 current = one + discard_length; 1561 } else { 1562 current = one; 1563 } 1564 } 1565 1566 return 0; 1567 } 1568 1569 /* 1570 * Utility for the outgoing postcopy code. 1571 * Calls postcopy_send_discard_bm_ram for each RAMBlock 1572 * passing it bitmap indexes and name. 1573 * Returns: 0 on success 1574 * (qemu_ram_foreach_block ends up passing unscaled lengths 1575 * which would mean postcopy code would have to deal with target page) 1576 */ 1577 static int postcopy_each_ram_send_discard(MigrationState *ms) 1578 { 1579 struct RAMBlock *block; 1580 int ret; 1581 1582 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 1583 unsigned long first = block->offset >> TARGET_PAGE_BITS; 1584 PostcopyDiscardState *pds = postcopy_discard_send_init(ms, 1585 first, 1586 block->idstr); 1587 1588 /* 1589 * Postcopy sends chunks of bitmap over the wire, but it 1590 * just needs indexes at this point, avoids it having 1591 * target page specific code. 1592 */ 1593 ret = postcopy_send_discard_bm_ram(ms, pds, first, 1594 block->used_length >> TARGET_PAGE_BITS); 1595 postcopy_discard_send_finish(ms, pds); 1596 if (ret) { 1597 return ret; 1598 } 1599 } 1600 1601 return 0; 1602 } 1603 1604 /* 1605 * Helper for postcopy_chunk_hostpages; it's called twice to cleanup 1606 * the two bitmaps, that are similar, but one is inverted. 1607 * 1608 * We search for runs of target-pages that don't start or end on a 1609 * host page boundary; 1610 * unsent_pass=true: Cleans up partially unsent host pages by searching 1611 * the unsentmap 1612 * unsent_pass=false: Cleans up partially dirty host pages by searching 1613 * the main migration bitmap 1614 * 1615 */ 1616 static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass, 1617 RAMBlock *block, 1618 PostcopyDiscardState *pds) 1619 { 1620 unsigned long *bitmap; 1621 unsigned long *unsentmap; 1622 unsigned int host_ratio = qemu_host_page_size / TARGET_PAGE_SIZE; 1623 unsigned long first = block->offset >> TARGET_PAGE_BITS; 1624 unsigned long len = block->used_length >> TARGET_PAGE_BITS; 1625 unsigned long last = first + (len - 1); 1626 unsigned long run_start; 1627 1628 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; 1629 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap; 1630 1631 if (unsent_pass) { 1632 /* Find a sent page */ 1633 run_start = find_next_zero_bit(unsentmap, last + 1, first); 1634 } else { 1635 /* Find a dirty page */ 1636 run_start = find_next_bit(bitmap, last + 1, first); 1637 } 1638 1639 while (run_start <= last) { 1640 bool do_fixup = false; 1641 unsigned long fixup_start_addr; 1642 unsigned long host_offset; 1643 1644 /* 1645 * If the start of this run of pages is in the middle of a host 1646 * page, then we need to fixup this host page. 1647 */ 1648 host_offset = run_start % host_ratio; 1649 if (host_offset) { 1650 do_fixup = true; 1651 run_start -= host_offset; 1652 fixup_start_addr = run_start; 1653 /* For the next pass */ 1654 run_start = run_start + host_ratio; 1655 } else { 1656 /* Find the end of this run */ 1657 unsigned long run_end; 1658 if (unsent_pass) { 1659 run_end = find_next_bit(unsentmap, last + 1, run_start + 1); 1660 } else { 1661 run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1); 1662 } 1663 /* 1664 * If the end isn't at the start of a host page, then the 1665 * run doesn't finish at the end of a host page 1666 * and we need to discard. 1667 */ 1668 host_offset = run_end % host_ratio; 1669 if (host_offset) { 1670 do_fixup = true; 1671 fixup_start_addr = run_end - host_offset; 1672 /* 1673 * This host page has gone, the next loop iteration starts 1674 * from after the fixup 1675 */ 1676 run_start = fixup_start_addr + host_ratio; 1677 } else { 1678 /* 1679 * No discards on this iteration, next loop starts from 1680 * next sent/dirty page 1681 */ 1682 run_start = run_end + 1; 1683 } 1684 } 1685 1686 if (do_fixup) { 1687 unsigned long page; 1688 1689 /* Tell the destination to discard this page */ 1690 if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) { 1691 /* For the unsent_pass we: 1692 * discard partially sent pages 1693 * For the !unsent_pass (dirty) we: 1694 * discard partially dirty pages that were sent 1695 * (any partially sent pages were already discarded 1696 * by the previous unsent_pass) 1697 */ 1698 postcopy_discard_send_range(ms, pds, fixup_start_addr, 1699 host_ratio); 1700 } 1701 1702 /* Clean up the bitmap */ 1703 for (page = fixup_start_addr; 1704 page < fixup_start_addr + host_ratio; page++) { 1705 /* All pages in this host page are now not sent */ 1706 set_bit(page, unsentmap); 1707 1708 /* 1709 * Remark them as dirty, updating the count for any pages 1710 * that weren't previously dirty. 1711 */ 1712 migration_dirty_pages += !test_and_set_bit(page, bitmap); 1713 } 1714 } 1715 1716 if (unsent_pass) { 1717 /* Find the next sent page for the next iteration */ 1718 run_start = find_next_zero_bit(unsentmap, last + 1, 1719 run_start); 1720 } else { 1721 /* Find the next dirty page for the next iteration */ 1722 run_start = find_next_bit(bitmap, last + 1, run_start); 1723 } 1724 } 1725 } 1726 1727 /* 1728 * Utility for the outgoing postcopy code. 1729 * 1730 * Discard any partially sent host-page size chunks, mark any partially 1731 * dirty host-page size chunks as all dirty. 1732 * 1733 * Returns: 0 on success 1734 */ 1735 static int postcopy_chunk_hostpages(MigrationState *ms) 1736 { 1737 struct RAMBlock *block; 1738 1739 if (qemu_host_page_size == TARGET_PAGE_SIZE) { 1740 /* Easy case - TPS==HPS - nothing to be done */ 1741 return 0; 1742 } 1743 1744 /* Easiest way to make sure we don't resume in the middle of a host-page */ 1745 last_seen_block = NULL; 1746 last_sent_block = NULL; 1747 last_offset = 0; 1748 1749 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 1750 unsigned long first = block->offset >> TARGET_PAGE_BITS; 1751 1752 PostcopyDiscardState *pds = 1753 postcopy_discard_send_init(ms, first, block->idstr); 1754 1755 /* First pass: Discard all partially sent host pages */ 1756 postcopy_chunk_hostpages_pass(ms, true, block, pds); 1757 /* 1758 * Second pass: Ensure that all partially dirty host pages are made 1759 * fully dirty. 1760 */ 1761 postcopy_chunk_hostpages_pass(ms, false, block, pds); 1762 1763 postcopy_discard_send_finish(ms, pds); 1764 } /* ram_list loop */ 1765 1766 return 0; 1767 } 1768 1769 /* 1770 * Transmit the set of pages to be discarded after precopy to the target 1771 * these are pages that: 1772 * a) Have been previously transmitted but are now dirty again 1773 * b) Pages that have never been transmitted, this ensures that 1774 * any pages on the destination that have been mapped by background 1775 * tasks get discarded (transparent huge pages is the specific concern) 1776 * Hopefully this is pretty sparse 1777 */ 1778 int ram_postcopy_send_discard_bitmap(MigrationState *ms) 1779 { 1780 int ret; 1781 unsigned long *bitmap, *unsentmap; 1782 1783 rcu_read_lock(); 1784 1785 /* This should be our last sync, the src is now paused */ 1786 migration_bitmap_sync(); 1787 1788 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap; 1789 if (!unsentmap) { 1790 /* We don't have a safe way to resize the sentmap, so 1791 * if the bitmap was resized it will be NULL at this 1792 * point. 1793 */ 1794 error_report("migration ram resized during precopy phase"); 1795 rcu_read_unlock(); 1796 return -EINVAL; 1797 } 1798 1799 /* Deal with TPS != HPS */ 1800 ret = postcopy_chunk_hostpages(ms); 1801 if (ret) { 1802 rcu_read_unlock(); 1803 return ret; 1804 } 1805 1806 /* 1807 * Update the unsentmap to be unsentmap = unsentmap | dirty 1808 */ 1809 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; 1810 bitmap_or(unsentmap, unsentmap, bitmap, 1811 last_ram_offset() >> TARGET_PAGE_BITS); 1812 1813 1814 trace_ram_postcopy_send_discard_bitmap(); 1815 #ifdef DEBUG_POSTCOPY 1816 ram_debug_dump_bitmap(unsentmap, true); 1817 #endif 1818 1819 ret = postcopy_each_ram_send_discard(ms); 1820 rcu_read_unlock(); 1821 1822 return ret; 1823 } 1824 1825 /* 1826 * At the start of the postcopy phase of migration, any now-dirty 1827 * precopied pages are discarded. 1828 * 1829 * start, length describe a byte address range within the RAMBlock 1830 * 1831 * Returns 0 on success. 1832 */ 1833 int ram_discard_range(MigrationIncomingState *mis, 1834 const char *block_name, 1835 uint64_t start, size_t length) 1836 { 1837 int ret = -1; 1838 1839 rcu_read_lock(); 1840 RAMBlock *rb = qemu_ram_block_by_name(block_name); 1841 1842 if (!rb) { 1843 error_report("ram_discard_range: Failed to find block '%s'", 1844 block_name); 1845 goto err; 1846 } 1847 1848 uint8_t *host_startaddr = rb->host + start; 1849 1850 if ((uintptr_t)host_startaddr & (qemu_host_page_size - 1)) { 1851 error_report("ram_discard_range: Unaligned start address: %p", 1852 host_startaddr); 1853 goto err; 1854 } 1855 1856 if ((start + length) <= rb->used_length) { 1857 uint8_t *host_endaddr = host_startaddr + length; 1858 if ((uintptr_t)host_endaddr & (qemu_host_page_size - 1)) { 1859 error_report("ram_discard_range: Unaligned end address: %p", 1860 host_endaddr); 1861 goto err; 1862 } 1863 ret = postcopy_ram_discard_range(mis, host_startaddr, length); 1864 } else { 1865 error_report("ram_discard_range: Overrun block '%s' (%" PRIu64 1866 "/%zx/" RAM_ADDR_FMT")", 1867 block_name, start, length, rb->used_length); 1868 } 1869 1870 err: 1871 rcu_read_unlock(); 1872 1873 return ret; 1874 } 1875 1876 1877 /* Each of ram_save_setup, ram_save_iterate and ram_save_complete has 1878 * long-running RCU critical section. When rcu-reclaims in the code 1879 * start to become numerous it will be necessary to reduce the 1880 * granularity of these critical sections. 1881 */ 1882 1883 static int ram_save_setup(QEMUFile *f, void *opaque) 1884 { 1885 RAMBlock *block; 1886 int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */ 1887 1888 dirty_rate_high_cnt = 0; 1889 bitmap_sync_count = 0; 1890 migration_bitmap_sync_init(); 1891 qemu_mutex_init(&migration_bitmap_mutex); 1892 1893 if (migrate_use_xbzrle()) { 1894 XBZRLE_cache_lock(); 1895 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() / 1896 TARGET_PAGE_SIZE, 1897 TARGET_PAGE_SIZE); 1898 if (!XBZRLE.cache) { 1899 XBZRLE_cache_unlock(); 1900 error_report("Error creating cache"); 1901 return -1; 1902 } 1903 XBZRLE_cache_unlock(); 1904 1905 /* We prefer not to abort if there is no memory */ 1906 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE); 1907 if (!XBZRLE.encoded_buf) { 1908 error_report("Error allocating encoded_buf"); 1909 return -1; 1910 } 1911 1912 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE); 1913 if (!XBZRLE.current_buf) { 1914 error_report("Error allocating current_buf"); 1915 g_free(XBZRLE.encoded_buf); 1916 XBZRLE.encoded_buf = NULL; 1917 return -1; 1918 } 1919 1920 acct_clear(); 1921 } 1922 1923 /* For memory_global_dirty_log_start below. */ 1924 qemu_mutex_lock_iothread(); 1925 1926 qemu_mutex_lock_ramlist(); 1927 rcu_read_lock(); 1928 bytes_transferred = 0; 1929 reset_ram_globals(); 1930 1931 ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS; 1932 migration_bitmap_rcu = g_new0(struct BitmapRcu, 1); 1933 migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages); 1934 bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages); 1935 1936 if (migrate_postcopy_ram()) { 1937 migration_bitmap_rcu->unsentmap = bitmap_new(ram_bitmap_pages); 1938 bitmap_set(migration_bitmap_rcu->unsentmap, 0, ram_bitmap_pages); 1939 } 1940 1941 /* 1942 * Count the total number of pages used by ram blocks not including any 1943 * gaps due to alignment or unplugs. 1944 */ 1945 migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS; 1946 1947 memory_global_dirty_log_start(); 1948 migration_bitmap_sync(); 1949 qemu_mutex_unlock_ramlist(); 1950 qemu_mutex_unlock_iothread(); 1951 1952 qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE); 1953 1954 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 1955 qemu_put_byte(f, strlen(block->idstr)); 1956 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr)); 1957 qemu_put_be64(f, block->used_length); 1958 } 1959 1960 rcu_read_unlock(); 1961 1962 ram_control_before_iterate(f, RAM_CONTROL_SETUP); 1963 ram_control_after_iterate(f, RAM_CONTROL_SETUP); 1964 1965 qemu_put_be64(f, RAM_SAVE_FLAG_EOS); 1966 1967 return 0; 1968 } 1969 1970 static int ram_save_iterate(QEMUFile *f, void *opaque) 1971 { 1972 int ret; 1973 int i; 1974 int64_t t0; 1975 int pages_sent = 0; 1976 1977 rcu_read_lock(); 1978 if (ram_list.version != last_version) { 1979 reset_ram_globals(); 1980 } 1981 1982 /* Read version before ram_list.blocks */ 1983 smp_rmb(); 1984 1985 ram_control_before_iterate(f, RAM_CONTROL_ROUND); 1986 1987 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 1988 i = 0; 1989 while ((ret = qemu_file_rate_limit(f)) == 0) { 1990 int pages; 1991 1992 pages = ram_find_and_save_block(f, false, &bytes_transferred); 1993 /* no more pages to sent */ 1994 if (pages == 0) { 1995 break; 1996 } 1997 pages_sent += pages; 1998 acct_info.iterations++; 1999 2000 /* we want to check in the 1st loop, just in case it was the 1st time 2001 and we had to sync the dirty bitmap. 2002 qemu_get_clock_ns() is a bit expensive, so we only check each some 2003 iterations 2004 */ 2005 if ((i & 63) == 0) { 2006 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000; 2007 if (t1 > MAX_WAIT) { 2008 DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n", 2009 t1, i); 2010 break; 2011 } 2012 } 2013 i++; 2014 } 2015 flush_compressed_data(f); 2016 rcu_read_unlock(); 2017 2018 /* 2019 * Must occur before EOS (or any QEMUFile operation) 2020 * because of RDMA protocol. 2021 */ 2022 ram_control_after_iterate(f, RAM_CONTROL_ROUND); 2023 2024 qemu_put_be64(f, RAM_SAVE_FLAG_EOS); 2025 bytes_transferred += 8; 2026 2027 ret = qemu_file_get_error(f); 2028 if (ret < 0) { 2029 return ret; 2030 } 2031 2032 return pages_sent; 2033 } 2034 2035 /* Called with iothread lock */ 2036 static int ram_save_complete(QEMUFile *f, void *opaque) 2037 { 2038 rcu_read_lock(); 2039 2040 if (!migration_in_postcopy(migrate_get_current())) { 2041 migration_bitmap_sync(); 2042 } 2043 2044 ram_control_before_iterate(f, RAM_CONTROL_FINISH); 2045 2046 /* try transferring iterative blocks of memory */ 2047 2048 /* flush all remaining blocks regardless of rate limiting */ 2049 while (true) { 2050 int pages; 2051 2052 pages = ram_find_and_save_block(f, true, &bytes_transferred); 2053 /* no more blocks to sent */ 2054 if (pages == 0) { 2055 break; 2056 } 2057 } 2058 2059 flush_compressed_data(f); 2060 ram_control_after_iterate(f, RAM_CONTROL_FINISH); 2061 2062 rcu_read_unlock(); 2063 2064 qemu_put_be64(f, RAM_SAVE_FLAG_EOS); 2065 2066 return 0; 2067 } 2068 2069 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size, 2070 uint64_t *non_postcopiable_pending, 2071 uint64_t *postcopiable_pending) 2072 { 2073 uint64_t remaining_size; 2074 2075 remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE; 2076 2077 if (!migration_in_postcopy(migrate_get_current()) && 2078 remaining_size < max_size) { 2079 qemu_mutex_lock_iothread(); 2080 rcu_read_lock(); 2081 migration_bitmap_sync(); 2082 rcu_read_unlock(); 2083 qemu_mutex_unlock_iothread(); 2084 remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE; 2085 } 2086 2087 /* We can do postcopy, and all the data is postcopiable */ 2088 *postcopiable_pending += remaining_size; 2089 } 2090 2091 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host) 2092 { 2093 unsigned int xh_len; 2094 int xh_flags; 2095 uint8_t *loaded_data; 2096 2097 if (!xbzrle_decoded_buf) { 2098 xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE); 2099 } 2100 loaded_data = xbzrle_decoded_buf; 2101 2102 /* extract RLE header */ 2103 xh_flags = qemu_get_byte(f); 2104 xh_len = qemu_get_be16(f); 2105 2106 if (xh_flags != ENCODING_FLAG_XBZRLE) { 2107 error_report("Failed to load XBZRLE page - wrong compression!"); 2108 return -1; 2109 } 2110 2111 if (xh_len > TARGET_PAGE_SIZE) { 2112 error_report("Failed to load XBZRLE page - len overflow!"); 2113 return -1; 2114 } 2115 /* load data and decode */ 2116 qemu_get_buffer_in_place(f, &loaded_data, xh_len); 2117 2118 /* decode RLE */ 2119 if (xbzrle_decode_buffer(loaded_data, xh_len, host, 2120 TARGET_PAGE_SIZE) == -1) { 2121 error_report("Failed to load XBZRLE page - decode error!"); 2122 return -1; 2123 } 2124 2125 return 0; 2126 } 2127 2128 /* Must be called from within a rcu critical section. 2129 * Returns a pointer from within the RCU-protected ram_list. 2130 */ 2131 /* 2132 * Read a RAMBlock ID from the stream f. 2133 * 2134 * f: Stream to read from 2135 * flags: Page flags (mostly to see if it's a continuation of previous block) 2136 */ 2137 static inline RAMBlock *ram_block_from_stream(QEMUFile *f, 2138 int flags) 2139 { 2140 static RAMBlock *block = NULL; 2141 char id[256]; 2142 uint8_t len; 2143 2144 if (flags & RAM_SAVE_FLAG_CONTINUE) { 2145 if (!block) { 2146 error_report("Ack, bad migration stream!"); 2147 return NULL; 2148 } 2149 return block; 2150 } 2151 2152 len = qemu_get_byte(f); 2153 qemu_get_buffer(f, (uint8_t *)id, len); 2154 id[len] = 0; 2155 2156 block = qemu_ram_block_by_name(id); 2157 if (!block) { 2158 error_report("Can't find block %s", id); 2159 return NULL; 2160 } 2161 2162 return block; 2163 } 2164 2165 static inline void *host_from_ram_block_offset(RAMBlock *block, 2166 ram_addr_t offset) 2167 { 2168 if (!offset_in_ramblock(block, offset)) { 2169 return NULL; 2170 } 2171 2172 return block->host + offset; 2173 } 2174 2175 /* 2176 * If a page (or a whole RDMA chunk) has been 2177 * determined to be zero, then zap it. 2178 */ 2179 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size) 2180 { 2181 if (ch != 0 || !is_zero_range(host, size)) { 2182 memset(host, ch, size); 2183 } 2184 } 2185 2186 static void *do_data_decompress(void *opaque) 2187 { 2188 DecompressParam *param = opaque; 2189 unsigned long pagesize; 2190 2191 while (!quit_decomp_thread) { 2192 qemu_mutex_lock(¶m->mutex); 2193 while (!param->start && !quit_decomp_thread) { 2194 qemu_cond_wait(¶m->cond, ¶m->mutex); 2195 pagesize = TARGET_PAGE_SIZE; 2196 if (!quit_decomp_thread) { 2197 /* uncompress() will return failed in some case, especially 2198 * when the page is dirted when doing the compression, it's 2199 * not a problem because the dirty page will be retransferred 2200 * and uncompress() won't break the data in other pages. 2201 */ 2202 uncompress((Bytef *)param->des, &pagesize, 2203 (const Bytef *)param->compbuf, param->len); 2204 } 2205 param->start = false; 2206 } 2207 qemu_mutex_unlock(¶m->mutex); 2208 } 2209 2210 return NULL; 2211 } 2212 2213 void migrate_decompress_threads_create(void) 2214 { 2215 int i, thread_count; 2216 2217 thread_count = migrate_decompress_threads(); 2218 decompress_threads = g_new0(QemuThread, thread_count); 2219 decomp_param = g_new0(DecompressParam, thread_count); 2220 quit_decomp_thread = false; 2221 for (i = 0; i < thread_count; i++) { 2222 qemu_mutex_init(&decomp_param[i].mutex); 2223 qemu_cond_init(&decomp_param[i].cond); 2224 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE)); 2225 qemu_thread_create(decompress_threads + i, "decompress", 2226 do_data_decompress, decomp_param + i, 2227 QEMU_THREAD_JOINABLE); 2228 } 2229 } 2230 2231 void migrate_decompress_threads_join(void) 2232 { 2233 int i, thread_count; 2234 2235 quit_decomp_thread = true; 2236 thread_count = migrate_decompress_threads(); 2237 for (i = 0; i < thread_count; i++) { 2238 qemu_mutex_lock(&decomp_param[i].mutex); 2239 qemu_cond_signal(&decomp_param[i].cond); 2240 qemu_mutex_unlock(&decomp_param[i].mutex); 2241 } 2242 for (i = 0; i < thread_count; i++) { 2243 qemu_thread_join(decompress_threads + i); 2244 qemu_mutex_destroy(&decomp_param[i].mutex); 2245 qemu_cond_destroy(&decomp_param[i].cond); 2246 g_free(decomp_param[i].compbuf); 2247 } 2248 g_free(decompress_threads); 2249 g_free(decomp_param); 2250 decompress_threads = NULL; 2251 decomp_param = NULL; 2252 } 2253 2254 static void decompress_data_with_multi_threads(QEMUFile *f, 2255 void *host, int len) 2256 { 2257 int idx, thread_count; 2258 2259 thread_count = migrate_decompress_threads(); 2260 while (true) { 2261 for (idx = 0; idx < thread_count; idx++) { 2262 if (!decomp_param[idx].start) { 2263 qemu_get_buffer(f, decomp_param[idx].compbuf, len); 2264 decomp_param[idx].des = host; 2265 decomp_param[idx].len = len; 2266 start_decompression(&decomp_param[idx]); 2267 break; 2268 } 2269 } 2270 if (idx < thread_count) { 2271 break; 2272 } 2273 } 2274 } 2275 2276 /* 2277 * Allocate data structures etc needed by incoming migration with postcopy-ram 2278 * postcopy-ram's similarly names postcopy_ram_incoming_init does the work 2279 */ 2280 int ram_postcopy_incoming_init(MigrationIncomingState *mis) 2281 { 2282 size_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS; 2283 2284 return postcopy_ram_incoming_init(mis, ram_pages); 2285 } 2286 2287 /* 2288 * Called in postcopy mode by ram_load(). 2289 * rcu_read_lock is taken prior to this being called. 2290 */ 2291 static int ram_load_postcopy(QEMUFile *f) 2292 { 2293 int flags = 0, ret = 0; 2294 bool place_needed = false; 2295 bool matching_page_sizes = qemu_host_page_size == TARGET_PAGE_SIZE; 2296 MigrationIncomingState *mis = migration_incoming_get_current(); 2297 /* Temporary page that is later 'placed' */ 2298 void *postcopy_host_page = postcopy_get_tmp_page(mis); 2299 void *last_host = NULL; 2300 bool all_zero = false; 2301 2302 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) { 2303 ram_addr_t addr; 2304 void *host = NULL; 2305 void *page_buffer = NULL; 2306 void *place_source = NULL; 2307 uint8_t ch; 2308 2309 addr = qemu_get_be64(f); 2310 flags = addr & ~TARGET_PAGE_MASK; 2311 addr &= TARGET_PAGE_MASK; 2312 2313 trace_ram_load_postcopy_loop((uint64_t)addr, flags); 2314 place_needed = false; 2315 if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) { 2316 RAMBlock *block = ram_block_from_stream(f, flags); 2317 2318 host = host_from_ram_block_offset(block, addr); 2319 if (!host) { 2320 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr); 2321 ret = -EINVAL; 2322 break; 2323 } 2324 page_buffer = host; 2325 /* 2326 * Postcopy requires that we place whole host pages atomically. 2327 * To make it atomic, the data is read into a temporary page 2328 * that's moved into place later. 2329 * The migration protocol uses, possibly smaller, target-pages 2330 * however the source ensures it always sends all the components 2331 * of a host page in order. 2332 */ 2333 page_buffer = postcopy_host_page + 2334 ((uintptr_t)host & ~qemu_host_page_mask); 2335 /* If all TP are zero then we can optimise the place */ 2336 if (!((uintptr_t)host & ~qemu_host_page_mask)) { 2337 all_zero = true; 2338 } else { 2339 /* not the 1st TP within the HP */ 2340 if (host != (last_host + TARGET_PAGE_SIZE)) { 2341 error_report("Non-sequential target page %p/%p", 2342 host, last_host); 2343 ret = -EINVAL; 2344 break; 2345 } 2346 } 2347 2348 2349 /* 2350 * If it's the last part of a host page then we place the host 2351 * page 2352 */ 2353 place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) & 2354 ~qemu_host_page_mask) == 0; 2355 place_source = postcopy_host_page; 2356 } 2357 last_host = host; 2358 2359 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) { 2360 case RAM_SAVE_FLAG_COMPRESS: 2361 ch = qemu_get_byte(f); 2362 memset(page_buffer, ch, TARGET_PAGE_SIZE); 2363 if (ch) { 2364 all_zero = false; 2365 } 2366 break; 2367 2368 case RAM_SAVE_FLAG_PAGE: 2369 all_zero = false; 2370 if (!place_needed || !matching_page_sizes) { 2371 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE); 2372 } else { 2373 /* Avoids the qemu_file copy during postcopy, which is 2374 * going to do a copy later; can only do it when we 2375 * do this read in one go (matching page sizes) 2376 */ 2377 qemu_get_buffer_in_place(f, (uint8_t **)&place_source, 2378 TARGET_PAGE_SIZE); 2379 } 2380 break; 2381 case RAM_SAVE_FLAG_EOS: 2382 /* normal exit */ 2383 break; 2384 default: 2385 error_report("Unknown combination of migration flags: %#x" 2386 " (postcopy mode)", flags); 2387 ret = -EINVAL; 2388 } 2389 2390 if (place_needed) { 2391 /* This gets called at the last target page in the host page */ 2392 if (all_zero) { 2393 ret = postcopy_place_page_zero(mis, 2394 host + TARGET_PAGE_SIZE - 2395 qemu_host_page_size); 2396 } else { 2397 ret = postcopy_place_page(mis, host + TARGET_PAGE_SIZE - 2398 qemu_host_page_size, 2399 place_source); 2400 } 2401 } 2402 if (!ret) { 2403 ret = qemu_file_get_error(f); 2404 } 2405 } 2406 2407 return ret; 2408 } 2409 2410 static int ram_load(QEMUFile *f, void *opaque, int version_id) 2411 { 2412 int flags = 0, ret = 0; 2413 static uint64_t seq_iter; 2414 int len = 0; 2415 /* 2416 * If system is running in postcopy mode, page inserts to host memory must 2417 * be atomic 2418 */ 2419 bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING; 2420 2421 seq_iter++; 2422 2423 if (version_id != 4) { 2424 ret = -EINVAL; 2425 } 2426 2427 /* This RCU critical section can be very long running. 2428 * When RCU reclaims in the code start to become numerous, 2429 * it will be necessary to reduce the granularity of this 2430 * critical section. 2431 */ 2432 rcu_read_lock(); 2433 2434 if (postcopy_running) { 2435 ret = ram_load_postcopy(f); 2436 } 2437 2438 while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) { 2439 ram_addr_t addr, total_ram_bytes; 2440 void *host = NULL; 2441 uint8_t ch; 2442 2443 addr = qemu_get_be64(f); 2444 flags = addr & ~TARGET_PAGE_MASK; 2445 addr &= TARGET_PAGE_MASK; 2446 2447 if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE | 2448 RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) { 2449 RAMBlock *block = ram_block_from_stream(f, flags); 2450 2451 host = host_from_ram_block_offset(block, addr); 2452 if (!host) { 2453 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr); 2454 ret = -EINVAL; 2455 break; 2456 } 2457 } 2458 2459 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) { 2460 case RAM_SAVE_FLAG_MEM_SIZE: 2461 /* Synchronize RAM block list */ 2462 total_ram_bytes = addr; 2463 while (!ret && total_ram_bytes) { 2464 RAMBlock *block; 2465 char id[256]; 2466 ram_addr_t length; 2467 2468 len = qemu_get_byte(f); 2469 qemu_get_buffer(f, (uint8_t *)id, len); 2470 id[len] = 0; 2471 length = qemu_get_be64(f); 2472 2473 block = qemu_ram_block_by_name(id); 2474 if (block) { 2475 if (length != block->used_length) { 2476 Error *local_err = NULL; 2477 2478 ret = qemu_ram_resize(block->offset, length, 2479 &local_err); 2480 if (local_err) { 2481 error_report_err(local_err); 2482 } 2483 } 2484 ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG, 2485 block->idstr); 2486 } else { 2487 error_report("Unknown ramblock \"%s\", cannot " 2488 "accept migration", id); 2489 ret = -EINVAL; 2490 } 2491 2492 total_ram_bytes -= length; 2493 } 2494 break; 2495 2496 case RAM_SAVE_FLAG_COMPRESS: 2497 ch = qemu_get_byte(f); 2498 ram_handle_compressed(host, ch, TARGET_PAGE_SIZE); 2499 break; 2500 2501 case RAM_SAVE_FLAG_PAGE: 2502 qemu_get_buffer(f, host, TARGET_PAGE_SIZE); 2503 break; 2504 2505 case RAM_SAVE_FLAG_COMPRESS_PAGE: 2506 len = qemu_get_be32(f); 2507 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) { 2508 error_report("Invalid compressed data length: %d", len); 2509 ret = -EINVAL; 2510 break; 2511 } 2512 decompress_data_with_multi_threads(f, host, len); 2513 break; 2514 2515 case RAM_SAVE_FLAG_XBZRLE: 2516 if (load_xbzrle(f, addr, host) < 0) { 2517 error_report("Failed to decompress XBZRLE page at " 2518 RAM_ADDR_FMT, addr); 2519 ret = -EINVAL; 2520 break; 2521 } 2522 break; 2523 case RAM_SAVE_FLAG_EOS: 2524 /* normal exit */ 2525 break; 2526 default: 2527 if (flags & RAM_SAVE_FLAG_HOOK) { 2528 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL); 2529 } else { 2530 error_report("Unknown combination of migration flags: %#x", 2531 flags); 2532 ret = -EINVAL; 2533 } 2534 } 2535 if (!ret) { 2536 ret = qemu_file_get_error(f); 2537 } 2538 } 2539 2540 rcu_read_unlock(); 2541 DPRINTF("Completed load of VM with exit code %d seq iteration " 2542 "%" PRIu64 "\n", ret, seq_iter); 2543 return ret; 2544 } 2545 2546 static SaveVMHandlers savevm_ram_handlers = { 2547 .save_live_setup = ram_save_setup, 2548 .save_live_iterate = ram_save_iterate, 2549 .save_live_complete_postcopy = ram_save_complete, 2550 .save_live_complete_precopy = ram_save_complete, 2551 .save_live_pending = ram_save_pending, 2552 .load_state = ram_load, 2553 .cleanup = ram_migration_cleanup, 2554 }; 2555 2556 void ram_mig_init(void) 2557 { 2558 qemu_mutex_init(&XBZRLE.lock); 2559 register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL); 2560 } 2561