1 /* 2 * QEMU System Emulator 3 * 4 * Copyright (c) 2003-2008 Fabrice Bellard 5 * Copyright (c) 2011-2015 Red Hat Inc 6 * 7 * Authors: 8 * Juan Quintela <quintela@redhat.com> 9 * 10 * Permission is hereby granted, free of charge, to any person obtaining a copy 11 * of this software and associated documentation files (the "Software"), to deal 12 * in the Software without restriction, including without limitation the rights 13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 14 * copies of the Software, and to permit persons to whom the Software is 15 * furnished to do so, subject to the following conditions: 16 * 17 * The above copyright notice and this permission notice shall be included in 18 * all copies or substantial portions of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 26 * THE SOFTWARE. 27 */ 28 #include "qemu/osdep.h" 29 #include "qemu-common.h" 30 #include "cpu.h" 31 #include <zlib.h> 32 #include "qapi-event.h" 33 #include "qemu/cutils.h" 34 #include "qemu/bitops.h" 35 #include "qemu/bitmap.h" 36 #include "qemu/timer.h" 37 #include "qemu/main-loop.h" 38 #include "migration/migration.h" 39 #include "migration/postcopy-ram.h" 40 #include "exec/address-spaces.h" 41 #include "migration/page_cache.h" 42 #include "qemu/error-report.h" 43 #include "trace.h" 44 #include "exec/ram_addr.h" 45 #include "qemu/rcu_queue.h" 46 #include "migration/colo.h" 47 48 /***********************************************************/ 49 /* ram save/restore */ 50 51 #define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */ 52 #define RAM_SAVE_FLAG_COMPRESS 0x02 53 #define RAM_SAVE_FLAG_MEM_SIZE 0x04 54 #define RAM_SAVE_FLAG_PAGE 0x08 55 #define RAM_SAVE_FLAG_EOS 0x10 56 #define RAM_SAVE_FLAG_CONTINUE 0x20 57 #define RAM_SAVE_FLAG_XBZRLE 0x40 58 /* 0x80 is reserved in migration.h start with 0x100 next */ 59 #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100 60 61 static uint8_t *ZERO_TARGET_PAGE; 62 63 static inline bool is_zero_range(uint8_t *p, uint64_t size) 64 { 65 return buffer_is_zero(p, size); 66 } 67 68 /* struct contains XBZRLE cache and a static page 69 used by the compression */ 70 static struct { 71 /* buffer used for XBZRLE encoding */ 72 uint8_t *encoded_buf; 73 /* buffer for storing page content */ 74 uint8_t *current_buf; 75 /* Cache for XBZRLE, Protected by lock. */ 76 PageCache *cache; 77 QemuMutex lock; 78 } XBZRLE; 79 80 /* buffer used for XBZRLE decoding */ 81 static uint8_t *xbzrle_decoded_buf; 82 83 static void XBZRLE_cache_lock(void) 84 { 85 if (migrate_use_xbzrle()) 86 qemu_mutex_lock(&XBZRLE.lock); 87 } 88 89 static void XBZRLE_cache_unlock(void) 90 { 91 if (migrate_use_xbzrle()) 92 qemu_mutex_unlock(&XBZRLE.lock); 93 } 94 95 /** 96 * xbzrle_cache_resize: resize the xbzrle cache 97 * 98 * This function is called from qmp_migrate_set_cache_size in main 99 * thread, possibly while a migration is in progress. A running 100 * migration may be using the cache and might finish during this call, 101 * hence changes to the cache are protected by XBZRLE.lock(). 102 * 103 * Returns the new_size or negative in case of error. 
104 * 105 * @new_size: new cache size 106 */ 107 int64_t xbzrle_cache_resize(int64_t new_size) 108 { 109 PageCache *new_cache; 110 int64_t ret; 111 112 if (new_size < TARGET_PAGE_SIZE) { 113 return -1; 114 } 115 116 XBZRLE_cache_lock(); 117 118 if (XBZRLE.cache != NULL) { 119 if (pow2floor(new_size) == migrate_xbzrle_cache_size()) { 120 goto out_new_size; 121 } 122 new_cache = cache_init(new_size / TARGET_PAGE_SIZE, 123 TARGET_PAGE_SIZE); 124 if (!new_cache) { 125 error_report("Error creating cache"); 126 ret = -1; 127 goto out; 128 } 129 130 cache_fini(XBZRLE.cache); 131 XBZRLE.cache = new_cache; 132 } 133 134 out_new_size: 135 ret = pow2floor(new_size); 136 out: 137 XBZRLE_cache_unlock(); 138 return ret; 139 } 140 141 struct RAMBitmap { 142 struct rcu_head rcu; 143 /* Main migration bitmap */ 144 unsigned long *bmap; 145 /* bitmap of pages that haven't been sent even once 146 * only maintained and used in postcopy at the moment 147 * where it's used to send the dirtymap at the start 148 * of the postcopy phase 149 */ 150 unsigned long *unsentmap; 151 }; 152 typedef struct RAMBitmap RAMBitmap; 153 154 /* 155 * An outstanding page request, on the source, having been received 156 * and queued 157 */ 158 struct RAMSrcPageRequest { 159 RAMBlock *rb; 160 hwaddr offset; 161 hwaddr len; 162 163 QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req; 164 }; 165 166 /* State of RAM for migration */ 167 struct RAMState { 168 /* QEMUFile used for this migration */ 169 QEMUFile *f; 170 /* Last block that we have visited searching for dirty pages */ 171 RAMBlock *last_seen_block; 172 /* Last block from where we have sent data */ 173 RAMBlock *last_sent_block; 174 /* Last dirty target page we have sent */ 175 ram_addr_t last_page; 176 /* last ram version we have seen */ 177 uint32_t last_version; 178 /* We are in the first round */ 179 bool ram_bulk_stage; 180 /* How many times we have dirty too many pages */ 181 int dirty_rate_high_cnt; 182 /* How many times we have synchronized the bitmap */ 183 uint64_t bitmap_sync_count; 184 /* these variables are used for bitmap sync */ 185 /* last time we did a full bitmap_sync */ 186 int64_t time_last_bitmap_sync; 187 /* bytes transferred at start_time */ 188 uint64_t bytes_xfer_prev; 189 /* number of dirty pages since start_time */ 190 uint64_t num_dirty_pages_period; 191 /* xbzrle misses since the beginning of the period */ 192 uint64_t xbzrle_cache_miss_prev; 193 /* number of iterations at the beginning of period */ 194 uint64_t iterations_prev; 195 /* Accounting fields */ 196 /* number of zero pages. It used to be pages filled by the same char. */ 197 uint64_t zero_pages; 198 /* number of normal transferred pages */ 199 uint64_t norm_pages; 200 /* Iterations since start */ 201 uint64_t iterations; 202 /* xbzrle transmitted bytes. 
Notice that this is with 203 * compression, they can't be calculated from the pages */ 204 uint64_t xbzrle_bytes; 205 /* xbzrle transmmited pages */ 206 uint64_t xbzrle_pages; 207 /* xbzrle number of cache miss */ 208 uint64_t xbzrle_cache_miss; 209 /* xbzrle miss rate */ 210 double xbzrle_cache_miss_rate; 211 /* xbzrle number of overflows */ 212 uint64_t xbzrle_overflows; 213 /* number of dirty bits in the bitmap */ 214 uint64_t migration_dirty_pages; 215 /* total number of bytes transferred */ 216 uint64_t bytes_transferred; 217 /* number of dirtied pages in the last second */ 218 uint64_t dirty_pages_rate; 219 /* Count of requests incoming from destination */ 220 uint64_t postcopy_requests; 221 /* protects modification of the bitmap */ 222 QemuMutex bitmap_mutex; 223 /* Ram Bitmap protected by RCU */ 224 RAMBitmap *ram_bitmap; 225 /* The RAMBlock used in the last src_page_requests */ 226 RAMBlock *last_req_rb; 227 /* Queue of outstanding page requests from the destination */ 228 QemuMutex src_page_req_mutex; 229 QSIMPLEQ_HEAD(src_page_requests, RAMSrcPageRequest) src_page_requests; 230 }; 231 typedef struct RAMState RAMState; 232 233 static RAMState ram_state; 234 235 uint64_t dup_mig_pages_transferred(void) 236 { 237 return ram_state.zero_pages; 238 } 239 240 uint64_t norm_mig_pages_transferred(void) 241 { 242 return ram_state.norm_pages; 243 } 244 245 uint64_t xbzrle_mig_bytes_transferred(void) 246 { 247 return ram_state.xbzrle_bytes; 248 } 249 250 uint64_t xbzrle_mig_pages_transferred(void) 251 { 252 return ram_state.xbzrle_pages; 253 } 254 255 uint64_t xbzrle_mig_pages_cache_miss(void) 256 { 257 return ram_state.xbzrle_cache_miss; 258 } 259 260 double xbzrle_mig_cache_miss_rate(void) 261 { 262 return ram_state.xbzrle_cache_miss_rate; 263 } 264 265 uint64_t xbzrle_mig_pages_overflow(void) 266 { 267 return ram_state.xbzrle_overflows; 268 } 269 270 uint64_t ram_bytes_transferred(void) 271 { 272 return ram_state.bytes_transferred; 273 } 274 275 uint64_t ram_bytes_remaining(void) 276 { 277 return ram_state.migration_dirty_pages * TARGET_PAGE_SIZE; 278 } 279 280 uint64_t ram_dirty_sync_count(void) 281 { 282 return ram_state.bitmap_sync_count; 283 } 284 285 uint64_t ram_dirty_pages_rate(void) 286 { 287 return ram_state.dirty_pages_rate; 288 } 289 290 uint64_t ram_postcopy_requests(void) 291 { 292 return ram_state.postcopy_requests; 293 } 294 295 /* used by the search for pages to send */ 296 struct PageSearchStatus { 297 /* Current block being searched */ 298 RAMBlock *block; 299 /* Current page to search from */ 300 unsigned long page; 301 /* Set once we wrap around */ 302 bool complete_round; 303 }; 304 typedef struct PageSearchStatus PageSearchStatus; 305 306 struct CompressParam { 307 bool done; 308 bool quit; 309 QEMUFile *file; 310 QemuMutex mutex; 311 QemuCond cond; 312 RAMBlock *block; 313 ram_addr_t offset; 314 }; 315 typedef struct CompressParam CompressParam; 316 317 struct DecompressParam { 318 bool done; 319 bool quit; 320 QemuMutex mutex; 321 QemuCond cond; 322 void *des; 323 uint8_t *compbuf; 324 int len; 325 }; 326 typedef struct DecompressParam DecompressParam; 327 328 static CompressParam *comp_param; 329 static QemuThread *compress_threads; 330 /* comp_done_cond is used to wake up the migration thread when 331 * one of the compression threads has finished the compression. 332 * comp_done_lock is used to co-work with comp_done_cond. 
333 */ 334 static QemuMutex comp_done_lock; 335 static QemuCond comp_done_cond; 336 /* The empty QEMUFileOps will be used by file in CompressParam */ 337 static const QEMUFileOps empty_ops = { }; 338 339 static DecompressParam *decomp_param; 340 static QemuThread *decompress_threads; 341 static QemuMutex decomp_done_lock; 342 static QemuCond decomp_done_cond; 343 344 static int do_compress_ram_page(QEMUFile *f, RAMBlock *block, 345 ram_addr_t offset); 346 347 static void *do_data_compress(void *opaque) 348 { 349 CompressParam *param = opaque; 350 RAMBlock *block; 351 ram_addr_t offset; 352 353 qemu_mutex_lock(¶m->mutex); 354 while (!param->quit) { 355 if (param->block) { 356 block = param->block; 357 offset = param->offset; 358 param->block = NULL; 359 qemu_mutex_unlock(¶m->mutex); 360 361 do_compress_ram_page(param->file, block, offset); 362 363 qemu_mutex_lock(&comp_done_lock); 364 param->done = true; 365 qemu_cond_signal(&comp_done_cond); 366 qemu_mutex_unlock(&comp_done_lock); 367 368 qemu_mutex_lock(¶m->mutex); 369 } else { 370 qemu_cond_wait(¶m->cond, ¶m->mutex); 371 } 372 } 373 qemu_mutex_unlock(¶m->mutex); 374 375 return NULL; 376 } 377 378 static inline void terminate_compression_threads(void) 379 { 380 int idx, thread_count; 381 382 thread_count = migrate_compress_threads(); 383 384 for (idx = 0; idx < thread_count; idx++) { 385 qemu_mutex_lock(&comp_param[idx].mutex); 386 comp_param[idx].quit = true; 387 qemu_cond_signal(&comp_param[idx].cond); 388 qemu_mutex_unlock(&comp_param[idx].mutex); 389 } 390 } 391 392 void migrate_compress_threads_join(void) 393 { 394 int i, thread_count; 395 396 if (!migrate_use_compression()) { 397 return; 398 } 399 terminate_compression_threads(); 400 thread_count = migrate_compress_threads(); 401 for (i = 0; i < thread_count; i++) { 402 qemu_thread_join(compress_threads + i); 403 qemu_fclose(comp_param[i].file); 404 qemu_mutex_destroy(&comp_param[i].mutex); 405 qemu_cond_destroy(&comp_param[i].cond); 406 } 407 qemu_mutex_destroy(&comp_done_lock); 408 qemu_cond_destroy(&comp_done_cond); 409 g_free(compress_threads); 410 g_free(comp_param); 411 compress_threads = NULL; 412 comp_param = NULL; 413 } 414 415 void migrate_compress_threads_create(void) 416 { 417 int i, thread_count; 418 419 if (!migrate_use_compression()) { 420 return; 421 } 422 thread_count = migrate_compress_threads(); 423 compress_threads = g_new0(QemuThread, thread_count); 424 comp_param = g_new0(CompressParam, thread_count); 425 qemu_cond_init(&comp_done_cond); 426 qemu_mutex_init(&comp_done_lock); 427 for (i = 0; i < thread_count; i++) { 428 /* comp_param[i].file is just used as a dummy buffer to save data, 429 * set its ops to empty. 
430 */ 431 comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops); 432 comp_param[i].done = true; 433 comp_param[i].quit = false; 434 qemu_mutex_init(&comp_param[i].mutex); 435 qemu_cond_init(&comp_param[i].cond); 436 qemu_thread_create(compress_threads + i, "compress", 437 do_data_compress, comp_param + i, 438 QEMU_THREAD_JOINABLE); 439 } 440 } 441 442 /** 443 * save_page_header: write page header to wire 444 * 445 * If this is the 1st block, it also writes the block identification 446 * 447 * Returns the number of bytes written 448 * 449 * @f: QEMUFile where to send the data 450 * @block: block that contains the page we want to send 451 * @offset: offset inside the block for the page 452 * in the lower bits, it contains flags 453 */ 454 static size_t save_page_header(RAMState *rs, RAMBlock *block, ram_addr_t offset) 455 { 456 size_t size, len; 457 458 if (block == rs->last_sent_block) { 459 offset |= RAM_SAVE_FLAG_CONTINUE; 460 } 461 qemu_put_be64(rs->f, offset); 462 size = 8; 463 464 if (!(offset & RAM_SAVE_FLAG_CONTINUE)) { 465 len = strlen(block->idstr); 466 qemu_put_byte(rs->f, len); 467 qemu_put_buffer(rs->f, (uint8_t *)block->idstr, len); 468 size += 1 + len; 469 rs->last_sent_block = block; 470 } 471 return size; 472 } 473 474 /** 475 * mig_throttle_guest_down: throotle down the guest 476 * 477 * Reduce amount of guest cpu execution to hopefully slow down memory 478 * writes. If guest dirty memory rate is reduced below the rate at 479 * which we can transfer pages to the destination then we should be 480 * able to complete migration. Some workloads dirty memory way too 481 * fast and will not effectively converge, even with auto-converge. 482 */ 483 static void mig_throttle_guest_down(void) 484 { 485 MigrationState *s = migrate_get_current(); 486 uint64_t pct_initial = s->parameters.cpu_throttle_initial; 487 uint64_t pct_icrement = s->parameters.cpu_throttle_increment; 488 489 /* We have not started throttling yet. Let's start it. */ 490 if (!cpu_throttle_active()) { 491 cpu_throttle_set(pct_initial); 492 } else { 493 /* Throttling already on, just increase the rate */ 494 cpu_throttle_set(cpu_throttle_get_percentage() + pct_icrement); 495 } 496 } 497 498 /** 499 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache 500 * 501 * @rs: current RAM state 502 * @current_addr: address for the zero page 503 * 504 * Update the xbzrle cache to reflect a page that's been sent as all 0. 505 * The important thing is that a stale (not-yet-0'd) page be replaced 506 * by the new data. 507 * As a bonus, if the page wasn't in the cache it gets added so that 508 * when a small write is made into the 0'd page it gets XBZRLE sent. 
509 */ 510 static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr) 511 { 512 if (rs->ram_bulk_stage || !migrate_use_xbzrle()) { 513 return; 514 } 515 516 /* We don't care if this fails to allocate a new cache page 517 * as long as it updated an old one */ 518 cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE, 519 rs->bitmap_sync_count); 520 } 521 522 #define ENCODING_FLAG_XBZRLE 0x1 523 524 /** 525 * save_xbzrle_page: compress and send current page 526 * 527 * Returns: 1 means that we wrote the page 528 * 0 means that page is identical to the one already sent 529 * -1 means that xbzrle would be longer than normal 530 * 531 * @rs: current RAM state 532 * @current_data: pointer to the address of the page contents 533 * @current_addr: addr of the page 534 * @block: block that contains the page we want to send 535 * @offset: offset inside the block for the page 536 * @last_stage: if we are at the completion stage 537 */ 538 static int save_xbzrle_page(RAMState *rs, uint8_t **current_data, 539 ram_addr_t current_addr, RAMBlock *block, 540 ram_addr_t offset, bool last_stage) 541 { 542 int encoded_len = 0, bytes_xbzrle; 543 uint8_t *prev_cached_page; 544 545 if (!cache_is_cached(XBZRLE.cache, current_addr, rs->bitmap_sync_count)) { 546 rs->xbzrle_cache_miss++; 547 if (!last_stage) { 548 if (cache_insert(XBZRLE.cache, current_addr, *current_data, 549 rs->bitmap_sync_count) == -1) { 550 return -1; 551 } else { 552 /* update *current_data when the page has been 553 inserted into cache */ 554 *current_data = get_cached_data(XBZRLE.cache, current_addr); 555 } 556 } 557 return -1; 558 } 559 560 prev_cached_page = get_cached_data(XBZRLE.cache, current_addr); 561 562 /* save current buffer into memory */ 563 memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE); 564 565 /* XBZRLE encoding (if there is no overflow) */ 566 encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf, 567 TARGET_PAGE_SIZE, XBZRLE.encoded_buf, 568 TARGET_PAGE_SIZE); 569 if (encoded_len == 0) { 570 trace_save_xbzrle_page_skipping(); 571 return 0; 572 } else if (encoded_len == -1) { 573 trace_save_xbzrle_page_overflow(); 574 rs->xbzrle_overflows++; 575 /* update data in the cache */ 576 if (!last_stage) { 577 memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE); 578 *current_data = prev_cached_page; 579 } 580 return -1; 581 } 582 583 /* we need to update the data in the cache, in order to get the same data */ 584 if (!last_stage) { 585 memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE); 586 } 587 588 /* Send XBZRLE based compressed page */ 589 bytes_xbzrle = save_page_header(rs, block, 590 offset | RAM_SAVE_FLAG_XBZRLE); 591 qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE); 592 qemu_put_be16(rs->f, encoded_len); 593 qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len); 594 bytes_xbzrle += encoded_len + 1 + 2; 595 rs->xbzrle_pages++; 596 rs->xbzrle_bytes += bytes_xbzrle; 597 rs->bytes_transferred += bytes_xbzrle; 598 599 return 1; 600 } 601 602 /** 603 * migration_bitmap_find_dirty: find the next dirty page from start 604 * 605 * Called with rcu_read_lock() to protect migration_bitmap 606 * 607 * Returns the byte offset within memory region of the start of a dirty page 608 * 609 * @rs: current RAM state 610 * @rb: RAMBlock where to search for dirty pages 611 * @start: page where we start the search 612 */ 613 static inline 614 unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb, 615 unsigned long start) 616 { 617 unsigned long base = rb->offset >> 
TARGET_PAGE_BITS; 618 unsigned long nr = base + start; 619 uint64_t rb_size = rb->used_length; 620 unsigned long size = base + (rb_size >> TARGET_PAGE_BITS); 621 unsigned long *bitmap; 622 623 unsigned long next; 624 625 bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; 626 if (rs->ram_bulk_stage && nr > base) { 627 next = nr + 1; 628 } else { 629 next = find_next_bit(bitmap, size, nr); 630 } 631 632 return next - base; 633 } 634 635 static inline bool migration_bitmap_clear_dirty(RAMState *rs, 636 RAMBlock *rb, 637 unsigned long page) 638 { 639 bool ret; 640 unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; 641 unsigned long nr = (rb->offset >> TARGET_PAGE_BITS) + page; 642 643 ret = test_and_clear_bit(nr, bitmap); 644 645 if (ret) { 646 rs->migration_dirty_pages--; 647 } 648 return ret; 649 } 650 651 static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb, 652 ram_addr_t start, ram_addr_t length) 653 { 654 unsigned long *bitmap; 655 bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; 656 rs->migration_dirty_pages += 657 cpu_physical_memory_sync_dirty_bitmap(bitmap, rb, start, length, 658 &rs->num_dirty_pages_period); 659 } 660 661 /** 662 * ram_pagesize_summary: calculate all the pagesizes of a VM 663 * 664 * Returns a summary bitmap of the page sizes of all RAMBlocks 665 * 666 * For VMs with just normal pages this is equivalent to the host page 667 * size. If it's got some huge pages then it's the OR of all the 668 * different page sizes. 669 */ 670 uint64_t ram_pagesize_summary(void) 671 { 672 RAMBlock *block; 673 uint64_t summary = 0; 674 675 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 676 summary |= block->page_size; 677 } 678 679 return summary; 680 } 681 682 static void migration_bitmap_sync(RAMState *rs) 683 { 684 RAMBlock *block; 685 int64_t end_time; 686 uint64_t bytes_xfer_now; 687 688 rs->bitmap_sync_count++; 689 690 if (!rs->bytes_xfer_prev) { 691 rs->bytes_xfer_prev = ram_bytes_transferred(); 692 } 693 694 if (!rs->time_last_bitmap_sync) { 695 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 696 } 697 698 trace_migration_bitmap_sync_start(); 699 memory_global_dirty_log_sync(); 700 701 qemu_mutex_lock(&rs->bitmap_mutex); 702 rcu_read_lock(); 703 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 704 migration_bitmap_sync_range(rs, block, 0, block->used_length); 705 } 706 rcu_read_unlock(); 707 qemu_mutex_unlock(&rs->bitmap_mutex); 708 709 trace_migration_bitmap_sync_end(rs->num_dirty_pages_period); 710 711 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 712 713 /* more than 1 second = 1000 millisecons */ 714 if (end_time > rs->time_last_bitmap_sync + 1000) { 715 if (migrate_auto_converge()) { 716 /* The following detection logic can be refined later. For now: 717 Check to see if the dirtied bytes is 50% more than the approx. 718 amount of bytes that just got transferred since the last time we 719 were in this routine. 
If that happens twice, start or increase 720 throttling */ 721 bytes_xfer_now = ram_bytes_transferred(); 722 723 if (rs->dirty_pages_rate && 724 (rs->num_dirty_pages_period * TARGET_PAGE_SIZE > 725 (bytes_xfer_now - rs->bytes_xfer_prev) / 2) && 726 (rs->dirty_rate_high_cnt++ >= 2)) { 727 trace_migration_throttle(); 728 rs->dirty_rate_high_cnt = 0; 729 mig_throttle_guest_down(); 730 } 731 rs->bytes_xfer_prev = bytes_xfer_now; 732 } 733 734 if (migrate_use_xbzrle()) { 735 if (rs->iterations_prev != rs->iterations) { 736 rs->xbzrle_cache_miss_rate = 737 (double)(rs->xbzrle_cache_miss - 738 rs->xbzrle_cache_miss_prev) / 739 (rs->iterations - rs->iterations_prev); 740 } 741 rs->iterations_prev = rs->iterations; 742 rs->xbzrle_cache_miss_prev = rs->xbzrle_cache_miss; 743 } 744 rs->dirty_pages_rate = rs->num_dirty_pages_period * 1000 745 / (end_time - rs->time_last_bitmap_sync); 746 rs->time_last_bitmap_sync = end_time; 747 rs->num_dirty_pages_period = 0; 748 } 749 if (migrate_use_events()) { 750 qapi_event_send_migration_pass(rs->bitmap_sync_count, NULL); 751 } 752 } 753 754 /** 755 * save_zero_page: send the zero page to the stream 756 * 757 * Returns the number of pages written. 758 * 759 * @rs: current RAM state 760 * @block: block that contains the page we want to send 761 * @offset: offset inside the block for the page 762 * @p: pointer to the page 763 */ 764 static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset, 765 uint8_t *p) 766 { 767 int pages = -1; 768 769 if (is_zero_range(p, TARGET_PAGE_SIZE)) { 770 rs->zero_pages++; 771 rs->bytes_transferred += 772 save_page_header(rs, block, offset | RAM_SAVE_FLAG_COMPRESS); 773 qemu_put_byte(rs->f, 0); 774 rs->bytes_transferred += 1; 775 pages = 1; 776 } 777 778 return pages; 779 } 780 781 static void ram_release_pages(const char *rbname, uint64_t offset, int pages) 782 { 783 if (!migrate_release_ram() || !migration_in_postcopy()) { 784 return; 785 } 786 787 ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS); 788 } 789 790 /** 791 * ram_save_page: send the given page to the stream 792 * 793 * Returns the number of pages written. 794 * < 0 - error 795 * >=0 - Number of pages written - this might legally be 0 796 * if xbzrle noticed the page was the same. 
797 * 798 * @rs: current RAM state 799 * @block: block that contains the page we want to send 800 * @offset: offset inside the block for the page 801 * @last_stage: if we are at the completion stage 802 */ 803 static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage) 804 { 805 int pages = -1; 806 uint64_t bytes_xmit; 807 ram_addr_t current_addr; 808 uint8_t *p; 809 int ret; 810 bool send_async = true; 811 RAMBlock *block = pss->block; 812 ram_addr_t offset = pss->page << TARGET_PAGE_BITS; 813 814 p = block->host + offset; 815 816 /* In doubt sent page as normal */ 817 bytes_xmit = 0; 818 ret = ram_control_save_page(rs->f, block->offset, 819 offset, TARGET_PAGE_SIZE, &bytes_xmit); 820 if (bytes_xmit) { 821 rs->bytes_transferred += bytes_xmit; 822 pages = 1; 823 } 824 825 XBZRLE_cache_lock(); 826 827 current_addr = block->offset + offset; 828 829 if (ret != RAM_SAVE_CONTROL_NOT_SUPP) { 830 if (ret != RAM_SAVE_CONTROL_DELAYED) { 831 if (bytes_xmit > 0) { 832 rs->norm_pages++; 833 } else if (bytes_xmit == 0) { 834 rs->zero_pages++; 835 } 836 } 837 } else { 838 pages = save_zero_page(rs, block, offset, p); 839 if (pages > 0) { 840 /* Must let xbzrle know, otherwise a previous (now 0'd) cached 841 * page would be stale 842 */ 843 xbzrle_cache_zero_page(rs, current_addr); 844 ram_release_pages(block->idstr, offset, pages); 845 } else if (!rs->ram_bulk_stage && 846 !migration_in_postcopy() && migrate_use_xbzrle()) { 847 pages = save_xbzrle_page(rs, &p, current_addr, block, 848 offset, last_stage); 849 if (!last_stage) { 850 /* Can't send this cached data async, since the cache page 851 * might get updated before it gets to the wire 852 */ 853 send_async = false; 854 } 855 } 856 } 857 858 /* XBZRLE overflow or normal page */ 859 if (pages == -1) { 860 rs->bytes_transferred += save_page_header(rs, block, 861 offset | RAM_SAVE_FLAG_PAGE); 862 if (send_async) { 863 qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE, 864 migrate_release_ram() & 865 migration_in_postcopy()); 866 } else { 867 qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE); 868 } 869 rs->bytes_transferred += TARGET_PAGE_SIZE; 870 pages = 1; 871 rs->norm_pages++; 872 } 873 874 XBZRLE_cache_unlock(); 875 876 return pages; 877 } 878 879 static int do_compress_ram_page(QEMUFile *f, RAMBlock *block, 880 ram_addr_t offset) 881 { 882 RAMState *rs = &ram_state; 883 int bytes_sent, blen; 884 uint8_t *p = block->host + (offset & TARGET_PAGE_MASK); 885 886 bytes_sent = save_page_header(rs, block, offset | 887 RAM_SAVE_FLAG_COMPRESS_PAGE); 888 blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE, 889 migrate_compress_level()); 890 if (blen < 0) { 891 bytes_sent = 0; 892 qemu_file_set_error(migrate_get_current()->to_dst_file, blen); 893 error_report("compressed data failed!"); 894 } else { 895 bytes_sent += blen; 896 ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1); 897 } 898 899 return bytes_sent; 900 } 901 902 static void flush_compressed_data(RAMState *rs) 903 { 904 int idx, len, thread_count; 905 906 if (!migrate_use_compression()) { 907 return; 908 } 909 thread_count = migrate_compress_threads(); 910 911 qemu_mutex_lock(&comp_done_lock); 912 for (idx = 0; idx < thread_count; idx++) { 913 while (!comp_param[idx].done) { 914 qemu_cond_wait(&comp_done_cond, &comp_done_lock); 915 } 916 } 917 qemu_mutex_unlock(&comp_done_lock); 918 919 for (idx = 0; idx < thread_count; idx++) { 920 qemu_mutex_lock(&comp_param[idx].mutex); 921 if (!comp_param[idx].quit) { 922 len = qemu_put_qemu_file(rs->f, comp_param[idx].file); 923 
rs->bytes_transferred += len; 924 } 925 qemu_mutex_unlock(&comp_param[idx].mutex); 926 } 927 } 928 929 static inline void set_compress_params(CompressParam *param, RAMBlock *block, 930 ram_addr_t offset) 931 { 932 param->block = block; 933 param->offset = offset; 934 } 935 936 static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block, 937 ram_addr_t offset) 938 { 939 int idx, thread_count, bytes_xmit = -1, pages = -1; 940 941 thread_count = migrate_compress_threads(); 942 qemu_mutex_lock(&comp_done_lock); 943 while (true) { 944 for (idx = 0; idx < thread_count; idx++) { 945 if (comp_param[idx].done) { 946 comp_param[idx].done = false; 947 bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file); 948 qemu_mutex_lock(&comp_param[idx].mutex); 949 set_compress_params(&comp_param[idx], block, offset); 950 qemu_cond_signal(&comp_param[idx].cond); 951 qemu_mutex_unlock(&comp_param[idx].mutex); 952 pages = 1; 953 rs->norm_pages++; 954 rs->bytes_transferred += bytes_xmit; 955 break; 956 } 957 } 958 if (pages > 0) { 959 break; 960 } else { 961 qemu_cond_wait(&comp_done_cond, &comp_done_lock); 962 } 963 } 964 qemu_mutex_unlock(&comp_done_lock); 965 966 return pages; 967 } 968 969 /** 970 * ram_save_compressed_page: compress the given page and send it to the stream 971 * 972 * Returns the number of pages written. 973 * 974 * @rs: current RAM state 975 * @block: block that contains the page we want to send 976 * @offset: offset inside the block for the page 977 * @last_stage: if we are at the completion stage 978 */ 979 static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss, 980 bool last_stage) 981 { 982 int pages = -1; 983 uint64_t bytes_xmit = 0; 984 uint8_t *p; 985 int ret, blen; 986 RAMBlock *block = pss->block; 987 ram_addr_t offset = pss->page << TARGET_PAGE_BITS; 988 989 p = block->host + offset; 990 991 ret = ram_control_save_page(rs->f, block->offset, 992 offset, TARGET_PAGE_SIZE, &bytes_xmit); 993 if (bytes_xmit) { 994 rs->bytes_transferred += bytes_xmit; 995 pages = 1; 996 } 997 if (ret != RAM_SAVE_CONTROL_NOT_SUPP) { 998 if (ret != RAM_SAVE_CONTROL_DELAYED) { 999 if (bytes_xmit > 0) { 1000 rs->norm_pages++; 1001 } else if (bytes_xmit == 0) { 1002 rs->zero_pages++; 1003 } 1004 } 1005 } else { 1006 /* When starting the process of a new block, the first page of 1007 * the block should be sent out before other pages in the same 1008 * block, and all the pages in last block should have been sent 1009 * out, keeping this order is important, because the 'cont' flag 1010 * is used to avoid resending the block name. 
1011 */ 1012 if (block != rs->last_sent_block) { 1013 flush_compressed_data(rs); 1014 pages = save_zero_page(rs, block, offset, p); 1015 if (pages == -1) { 1016 /* Make sure the first page is sent out before other pages */ 1017 bytes_xmit = save_page_header(rs, block, offset | 1018 RAM_SAVE_FLAG_COMPRESS_PAGE); 1019 blen = qemu_put_compression_data(rs->f, p, TARGET_PAGE_SIZE, 1020 migrate_compress_level()); 1021 if (blen > 0) { 1022 rs->bytes_transferred += bytes_xmit + blen; 1023 rs->norm_pages++; 1024 pages = 1; 1025 } else { 1026 qemu_file_set_error(rs->f, blen); 1027 error_report("compressed data failed!"); 1028 } 1029 } 1030 if (pages > 0) { 1031 ram_release_pages(block->idstr, offset, pages); 1032 } 1033 } else { 1034 pages = save_zero_page(rs, block, offset, p); 1035 if (pages == -1) { 1036 pages = compress_page_with_multi_thread(rs, block, offset); 1037 } else { 1038 ram_release_pages(block->idstr, offset, pages); 1039 } 1040 } 1041 } 1042 1043 return pages; 1044 } 1045 1046 /** 1047 * find_dirty_block: find the next dirty page and update any state 1048 * associated with the search process. 1049 * 1050 * Returns if a page is found 1051 * 1052 * @rs: current RAM state 1053 * @pss: data about the state of the current dirty page scan 1054 * @again: set to false if the search has scanned the whole of RAM 1055 */ 1056 static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again) 1057 { 1058 pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page); 1059 if (pss->complete_round && pss->block == rs->last_seen_block && 1060 pss->page >= rs->last_page) { 1061 /* 1062 * We've been once around the RAM and haven't found anything. 1063 * Give up. 1064 */ 1065 *again = false; 1066 return false; 1067 } 1068 if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) { 1069 /* Didn't find anything in this RAM Block */ 1070 pss->page = 0; 1071 pss->block = QLIST_NEXT_RCU(pss->block, next); 1072 if (!pss->block) { 1073 /* Hit the end of the list */ 1074 pss->block = QLIST_FIRST_RCU(&ram_list.blocks); 1075 /* Flag that we've looped */ 1076 pss->complete_round = true; 1077 rs->ram_bulk_stage = false; 1078 if (migrate_use_xbzrle()) { 1079 /* If xbzrle is on, stop using the data compression at this 1080 * point. In theory, xbzrle can do better than compression. 1081 */ 1082 flush_compressed_data(rs); 1083 } 1084 } 1085 /* Didn't find anything this time, but try again on the new block */ 1086 *again = true; 1087 return false; 1088 } else { 1089 /* Can go around again, but... 
*/ 1090 *again = true; 1091 /* We've found something so probably don't need to */ 1092 return true; 1093 } 1094 } 1095 1096 /** 1097 * unqueue_page: gets a page of the queue 1098 * 1099 * Helper for 'get_queued_page' - gets a page off the queue 1100 * 1101 * Returns the block of the page (or NULL if none available) 1102 * 1103 * @rs: current RAM state 1104 * @offset: used to return the offset within the RAMBlock 1105 */ 1106 static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset) 1107 { 1108 RAMBlock *block = NULL; 1109 1110 qemu_mutex_lock(&rs->src_page_req_mutex); 1111 if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) { 1112 struct RAMSrcPageRequest *entry = 1113 QSIMPLEQ_FIRST(&rs->src_page_requests); 1114 block = entry->rb; 1115 *offset = entry->offset; 1116 1117 if (entry->len > TARGET_PAGE_SIZE) { 1118 entry->len -= TARGET_PAGE_SIZE; 1119 entry->offset += TARGET_PAGE_SIZE; 1120 } else { 1121 memory_region_unref(block->mr); 1122 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req); 1123 g_free(entry); 1124 } 1125 } 1126 qemu_mutex_unlock(&rs->src_page_req_mutex); 1127 1128 return block; 1129 } 1130 1131 /** 1132 * get_queued_page: unqueue a page from the postocpy requests 1133 * 1134 * Skips pages that are already sent (!dirty) 1135 * 1136 * Returns if a queued page is found 1137 * 1138 * @rs: current RAM state 1139 * @pss: data about the state of the current dirty page scan 1140 */ 1141 static bool get_queued_page(RAMState *rs, PageSearchStatus *pss) 1142 { 1143 RAMBlock *block; 1144 ram_addr_t offset; 1145 bool dirty; 1146 1147 do { 1148 block = unqueue_page(rs, &offset); 1149 /* 1150 * We're sending this page, and since it's postcopy nothing else 1151 * will dirty it, and we must make sure it doesn't get sent again 1152 * even if this queue request was received after the background 1153 * search already sent it. 1154 */ 1155 if (block) { 1156 unsigned long *bitmap; 1157 unsigned long page; 1158 1159 bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; 1160 page = (block->offset + offset) >> TARGET_PAGE_BITS; 1161 dirty = test_bit(page, bitmap); 1162 if (!dirty) { 1163 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset, 1164 page, 1165 test_bit(page, 1166 atomic_rcu_read(&rs->ram_bitmap)->unsentmap)); 1167 } else { 1168 trace_get_queued_page(block->idstr, (uint64_t)offset, page); 1169 } 1170 } 1171 1172 } while (block && !dirty); 1173 1174 if (block) { 1175 /* 1176 * As soon as we start servicing pages out of order, then we have 1177 * to kill the bulk stage, since the bulk stage assumes 1178 * in (migration_bitmap_find_and_reset_dirty) that every page is 1179 * dirty, that's no longer true. 1180 */ 1181 rs->ram_bulk_stage = false; 1182 1183 /* 1184 * We want the background search to continue from the queued page 1185 * since the guest is likely to want other pages near to the page 1186 * it just requested. 1187 */ 1188 pss->block = block; 1189 pss->page = offset >> TARGET_PAGE_BITS; 1190 } 1191 1192 return !!block; 1193 } 1194 1195 /** 1196 * migration_page_queue_free: drop any remaining pages in the ram 1197 * request queue 1198 * 1199 * It should be empty at the end anyway, but in error cases there may 1200 * be some left. in case that there is any page left, we drop it. 1201 * 1202 */ 1203 void migration_page_queue_free(void) 1204 { 1205 struct RAMSrcPageRequest *mspr, *next_mspr; 1206 RAMState *rs = &ram_state; 1207 /* This queue generally should be empty - but in the case of a failed 1208 * migration might have some droppings in. 
1209 */ 1210 rcu_read_lock(); 1211 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) { 1212 memory_region_unref(mspr->rb->mr); 1213 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req); 1214 g_free(mspr); 1215 } 1216 rcu_read_unlock(); 1217 } 1218 1219 /** 1220 * ram_save_queue_pages: queue the page for transmission 1221 * 1222 * A request from postcopy destination for example. 1223 * 1224 * Returns zero on success or negative on error 1225 * 1226 * @rbname: Name of the RAMBLock of the request. NULL means the 1227 * same that last one. 1228 * @start: starting address from the start of the RAMBlock 1229 * @len: length (in bytes) to send 1230 */ 1231 int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len) 1232 { 1233 RAMBlock *ramblock; 1234 RAMState *rs = &ram_state; 1235 1236 rs->postcopy_requests++; 1237 rcu_read_lock(); 1238 if (!rbname) { 1239 /* Reuse last RAMBlock */ 1240 ramblock = rs->last_req_rb; 1241 1242 if (!ramblock) { 1243 /* 1244 * Shouldn't happen, we can't reuse the last RAMBlock if 1245 * it's the 1st request. 1246 */ 1247 error_report("ram_save_queue_pages no previous block"); 1248 goto err; 1249 } 1250 } else { 1251 ramblock = qemu_ram_block_by_name(rbname); 1252 1253 if (!ramblock) { 1254 /* We shouldn't be asked for a non-existent RAMBlock */ 1255 error_report("ram_save_queue_pages no block '%s'", rbname); 1256 goto err; 1257 } 1258 rs->last_req_rb = ramblock; 1259 } 1260 trace_ram_save_queue_pages(ramblock->idstr, start, len); 1261 if (start+len > ramblock->used_length) { 1262 error_report("%s request overrun start=" RAM_ADDR_FMT " len=" 1263 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT, 1264 __func__, start, len, ramblock->used_length); 1265 goto err; 1266 } 1267 1268 struct RAMSrcPageRequest *new_entry = 1269 g_malloc0(sizeof(struct RAMSrcPageRequest)); 1270 new_entry->rb = ramblock; 1271 new_entry->offset = start; 1272 new_entry->len = len; 1273 1274 memory_region_ref(ramblock->mr); 1275 qemu_mutex_lock(&rs->src_page_req_mutex); 1276 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req); 1277 qemu_mutex_unlock(&rs->src_page_req_mutex); 1278 rcu_read_unlock(); 1279 1280 return 0; 1281 1282 err: 1283 rcu_read_unlock(); 1284 return -1; 1285 } 1286 1287 /** 1288 * ram_save_target_page: save one target page 1289 * 1290 * Returns the number of pages written 1291 * 1292 * @rs: current RAM state 1293 * @ms: current migration state 1294 * @pss: data about the page we want to send 1295 * @last_stage: if we are at the completion stage 1296 */ 1297 static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss, 1298 bool last_stage) 1299 { 1300 int res = 0; 1301 1302 /* Check the pages is dirty and if it is send it */ 1303 if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) { 1304 unsigned long *unsentmap; 1305 /* 1306 * If xbzrle is on, stop using the data compression after first 1307 * round of migration even if compression is enabled. In theory, 1308 * xbzrle can do better than compression. 
1309 */ 1310 unsigned long page = 1311 (pss->block->offset >> TARGET_PAGE_BITS) + pss->page; 1312 if (migrate_use_compression() 1313 && (rs->ram_bulk_stage || !migrate_use_xbzrle())) { 1314 res = ram_save_compressed_page(rs, pss, last_stage); 1315 } else { 1316 res = ram_save_page(rs, pss, last_stage); 1317 } 1318 1319 if (res < 0) { 1320 return res; 1321 } 1322 unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap; 1323 if (unsentmap) { 1324 clear_bit(page, unsentmap); 1325 } 1326 } 1327 1328 return res; 1329 } 1330 1331 /** 1332 * ram_save_host_page: save a whole host page 1333 * 1334 * Starting at *offset send pages up to the end of the current host 1335 * page. It's valid for the initial offset to point into the middle of 1336 * a host page in which case the remainder of the hostpage is sent. 1337 * Only dirty target pages are sent. Note that the host page size may 1338 * be a huge page for this block. 1339 * 1340 * Returns the number of pages written or negative on error 1341 * 1342 * @rs: current RAM state 1343 * @ms: current migration state 1344 * @pss: data about the page we want to send 1345 * @last_stage: if we are at the completion stage 1346 */ 1347 static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss, 1348 bool last_stage) 1349 { 1350 int tmppages, pages = 0; 1351 size_t pagesize_bits = 1352 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS; 1353 1354 do { 1355 tmppages = ram_save_target_page(rs, pss, last_stage); 1356 if (tmppages < 0) { 1357 return tmppages; 1358 } 1359 1360 pages += tmppages; 1361 pss->page++; 1362 } while (pss->page & (pagesize_bits - 1)); 1363 1364 /* The offset we leave with is the last one we looked at */ 1365 pss->page--; 1366 return pages; 1367 } 1368 1369 /** 1370 * ram_find_and_save_block: finds a dirty page and sends it to f 1371 * 1372 * Called within an RCU critical section. 1373 * 1374 * Returns the number of pages written where zero means no dirty pages 1375 * 1376 * @rs: current RAM state 1377 * @last_stage: if we are at the completion stage 1378 * 1379 * On systems where host-page-size > target-page-size it will send all the 1380 * pages in a host page that are dirty. 
1381 */ 1382 1383 static int ram_find_and_save_block(RAMState *rs, bool last_stage) 1384 { 1385 PageSearchStatus pss; 1386 int pages = 0; 1387 bool again, found; 1388 1389 /* No dirty page as there is zero RAM */ 1390 if (!ram_bytes_total()) { 1391 return pages; 1392 } 1393 1394 pss.block = rs->last_seen_block; 1395 pss.page = rs->last_page; 1396 pss.complete_round = false; 1397 1398 if (!pss.block) { 1399 pss.block = QLIST_FIRST_RCU(&ram_list.blocks); 1400 } 1401 1402 do { 1403 again = true; 1404 found = get_queued_page(rs, &pss); 1405 1406 if (!found) { 1407 /* priority queue empty, so just search for something dirty */ 1408 found = find_dirty_block(rs, &pss, &again); 1409 } 1410 1411 if (found) { 1412 pages = ram_save_host_page(rs, &pss, last_stage); 1413 } 1414 } while (!pages && again); 1415 1416 rs->last_seen_block = pss.block; 1417 rs->last_page = pss.page; 1418 1419 return pages; 1420 } 1421 1422 void acct_update_position(QEMUFile *f, size_t size, bool zero) 1423 { 1424 uint64_t pages = size / TARGET_PAGE_SIZE; 1425 RAMState *rs = &ram_state; 1426 1427 if (zero) { 1428 rs->zero_pages += pages; 1429 } else { 1430 rs->norm_pages += pages; 1431 rs->bytes_transferred += size; 1432 qemu_update_position(f, size); 1433 } 1434 } 1435 1436 uint64_t ram_bytes_total(void) 1437 { 1438 RAMBlock *block; 1439 uint64_t total = 0; 1440 1441 rcu_read_lock(); 1442 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) 1443 total += block->used_length; 1444 rcu_read_unlock(); 1445 return total; 1446 } 1447 1448 void free_xbzrle_decoded_buf(void) 1449 { 1450 g_free(xbzrle_decoded_buf); 1451 xbzrle_decoded_buf = NULL; 1452 } 1453 1454 static void migration_bitmap_free(RAMBitmap *bmap) 1455 { 1456 g_free(bmap->bmap); 1457 g_free(bmap->unsentmap); 1458 g_free(bmap); 1459 } 1460 1461 static void ram_migration_cleanup(void *opaque) 1462 { 1463 RAMState *rs = opaque; 1464 1465 /* caller have hold iothread lock or is in a bh, so there is 1466 * no writing race against this migration_bitmap 1467 */ 1468 RAMBitmap *bitmap = rs->ram_bitmap; 1469 atomic_rcu_set(&rs->ram_bitmap, NULL); 1470 if (bitmap) { 1471 memory_global_dirty_log_stop(); 1472 call_rcu(bitmap, migration_bitmap_free, rcu); 1473 } 1474 1475 XBZRLE_cache_lock(); 1476 if (XBZRLE.cache) { 1477 cache_fini(XBZRLE.cache); 1478 g_free(XBZRLE.encoded_buf); 1479 g_free(XBZRLE.current_buf); 1480 g_free(ZERO_TARGET_PAGE); 1481 XBZRLE.cache = NULL; 1482 XBZRLE.encoded_buf = NULL; 1483 XBZRLE.current_buf = NULL; 1484 } 1485 XBZRLE_cache_unlock(); 1486 } 1487 1488 static void ram_state_reset(RAMState *rs) 1489 { 1490 rs->last_seen_block = NULL; 1491 rs->last_sent_block = NULL; 1492 rs->last_page = 0; 1493 rs->last_version = ram_list.version; 1494 rs->ram_bulk_stage = true; 1495 } 1496 1497 #define MAX_WAIT 50 /* ms, half buffered_file limit */ 1498 1499 /* 1500 * 'expected' is the value you expect the bitmap mostly to be full 1501 * of; it won't bother printing lines that are all this value. 1502 * If 'todump' is null the migration bitmap is dumped. 
1503 */ 1504 void ram_debug_dump_bitmap(unsigned long *todump, bool expected) 1505 { 1506 unsigned long ram_pages = last_ram_page(); 1507 RAMState *rs = &ram_state; 1508 int64_t cur; 1509 int64_t linelen = 128; 1510 char linebuf[129]; 1511 1512 if (!todump) { 1513 todump = atomic_rcu_read(&rs->ram_bitmap)->bmap; 1514 } 1515 1516 for (cur = 0; cur < ram_pages; cur += linelen) { 1517 int64_t curb; 1518 bool found = false; 1519 /* 1520 * Last line; catch the case where the line length 1521 * is longer than remaining ram 1522 */ 1523 if (cur + linelen > ram_pages) { 1524 linelen = ram_pages - cur; 1525 } 1526 for (curb = 0; curb < linelen; curb++) { 1527 bool thisbit = test_bit(cur + curb, todump); 1528 linebuf[curb] = thisbit ? '1' : '.'; 1529 found = found || (thisbit != expected); 1530 } 1531 if (found) { 1532 linebuf[curb] = '\0'; 1533 fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf); 1534 } 1535 } 1536 } 1537 1538 /* **** functions for postcopy ***** */ 1539 1540 void ram_postcopy_migrated_memory_release(MigrationState *ms) 1541 { 1542 RAMState *rs = &ram_state; 1543 struct RAMBlock *block; 1544 unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; 1545 1546 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 1547 unsigned long first = block->offset >> TARGET_PAGE_BITS; 1548 unsigned long range = first + (block->used_length >> TARGET_PAGE_BITS); 1549 unsigned long run_start = find_next_zero_bit(bitmap, range, first); 1550 1551 while (run_start < range) { 1552 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1); 1553 ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS, 1554 (run_end - run_start) << TARGET_PAGE_BITS); 1555 run_start = find_next_zero_bit(bitmap, range, run_end + 1); 1556 } 1557 } 1558 } 1559 1560 /** 1561 * postcopy_send_discard_bm_ram: discard a RAMBlock 1562 * 1563 * Returns zero on success 1564 * 1565 * Callback from postcopy_each_ram_send_discard for each RAMBlock 1566 * Note: At this point the 'unsentmap' is the processed bitmap combined 1567 * with the dirtymap; so a '1' means it's either dirty or unsent. 1568 * 1569 * @ms: current migration state 1570 * @pds: state for postcopy 1571 * @start: RAMBlock starting page 1572 * @length: RAMBlock size 1573 */ 1574 static int postcopy_send_discard_bm_ram(MigrationState *ms, 1575 PostcopyDiscardState *pds, 1576 unsigned long start, 1577 unsigned long length) 1578 { 1579 RAMState *rs = &ram_state; 1580 unsigned long end = start + length; /* one after the end */ 1581 unsigned long current; 1582 unsigned long *unsentmap; 1583 1584 unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap; 1585 for (current = start; current < end; ) { 1586 unsigned long one = find_next_bit(unsentmap, end, current); 1587 1588 if (one <= end) { 1589 unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1); 1590 unsigned long discard_length; 1591 1592 if (zero >= end) { 1593 discard_length = end - one; 1594 } else { 1595 discard_length = zero - one; 1596 } 1597 if (discard_length) { 1598 postcopy_discard_send_range(ms, pds, one, discard_length); 1599 } 1600 current = one + discard_length; 1601 } else { 1602 current = one; 1603 } 1604 } 1605 1606 return 0; 1607 } 1608 1609 /** 1610 * postcopy_each_ram_send_discard: discard all RAMBlocks 1611 * 1612 * Returns 0 for success or negative for error 1613 * 1614 * Utility for the outgoing postcopy code. 1615 * Calls postcopy_send_discard_bm_ram for each RAMBlock 1616 * passing it bitmap indexes and name. 
1617 * (qemu_ram_foreach_block ends up passing unscaled lengths 1618 * which would mean postcopy code would have to deal with target page) 1619 * 1620 * @ms: current migration state 1621 */ 1622 static int postcopy_each_ram_send_discard(MigrationState *ms) 1623 { 1624 struct RAMBlock *block; 1625 int ret; 1626 1627 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 1628 unsigned long first = block->offset >> TARGET_PAGE_BITS; 1629 PostcopyDiscardState *pds = postcopy_discard_send_init(ms, 1630 first, 1631 block->idstr); 1632 1633 /* 1634 * Postcopy sends chunks of bitmap over the wire, but it 1635 * just needs indexes at this point, avoids it having 1636 * target page specific code. 1637 */ 1638 ret = postcopy_send_discard_bm_ram(ms, pds, first, 1639 block->used_length >> TARGET_PAGE_BITS); 1640 postcopy_discard_send_finish(ms, pds); 1641 if (ret) { 1642 return ret; 1643 } 1644 } 1645 1646 return 0; 1647 } 1648 1649 /** 1650 * postcopy_chunk_hostpages_pass: canocalize bitmap in hostpages 1651 * 1652 * Helper for postcopy_chunk_hostpages; it's called twice to 1653 * canonicalize the two bitmaps, that are similar, but one is 1654 * inverted. 1655 * 1656 * Postcopy requires that all target pages in a hostpage are dirty or 1657 * clean, not a mix. This function canonicalizes the bitmaps. 1658 * 1659 * @ms: current migration state 1660 * @unsent_pass: if true we need to canonicalize partially unsent host pages 1661 * otherwise we need to canonicalize partially dirty host pages 1662 * @block: block that contains the page we want to canonicalize 1663 * @pds: state for postcopy 1664 */ 1665 static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass, 1666 RAMBlock *block, 1667 PostcopyDiscardState *pds) 1668 { 1669 RAMState *rs = &ram_state; 1670 unsigned long *bitmap; 1671 unsigned long *unsentmap; 1672 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE; 1673 unsigned long first = block->offset >> TARGET_PAGE_BITS; 1674 unsigned long len = block->used_length >> TARGET_PAGE_BITS; 1675 unsigned long last = first + (len - 1); 1676 unsigned long run_start; 1677 1678 if (block->page_size == TARGET_PAGE_SIZE) { 1679 /* Easy case - TPS==HPS for a non-huge page RAMBlock */ 1680 return; 1681 } 1682 1683 bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; 1684 unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap; 1685 1686 if (unsent_pass) { 1687 /* Find a sent page */ 1688 run_start = find_next_zero_bit(unsentmap, last + 1, first); 1689 } else { 1690 /* Find a dirty page */ 1691 run_start = find_next_bit(bitmap, last + 1, first); 1692 } 1693 1694 while (run_start <= last) { 1695 bool do_fixup = false; 1696 unsigned long fixup_start_addr; 1697 unsigned long host_offset; 1698 1699 /* 1700 * If the start of this run of pages is in the middle of a host 1701 * page, then we need to fixup this host page. 1702 */ 1703 host_offset = run_start % host_ratio; 1704 if (host_offset) { 1705 do_fixup = true; 1706 run_start -= host_offset; 1707 fixup_start_addr = run_start; 1708 /* For the next pass */ 1709 run_start = run_start + host_ratio; 1710 } else { 1711 /* Find the end of this run */ 1712 unsigned long run_end; 1713 if (unsent_pass) { 1714 run_end = find_next_bit(unsentmap, last + 1, run_start + 1); 1715 } else { 1716 run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1); 1717 } 1718 /* 1719 * If the end isn't at the start of a host page, then the 1720 * run doesn't finish at the end of a host page 1721 * and we need to discard. 
1722 */ 1723 host_offset = run_end % host_ratio; 1724 if (host_offset) { 1725 do_fixup = true; 1726 fixup_start_addr = run_end - host_offset; 1727 /* 1728 * This host page has gone, the next loop iteration starts 1729 * from after the fixup 1730 */ 1731 run_start = fixup_start_addr + host_ratio; 1732 } else { 1733 /* 1734 * No discards on this iteration, next loop starts from 1735 * next sent/dirty page 1736 */ 1737 run_start = run_end + 1; 1738 } 1739 } 1740 1741 if (do_fixup) { 1742 unsigned long page; 1743 1744 /* Tell the destination to discard this page */ 1745 if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) { 1746 /* For the unsent_pass we: 1747 * discard partially sent pages 1748 * For the !unsent_pass (dirty) we: 1749 * discard partially dirty pages that were sent 1750 * (any partially sent pages were already discarded 1751 * by the previous unsent_pass) 1752 */ 1753 postcopy_discard_send_range(ms, pds, fixup_start_addr, 1754 host_ratio); 1755 } 1756 1757 /* Clean up the bitmap */ 1758 for (page = fixup_start_addr; 1759 page < fixup_start_addr + host_ratio; page++) { 1760 /* All pages in this host page are now not sent */ 1761 set_bit(page, unsentmap); 1762 1763 /* 1764 * Remark them as dirty, updating the count for any pages 1765 * that weren't previously dirty. 1766 */ 1767 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap); 1768 } 1769 } 1770 1771 if (unsent_pass) { 1772 /* Find the next sent page for the next iteration */ 1773 run_start = find_next_zero_bit(unsentmap, last + 1, 1774 run_start); 1775 } else { 1776 /* Find the next dirty page for the next iteration */ 1777 run_start = find_next_bit(bitmap, last + 1, run_start); 1778 } 1779 } 1780 } 1781 1782 /** 1783 * postcopy_chuck_hostpages: discrad any partially sent host page 1784 * 1785 * Utility for the outgoing postcopy code. 1786 * 1787 * Discard any partially sent host-page size chunks, mark any partially 1788 * dirty host-page size chunks as all dirty. In this case the host-page 1789 * is the host-page for the particular RAMBlock, i.e. it might be a huge page 1790 * 1791 * Returns zero on success 1792 * 1793 * @ms: current migration state 1794 */ 1795 static int postcopy_chunk_hostpages(MigrationState *ms) 1796 { 1797 RAMState *rs = &ram_state; 1798 struct RAMBlock *block; 1799 1800 /* Easiest way to make sure we don't resume in the middle of a host-page */ 1801 rs->last_seen_block = NULL; 1802 rs->last_sent_block = NULL; 1803 rs->last_page = 0; 1804 1805 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 1806 unsigned long first = block->offset >> TARGET_PAGE_BITS; 1807 1808 PostcopyDiscardState *pds = 1809 postcopy_discard_send_init(ms, first, block->idstr); 1810 1811 /* First pass: Discard all partially sent host pages */ 1812 postcopy_chunk_hostpages_pass(ms, true, block, pds); 1813 /* 1814 * Second pass: Ensure that all partially dirty host pages are made 1815 * fully dirty. 
1816 */ 1817 postcopy_chunk_hostpages_pass(ms, false, block, pds); 1818 1819 postcopy_discard_send_finish(ms, pds); 1820 } /* ram_list loop */ 1821 1822 return 0; 1823 } 1824 1825 /** 1826 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap 1827 * 1828 * Returns zero on success 1829 * 1830 * Transmit the set of pages to be discarded after precopy to the target 1831 * these are pages that: 1832 * a) Have been previously transmitted but are now dirty again 1833 * b) Pages that have never been transmitted, this ensures that 1834 * any pages on the destination that have been mapped by background 1835 * tasks get discarded (transparent huge pages is the specific concern) 1836 * Hopefully this is pretty sparse 1837 * 1838 * @ms: current migration state 1839 */ 1840 int ram_postcopy_send_discard_bitmap(MigrationState *ms) 1841 { 1842 RAMState *rs = &ram_state; 1843 int ret; 1844 unsigned long *bitmap, *unsentmap; 1845 1846 rcu_read_lock(); 1847 1848 /* This should be our last sync, the src is now paused */ 1849 migration_bitmap_sync(rs); 1850 1851 unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap; 1852 if (!unsentmap) { 1853 /* We don't have a safe way to resize the sentmap, so 1854 * if the bitmap was resized it will be NULL at this 1855 * point. 1856 */ 1857 error_report("migration ram resized during precopy phase"); 1858 rcu_read_unlock(); 1859 return -EINVAL; 1860 } 1861 1862 /* Deal with TPS != HPS and huge pages */ 1863 ret = postcopy_chunk_hostpages(ms); 1864 if (ret) { 1865 rcu_read_unlock(); 1866 return ret; 1867 } 1868 1869 /* 1870 * Update the unsentmap to be unsentmap = unsentmap | dirty 1871 */ 1872 bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; 1873 bitmap_or(unsentmap, unsentmap, bitmap, last_ram_page()); 1874 1875 1876 trace_ram_postcopy_send_discard_bitmap(); 1877 #ifdef DEBUG_POSTCOPY 1878 ram_debug_dump_bitmap(unsentmap, true); 1879 #endif 1880 1881 ret = postcopy_each_ram_send_discard(ms); 1882 rcu_read_unlock(); 1883 1884 return ret; 1885 } 1886 1887 /** 1888 * ram_discard_range: discard dirtied pages at the beginning of postcopy 1889 * 1890 * Returns zero on success 1891 * 1892 * @rbname: name of the RAMBlock of the request. NULL means the 1893 * same that last one. 
1894 * @start: RAMBlock starting page 1895 * @length: RAMBlock size 1896 */ 1897 int ram_discard_range(const char *rbname, uint64_t start, size_t length) 1898 { 1899 int ret = -1; 1900 1901 trace_ram_discard_range(rbname, start, length); 1902 1903 rcu_read_lock(); 1904 RAMBlock *rb = qemu_ram_block_by_name(rbname); 1905 1906 if (!rb) { 1907 error_report("ram_discard_range: Failed to find block '%s'", rbname); 1908 goto err; 1909 } 1910 1911 ret = ram_block_discard_range(rb, start, length); 1912 1913 err: 1914 rcu_read_unlock(); 1915 1916 return ret; 1917 } 1918 1919 static int ram_state_init(RAMState *rs) 1920 { 1921 unsigned long ram_bitmap_pages; 1922 1923 memset(rs, 0, sizeof(*rs)); 1924 qemu_mutex_init(&rs->bitmap_mutex); 1925 qemu_mutex_init(&rs->src_page_req_mutex); 1926 QSIMPLEQ_INIT(&rs->src_page_requests); 1927 1928 if (migrate_use_xbzrle()) { 1929 XBZRLE_cache_lock(); 1930 ZERO_TARGET_PAGE = g_malloc0(TARGET_PAGE_SIZE); 1931 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() / 1932 TARGET_PAGE_SIZE, 1933 TARGET_PAGE_SIZE); 1934 if (!XBZRLE.cache) { 1935 XBZRLE_cache_unlock(); 1936 error_report("Error creating cache"); 1937 return -1; 1938 } 1939 XBZRLE_cache_unlock(); 1940 1941 /* We prefer not to abort if there is no memory */ 1942 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE); 1943 if (!XBZRLE.encoded_buf) { 1944 error_report("Error allocating encoded_buf"); 1945 return -1; 1946 } 1947 1948 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE); 1949 if (!XBZRLE.current_buf) { 1950 error_report("Error allocating current_buf"); 1951 g_free(XBZRLE.encoded_buf); 1952 XBZRLE.encoded_buf = NULL; 1953 return -1; 1954 } 1955 } 1956 1957 /* For memory_global_dirty_log_start below. */ 1958 qemu_mutex_lock_iothread(); 1959 1960 qemu_mutex_lock_ramlist(); 1961 rcu_read_lock(); 1962 ram_state_reset(rs); 1963 1964 rs->ram_bitmap = g_new0(RAMBitmap, 1); 1965 /* Skip setting bitmap if there is no RAM */ 1966 if (ram_bytes_total()) { 1967 ram_bitmap_pages = last_ram_page(); 1968 rs->ram_bitmap->bmap = bitmap_new(ram_bitmap_pages); 1969 bitmap_set(rs->ram_bitmap->bmap, 0, ram_bitmap_pages); 1970 1971 if (migrate_postcopy_ram()) { 1972 rs->ram_bitmap->unsentmap = bitmap_new(ram_bitmap_pages); 1973 bitmap_set(rs->ram_bitmap->unsentmap, 0, ram_bitmap_pages); 1974 } 1975 } 1976 1977 /* 1978 * Count the total number of pages used by ram blocks not including any 1979 * gaps due to alignment or unplugs. 1980 */ 1981 rs->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS; 1982 1983 memory_global_dirty_log_start(); 1984 migration_bitmap_sync(rs); 1985 qemu_mutex_unlock_ramlist(); 1986 qemu_mutex_unlock_iothread(); 1987 rcu_read_unlock(); 1988 1989 return 0; 1990 } 1991 1992 /* 1993 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has 1994 * long-running RCU critical section. When rcu-reclaims in the code 1995 * start to become numerous it will be necessary to reduce the 1996 * granularity of these critical sections. 1997 */ 1998 1999 /** 2000 * ram_save_setup: Setup RAM for migration 2001 * 2002 * Returns zero to indicate success and negative for error 2003 * 2004 * @f: QEMUFile where to send the data 2005 * @opaque: RAMState pointer 2006 */ 2007 static int ram_save_setup(QEMUFile *f, void *opaque) 2008 { 2009 RAMState *rs = opaque; 2010 RAMBlock *block; 2011 2012 /* migration has already setup the bitmap, reuse it. 

/*
 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
 * a long-running RCU critical section.  When RCU reclaims in the code
 * start to become numerous it will be necessary to reduce the
 * granularity of these critical sections.
 */

/**
 * ram_save_setup: set up RAM for migration
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMState *rs = opaque;
    RAMBlock *block;

    /* migration has already set up the bitmap, reuse it. */
    if (!migration_in_colo_state()) {
        if (ram_state_init(rs) < 0) {
            return -1;
        }
    }
    rs->f = f;

    rcu_read_lock();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->used_length);
        if (migrate_postcopy_ram() &&
            block->page_size != qemu_host_page_size) {
            qemu_put_be64(f, block->page_size);
        }
    }

    rcu_read_unlock();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
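
/* Illustrative sketch, not compiled: the setup section written above has a
 * simple layout, reproduced here for reference.
 *
 *   be64  ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE
 *   repeated for each RAMBlock:
 *     u8    idstr length
 *     bytes idstr (not NUL terminated on the wire)
 *     be64  used_length
 *     be64  page_size   (only when postcopy is on and it differs from the
 *                        host page size)
 *   be64  RAM_SAVE_FLAG_EOS
 *
 * Because RAM sizes are multiples of the target page size, the flag bits fit
 * in the low bits of the same 64-bit word; the helper name below is
 * hypothetical.
 */
#if 0
static uint64_t example_decode_mem_size_header(uint64_t header)
{
    assert(header & RAM_SAVE_FLAG_MEM_SIZE);
    return header & TARGET_PAGE_MASK;   /* total RAM in bytes */
}
#endif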

/**
 * ram_save_iterate: iterative stage for migration
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    RAMState *rs = opaque;
    int ret;
    int i;
    int64_t t0;
    int done = 0;

    rcu_read_lock();
    if (ram_list.version != rs->last_version) {
        ram_state_reset(rs);
    }

    /* Read version before ram_list.blocks */
    smp_rmb();

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int pages;

        pages = ram_find_and_save_block(rs, false);
        /* no more pages to send */
        if (pages == 0) {
            done = 1;
            break;
        }
        rs->iterations++;

        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_clock_get_ns() is a bit expensive, so we only check once
           every few iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
                          1000000;
            if (t1 > MAX_WAIT) {
                trace_ram_save_iterate_big_wait(t1, i);
                break;
            }
        }
        i++;
    }
    flush_compressed_data(rs);
    rcu_read_unlock();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    rs->bytes_transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return done;
}

/**
 * ram_save_complete: function called to send the remaining amount of ram
 *
 * Returns zero to indicate success
 *
 * Called with the iothread lock held
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    RAMState *rs = opaque;

    rcu_read_lock();

    if (!migration_in_postcopy()) {
        migration_bitmap_sync(rs);
    }

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int pages;

        pages = ram_find_and_save_block(rs, !migration_in_colo_state());
        /* no more blocks to send */
        if (pages == 0) {
            break;
        }
    }

    flush_compressed_data(rs);
    ram_control_after_iterate(f, RAM_CONTROL_FINISH);

    rcu_read_unlock();

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                             uint64_t *non_postcopiable_pending,
                             uint64_t *postcopiable_pending)
{
    RAMState *rs = opaque;
    uint64_t remaining_size;

    remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;

    if (!migration_in_postcopy() &&
        remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        rcu_read_lock();
        migration_bitmap_sync(rs);
        rcu_read_unlock();
        qemu_mutex_unlock_iothread();
        remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
    }

    /* We can do postcopy, and all the data is postcopiable */
    *postcopiable_pending += remaining_size;
}

static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    unsigned int xh_len;
    int xh_flags;
    uint8_t *loaded_data;

    if (!xbzrle_decoded_buf) {
        xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }
    loaded_data = xbzrle_decoded_buf;

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        error_report("Failed to load XBZRLE page - wrong compression!");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        error_report("Failed to load XBZRLE page - len overflow!");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer_in_place(f, &loaded_data, xh_len);

    /* decode RLE */
    if (xbzrle_decode_buffer(loaded_data, xh_len, host,
                             TARGET_PAGE_SIZE) == -1) {
        error_report("Failed to load XBZRLE page - decode error!");
        return -1;
    }

    return 0;
}
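
/* Illustrative sketch, not compiled: the XBZRLE record that load_xbzrle()
 * above consumes is a one-byte flag, a big-endian 16-bit encoded length and
 * the encoded bytes.  A matching writer would look roughly like this; the
 * function name is hypothetical.
 */
#if 0
static void example_put_xbzrle_record(QEMUFile *f, const uint8_t *encoded,
                                      uint16_t encoded_len)
{
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);                 /* <= TARGET_PAGE_SIZE */
    qemu_put_buffer(f, encoded, encoded_len);      /* delta vs cached page */
}
#endif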

/**
 * ram_block_from_stream: read a RAMBlock id from the migration stream
 *
 * Must be called from within an RCU critical section.
 *
 * Returns a pointer from within the RCU-protected ram_list.
 *
 * @f: QEMUFile where to read the data from
 * @flags: Page flags (mostly to see if it's a continuation of a previous
 *         block)
 */
static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            error_report("Ack, bad migration stream!");
            return NULL;
        }
        return block;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    block = qemu_ram_block_by_name(id);
    if (!block) {
        error_report("Can't find block %s", id);
        return NULL;
    }

    return block;
}

static inline void *host_from_ram_block_offset(RAMBlock *block,
                                               ram_addr_t offset)
{
    if (!offset_in_ramblock(block, offset)) {
        return NULL;
    }

    return block->host + offset;
}

/**
 * ram_handle_compressed: handle the zero page case
 *
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 *
 * @host: host address for the zero page
 * @ch: what the page is filled with.  We only support zero
 * @size: size of the zero page
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
    }
}
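
/* Illustrative sketch, not compiled: the decompression threads below undo a
 * per-page zlib compress2() done on the source.  This is the round trip they
 * rely on; the function name, buffers and compression level are hypothetical.
 */
#if 0
static int example_page_roundtrip(const uint8_t *page, uint8_t *out_page)
{
    uLongf comp_len = compressBound(TARGET_PAGE_SIZE);
    uint8_t *compbuf = g_malloc(comp_len);
    uLongf page_len = TARGET_PAGE_SIZE;
    int ret = -1;

    if (compress2((Bytef *)compbuf, &comp_len, (const Bytef *)page,
                  TARGET_PAGE_SIZE, Z_BEST_SPEED) == Z_OK &&
        uncompress((Bytef *)out_page, &page_len, (const Bytef *)compbuf,
                   comp_len) == Z_OK &&
        page_len == TARGET_PAGE_SIZE) {
        ret = 0;
    }
    g_free(compbuf);
    return ret;
}
#endif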

static void *do_data_decompress(void *opaque)
{
    DecompressParam *param = opaque;
    unsigned long pagesize;
    uint8_t *des;
    int len;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        if (param->des) {
            des = param->des;
            len = param->len;
            param->des = 0;
            qemu_mutex_unlock(&param->mutex);

            pagesize = TARGET_PAGE_SIZE;
            /* uncompress() can fail in some cases, especially when the
             * page was dirtied while it was being compressed; that is not
             * a problem because the dirty page will be retransmitted and
             * uncompress() won't corrupt the data in other pages.
             */
            uncompress((Bytef *)des, &pagesize,
                       (const Bytef *)param->compbuf, len);

            qemu_mutex_lock(&decomp_done_lock);
            param->done = true;
            qemu_cond_signal(&decomp_done_cond);
            qemu_mutex_unlock(&decomp_done_lock);

            qemu_mutex_lock(&param->mutex);
        } else {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
    }
    qemu_mutex_unlock(&param->mutex);

    return NULL;
}

static void wait_for_decompress_done(void)
{
    int idx, thread_count;

    if (!migrate_use_compression()) {
        return;
    }

    thread_count = migrate_decompress_threads();
    qemu_mutex_lock(&decomp_done_lock);
    for (idx = 0; idx < thread_count; idx++) {
        while (!decomp_param[idx].done) {
            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
        }
    }
    qemu_mutex_unlock(&decomp_done_lock);
}

void migrate_decompress_threads_create(void)
{
    int i, thread_count;

    thread_count = migrate_decompress_threads();
    decompress_threads = g_new0(QemuThread, thread_count);
    decomp_param = g_new0(DecompressParam, thread_count);
    qemu_mutex_init(&decomp_done_lock);
    qemu_cond_init(&decomp_done_cond);
    for (i = 0; i < thread_count; i++) {
        qemu_mutex_init(&decomp_param[i].mutex);
        qemu_cond_init(&decomp_param[i].cond);
        decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
        decomp_param[i].done = true;
        decomp_param[i].quit = false;
        qemu_thread_create(decompress_threads + i, "decompress",
                           do_data_decompress, decomp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
}

void migrate_decompress_threads_join(void)
{
    int i, thread_count;

    thread_count = migrate_decompress_threads();
    for (i = 0; i < thread_count; i++) {
        qemu_mutex_lock(&decomp_param[i].mutex);
        decomp_param[i].quit = true;
        qemu_cond_signal(&decomp_param[i].cond);
        qemu_mutex_unlock(&decomp_param[i].mutex);
    }
    for (i = 0; i < thread_count; i++) {
        qemu_thread_join(decompress_threads + i);
        qemu_mutex_destroy(&decomp_param[i].mutex);
        qemu_cond_destroy(&decomp_param[i].cond);
        g_free(decomp_param[i].compbuf);
    }
    g_free(decompress_threads);
    g_free(decomp_param);
    decompress_threads = NULL;
    decomp_param = NULL;
}

static void decompress_data_with_multi_threads(QEMUFile *f,
                                               void *host, int len)
{
    int idx, thread_count;

    thread_count = migrate_decompress_threads();
    qemu_mutex_lock(&decomp_done_lock);
    while (true) {
        for (idx = 0; idx < thread_count; idx++) {
            if (decomp_param[idx].done) {
                decomp_param[idx].done = false;
                qemu_mutex_lock(&decomp_param[idx].mutex);
                qemu_get_buffer(f, decomp_param[idx].compbuf, len);
                decomp_param[idx].des = host;
                decomp_param[idx].len = len;
                qemu_cond_signal(&decomp_param[idx].cond);
                qemu_mutex_unlock(&decomp_param[idx].mutex);
                break;
            }
        }
        if (idx < thread_count) {
            break;
        } else {
            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
        }
    }
    qemu_mutex_unlock(&decomp_done_lock);
}

/**
 * ram_postcopy_incoming_init: allocate postcopy data structures
 *
 * Returns 0 for success and negative in case of error
 *
 * @mis: current migration incoming state
 *
 * Allocate data structures etc needed by incoming migration with
 * postcopy-ram.  postcopy-ram's similarly named
 * postcopy_ram_incoming_init does the work.
 */
int ram_postcopy_incoming_init(MigrationIncomingState *mis)
{
    unsigned long ram_pages = last_ram_page();

    return postcopy_ram_incoming_init(mis, ram_pages);
}
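
/* Illustrative sketch, not compiled: ram_load_postcopy() below assembles
 * whole host pages out of target-page sized pieces before placing them
 * atomically.  This is the offset arithmetic it uses; the names are
 * hypothetical, and host_page_size is assumed to be a power of two that is
 * at least TARGET_PAGE_SIZE.
 */
#if 0
static void *example_tmp_page_dest(void *tmp_host_page, void *host,
                                   size_t host_page_size, bool *place_needed)
{
    uintptr_t offset_in_hp = (uintptr_t)host & (host_page_size - 1);

    /* the whole host page can be placed once its last target page arrives */
    *place_needed =
        ((offset_in_hp + TARGET_PAGE_SIZE) & (host_page_size - 1)) == 0;

    return (uint8_t *)tmp_host_page + offset_in_hp;
}
#endif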

/**
 * ram_load_postcopy: load a page in postcopy case
 *
 * Returns 0 for success or -errno in case of error
 *
 * Called in postcopy mode by ram_load().
 * rcu_read_lock is taken prior to this being called.
 *
 * @f: QEMUFile where to read the data from
 */
static int ram_load_postcopy(QEMUFile *f)
{
    int flags = 0, ret = 0;
    bool place_needed = false;
    bool matching_page_sizes = false;
    MigrationIncomingState *mis = migration_incoming_get_current();
    /* Temporary page that is later 'placed' */
    void *postcopy_host_page = postcopy_get_tmp_page(mis);
    void *last_host = NULL;
    bool all_zero = false;

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr;
        void *host = NULL;
        void *page_buffer = NULL;
        void *place_source = NULL;
        RAMBlock *block = NULL;
        uint8_t ch;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        trace_ram_load_postcopy_loop((uint64_t)addr, flags);
        place_needed = false;
        if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) {
            block = ram_block_from_stream(f, flags);

            host = host_from_ram_block_offset(block, addr);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            matching_page_sizes = block->page_size == TARGET_PAGE_SIZE;
            /*
             * Postcopy requires that we place whole host pages atomically;
             * these may be huge pages for RAMBlocks that are backed by
             * hugetlbfs.
             * To make it atomic, the data is read into a temporary page
             * that's moved into place later.
             * The migration protocol uses, possibly smaller, target pages;
             * however, the source ensures it always sends all the components
             * of a host page in order.
             */
            page_buffer = postcopy_host_page +
                          ((uintptr_t)host & (block->page_size - 1));
            /* 1st TP of the host page: assume all TPs are zero until proven
             * otherwise, so the place can be optimised */
            if (!((uintptr_t)host & (block->page_size - 1))) {
                all_zero = true;
            } else {
                /* not the 1st TP within the HP */
                if (host != (last_host + TARGET_PAGE_SIZE)) {
                    error_report("Non-sequential target page %p/%p",
                                 host, last_host);
                    ret = -EINVAL;
                    break;
                }
            }

            /*
             * If it's the last part of a host page then we place the host
             * page
             */
            place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
                            (block->page_size - 1)) == 0;
            place_source = postcopy_host_page;
        }
        last_host = host;

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_COMPRESS:
            ch = qemu_get_byte(f);
            memset(page_buffer, ch, TARGET_PAGE_SIZE);
            if (ch) {
                all_zero = false;
            }
            break;

        case RAM_SAVE_FLAG_PAGE:
            all_zero = false;
            if (!place_needed || !matching_page_sizes) {
                qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
            } else {
                /* Avoid the extra copy through the QEMUFile buffer: the
                 * page is going to be copied into place later anyway, and
                 * we can only read it in one go when the page sizes match.
                 */
                qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
                                         TARGET_PAGE_SIZE);
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            break;
        default:
            error_report("Unknown combination of migration flags: %#x"
                         " (postcopy mode)", flags);
            ret = -EINVAL;
        }

        if (place_needed) {
            /* This gets called at the last target page in the host page */
            void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;

            if (all_zero) {
                ret = postcopy_place_page_zero(mis, place_dest,
                                               block->page_size);
            } else {
                ret = postcopy_place_page(mis, place_dest,
                                          place_source, block->page_size);
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    return ret;
}

static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int flags = 0, ret = 0;
    static uint64_t seq_iter;
    int len = 0;
    /*
     * If the system is running in postcopy mode, page inserts into host
     * memory must be atomic.
     */
    bool postcopy_running = postcopy_state_get() >=
                            POSTCOPY_INCOMING_LISTENING;
    /* ADVISE is earlier; it shows that the source has the postcopy
     * capability enabled. */
    bool postcopy_advised = postcopy_state_get() >= POSTCOPY_INCOMING_ADVISE;

    seq_iter++;

    if (version_id != 4) {
        ret = -EINVAL;
    }

    /* This RCU critical section can be very long running.
     * When RCU reclaims in the code start to become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
    rcu_read_lock();

    if (postcopy_running) {
        ret = ram_load_postcopy(f);
    }

    while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;
        void *host = NULL;
        uint8_t ch;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
            RAMBlock *block = ram_block_from_stream(f, flags);

            host = host_from_ram_block_offset(block, addr);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
        }

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            /* Synchronize RAM block list */
            total_ram_bytes = addr;
            while (!ret && total_ram_bytes) {
                RAMBlock *block;
                char id[256];
                ram_addr_t length;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                block = qemu_ram_block_by_name(id);
                if (block) {
                    if (length != block->used_length) {
                        Error *local_err = NULL;

                        ret = qemu_ram_resize(block, length,
                                              &local_err);
                        if (local_err) {
                            error_report_err(local_err);
                        }
                    }
                    /* For postcopy we need to check hugepage sizes match */
                    if (postcopy_advised &&
                        block->page_size != qemu_host_page_size) {
                        uint64_t remote_page_size = qemu_get_be64(f);
                        if (remote_page_size != block->page_size) {
                            error_report("Mismatched RAM page size %s "
                                         "(local) %zd != %" PRId64,
                                         id, block->page_size,
                                         remote_page_size);
                            ret = -EINVAL;
                        }
                    }
                    ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
                                          block->idstr);
                } else {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }

                total_ram_bytes -= length;
            }
            break;

        case RAM_SAVE_FLAG_COMPRESS:
            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_PAGE:
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
            decompress_data_with_multi_threads(f, host, len);
            break;

        case RAM_SAVE_FLAG_XBZRLE:
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            break;
        default:
            if (flags & RAM_SAVE_FLAG_HOOK) {
                ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
            } else {
                error_report("Unknown combination of migration flags: %#x",
                             flags);
                ret = -EINVAL;
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    wait_for_decompress_done();
    rcu_read_unlock();
    trace_ram_load_complete(ret, seq_iter);
    return ret;
}
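
/* Illustrative sketch, not compiled: every chunk header read by ram_load()
 * and ram_load_postcopy() packs the page address and the RAM_SAVE_FLAG_*
 * bits into one big-endian 64-bit word.  Addresses are target-page aligned
 * and all flag values are below TARGET_PAGE_SIZE, so the low bits are free;
 * the helper names are hypothetical.
 */
#if 0
static inline uint64_t example_pack_header(ram_addr_t addr, int flags)
{
    return (addr & TARGET_PAGE_MASK) | flags;
}

static inline void example_unpack_header(uint64_t header, ram_addr_t *addr,
                                         int *flags)
{
    *flags = header & ~TARGET_PAGE_MASK;
    *addr = header & TARGET_PAGE_MASK;
}
#endif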

static SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cleanup = ram_migration_cleanup,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);
}