/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/madvise.h"
#include "qemu/main-loop.h"
#include "xbzrle.h"
#include "ram-compress.h"
#include "ram.h"
#include "migration.h"
#include "migration-stats.h"
#include "migration/register.h"
#include "migration/misc.h"
#include "qemu-file.h"
#include "postcopy-ram.h"
#include "page_cache.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-types-migration.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/qmp/qerror.h"
#include "trace.h"
#include "exec/ram_addr.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "migration/colo.h"
#include "block.h"
#include "sysemu/cpu-throttle.h"
#include "savevm.h"
#include "qemu/iov.h"
#include "multifd.h"
#include "sysemu/runstate.h"
#include "rdma.h"
#include "options.h"
#include "sysemu/dirtylimit.h"
#include "sysemu/kvm.h"

#include "hw/boards.h" /* for machine_dump_guest_core() */

#if defined(__linux__)
#include "qemu/userfaultfd.h"
#endif /* defined(__linux__) */

/***********************************************************/
/* ram save/restore */
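/*
 * Illustrative note (added commentary, not from the original source):
 * every page record on the wire starts with a big-endian 64-bit word
 * holding the page offset with RAM_SAVE_FLAG_* bits OR'ed into it.
 * Offsets are target-page aligned, so e.g. with 4 KiB target pages bits
 * 0-11 of the word are zero and free to carry flags; the largest flag
 * defined below is 0x200 (bit 9), matching the "can't use any flag that
 * is bigger than 0x200" rule.
 */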
/*
 * RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS. It
 * worked for pages that were filled with the same char. We switched
 * it to only search for the zero value, and renamed it to avoid
 * confusion with RAM_SAVE_FLAG_COMPRESS_PAGE.
 */
/*
 * RAM_SAVE_FLAG_FULL was obsoleted in 2009, it can be reused now
 */
#define RAM_SAVE_FLAG_FULL     0x01
#define RAM_SAVE_FLAG_ZERO     0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in rdma.h for RAM_SAVE_FLAG_HOOK */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
#define RAM_SAVE_FLAG_MULTIFD_FLUSH    0x200
/* We can't use any flag that is bigger than 0x200 */

XBZRLECacheStats xbzrle_counters;

/* used by the search for pages to send */
struct PageSearchStatus {
    /* The migration channel used for a specific host page */
    QEMUFile *pss_channel;
    /* Last block from where we have sent data */
    RAMBlock *last_sent_block;
    /* Current block being searched */
    RAMBlock *block;
    /* Current page to search from */
    unsigned long page;
    /* Set once we wrap around */
    bool complete_round;
    /* Whether we're sending a host page */
    bool host_page_sending;
    /* The start/end of current host page.  Invalid if host_page_sending==false */
    unsigned long host_page_start;
    unsigned long host_page_end;
};
typedef struct PageSearchStatus PageSearchStatus;

/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
    /* it will store a page full of zeros */
    uint8_t *zero_target_page;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
} XBZRLE;

static void XBZRLE_cache_lock(void)
{
    if (migrate_xbzrle()) {
        qemu_mutex_lock(&XBZRLE.lock);
    }
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_xbzrle()) {
        qemu_mutex_unlock(&XBZRLE.lock);
    }
}
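/*
 * Illustrative usage sketch (added commentary, not from the original
 * source): code that touches XBZRLE.cache wraps the access in the lock
 * helpers above, which are no-ops when XBZRLE is disabled:
 *
 *     XBZRLE_cache_lock();
 *     cache_insert(XBZRLE.cache, addr, data, generation);
 *     XBZRLE_cache_unlock();
 *
 * save_zero_page() below follows exactly this pattern when it refreshes
 * a now-zero page in the cache.
 */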
/**
 * xbzrle_cache_resize: resize the xbzrle cache
 *
 * This function is called from migrate_params_apply in main
 * thread, possibly while a migration is in progress. A running
 * migration may be using the cache and might finish during this call,
 * hence changes to the cache are protected by the XBZRLE.lock mutex.
 *
 * Returns 0 for success or -1 for error
 *
 * @new_size: new cache size
 * @errp: set *errp if the check failed, with reason
 */
int xbzrle_cache_resize(uint64_t new_size, Error **errp)
{
    PageCache *new_cache;
    int64_t ret = 0;

    /* Check for truncation */
    if (new_size != (size_t)new_size) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return -1;
    }

    if (new_size == migrate_xbzrle_cache_size()) {
        /* nothing to do */
        return 0;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
        if (!new_cache) {
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }
out:
    XBZRLE_cache_unlock();
    return ret;
}

static bool postcopy_preempt_active(void)
{
    return migrate_postcopy_preempt() && migration_in_postcopy();
}

bool migrate_ram_is_ignored(RAMBlock *block)
{
    return !qemu_ram_is_migratable(block) ||
           (migrate_ignore_shared() && qemu_ram_is_shared(block)
            && qemu_ram_is_named_file(block));
}

#undef RAMBLOCK_FOREACH

int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        ret = func(block, opaque);
        if (ret) {
            break;
        }
    }
    return ret;
}

static void ramblock_recv_map_init(void)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        assert(!rb->receivedmap);
        rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
    }
}

int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
{
    return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
                    rb->receivedmap);
}

bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
{
    return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
}

void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
{
    set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
}

void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
                                    size_t nr)
{
    bitmap_set_atomic(rb->receivedmap,
                      ramblock_recv_bitmap_offset(host_addr, rb),
                      nr);
}

#define  RAMBLOCK_RECV_BITMAP_ENDING  (0x0123456789abcdefULL)

/*
 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
 *
 * Returns >0 if success with sent bytes, or <0 if error.
 */
int64_t ramblock_recv_bitmap_send(QEMUFile *file,
                                  const char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);
    unsigned long *le_bitmap, nbits;
    uint64_t size;

    if (!block) {
        error_report("%s: invalid block name: %s", __func__, block_name);
        return -1;
    }

    nbits = block->postcopy_length >> TARGET_PAGE_BITS;

    /*
     * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
     * machines we may need 4 more bytes for padding (see below
     * comment). So extend it a bit before hand.
     */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
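    /*
     * Worked example (added commentary, not from the original source):
     * for a 1 GiB block with 4 KiB target pages, nbits = 262144, so the
     * bitmap body is 262144 / 8 = 32768 bytes, already a multiple of 8.
     * With, say, nbits = 36 the raw size would be 5 bytes and the
     * ROUND_UP() below pads it to 8 so both sides agree on the length.
     */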
    /*
     * Always use little endian when sending the bitmap. This is
     * required so that the bitmap is parsed the same way even when
     * source and destination VMs differ in endianness. (Sending the
     * bitmap in big endian would not work.)
     */
    bitmap_to_le(le_bitmap, block->receivedmap, nbits);

    /* Size of the bitmap, in bytes */
    size = DIV_ROUND_UP(nbits, 8);

    /*
     * size is always aligned to 8 bytes for 64bit machines, but it
     * may not be true for 32bit machines. We need this padding to
     * make sure the migration can survive even between 32bit and
     * 64bit machines.
     */
    size = ROUND_UP(size, 8);

    qemu_put_be64(file, size);
    qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
    /*
     * Mark as an end, in case the middle part is screwed up due to
     * some "mysterious" reason.
     */
    qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
    qemu_fflush(file);

    g_free(le_bitmap);

    if (qemu_file_get_error(file)) {
        return qemu_file_get_error(file);
    }

    return size + sizeof(size);
}

/*
 * An outstanding page request, on the source, having been received
 * and queued
 */
struct RAMSrcPageRequest {
    RAMBlock *rb;
    hwaddr    offset;
    hwaddr    len;

    QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
};

/* State of RAM for migration */
struct RAMState {
    /*
     * PageSearchStatus structures for the channels when sending pages.
     * Protected by the bitmap_mutex.
     */
    PageSearchStatus pss[RAM_CHANNEL_MAX];
    /* UFFD file descriptor, used in 'write-tracking' migration */
    int uffdio_fd;
    /* total ram size in bytes */
    uint64_t ram_bytes_total;
    /* Last block that we have visited searching for dirty pages */
    RAMBlock *last_seen_block;
    /* Last dirty target page we have sent */
    ram_addr_t last_page;
    /* last ram version we have seen */
    uint32_t last_version;
    /* How many times we have dirty too many pages */
    int dirty_rate_high_cnt;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
    /* bytes transferred at start_time */
    uint64_t bytes_xfer_prev;
    /* number of dirty pages since start_time */
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;
    /* Amount of xbzrle pages since the beginning of the period */
    uint64_t xbzrle_pages_prev;
    /* Amount of xbzrle encoded bytes since the beginning of the period */
    uint64_t xbzrle_bytes_prev;
    /* Are we really using XBZRLE (e.g., after the first round). */
    bool xbzrle_started;
    /* Are we on the last stage of migration */
    bool last_stage;

    /* total handled target pages at the beginning of period */
    uint64_t target_page_count_prev;
    /* total handled target pages since start */
    uint64_t target_page_count;
    /* number of dirty bits in the bitmap */
    uint64_t migration_dirty_pages;
    /*
     * Protects:
     * - dirty/clear bitmap
     * - migration_dirty_pages
     * - pss structures
     */
    QemuMutex bitmap_mutex;
    /* The RAMBlock used in the last src_page_requests */
    RAMBlock *last_req_rb;
    /* Queue of outstanding page requests from the destination */
    QemuMutex src_page_req_mutex;
    QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;

    /*
     * This is only used when postcopy is in recovery phase, to communicate
     * between the migration thread and the return path thread on dirty
     * bitmap synchronizations. This field is unused in other stages of
     * RAM migration.
     */
    unsigned int postcopy_bmap_sync_requested;
};
typedef struct RAMState RAMState;

static RAMState *ram_state;

static NotifierWithReturnList precopy_notifier_list;

/* Whether postcopy has queued requests */
static bool postcopy_has_request(RAMState *rs)
{
    return !QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests);
}

void precopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&precopy_notifier_list);
}

void precopy_add_notifier(NotifierWithReturn *n)
{
    notifier_with_return_list_add(&precopy_notifier_list, n);
}

void precopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int precopy_notify(PrecopyNotifyReason reason, Error **errp)
{
    PrecopyNotifyData pnd;
    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
}

uint64_t ram_bytes_remaining(void)
{
    return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
                       0;
}

void ram_transferred_add(uint64_t bytes)
{
    if (runstate_is_running()) {
        stat64_add(&mig_stats.precopy_bytes, bytes);
    } else if (migration_in_postcopy()) {
        stat64_add(&mig_stats.postcopy_bytes, bytes);
    } else {
        stat64_add(&mig_stats.downtime_bytes, bytes);
    }
    stat64_add(&mig_stats.transferred, bytes);
}

struct MigrationOps {
    int (*ram_save_target_page)(RAMState *rs, PageSearchStatus *pss);
};
typedef struct MigrationOps MigrationOps;

MigrationOps *migration_ops;

static int ram_save_host_page_urgent(PageSearchStatus *pss);

/* NOTE: page is the PFN not real ram_addr_t. */
static void pss_init(PageSearchStatus *pss, RAMBlock *rb, ram_addr_t page)
{
    pss->block = rb;
    pss->page = page;
    pss->complete_round = false;
}

/*
 * Check whether two PSSs are actively sending the same page. Return true
 * if they are, false otherwise.
 */
static bool pss_overlap(PageSearchStatus *pss1, PageSearchStatus *pss2)
{
    return pss1->host_page_sending && pss2->host_page_sending &&
           (pss1->host_page_start == pss2->host_page_start);
}

/**
 * save_page_header: write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns the number of bytes written
 *
 * @pss: current PSS channel status
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(PageSearchStatus *pss, QEMUFile *f,
                               RAMBlock *block, ram_addr_t offset)
{
    size_t size, len;
    bool same_block = (block == pss->last_sent_block);

    if (same_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    qemu_put_be64(f, offset);
    size = 8;

    if (!same_block) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
        pss->last_sent_block = block;
    }
    return size;
}
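/*
 * Illustrative wire layout (added commentary, not from the original
 * source) of what save_page_header() emits for a page at offset 0x2000
 * of a block named "pc.ram" (a typical x86 RAMBlock name):
 *
 *   first page from the block:  be64(0x2000 | flags) + byte(6) + "pc.ram"
 *                               -> 8 + 1 + 6 = 15 bytes
 *   subsequent pages:           be64(0x2000 | flags | RAM_SAVE_FLAG_CONTINUE)
 *                               -> 8 bytes, block name omitted
 */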
/**
 * mig_throttle_guest_down: throttle down the guest
 *
 * Reduce amount of guest cpu execution to hopefully slow down memory
 * writes. If guest dirty memory rate is reduced below the rate at
 * which we can transfer pages to the destination then we should be
 * able to complete migration. Some workloads dirty memory way too
 * fast and will not effectively converge, even with auto-converge.
 */
static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
                                    uint64_t bytes_dirty_threshold)
{
    uint64_t pct_initial = migrate_cpu_throttle_initial();
    uint64_t pct_increment = migrate_cpu_throttle_increment();
    bool pct_tailslow = migrate_cpu_throttle_tailslow();
    int pct_max = migrate_max_cpu_throttle();

    uint64_t throttle_now = cpu_throttle_get_percentage();
    uint64_t cpu_now, cpu_ideal, throttle_inc;

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        if (!pct_tailslow) {
            throttle_inc = pct_increment;
        } else {
            /* Compute the ideal CPU percentage used by the guest, which
             * may make the dirty rate match the dirty rate threshold. */
            cpu_now = 100 - throttle_now;
            cpu_ideal = cpu_now * (bytes_dirty_threshold * 1.0 /
                                   bytes_dirty_period);
            throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment);
        }
        cpu_throttle_set(MIN(throttle_now + throttle_inc, pct_max));
    }
}

void mig_throttle_counter_reset(void)
{
    RAMState *rs = ram_state;

    rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    rs->num_dirty_pages_period = 0;
    rs->bytes_xfer_prev = stat64_get(&mig_stats.transferred);
}
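/*
 * Worked example for the tailslow path above (added commentary, not from
 * the original source): with throttle_now = 20 the guest keeps
 * cpu_now = 80% of the CPU. If the period dirtied twice as many bytes as
 * were transferred (bytes_dirty_threshold / bytes_dirty_period = 0.5),
 * then cpu_ideal = 40 and the increment is MIN(80 - 40, pct_increment),
 * instead of blindly adding pct_increment when already close to the
 * convergence point.
 */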
/**
 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
 *
 * @current_addr: address for the zero page
 *
 * Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
                 stat64_get(&mig_stats.dirty_sync_count));
}

#define ENCODING_FLAG_XBZRLE 0x1

/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @rs: current RAM state
 * @pss: current PSS channel
 * @current_data: pointer to the address of the page contents
 * @current_addr: addr of the page
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss,
                            uint8_t **current_data, ram_addr_t current_addr,
                            RAMBlock *block, ram_addr_t offset)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;
    QEMUFile *file = pss->pss_channel;
    uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);

    if (!cache_is_cached(XBZRLE.cache, current_addr, generation)) {
        xbzrle_counters.cache_miss++;
        if (!rs->last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             generation) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    /*
     * Reaching here means the page has hit the xbzrle cache, no matter what
     * encoding result it is (normal encoding, overflow or skipping the page),
     * count the page as encoded. This is used to calculate the encoding rate.
     *
     * Example: 2 pages (8KB) being encoded, first page encoding generates 2KB,
     * 2nd page turns out to be skipped (i.e. no new bytes written to the
     * page), the overall encoding rate will be 8KB / 2KB = 4, which has the
     * skipped page included. In this way, the encoding rate can tell if the
     * guest page is good for xbzrle encoding.
     */
    xbzrle_counters.pages++;
    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);

    /*
     * Update the cache contents, so that it corresponds to the data
     * sent, in all cases except where we skip the page.
     */
    if (!rs->last_stage && encoded_len != 0) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
        /*
         * In the case where we couldn't compress, ensure that the caller
         * sends the data from the cache, since the guest might have
         * changed the RAM since we copied it.
         */
        *current_data = prev_cached_page;
    }

    if (encoded_len == 0) {
        trace_save_xbzrle_page_skipping();
        return 0;
    } else if (encoded_len == -1) {
        trace_save_xbzrle_page_overflow();
        xbzrle_counters.overflow++;
        xbzrle_counters.bytes += TARGET_PAGE_SIZE;
        return -1;
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(pss, pss->pss_channel, block,
                                    offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(file, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(file, encoded_len);
    qemu_put_buffer(file, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    /*
     * Like compressed_size (please see update_compress_thread_counts),
     * the xbzrle encoded bytes don't count the 8 byte header with
     * RAM_SAVE_FLAG_CONTINUE.
     */
    xbzrle_counters.bytes += bytes_xbzrle - 8;
    ram_transferred_add(bytes_xbzrle);

    return 1;
}

/**
 * pss_find_next_dirty: find the next dirty page of current ramblock
 *
 * This function updates pss->page to point to the next dirty page index
 * within the ramblock to migrate, or the end of ramblock when nothing
 * found. Note that when pss->host_page_sending==true it means we're in
 * the middle of sending a host page, so we won't look for dirty pages
 * outside the host page boundary.
 *
 * @pss: the current page search status
 */
static void pss_find_next_dirty(PageSearchStatus *pss)
{
    RAMBlock *rb = pss->block;
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;

    if (migrate_ram_is_ignored(rb)) {
        /* Points directly to the end, so we know no dirty page */
        pss->page = size;
        return;
    }

    /*
     * If we're sending a host page, only look for dirty pages within
     * the current host page being sent.
     */
    if (pss->host_page_sending) {
        assert(pss->host_page_end);
        size = MIN(size, pss->host_page_end);
    }

    pss->page = find_next_bit(bitmap, size, pss->page);
}

static void migration_clear_memory_region_dirty_bitmap(RAMBlock *rb,
                                                       unsigned long page)
{
    uint8_t shift;
    hwaddr size, start;

    if (!rb->clear_bmap || !clear_bmap_test_and_clear(rb, page)) {
        return;
    }

    shift = rb->clear_bmap_shift;
    /*
     * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
     * can make things easier sometimes since then start address
     * of the small chunk will always be 64 pages aligned so the
     * bitmap will always be aligned to unsigned long. We should
     * even be able to remove this restriction but I'm simply
     * keeping it.
     */
    assert(shift >= 6);

    size = 1ULL << (TARGET_PAGE_BITS + shift);
    start = QEMU_ALIGN_DOWN((ram_addr_t)page << TARGET_PAGE_BITS, size);
    trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
    memory_region_clear_dirty_bitmap(rb->mr, start, size);
}

static void
migration_clear_memory_region_dirty_bitmap_range(RAMBlock *rb,
                                                 unsigned long start,
                                                 unsigned long npages)
{
    unsigned long i, chunk_pages = 1UL << rb->clear_bmap_shift;
    unsigned long chunk_start = QEMU_ALIGN_DOWN(start, chunk_pages);
    unsigned long chunk_end = QEMU_ALIGN_UP(start + npages, chunk_pages);
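    /*
     * Worked example (added commentary, not from the original source):
     * with clear_bmap_shift = 6 each chunk covers 64 pages, so
     * start = 100, npages = 200 gives chunk_start = 64 and
     * chunk_end = 320, and the loop below clears the chunks starting at
     * pages 64, 128, 192 and 256.
     */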
    /*
     * Clear pages from start to start + npages - 1, so the end boundary
     * is exclusive.
     */
    for (i = chunk_start; i < chunk_end; i += chunk_pages) {
        migration_clear_memory_region_dirty_bitmap(rb, i);
    }
}

/*
 * colo_bitmap_find_dirty: find contiguous dirty pages from start
 *
 * Returns the page offset within memory region of the start of the
 * contiguous dirty pages
 *
 * @rs: current RAM state
 * @rb: RAMBlock where to search for dirty pages
 * @start: page where we start the search
 * @num: the number of contiguous dirty pages
 */
static inline
unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                     unsigned long start, unsigned long *num)
{
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;
    unsigned long first, next;

    *num = 0;

    if (migrate_ram_is_ignored(rb)) {
        return size;
    }

    first = find_next_bit(bitmap, size, start);
    if (first >= size) {
        return first;
    }
    next = find_next_zero_bit(bitmap, size, first + 1);
    assert(next >= first);
    *num = next - first;
    return first;
}

static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                RAMBlock *rb,
                                                unsigned long page)
{
    bool ret;

    /*
     * Clear dirty bitmap if needed. This _must_ be called before we
     * send any of the page in the chunk because we need to make sure
     * we can capture further page content changes when we sync dirty
     * log the next time. So as long as we are going to send any of
     * the page in the chunk we clear the remote dirty bitmap for all.
     * Clearing it earlier won't be a problem, but too late will.
     */
    migration_clear_memory_region_dirty_bitmap(rb, page);

    ret = test_and_clear_bit(page, rb->bmap);
    if (ret) {
        rs->migration_dirty_pages--;
    }

    return ret;
}

static void dirty_bitmap_clear_section(MemoryRegionSection *section,
                                       void *opaque)
{
    const hwaddr offset = section->offset_within_region;
    const hwaddr size = int128_get64(section->size);
    const unsigned long start = offset >> TARGET_PAGE_BITS;
    const unsigned long npages = size >> TARGET_PAGE_BITS;
    RAMBlock *rb = section->mr->ram_block;
    uint64_t *cleared_bits = opaque;

    /*
     * We don't grab ram_state->bitmap_mutex because we expect to run
     * only when starting migration or during postcopy recovery where
     * we don't have concurrent access.
     */
    if (!migration_in_postcopy() && !migrate_background_snapshot()) {
        migration_clear_memory_region_dirty_bitmap_range(rb, start, npages);
    }
    *cleared_bits += bitmap_count_one_with_offset(rb->bmap, start, npages);
    bitmap_clear(rb->bmap, start, npages);
}
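/*
 * Context note (added commentary, not from the original source): the
 * typical RamDiscardManager in QEMU is virtio-mem, which logically
 * unplugs parts of a RAMBlock; the replay helpers used below walk only
 * the discarded (resp. populated) parts of such a block.
 */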
/*
 * Exclude all dirty pages from migration that fall into a discarded range as
 * managed by a RamDiscardManager responsible for the mapped memory region of
 * the RAMBlock. Clear the corresponding bits in the dirty bitmaps.
 *
 * Discarded pages ("logically unplugged") have undefined content and must
 * not get migrated, because even reading these pages for migration might
 * result in undesired behavior.
 *
 * Returns the number of cleared bits in the RAMBlock dirty bitmap.
 *
 * Note: The result is only stable while migrating (precopy/postcopy).
 */
static uint64_t ramblock_dirty_bitmap_clear_discarded_pages(RAMBlock *rb)
{
    uint64_t cleared_bits = 0;

    if (rb->mr && rb->bmap && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = 0,
            .size = int128_make64(qemu_ram_get_used_length(rb)),
        };

        ram_discard_manager_replay_discarded(rdm, &section,
                                             dirty_bitmap_clear_section,
                                             &cleared_bits);
    }
    return cleared_bits;
}

/*
 * Check if a host-page aligned page falls into a discarded range as managed by
 * a RamDiscardManager responsible for the mapped memory region of the RAMBlock.
 *
 * Note: The result is only stable while migrating (precopy/postcopy).
 */
bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start)
{
    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = start,
            .size = int128_make64(qemu_ram_pagesize(rb)),
        };

        return !ram_discard_manager_is_populated(rdm, &section);
    }
    return false;
}

/* Called with RCU critical section */
static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
{
    uint64_t new_dirty_pages =
        cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length);

    rs->migration_dirty_pages += new_dirty_pages;
    rs->num_dirty_pages_period += new_dirty_pages;
}
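/*
 * Worked example (added commentary, not from the original source): a VM
 * with ordinary 4 KiB pages plus a 2 MiB hugetlbfs-backed block yields
 * 0x1000 | 0x200000 = 0x201000 from ram_pagesize_summary() below; a
 * single set bit therefore means all blocks share one page size.
 */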
/**
 * ram_pagesize_summary: calculate all the pagesizes of a VM
 *
 * Returns a summary bitmap of the page sizes of all RAMBlocks
 *
 * For VMs with just normal pages this is equivalent to the host page
 * size. If it's got some huge pages then it's the OR of all the
 * different page sizes.
 */
uint64_t ram_pagesize_summary(void)
{
    RAMBlock *block;
    uint64_t summary = 0;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        summary |= block->page_size;
    }

    return summary;
}

uint64_t ram_get_total_transferred_pages(void)
{
    return stat64_get(&mig_stats.normal_pages) +
           stat64_get(&mig_stats.zero_pages) +
           compress_ram_pages() + xbzrle_counters.pages;
}

static void migration_update_rates(RAMState *rs, int64_t end_time)
{
    uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;

    /* calculate period counters */
    stat64_set(&mig_stats.dirty_pages_rate,
               rs->num_dirty_pages_period * 1000 /
               (end_time - rs->time_last_bitmap_sync));

    if (!page_count) {
        return;
    }

    if (migrate_xbzrle()) {
        double encoded_size, unencoded_size;

        xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
            rs->xbzrle_cache_miss_prev) / page_count;
        rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
        unencoded_size = (xbzrle_counters.pages - rs->xbzrle_pages_prev) *
                         TARGET_PAGE_SIZE;
        encoded_size = xbzrle_counters.bytes - rs->xbzrle_bytes_prev;
        if (xbzrle_counters.pages == rs->xbzrle_pages_prev || !encoded_size) {
            xbzrle_counters.encoding_rate = 0;
        } else {
            xbzrle_counters.encoding_rate = unencoded_size / encoded_size;
        }
        rs->xbzrle_pages_prev = xbzrle_counters.pages;
        rs->xbzrle_bytes_prev = xbzrle_counters.bytes;
    }
    compress_update_rates(page_count);
}

/*
 * Enable dirty-limit to throttle down the guest
 */
static void migration_dirty_limit_guest(void)
{
    /*
     * dirty page rate quota for all vCPUs fetched from
     * migration parameter 'vcpu_dirty_limit'
     */
    static int64_t quota_dirtyrate;
    MigrationState *s = migrate_get_current();

    /*
     * If the dirty limit is already enabled and the migration parameter
     * vcpu-dirty-limit is untouched, there is nothing to do.
     */
    if (dirtylimit_in_service() &&
        quota_dirtyrate == s->parameters.vcpu_dirty_limit) {
        return;
    }

    quota_dirtyrate = s->parameters.vcpu_dirty_limit;

    /*
     * Set a quota dirty rate for all vCPUs; note that the second
     * parameter is ignored when setting it for all vCPUs of the VM.
     */
    qmp_set_vcpu_dirty_limit(false, -1, quota_dirtyrate, NULL);
    trace_migration_dirty_limit_guest(quota_dirtyrate);
}

static void migration_trigger_throttle(RAMState *rs)
{
    uint64_t threshold = migrate_throttle_trigger_threshold();
    uint64_t bytes_xfer_period =
        stat64_get(&mig_stats.transferred) - rs->bytes_xfer_prev;
    uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
    uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;

    /*
     * During block migration the auto-converge logic incorrectly detects
     * that ram migration makes no progress. Avoid this by disabling the
     * throttling logic during the bulk phase of block migration.
     */
    if (blk_mig_bulk_active()) {
        return;
    }
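    /*
     * Worked example (added commentary, not from the original source):
     * with e.g. threshold = 50, transferring 1 GiB in the last period
     * sets bytes_dirty_threshold to 512 MiB; dirtying more than that in
     * two consecutive periods triggers auto-converge or the dirty limit
     * below.
     */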
    /*
     * The following detection logic can be refined later. For now:
     * Check to see if the ratio between dirtied bytes and the approx.
     * amount of bytes that just got transferred since the last time
     * we were in this routine reaches the threshold. If that happens
     * twice, start or increase throttling.
     */
    if ((bytes_dirty_period > bytes_dirty_threshold) &&
        (++rs->dirty_rate_high_cnt >= 2)) {
        rs->dirty_rate_high_cnt = 0;
        if (migrate_auto_converge()) {
            trace_migration_throttle();
            mig_throttle_guest_down(bytes_dirty_period,
                                    bytes_dirty_threshold);
        } else if (migrate_dirty_limit()) {
            migration_dirty_limit_guest();
        }
    }
}

static void migration_bitmap_sync(RAMState *rs, bool last_stage)
{
    RAMBlock *block;
    int64_t end_time;

    stat64_add(&mig_stats.dirty_sync_count, 1);

    if (!rs->time_last_bitmap_sync) {
        rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    memory_global_dirty_log_sync(last_stage);

    qemu_mutex_lock(&rs->bitmap_mutex);
    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            ramblock_sync_dirty_bitmap(rs, block);
        }
        stat64_set(&mig_stats.dirty_bytes_last_sync, ram_bytes_remaining());
    }
    qemu_mutex_unlock(&rs->bitmap_mutex);

    memory_global_after_dirty_log_sync();
    trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);

    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > rs->time_last_bitmap_sync + 1000) {
        migration_trigger_throttle(rs);

        migration_update_rates(rs, end_time);

        rs->target_page_count_prev = rs->target_page_count;

        /* reset period counters */
        rs->time_last_bitmap_sync = end_time;
        rs->num_dirty_pages_period = 0;
        rs->bytes_xfer_prev = stat64_get(&mig_stats.transferred);
    }
    if (migrate_events()) {
        uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);
        qapi_event_send_migration_pass(generation);
    }
}

static void migration_bitmap_sync_precopy(RAMState *rs, bool last_stage)
{
    Error *local_err = NULL;

    /*
     * The current notifier usage is just an optimization to migration, so we
     * don't stop the normal migration process in the error case.
     */
    if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
        error_report_err(local_err);
        local_err = NULL;
    }

    migration_bitmap_sync(rs, last_stage);

    if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
        error_report_err(local_err);
    }
}

void ram_release_page(const char *rbname, uint64_t offset)
{
    if (!migrate_release_ram() || !migration_in_postcopy()) {
        return;
    }

    ram_discard_range(rbname, offset, TARGET_PAGE_SIZE);
}
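/*
 * Illustrative wire layout (added commentary, not from the original
 * source): a zero page emitted by save_zero_page() below is just the
 * usual page header with RAM_SAVE_FLAG_ZERO set, followed by a single
 * 0x00 byte, i.e. 9 bytes (plus the block name on the first page from a
 * block) instead of a full TARGET_PAGE_SIZE payload.
 */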
/**
 * save_zero_page: send the zero page to the stream
 *
 * Returns the number of bytes written, or 0 if the page is not a zero page.
 *
 * @rs: current RAM state
 * @pss: current PSS channel
 * @offset: offset inside the block for the page
 */
static int save_zero_page(RAMState *rs, PageSearchStatus *pss,
                          ram_addr_t offset)
{
    uint8_t *p = pss->block->host + offset;
    QEMUFile *file = pss->pss_channel;
    int len = 0;

    if (!buffer_is_zero(p, TARGET_PAGE_SIZE)) {
        return 0;
    }

    len += save_page_header(pss, file, pss->block, offset | RAM_SAVE_FLAG_ZERO);
    qemu_put_byte(file, 0);
    len += 1;
    ram_release_page(pss->block->idstr, offset);

    stat64_add(&mig_stats.zero_pages, 1);
    ram_transferred_add(len);

    /*
     * Must let xbzrle know, otherwise a previous (now 0'd) cached
     * page would be stale.
     */
    if (rs->xbzrle_started) {
        XBZRLE_cache_lock();
        xbzrle_cache_zero_page(pss->block->offset + offset);
        XBZRLE_cache_unlock();
    }

    return len;
}

/*
 * @pages: the number of pages written by the control path,
 *        < 0 - error
 *        > 0 - number of pages written
 *
 * Return true if the page has been saved, otherwise false is returned.
 */
static bool control_save_page(PageSearchStatus *pss,
                              ram_addr_t offset, int *pages)
{
    int ret;

    ret = rdma_control_save_page(pss->pss_channel, pss->block->offset, offset,
                                 TARGET_PAGE_SIZE);
    if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
        return false;
    }

    if (ret == RAM_SAVE_CONTROL_DELAYED) {
        *pages = 1;
        return true;
    }
    *pages = ret;
    return true;
}

/*
 * directly send the page to the stream
 *
 * Returns the number of pages written.
 *
 * @pss: current PSS channel
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @buf: the page to be sent
 * @async: send the page asynchronously
 */
static int save_normal_page(PageSearchStatus *pss, RAMBlock *block,
                            ram_addr_t offset, uint8_t *buf, bool async)
{
    QEMUFile *file = pss->pss_channel;

    ram_transferred_add(save_page_header(pss, pss->pss_channel, block,
                                         offset | RAM_SAVE_FLAG_PAGE));
    if (async) {
        qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE,
                              migrate_release_ram() &&
                              migration_in_postcopy());
    } else {
        qemu_put_buffer(file, buf, TARGET_PAGE_SIZE);
    }
    ram_transferred_add(TARGET_PAGE_SIZE);
    stat64_add(&mig_stats.normal_pages, 1);
    return 1;
}
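/*
 * Added commentary (not from the original source): the async variant
 * above hands the buffer to the file layer without copying it, so the
 * page must stay stable until it is flushed. That is why ram_save_page()
 * below forces a synchronous send when the data may come from the XBZRLE
 * cache, which can be updated again before the buffer reaches the wire.
 */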
/**
 * ram_save_page: send the given page to the stream
 *
 * Returns the number of pages written.
 *          < 0 - error
 *          >=0 - Number of pages written - this might legally be 0
 *                if xbzrle noticed the page was the same.
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 */
static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
{
    int pages = -1;
    uint8_t *p;
    bool send_async = true;
    RAMBlock *block = pss->block;
    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
    ram_addr_t current_addr = block->offset + offset;

    p = block->host + offset;
    trace_ram_save_page(block->idstr, (uint64_t)offset, p);

    XBZRLE_cache_lock();
    if (rs->xbzrle_started && !migration_in_postcopy()) {
        pages = save_xbzrle_page(rs, pss, &p, current_addr,
                                 block, offset);
        if (!rs->last_stage) {
            /*
             * Can't send this cached data async, since the cache page
             * might get updated before it gets to the wire
             */
            send_async = false;
        }
    }

    /* XBZRLE overflow or normal page */
    if (pages == -1) {
        pages = save_normal_page(pss, block, offset, p, send_async);
    }

    XBZRLE_cache_unlock();

    return pages;
}

static int ram_save_multifd_page(QEMUFile *file, RAMBlock *block,
                                 ram_addr_t offset)
{
    if (multifd_queue_page(file, block, offset) < 0) {
        return -1;
    }
    stat64_add(&mig_stats.normal_pages, 1);

    return 1;
}

int compress_send_queued_data(CompressParam *param)
{
    PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_PRECOPY];
    MigrationState *ms = migrate_get_current();
    QEMUFile *file = ms->to_dst_file;
    int len = 0;

    RAMBlock *block = param->block;
    ram_addr_t offset = param->offset;

    if (param->result == RES_NONE) {
        return 0;
    }

    assert(block == pss->last_sent_block);

    if (param->result == RES_ZEROPAGE) {
        assert(qemu_file_buffer_empty(param->file));
        len += save_page_header(pss, file, block, offset | RAM_SAVE_FLAG_ZERO);
        qemu_put_byte(file, 0);
        len += 1;
        ram_release_page(block->idstr, offset);
    } else if (param->result == RES_COMPRESS) {
        assert(!qemu_file_buffer_empty(param->file));
        len += save_page_header(pss, file, block,
                                offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
        len += qemu_put_qemu_file(file, param->file);
    } else {
        abort();
    }

    update_compress_thread_counts(param, len);

    return len;
}

#define PAGE_ALL_CLEAN 0
#define PAGE_TRY_AGAIN 1
#define PAGE_DIRTY_FOUND 2
/**
 * find_dirty_block: find the next dirty page and update any state
 * associated with the search process.
 *
 * Returns:
 *         <0: An error happened
 *         PAGE_ALL_CLEAN: no dirty page found, give up
 *         PAGE_TRY_AGAIN: no dirty page found, retry for next block
 *         PAGE_DIRTY_FOUND: dirty page found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 */
static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
{
    /* Update pss->page for the next dirty bit in ramblock */
    pss_find_next_dirty(pss);

    if (pss->complete_round && pss->block == rs->last_seen_block &&
        pss->page >= rs->last_page) {
        /*
         * We've been once around the RAM and haven't found anything.
         * Give up.
         */
        return PAGE_ALL_CLEAN;
    }
    if (!offset_in_ramblock(pss->block,
                            ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)) {
        /* Didn't find anything in this RAM Block */
        pss->page = 0;
        pss->block = QLIST_NEXT_RCU(pss->block, next);
        if (!pss->block) {
            if (migrate_multifd() &&
                !migrate_multifd_flush_after_each_section()) {
                QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
                int ret = multifd_send_sync_main(f);
                if (ret < 0) {
                    return ret;
                }
                qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
                qemu_fflush(f);
            }
            /*
             * If memory migration starts over, we will meet a dirtied page
             * which may still exist in the compression threads' ring, so we
             * should flush the compressed data to make sure the new page
             * is not overwritten by the old one in the destination.
             *
             * Also, if xbzrle is on, stop using data compression at this
             * point. In theory, xbzrle can do better than compression.
             */
            compress_flush_data();

            /* Hit the end of the list */
            pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
            /* Flag that we've looped */
            pss->complete_round = true;
            /* After the first round, enable XBZRLE. */
            if (migrate_xbzrle()) {
                rs->xbzrle_started = true;
            }
        }
        /* Didn't find anything this time, but try again on the new block */
        return PAGE_TRY_AGAIN;
    } else {
        /* We've found something */
        return PAGE_DIRTY_FOUND;
    }
}

/**
 * unqueue_page: gets a page of the queue
 *
 * Helper for 'get_queued_page' - gets a page off the queue
 *
 * Returns the block of the page (or NULL if none available)
 *
 * @rs: current RAM state
 * @offset: used to return the offset within the RAMBlock
 */
static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
{
    struct RAMSrcPageRequest *entry;
    RAMBlock *block = NULL;

    if (!postcopy_has_request(rs)) {
        return NULL;
    }

    QEMU_LOCK_GUARD(&rs->src_page_req_mutex);

    /*
     * This should _never_ change even after we take the lock, because no one
     * should be taking anything off the request list other than us.
     */
    assert(postcopy_has_request(rs));

    entry = QSIMPLEQ_FIRST(&rs->src_page_requests);
    block = entry->rb;
    *offset = entry->offset;

    if (entry->len > TARGET_PAGE_SIZE) {
        entry->len -= TARGET_PAGE_SIZE;
        entry->offset += TARGET_PAGE_SIZE;
    } else {
        memory_region_unref(block->mr);
        QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
        g_free(entry);
        migration_consume_urgent_request();
    }

    return block;
}

#if defined(__linux__)
/**
 * poll_fault_page: try to get next UFFD write fault page and, if pending fault
 *   is found, return RAM block pointer and page offset
 *
 * Returns pointer to the RAMBlock containing faulting page,
 *   NULL if no write faults are pending
 *
 * @rs: current RAM state
 * @offset: page offset from the beginning of the block
 */
static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
{
    struct uffd_msg uffd_msg;
    void *page_address;
    RAMBlock *block;
    int res;

    if (!migrate_background_snapshot()) {
        return NULL;
    }

    res = uffd_read_events(rs->uffdio_fd, &uffd_msg, 1);
    if (res <= 0) {
        return NULL;
    }

    page_address = (void *)(uintptr_t) uffd_msg.arg.pagefault.address;
    block = qemu_ram_block_from_host(page_address, false, offset);
    assert(block && (block->flags & RAM_UF_WRITEPROTECT) != 0);
    return block;
}

/**
 * ram_save_release_protection: release UFFD write protection after
 *   a range of pages has been saved
 *
 * @rs: current RAM state
 * @pss: page-search-status structure
 * @start_page: index of the first page in the range relative to pss->block
 *
 * Returns 0 on success, negative value in case of an error
 */
static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
                                       unsigned long start_page)
{
    int res = 0;

    /* Check if page is from UFFD-managed region. */
    if (pss->block->flags & RAM_UF_WRITEPROTECT) {
        void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS);
        uint64_t run_length = (pss->page - start_page) << TARGET_PAGE_BITS;

        /* Flush async buffers before un-protect. */
        qemu_fflush(pss->pss_channel);
        /* Un-protect memory range.
         */
        res = uffd_change_protection(rs->uffdio_fd, page_address, run_length,
                                     false, false);
    }

    return res;
}

/*
 * ram_write_tracking_available: check if kernel supports required UFFD features
 *
 * Returns true if supported, false otherwise
 */
bool ram_write_tracking_available(void)
{
    uint64_t uffd_features;
    int res;

    res = uffd_query_features(&uffd_features);
    return (res == 0 &&
            (uffd_features & UFFD_FEATURE_PAGEFAULT_FLAG_WP) != 0);
}

/*
 * ram_write_tracking_compatible: check if guest configuration is
 * compatible with 'write-tracking'
 *
 * Returns true if compatible, false otherwise
 */
bool ram_write_tracking_compatible(void)
{
    const uint64_t uffd_ioctls_mask = BIT(_UFFDIO_WRITEPROTECT);
    int uffd_fd;
    RAMBlock *block;
    bool ret = false;

    /* Open UFFD file descriptor */
    uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, false);
    if (uffd_fd < 0) {
        return false;
    }

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        uint64_t uffd_ioctls;

        /* Nothing to do with read-only and MMIO-writable regions */
        if (block->mr->readonly || block->mr->rom_device) {
            continue;
        }
        /* Try to register block memory via UFFD-IO to track writes */
        if (uffd_register_memory(uffd_fd, block->host, block->max_length,
                                 UFFDIO_REGISTER_MODE_WP, &uffd_ioctls)) {
            goto out;
        }
        if ((uffd_ioctls & uffd_ioctls_mask) != uffd_ioctls_mask) {
            goto out;
        }
    }
    ret = true;

out:
    uffd_close_fd(uffd_fd);
    return ret;
}

static inline void populate_read_range(RAMBlock *block, ram_addr_t offset,
                                       ram_addr_t size)
{
    const ram_addr_t end = offset + size;

    /*
     * We read one byte of each page; this will preallocate page tables if
     * required and populate the shared zeropage on MAP_PRIVATE anonymous memory
     * where no page was populated yet. This might require adaptation when
     * supporting other mappings, like shmem.
     */
    for (; offset < end; offset += block->page_size) {
        char tmp = *((char *)block->host + offset);

        /* Don't optimize the read out */
        asm volatile("" : "+r" (tmp));
    }
}

static inline int populate_read_section(MemoryRegionSection *section,
                                        void *opaque)
{
    const hwaddr size = int128_get64(section->size);
    hwaddr offset = section->offset_within_region;
    RAMBlock *block = section->mr->ram_block;

    populate_read_range(block, offset, size);
    return 0;
}

/*
 * ram_block_populate_read: preallocate page tables and populate pages in the
 *   RAM block by reading a byte of each page.
 *
 * Since it's solely used for userfault_fd WP feature, here we just
 *   hardcode page size to qemu_real_host_page_size.
 *
 * @rb: RAM block to populate
 */
static void ram_block_populate_read(RAMBlock *rb)
{
    /*
     * Skip populating all pages that fall into a discarded range as managed by
     * a RamDiscardManager responsible for the mapped memory region of the
     * RAMBlock. Such discarded ("logically unplugged") parts of a RAMBlock
     * must not get populated automatically.
     * We don't have to track modifications via userfaultfd WP reliably,
     * because these pages will not be part of the migration stream either
     * way -- see ramblock_dirty_bitmap_clear_discarded_pages().
     *
     * Note: The result is only stable while migrating (precopy/postcopy).
     */
    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = 0,
            .size = rb->mr->size,
        };

        ram_discard_manager_replay_populated(rdm, &section,
                                             populate_read_section, NULL);
    } else {
        populate_read_range(rb, 0, rb->used_length);
    }
}

/*
 * ram_write_tracking_prepare: prepare for UFFD-WP memory tracking
 */
void ram_write_tracking_prepare(void)
{
    RAMBlock *block;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        /* Nothing to do with read-only and MMIO-writable regions */
        if (block->mr->readonly || block->mr->rom_device) {
            continue;
        }

        /*
         * Populate pages of the RAM block before enabling userfault_fd
         * write protection.
         *
         * This stage is required since ioctl(UFFDIO_WRITEPROTECT) with
         * UFFDIO_WRITEPROTECT_MODE_WP mode setting would silently skip
         * pages with pte_none() entries in page table.
         */
        ram_block_populate_read(block);
    }
}

static inline int uffd_protect_section(MemoryRegionSection *section,
                                       void *opaque)
{
    const hwaddr size = int128_get64(section->size);
    const hwaddr offset = section->offset_within_region;
    RAMBlock *rb = section->mr->ram_block;
    int uffd_fd = (uintptr_t)opaque;

    return uffd_change_protection(uffd_fd, rb->host + offset, size, true,
                                  false);
}

static int ram_block_uffd_protect(RAMBlock *rb, int uffd_fd)
{
    assert(rb->flags & RAM_UF_WRITEPROTECT);

    /* See ram_block_populate_read() */
    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = 0,
            .size = rb->mr->size,
        };

        return ram_discard_manager_replay_populated(rdm, &section,
                                                    uffd_protect_section,
                                                    (void *)(uintptr_t)uffd_fd);
    }
    return uffd_change_protection(uffd_fd, rb->host,
                                  rb->used_length, true, false);
}

/*
 * ram_write_tracking_start: start UFFD-WP memory tracking
 *
 * Returns 0 for success or negative value in case of error
 */
int ram_write_tracking_start(void)
{
    int uffd_fd;
    RAMState *rs = ram_state;
    RAMBlock *block;

    /* Open UFFD file descriptor */
    uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, true);
    if (uffd_fd < 0) {
        return uffd_fd;
    }
    rs->uffdio_fd = uffd_fd;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        /* Nothing to do with read-only and MMIO-writable regions */
        if (block->mr->readonly || block->mr->rom_device) {
            continue;
        }

        /* Register block memory with UFFD to track writes */
        if (uffd_register_memory(rs->uffdio_fd, block->host,
                                 block->max_length, UFFDIO_REGISTER_MODE_WP,
                                 NULL)) {
            goto fail;
        }
        block->flags |= RAM_UF_WRITEPROTECT;
        memory_region_ref(block->mr);
        /* Apply UFFD write protection to the block memory range */
        if (ram_block_uffd_protect(block, uffd_fd)) {
            goto fail;
        }

        trace_ram_write_tracking_ramblock_start(block->idstr, block->page_size,
                                                block->host, block->max_length);
    }

    return 0;

fail:
    error_report("ram_write_tracking_start() failed: restoring initial memory state");

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
            continue;
        }
        uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);
        /* Cleanup flags and remove reference */
        block->flags &= ~RAM_UF_WRITEPROTECT;
        memory_region_unref(block->mr);
    }

    uffd_close_fd(uffd_fd);
    rs->uffdio_fd = -1;
    return -1;
}

/**
 * ram_write_tracking_stop: stop UFFD-WP memory tracking and remove protection
 */
void ram_write_tracking_stop(void)
{
    RAMState *rs = ram_state;
    RAMBlock *block;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
            continue;
        }
        uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);

        trace_ram_write_tracking_ramblock_stop(block->idstr, block->page_size,
                                               block->host, block->max_length);

        /* Cleanup flags and remove reference */
        block->flags &= ~RAM_UF_WRITEPROTECT;
        memory_region_unref(block->mr);
    }

    /* Finally close UFFD file descriptor */
    uffd_close_fd(rs->uffdio_fd);
    rs->uffdio_fd = -1;
}

#else
/* No target OS support, stubs just fail or ignore */

static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
{
    (void) rs;
    (void) offset;

    return NULL;
}

static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
                                       unsigned long start_page)
{
    (void) rs;
    (void) pss;
    (void) start_page;

    return 0;
}

bool ram_write_tracking_available(void)
{
    return false;
}

bool ram_write_tracking_compatible(void)
{
    assert(0);
    return false;
}

int ram_write_tracking_start(void)
{
    assert(0);
    return -1;
}

void ram_write_tracking_stop(void)
{
    assert(0);
}
#endif /* defined(__linux__) */

/**
 * get_queued_page: unqueue a page from the postcopy requests
 *
 * Skips pages that are already sent (!dirty)
 *
 * Returns true if a queued page is found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 */
static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
{
    RAMBlock *block;
    ram_addr_t offset;
    bool dirty;

    do {
        block = unqueue_page(rs, &offset);
        /*
         * We're sending this page, and since it's postcopy nothing else
         * will dirty it, and we must make sure it doesn't get sent again
         * even if this queue request was received after the background
         * search already sent it.
         */
        if (block) {
            unsigned long page;

            page = offset >> TARGET_PAGE_BITS;
            dirty = test_bit(page, block->bmap);
            if (!dirty) {
                trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
                                                page);
            } else {
                trace_get_queued_page(block->idstr, (uint64_t)offset, page);
            }
        }

    } while (block && !dirty);

    if (!block) {
        /*
         * Poll write faults too if background snapshot is enabled; that's
         * when vcpus may be blocked by write-protected pages.
         */
        block = poll_fault_page(rs, &offset);
    }

    if (block) {
        /*
         * We want the background search to continue from the queued page
         * since the guest is likely to want other pages near to the page
         * it just requested.
         */
        pss->block = block;
        pss->page = offset >> TARGET_PAGE_BITS;

        /*
         * This unqueued page would break the "one round" check, even if
         * it's really rare.
         */
        pss->complete_round = false;
    }

    return !!block;
}

/**
 * migration_page_queue_free: drop any remaining pages in the ram
 * request queue
 *
 * It should be empty at the end anyway, but in error cases there may
 * be some left. In case there is any page left, we drop it.
 */
static void migration_page_queue_free(RAMState *rs)
{
    struct RAMSrcPageRequest *mspr, *next_mspr;
    /*
     * This queue generally should be empty - but in the case of a failed
     * migration might have some droppings in.
     */
    RCU_READ_LOCK_GUARD();
    QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
        memory_region_unref(mspr->rb->mr);
        QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
        g_free(mspr);
    }
}

/**
 * ram_save_queue_pages: queue the page for transmission
 *
 * A request from postcopy destination for example.
 *
 * Returns zero on success or negative on error
 *
 * @rbname: Name of the RAMBlock of the request. NULL means the
 *          same as the last one.
 * @start: starting address from the start of the RAMBlock
 * @len: length (in bytes) to send
 */
int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
{
    RAMBlock *ramblock;
    RAMState *rs = ram_state;

    stat64_add(&mig_stats.postcopy_requests, 1);
    RCU_READ_LOCK_GUARD();

    if (!rbname) {
        /* Reuse last RAMBlock */
        ramblock = rs->last_req_rb;

        if (!ramblock) {
            /*
             * Shouldn't happen, we can't reuse the last RAMBlock if
             * it's the 1st request.
             */
            error_report("ram_save_queue_pages no previous block");
            return -1;
        }
    } else {
        ramblock = qemu_ram_block_by_name(rbname);

        if (!ramblock) {
            /* We shouldn't be asked for a non-existent RAMBlock */
            error_report("ram_save_queue_pages no block '%s'", rbname);
            return -1;
        }
        rs->last_req_rb = ramblock;
    }
    trace_ram_save_queue_pages(ramblock->idstr, start, len);
    if (!offset_in_ramblock(ramblock, start + len - 1)) {
        error_report("%s request overrun start=" RAM_ADDR_FMT " len="
                     RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
                     __func__, start, len, ramblock->used_length);
        return -1;
    }

    /*
     * With postcopy preempt, we send back the page directly in the
     * rp-return thread.
     */
    if (postcopy_preempt_active()) {
        ram_addr_t page_start = start >> TARGET_PAGE_BITS;
        size_t page_size = qemu_ram_pagesize(ramblock);
        PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_POSTCOPY];
        int ret = 0;

        qemu_mutex_lock(&rs->bitmap_mutex);

        pss_init(pss, ramblock, page_start);
        /*
         * Always use the preempt channel, and make sure it's there. It's
         * safe to access without lock, because when the rp-thread is
         * running we should be the only one who operates on the qemufile.
         */
        pss->pss_channel = migrate_get_current()->postcopy_qemufile_src;
        assert(pss->pss_channel);

        /*
         * It must be one or a multiple of the host page size. Just
         * assert; if something is wrong we're mostly split-brain anyway.
         */
        assert(len % page_size == 0);
        while (len) {
            if (ram_save_host_page_urgent(pss)) {
                error_report("%s: ram_save_host_page_urgent() failed: "
                             "ramblock=%s, start_addr=0x"RAM_ADDR_FMT,
                             __func__, ramblock->idstr, start);
                ret = -1;
                break;
            }
            /*
             * NOTE: after ram_save_host_page_urgent() succeeded, pss->page
             * will automatically be moved and point to the next host page
             * we're going to send, so no need to update here.
             *
             * Normally QEMU never sends >1 host page in requests, so
             * logically we don't even need that as the loop should only
             * run once, but just to be consistent.
             */
            len -= page_size;
        }
        qemu_mutex_unlock(&rs->bitmap_mutex);

        return ret;
    }

    struct RAMSrcPageRequest *new_entry =
        g_new0(struct RAMSrcPageRequest, 1);
    new_entry->rb = ramblock;
    new_entry->offset = start;
    new_entry->len = len;

    memory_region_ref(ramblock->mr);
    qemu_mutex_lock(&rs->src_page_req_mutex);
    QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
    migration_make_urgent_request();
    qemu_mutex_unlock(&rs->src_page_req_mutex);

    return 0;
}

/*
 * Try to compress the page before posting it out; return true if the page
 * has been properly handled by compression, otherwise it needs other
 * paths to handle it.
 */
static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
                               ram_addr_t offset)
{
    if (!migrate_compress()) {
        return false;
    }

    /*
     * When starting the process of a new block, the first page of
     * the block should be sent out before other pages in the same
     * block, and all the pages in the last block should have been sent
     * out; keeping this order is important, because the 'cont' flag
     * is used to avoid resending the block name.
     *
     * We post the first page as a normal page since compression will take
     * much CPU resource.
     */
    if (pss->block != pss->last_sent_block) {
        compress_flush_data();
        return false;
    }

    return compress_page_with_multi_thread(pss->block, offset,
                                           compress_send_queued_data);
}

/**
 * ram_save_target_page_legacy: save one target page
 *
 * Returns the number of pages written
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 */
static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
{
    RAMBlock *block = pss->block;
    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
    int res;

    if (control_save_page(pss, offset, &res)) {
        return res;
    }

    if (save_compress_page(rs, pss, offset)) {
        return 1;
    }

    if (save_zero_page(rs, pss, offset)) {
        return 1;
    }

    /*
     * Do not use multifd in postcopy as one whole host page should be
     * placed.  Meanwhile postcopy requires atomic update of pages, so even
     * if host page size == guest page size the dest guest during run may
     * still see partially copied pages which is data corruption.
     */
    if (migrate_multifd() && !migration_in_postcopy()) {
        return ram_save_multifd_page(pss->pss_channel, block, offset);
    }

    return ram_save_page(rs, pss);
}

/* Should be called before sending a host page */
static void pss_host_page_prepare(PageSearchStatus *pss)
{
    /* How many guest pages are there in one host page? */
    size_t guest_pfns = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;

    pss->host_page_sending = true;
    if (guest_pfns <= 1) {
        /*
         * This covers both when guest psize == host psize, or when guest
         * has larger psize than the host (guest_pfns==0).
         *
         * For the latter, we always send one whole guest page per
         * iteration of the host page (example: an Alpha VM on x86 host
         * will have guest psize 8K while host psize 4K).
         */
        pss->host_page_start = pss->page;
        pss->host_page_end = pss->page + 1;
    } else {
        /*
         * The host page spans over multiple guest pages, we send them
         * within the same host page iteration.
         */
        pss->host_page_start = ROUND_DOWN(pss->page, guest_pfns);
        pss->host_page_end = ROUND_UP(pss->page + 1, guest_pfns);
    }
}

/*
 * Whether the page pointed to by PSS is within the host page being sent.
 * Must be called after a previous pss_host_page_prepare().
 */
static bool pss_within_range(PageSearchStatus *pss)
{
    ram_addr_t ram_addr;

    assert(pss->host_page_sending);

    /* Over host-page boundary? */
    if (pss->page >= pss->host_page_end) {
        return false;
    }

    ram_addr = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;

    return offset_in_ramblock(pss->block, ram_addr);
}

static void pss_host_page_finish(PageSearchStatus *pss)
{
    pss->host_page_sending = false;
    /* This is not needed, but just to reset it */
    pss->host_page_start = pss->host_page_end = 0;
}

/*
 * Send an urgent host page specified by `pss'.  Must be called with
 * bitmap_mutex held.
 *
 * Returns 0 if saving the host page succeeded, negative otherwise.
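 *
 * A condensed sketch of the caller's side (mirroring the preempt path in
 * ram_save_queue_pages() above; bitmap_mutex already held):
 *
 *     pss_init(pss, ramblock, page_start);
 *     pss->pss_channel = migrate_get_current()->postcopy_qemufile_src;
 *     while (len) {
 *         if (ram_save_host_page_urgent(pss)) {
 *             break;          // error already reported
 *         }
 *         len -= page_size;   // pss->page has been advanced already
 *     }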
 */
static int ram_save_host_page_urgent(PageSearchStatus *pss)
{
    bool page_dirty, sent = false;
    RAMState *rs = ram_state;
    int ret = 0;

    trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page);
    pss_host_page_prepare(pss);

    /*
     * If precopy is sending the same page, let it be done in precopy, or
     * we could send the same page in two channels and neither of them will
     * receive the whole page.
     */
    if (pss_overlap(pss, &ram_state->pss[RAM_CHANNEL_PRECOPY])) {
        trace_postcopy_preempt_hit(pss->block->idstr,
                                   pss->page << TARGET_PAGE_BITS);
        return 0;
    }

    do {
        page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page);

        if (page_dirty) {
            /* Be strict about the return code; it must be 1 here */
            if (migration_ops->ram_save_target_page(rs, pss) != 1) {
                error_report_once("%s: ram_save_target_page failed", __func__);
                ret = -1;
                goto out;
            }
            sent = true;
        }
        pss_find_next_dirty(pss);
    } while (pss_within_range(pss));
out:
    pss_host_page_finish(pss);
    /* For urgent requests, flush immediately if sent */
    if (sent) {
        qemu_fflush(pss->pss_channel);
    }
    return ret;
}

/**
 * ram_save_host_page: save a whole host page
 *
 * Starting at *offset send pages up to the end of the current host
 * page. It's valid for the initial offset to point into the middle of
 * a host page in which case the remainder of the hostpage is sent.
 * Only dirty target pages are sent. Note that the host page size may
 * be a huge page for this block.
 *
 * The saving stops at the boundary of the used_length of the block
 * if the RAMBlock isn't a multiple of the host page size.
 *
 * The caller must hold ram_state.bitmap_mutex when calling this
 * function.  Note that this function can temporarily release the lock, but
 * it will make sure the lock is held again when it returns.
 *
 * Returns the number of pages written or negative on error
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 */
static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
{
    bool page_dirty, preempt_active = postcopy_preempt_active();
    int tmppages, pages = 0;
    size_t pagesize_bits =
        qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
    unsigned long start_page = pss->page;
    int res;

    if (migrate_ram_is_ignored(pss->block)) {
        error_report("block %s should not be migrated !", pss->block->idstr);
        return 0;
    }

    /* Update host page boundary information */
    pss_host_page_prepare(pss);

    do {
        page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page);

        /* Check whether the page is dirty, and send it if it is */
        if (page_dirty) {
            /*
             * Properly yield the lock only in postcopy preempt mode
             * because both migration thread and rp-return thread can
             * operate on the bitmaps.
             */
            if (preempt_active) {
                qemu_mutex_unlock(&rs->bitmap_mutex);
            }
            tmppages = migration_ops->ram_save_target_page(rs, pss);
            if (tmppages >= 0) {
                pages += tmppages;
                /*
                 * Allow rate limiting to happen in the middle of huge pages if
                 * something is sent in the current iteration.
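                 * For example (assuming 4K target pages), a single 2M huge
                 * page is 512 target pages; without this call a whole host
                 * page would go out in one burst with no rate-limit check
                 * in between.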
2238 */ 2239 if (pagesize_bits > 1 && tmppages > 0) { 2240 migration_rate_limit(); 2241 } 2242 } 2243 if (preempt_active) { 2244 qemu_mutex_lock(&rs->bitmap_mutex); 2245 } 2246 } else { 2247 tmppages = 0; 2248 } 2249 2250 if (tmppages < 0) { 2251 pss_host_page_finish(pss); 2252 return tmppages; 2253 } 2254 2255 pss_find_next_dirty(pss); 2256 } while (pss_within_range(pss)); 2257 2258 pss_host_page_finish(pss); 2259 2260 res = ram_save_release_protection(rs, pss, start_page); 2261 return (res < 0 ? res : pages); 2262 } 2263 2264 /** 2265 * ram_find_and_save_block: finds a dirty page and sends it to f 2266 * 2267 * Called within an RCU critical section. 2268 * 2269 * Returns the number of pages written where zero means no dirty pages, 2270 * or negative on error 2271 * 2272 * @rs: current RAM state 2273 * 2274 * On systems where host-page-size > target-page-size it will send all the 2275 * pages in a host page that are dirty. 2276 */ 2277 static int ram_find_and_save_block(RAMState *rs) 2278 { 2279 PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY]; 2280 int pages = 0; 2281 2282 /* No dirty page as there is zero RAM */ 2283 if (!rs->ram_bytes_total) { 2284 return pages; 2285 } 2286 2287 /* 2288 * Always keep last_seen_block/last_page valid during this procedure, 2289 * because find_dirty_block() relies on these values (e.g., we compare 2290 * last_seen_block with pss.block to see whether we searched all the 2291 * ramblocks) to detect the completion of migration. Having NULL value 2292 * of last_seen_block can conditionally cause below loop to run forever. 2293 */ 2294 if (!rs->last_seen_block) { 2295 rs->last_seen_block = QLIST_FIRST_RCU(&ram_list.blocks); 2296 rs->last_page = 0; 2297 } 2298 2299 pss_init(pss, rs->last_seen_block, rs->last_page); 2300 2301 while (true){ 2302 if (!get_queued_page(rs, pss)) { 2303 /* priority queue empty, so just search for something dirty */ 2304 int res = find_dirty_block(rs, pss); 2305 if (res != PAGE_DIRTY_FOUND) { 2306 if (res == PAGE_ALL_CLEAN) { 2307 break; 2308 } else if (res == PAGE_TRY_AGAIN) { 2309 continue; 2310 } else if (res < 0) { 2311 pages = res; 2312 break; 2313 } 2314 } 2315 } 2316 pages = ram_save_host_page(rs, pss); 2317 if (pages) { 2318 break; 2319 } 2320 } 2321 2322 rs->last_seen_block = pss->block; 2323 rs->last_page = pss->page; 2324 2325 return pages; 2326 } 2327 2328 static uint64_t ram_bytes_total_with_ignored(void) 2329 { 2330 RAMBlock *block; 2331 uint64_t total = 0; 2332 2333 RCU_READ_LOCK_GUARD(); 2334 2335 RAMBLOCK_FOREACH_MIGRATABLE(block) { 2336 total += block->used_length; 2337 } 2338 return total; 2339 } 2340 2341 uint64_t ram_bytes_total(void) 2342 { 2343 RAMBlock *block; 2344 uint64_t total = 0; 2345 2346 RCU_READ_LOCK_GUARD(); 2347 2348 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2349 total += block->used_length; 2350 } 2351 return total; 2352 } 2353 2354 static void xbzrle_load_setup(void) 2355 { 2356 XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE); 2357 } 2358 2359 static void xbzrle_load_cleanup(void) 2360 { 2361 g_free(XBZRLE.decoded_buf); 2362 XBZRLE.decoded_buf = NULL; 2363 } 2364 2365 static void ram_state_cleanup(RAMState **rsp) 2366 { 2367 if (*rsp) { 2368 migration_page_queue_free(*rsp); 2369 qemu_mutex_destroy(&(*rsp)->bitmap_mutex); 2370 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex); 2371 g_free(*rsp); 2372 *rsp = NULL; 2373 } 2374 } 2375 2376 static void xbzrle_cleanup(void) 2377 { 2378 XBZRLE_cache_lock(); 2379 if (XBZRLE.cache) { 2380 cache_fini(XBZRLE.cache); 2381 g_free(XBZRLE.encoded_buf); 2382 
g_free(XBZRLE.current_buf); 2383 g_free(XBZRLE.zero_target_page); 2384 XBZRLE.cache = NULL; 2385 XBZRLE.encoded_buf = NULL; 2386 XBZRLE.current_buf = NULL; 2387 XBZRLE.zero_target_page = NULL; 2388 } 2389 XBZRLE_cache_unlock(); 2390 } 2391 2392 static void ram_save_cleanup(void *opaque) 2393 { 2394 RAMState **rsp = opaque; 2395 RAMBlock *block; 2396 2397 /* We don't use dirty log with background snapshots */ 2398 if (!migrate_background_snapshot()) { 2399 /* caller have hold iothread lock or is in a bh, so there is 2400 * no writing race against the migration bitmap 2401 */ 2402 if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) { 2403 /* 2404 * do not stop dirty log without starting it, since 2405 * memory_global_dirty_log_stop will assert that 2406 * memory_global_dirty_log_start/stop used in pairs 2407 */ 2408 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION); 2409 } 2410 } 2411 2412 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2413 g_free(block->clear_bmap); 2414 block->clear_bmap = NULL; 2415 g_free(block->bmap); 2416 block->bmap = NULL; 2417 } 2418 2419 xbzrle_cleanup(); 2420 compress_threads_save_cleanup(); 2421 ram_state_cleanup(rsp); 2422 g_free(migration_ops); 2423 migration_ops = NULL; 2424 } 2425 2426 static void ram_state_reset(RAMState *rs) 2427 { 2428 int i; 2429 2430 for (i = 0; i < RAM_CHANNEL_MAX; i++) { 2431 rs->pss[i].last_sent_block = NULL; 2432 } 2433 2434 rs->last_seen_block = NULL; 2435 rs->last_page = 0; 2436 rs->last_version = ram_list.version; 2437 rs->xbzrle_started = false; 2438 } 2439 2440 #define MAX_WAIT 50 /* ms, half buffered_file limit */ 2441 2442 /* **** functions for postcopy ***** */ 2443 2444 void ram_postcopy_migrated_memory_release(MigrationState *ms) 2445 { 2446 struct RAMBlock *block; 2447 2448 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2449 unsigned long *bitmap = block->bmap; 2450 unsigned long range = block->used_length >> TARGET_PAGE_BITS; 2451 unsigned long run_start = find_next_zero_bit(bitmap, range, 0); 2452 2453 while (run_start < range) { 2454 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1); 2455 ram_discard_range(block->idstr, 2456 ((ram_addr_t)run_start) << TARGET_PAGE_BITS, 2457 ((ram_addr_t)(run_end - run_start)) 2458 << TARGET_PAGE_BITS); 2459 run_start = find_next_zero_bit(bitmap, range, run_end + 1); 2460 } 2461 } 2462 } 2463 2464 /** 2465 * postcopy_send_discard_bm_ram: discard a RAMBlock 2466 * 2467 * Callback from postcopy_each_ram_send_discard for each RAMBlock 2468 * 2469 * @ms: current migration state 2470 * @block: RAMBlock to discard 2471 */ 2472 static void postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block) 2473 { 2474 unsigned long end = block->used_length >> TARGET_PAGE_BITS; 2475 unsigned long current; 2476 unsigned long *bitmap = block->bmap; 2477 2478 for (current = 0; current < end; ) { 2479 unsigned long one = find_next_bit(bitmap, end, current); 2480 unsigned long zero, discard_length; 2481 2482 if (one >= end) { 2483 break; 2484 } 2485 2486 zero = find_next_zero_bit(bitmap, end, one + 1); 2487 2488 if (zero >= end) { 2489 discard_length = end - one; 2490 } else { 2491 discard_length = zero - one; 2492 } 2493 postcopy_discard_send_range(ms, one, discard_length); 2494 current = one + discard_length; 2495 } 2496 } 2497 2498 static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block); 2499 2500 /** 2501 * postcopy_each_ram_send_discard: discard all RAMBlocks 2502 * 2503 * Utility for the outgoing postcopy code. 
2504 * Calls postcopy_send_discard_bm_ram for each RAMBlock 2505 * passing it bitmap indexes and name. 2506 * (qemu_ram_foreach_block ends up passing unscaled lengths 2507 * which would mean postcopy code would have to deal with target page) 2508 * 2509 * @ms: current migration state 2510 */ 2511 static void postcopy_each_ram_send_discard(MigrationState *ms) 2512 { 2513 struct RAMBlock *block; 2514 2515 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2516 postcopy_discard_send_init(ms, block->idstr); 2517 2518 /* 2519 * Deal with TPS != HPS and huge pages. It discard any partially sent 2520 * host-page size chunks, mark any partially dirty host-page size 2521 * chunks as all dirty. In this case the host-page is the host-page 2522 * for the particular RAMBlock, i.e. it might be a huge page. 2523 */ 2524 postcopy_chunk_hostpages_pass(ms, block); 2525 2526 /* 2527 * Postcopy sends chunks of bitmap over the wire, but it 2528 * just needs indexes at this point, avoids it having 2529 * target page specific code. 2530 */ 2531 postcopy_send_discard_bm_ram(ms, block); 2532 postcopy_discard_send_finish(ms); 2533 } 2534 } 2535 2536 /** 2537 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages 2538 * 2539 * Helper for postcopy_chunk_hostpages; it's called twice to 2540 * canonicalize the two bitmaps, that are similar, but one is 2541 * inverted. 2542 * 2543 * Postcopy requires that all target pages in a hostpage are dirty or 2544 * clean, not a mix. This function canonicalizes the bitmaps. 2545 * 2546 * @ms: current migration state 2547 * @block: block that contains the page we want to canonicalize 2548 */ 2549 static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block) 2550 { 2551 RAMState *rs = ram_state; 2552 unsigned long *bitmap = block->bmap; 2553 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE; 2554 unsigned long pages = block->used_length >> TARGET_PAGE_BITS; 2555 unsigned long run_start; 2556 2557 if (block->page_size == TARGET_PAGE_SIZE) { 2558 /* Easy case - TPS==HPS for a non-huge page RAMBlock */ 2559 return; 2560 } 2561 2562 /* Find a dirty page */ 2563 run_start = find_next_bit(bitmap, pages, 0); 2564 2565 while (run_start < pages) { 2566 2567 /* 2568 * If the start of this run of pages is in the middle of a host 2569 * page, then we need to fixup this host page. 2570 */ 2571 if (QEMU_IS_ALIGNED(run_start, host_ratio)) { 2572 /* Find the end of this run */ 2573 run_start = find_next_zero_bit(bitmap, pages, run_start + 1); 2574 /* 2575 * If the end isn't at the start of a host page, then the 2576 * run doesn't finish at the end of a host page 2577 * and we need to discard. 2578 */ 2579 } 2580 2581 if (!QEMU_IS_ALIGNED(run_start, host_ratio)) { 2582 unsigned long page; 2583 unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start, 2584 host_ratio); 2585 run_start = QEMU_ALIGN_UP(run_start, host_ratio); 2586 2587 /* Clean up the bitmap */ 2588 for (page = fixup_start_addr; 2589 page < fixup_start_addr + host_ratio; page++) { 2590 /* 2591 * Remark them as dirty, updating the count for any pages 2592 * that weren't previously dirty. 
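                 *
                 * A worked example (a sketch, host_ratio == 4): a host page
                 * whose four target-page bits read 0110 is remarked to 1111,
                 * and migration_dirty_pages grows by 2, counting only the
                 * two bits that were previously clear.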
                 */
                rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
            }
        }

        /* Find the next dirty page for the next iteration */
        run_start = find_next_bit(bitmap, pages, run_start);
    }
}

/**
 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
 *
 * Transmit the set of pages to be discarded after precopy to the target;
 * these are pages that:
 * a) have been previously transmitted but are now dirty again
 * b) have never been transmitted; this ensures that any pages on the
 *    destination that have been mapped by background tasks get
 *    discarded (transparent huge pages is the specific concern)
 * Hopefully this is pretty sparse.
 *
 * @ms: current migration state
 */
void ram_postcopy_send_discard_bitmap(MigrationState *ms)
{
    RAMState *rs = ram_state;

    RCU_READ_LOCK_GUARD();

    /* This should be our last sync, the src is now paused */
    migration_bitmap_sync(rs, false);

    /* Easiest way to make sure we don't resume in the middle of a host-page */
    rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
    rs->last_seen_block = NULL;
    rs->last_page = 0;

    postcopy_each_ram_send_discard(ms);

    trace_ram_postcopy_send_discard_bitmap();
}

/**
 * ram_discard_range: discard dirtied pages at the beginning of postcopy
 *
 * Returns zero on success
 *
 * @rbname: name of the RAMBlock of the request. NULL means the
 *          same as the last one.
 * @start: starting offset (in bytes) within the RAMBlock
 * @length: number of bytes to discard
 */
int ram_discard_range(const char *rbname, uint64_t start, size_t length)
{
    trace_ram_discard_range(rbname, start, length);

    RCU_READ_LOCK_GUARD();
    RAMBlock *rb = qemu_ram_block_by_name(rbname);

    if (!rb) {
        error_report("ram_discard_range: Failed to find block '%s'", rbname);
        return -1;
    }

    /*
     * On source VM, we don't need to update the received bitmap since
     * we don't even have one.
     */
    if (rb->receivedmap) {
        bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
                     length >> qemu_target_page_bits());
    }

    return ram_block_discard_range(rb, start, length);
}

/*
 * For every allocation, we will try not to crash the VM if the
 * allocation fails.
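 * That is why the buffers below are allocated with
 * g_try_malloc()/g_try_malloc0(), which return NULL on failure, rather
 * than g_malloc(), which aborts; on any failure we unwind whatever was
 * already allocated and return -ENOMEM.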
2672 */ 2673 static int xbzrle_init(void) 2674 { 2675 Error *local_err = NULL; 2676 2677 if (!migrate_xbzrle()) { 2678 return 0; 2679 } 2680 2681 XBZRLE_cache_lock(); 2682 2683 XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE); 2684 if (!XBZRLE.zero_target_page) { 2685 error_report("%s: Error allocating zero page", __func__); 2686 goto err_out; 2687 } 2688 2689 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(), 2690 TARGET_PAGE_SIZE, &local_err); 2691 if (!XBZRLE.cache) { 2692 error_report_err(local_err); 2693 goto free_zero_page; 2694 } 2695 2696 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE); 2697 if (!XBZRLE.encoded_buf) { 2698 error_report("%s: Error allocating encoded_buf", __func__); 2699 goto free_cache; 2700 } 2701 2702 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE); 2703 if (!XBZRLE.current_buf) { 2704 error_report("%s: Error allocating current_buf", __func__); 2705 goto free_encoded_buf; 2706 } 2707 2708 /* We are all good */ 2709 XBZRLE_cache_unlock(); 2710 return 0; 2711 2712 free_encoded_buf: 2713 g_free(XBZRLE.encoded_buf); 2714 XBZRLE.encoded_buf = NULL; 2715 free_cache: 2716 cache_fini(XBZRLE.cache); 2717 XBZRLE.cache = NULL; 2718 free_zero_page: 2719 g_free(XBZRLE.zero_target_page); 2720 XBZRLE.zero_target_page = NULL; 2721 err_out: 2722 XBZRLE_cache_unlock(); 2723 return -ENOMEM; 2724 } 2725 2726 static int ram_state_init(RAMState **rsp) 2727 { 2728 *rsp = g_try_new0(RAMState, 1); 2729 2730 if (!*rsp) { 2731 error_report("%s: Init ramstate fail", __func__); 2732 return -1; 2733 } 2734 2735 qemu_mutex_init(&(*rsp)->bitmap_mutex); 2736 qemu_mutex_init(&(*rsp)->src_page_req_mutex); 2737 QSIMPLEQ_INIT(&(*rsp)->src_page_requests); 2738 (*rsp)->ram_bytes_total = ram_bytes_total(); 2739 2740 /* 2741 * Count the total number of pages used by ram blocks not including any 2742 * gaps due to alignment or unplugs. 2743 * This must match with the initial values of dirty bitmap. 2744 */ 2745 (*rsp)->migration_dirty_pages = (*rsp)->ram_bytes_total >> TARGET_PAGE_BITS; 2746 ram_state_reset(*rsp); 2747 2748 return 0; 2749 } 2750 2751 static void ram_list_init_bitmaps(void) 2752 { 2753 MigrationState *ms = migrate_get_current(); 2754 RAMBlock *block; 2755 unsigned long pages; 2756 uint8_t shift; 2757 2758 /* Skip setting bitmap if there is no RAM */ 2759 if (ram_bytes_total()) { 2760 shift = ms->clear_bitmap_shift; 2761 if (shift > CLEAR_BITMAP_SHIFT_MAX) { 2762 error_report("clear_bitmap_shift (%u) too big, using " 2763 "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX); 2764 shift = CLEAR_BITMAP_SHIFT_MAX; 2765 } else if (shift < CLEAR_BITMAP_SHIFT_MIN) { 2766 error_report("clear_bitmap_shift (%u) too small, using " 2767 "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN); 2768 shift = CLEAR_BITMAP_SHIFT_MIN; 2769 } 2770 2771 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2772 pages = block->max_length >> TARGET_PAGE_BITS; 2773 /* 2774 * The initial dirty bitmap for migration must be set with all 2775 * ones to make sure we'll migrate every guest RAM page to 2776 * destination. 2777 * Here we set RAMBlock.bmap all to 1 because when rebegin a 2778 * new migration after a failed migration, ram_list. 2779 * dirty_memory[DIRTY_MEMORY_MIGRATION] don't include the whole 2780 * guest memory. 
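             *
             * As a rough size estimate (assuming 4K target pages): one bit
             * per target page means 32KB of bmap per 1GB of guest RAM, plus
             * the much smaller clear_bmap, which keeps one bit per
             * 2^clear_bmap_shift target pages.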
2781 */ 2782 block->bmap = bitmap_new(pages); 2783 bitmap_set(block->bmap, 0, pages); 2784 block->clear_bmap_shift = shift; 2785 block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift)); 2786 } 2787 } 2788 } 2789 2790 static void migration_bitmap_clear_discarded_pages(RAMState *rs) 2791 { 2792 unsigned long pages; 2793 RAMBlock *rb; 2794 2795 RCU_READ_LOCK_GUARD(); 2796 2797 RAMBLOCK_FOREACH_NOT_IGNORED(rb) { 2798 pages = ramblock_dirty_bitmap_clear_discarded_pages(rb); 2799 rs->migration_dirty_pages -= pages; 2800 } 2801 } 2802 2803 static void ram_init_bitmaps(RAMState *rs) 2804 { 2805 qemu_mutex_lock_ramlist(); 2806 2807 WITH_RCU_READ_LOCK_GUARD() { 2808 ram_list_init_bitmaps(); 2809 /* We don't use dirty log with background snapshots */ 2810 if (!migrate_background_snapshot()) { 2811 memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION); 2812 migration_bitmap_sync_precopy(rs, false); 2813 } 2814 } 2815 qemu_mutex_unlock_ramlist(); 2816 2817 /* 2818 * After an eventual first bitmap sync, fixup the initial bitmap 2819 * containing all 1s to exclude any discarded pages from migration. 2820 */ 2821 migration_bitmap_clear_discarded_pages(rs); 2822 } 2823 2824 static int ram_init_all(RAMState **rsp) 2825 { 2826 if (ram_state_init(rsp)) { 2827 return -1; 2828 } 2829 2830 if (xbzrle_init()) { 2831 ram_state_cleanup(rsp); 2832 return -1; 2833 } 2834 2835 ram_init_bitmaps(*rsp); 2836 2837 return 0; 2838 } 2839 2840 static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out) 2841 { 2842 RAMBlock *block; 2843 uint64_t pages = 0; 2844 2845 /* 2846 * Postcopy is not using xbzrle/compression, so no need for that. 2847 * Also, since source are already halted, we don't need to care 2848 * about dirty page logging as well. 2849 */ 2850 2851 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2852 pages += bitmap_count_one(block->bmap, 2853 block->used_length >> TARGET_PAGE_BITS); 2854 } 2855 2856 /* This may not be aligned with current bitmaps. Recalculate. */ 2857 rs->migration_dirty_pages = pages; 2858 2859 ram_state_reset(rs); 2860 2861 /* Update RAMState cache of output QEMUFile */ 2862 rs->pss[RAM_CHANNEL_PRECOPY].pss_channel = out; 2863 2864 trace_ram_state_resume_prepare(pages); 2865 } 2866 2867 /* 2868 * This function clears bits of the free pages reported by the caller from the 2869 * migration dirty bitmap. @addr is the host address corresponding to the 2870 * start of the continuous guest free pages, and @len is the total bytes of 2871 * those pages. 2872 */ 2873 void qemu_guest_free_page_hint(void *addr, size_t len) 2874 { 2875 RAMBlock *block; 2876 ram_addr_t offset; 2877 size_t used_len, start, npages; 2878 MigrationState *s = migrate_get_current(); 2879 2880 /* This function is currently expected to be used during live migration */ 2881 if (!migration_is_setup_or_active(s->state)) { 2882 return; 2883 } 2884 2885 for (; len > 0; len -= used_len, addr += used_len) { 2886 block = qemu_ram_block_from_host(addr, false, &offset); 2887 if (unlikely(!block || offset >= block->used_length)) { 2888 /* 2889 * The implementation might not support RAMBlock resize during 2890 * live migration, but it could happen in theory with future 2891 * updates. So we add a check here to capture that case. 
             */
            error_report_once("%s unexpected error", __func__);
            return;
        }

        if (len <= block->used_length - offset) {
            used_len = len;
        } else {
            used_len = block->used_length - offset;
        }

        start = offset >> TARGET_PAGE_BITS;
        npages = used_len >> TARGET_PAGE_BITS;

        qemu_mutex_lock(&ram_state->bitmap_mutex);
        /*
         * The skipped free pages are equivalent to already-sent pages from
         * clear_bmap's perspective, so clear the bits from the memory region
         * bitmap which are initially set. Otherwise those skipped pages will
         * be sent in the next round after syncing from the memory region
         * bitmap.
         */
        migration_clear_memory_region_dirty_bitmap_range(block, start, npages);
        ram_state->migration_dirty_pages -=
                      bitmap_count_one_with_offset(block->bmap, start, npages);
        bitmap_clear(block->bmap, start, npages);
        qemu_mutex_unlock(&ram_state->bitmap_mutex);
    }
}

/*
 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
 * a long-running RCU critical section.  When rcu-reclaims in the code
 * start to become numerous it will be necessary to reduce the
 * granularity of these critical sections.
 */

/**
 * ram_save_setup: Setup RAM for migration
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMState **rsp = opaque;
    RAMBlock *block;
    int ret;

    if (compress_threads_save_setup()) {
        return -1;
    }

    /* migration has already set up the bitmap, reuse it. */
    if (!migration_in_colo_state()) {
        if (ram_init_all(rsp) != 0) {
            compress_threads_save_cleanup();
            return -1;
        }
    }
    (*rsp)->pss[RAM_CHANNEL_PRECOPY].pss_channel = f;

    WITH_RCU_READ_LOCK_GUARD() {
        qemu_put_be64(f, ram_bytes_total_with_ignored()
                         | RAM_SAVE_FLAG_MEM_SIZE);

        RAMBLOCK_FOREACH_MIGRATABLE(block) {
            qemu_put_byte(f, strlen(block->idstr));
            qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
            qemu_put_be64(f, block->used_length);
            if (migrate_postcopy_ram() && block->page_size !=
                                          qemu_host_page_size) {
                qemu_put_be64(f, block->page_size);
            }
            if (migrate_ignore_shared()) {
                qemu_put_be64(f, block->mr->addr);
            }
        }
    }

    ret = rdma_registration_start(f, RAM_CONTROL_SETUP);
    if (ret < 0) {
        qemu_file_set_error(f, ret);
    }

    ret = rdma_registration_stop(f, RAM_CONTROL_SETUP);
    if (ret < 0) {
        qemu_file_set_error(f, ret);
    }

    migration_ops = g_malloc0(sizeof(MigrationOps));
    migration_ops->ram_save_target_page = ram_save_target_page_legacy;

    qemu_mutex_unlock_iothread();
    ret = multifd_send_sync_main(f);
    qemu_mutex_lock_iothread();
    if (ret < 0) {
        return ret;
    }

    if (migrate_multifd() && !migrate_multifd_flush_after_each_section()) {
        qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
    }

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    qemu_fflush(f);

    return 0;
}

/**
 * ram_save_iterate: iterative stage for migration
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
3013 RAMState **temp = opaque; 3014 RAMState *rs = *temp; 3015 int ret = 0; 3016 int i; 3017 int64_t t0; 3018 int done = 0; 3019 3020 if (blk_mig_bulk_active()) { 3021 /* Avoid transferring ram during bulk phase of block migration as 3022 * the bulk phase will usually take a long time and transferring 3023 * ram updates during that time is pointless. */ 3024 goto out; 3025 } 3026 3027 /* 3028 * We'll take this lock a little bit long, but it's okay for two reasons. 3029 * Firstly, the only possible other thread to take it is who calls 3030 * qemu_guest_free_page_hint(), which should be rare; secondly, see 3031 * MAX_WAIT (if curious, further see commit 4508bd9ed8053ce) below, which 3032 * guarantees that we'll at least released it in a regular basis. 3033 */ 3034 qemu_mutex_lock(&rs->bitmap_mutex); 3035 WITH_RCU_READ_LOCK_GUARD() { 3036 if (ram_list.version != rs->last_version) { 3037 ram_state_reset(rs); 3038 } 3039 3040 /* Read version before ram_list.blocks */ 3041 smp_rmb(); 3042 3043 ret = rdma_registration_start(f, RAM_CONTROL_ROUND); 3044 if (ret < 0) { 3045 qemu_file_set_error(f, ret); 3046 } 3047 3048 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 3049 i = 0; 3050 while ((ret = migration_rate_exceeded(f)) == 0 || 3051 postcopy_has_request(rs)) { 3052 int pages; 3053 3054 if (qemu_file_get_error(f)) { 3055 break; 3056 } 3057 3058 pages = ram_find_and_save_block(rs); 3059 /* no more pages to sent */ 3060 if (pages == 0) { 3061 done = 1; 3062 break; 3063 } 3064 3065 if (pages < 0) { 3066 qemu_file_set_error(f, pages); 3067 break; 3068 } 3069 3070 rs->target_page_count += pages; 3071 3072 /* 3073 * During postcopy, it is necessary to make sure one whole host 3074 * page is sent in one chunk. 3075 */ 3076 if (migrate_postcopy_ram()) { 3077 compress_flush_data(); 3078 } 3079 3080 /* 3081 * we want to check in the 1st loop, just in case it was the 1st 3082 * time and we had to sync the dirty bitmap. 3083 * qemu_clock_get_ns() is a bit expensive, so we only check each 3084 * some iterations 3085 */ 3086 if ((i & 63) == 0) { 3087 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 3088 1000000; 3089 if (t1 > MAX_WAIT) { 3090 trace_ram_save_iterate_big_wait(t1, i); 3091 break; 3092 } 3093 } 3094 i++; 3095 } 3096 } 3097 qemu_mutex_unlock(&rs->bitmap_mutex); 3098 3099 /* 3100 * Must occur before EOS (or any QEMUFile operation) 3101 * because of RDMA protocol. 
3102 */ 3103 ret = rdma_registration_stop(f, RAM_CONTROL_ROUND); 3104 if (ret < 0) { 3105 qemu_file_set_error(f, ret); 3106 } 3107 3108 out: 3109 if (ret >= 0 3110 && migration_is_setup_or_active(migrate_get_current()->state)) { 3111 if (migrate_multifd() && migrate_multifd_flush_after_each_section()) { 3112 ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel); 3113 if (ret < 0) { 3114 return ret; 3115 } 3116 } 3117 3118 qemu_put_be64(f, RAM_SAVE_FLAG_EOS); 3119 qemu_fflush(f); 3120 ram_transferred_add(8); 3121 3122 ret = qemu_file_get_error(f); 3123 } 3124 if (ret < 0) { 3125 return ret; 3126 } 3127 3128 return done; 3129 } 3130 3131 /** 3132 * ram_save_complete: function called to send the remaining amount of ram 3133 * 3134 * Returns zero to indicate success or negative on error 3135 * 3136 * Called with iothread lock 3137 * 3138 * @f: QEMUFile where to send the data 3139 * @opaque: RAMState pointer 3140 */ 3141 static int ram_save_complete(QEMUFile *f, void *opaque) 3142 { 3143 RAMState **temp = opaque; 3144 RAMState *rs = *temp; 3145 int ret = 0; 3146 3147 rs->last_stage = !migration_in_colo_state(); 3148 3149 WITH_RCU_READ_LOCK_GUARD() { 3150 int rdma_reg_ret; 3151 3152 if (!migration_in_postcopy()) { 3153 migration_bitmap_sync_precopy(rs, true); 3154 } 3155 3156 ret = rdma_registration_start(f, RAM_CONTROL_FINISH); 3157 if (ret < 0) { 3158 qemu_file_set_error(f, ret); 3159 } 3160 3161 /* try transferring iterative blocks of memory */ 3162 3163 /* flush all remaining blocks regardless of rate limiting */ 3164 qemu_mutex_lock(&rs->bitmap_mutex); 3165 while (true) { 3166 int pages; 3167 3168 pages = ram_find_and_save_block(rs); 3169 /* no more blocks to sent */ 3170 if (pages == 0) { 3171 break; 3172 } 3173 if (pages < 0) { 3174 ret = pages; 3175 break; 3176 } 3177 } 3178 qemu_mutex_unlock(&rs->bitmap_mutex); 3179 3180 compress_flush_data(); 3181 3182 rdma_reg_ret = rdma_registration_stop(f, RAM_CONTROL_FINISH); 3183 if (rdma_reg_ret < 0) { 3184 qemu_file_set_error(f, rdma_reg_ret); 3185 } 3186 } 3187 3188 if (ret < 0) { 3189 return ret; 3190 } 3191 3192 ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel); 3193 if (ret < 0) { 3194 return ret; 3195 } 3196 3197 if (migrate_multifd() && !migrate_multifd_flush_after_each_section()) { 3198 qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); 3199 } 3200 qemu_put_be64(f, RAM_SAVE_FLAG_EOS); 3201 qemu_fflush(f); 3202 3203 return 0; 3204 } 3205 3206 static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy, 3207 uint64_t *can_postcopy) 3208 { 3209 RAMState **temp = opaque; 3210 RAMState *rs = *temp; 3211 3212 uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; 3213 3214 if (migrate_postcopy_ram()) { 3215 /* We can do postcopy, and all the data is postcopiable */ 3216 *can_postcopy += remaining_size; 3217 } else { 3218 *must_precopy += remaining_size; 3219 } 3220 } 3221 3222 static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy, 3223 uint64_t *can_postcopy) 3224 { 3225 MigrationState *s = migrate_get_current(); 3226 RAMState **temp = opaque; 3227 RAMState *rs = *temp; 3228 3229 uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; 3230 3231 if (!migration_in_postcopy() && remaining_size < s->threshold_size) { 3232 qemu_mutex_lock_iothread(); 3233 WITH_RCU_READ_LOCK_GUARD() { 3234 migration_bitmap_sync_precopy(rs, false); 3235 } 3236 qemu_mutex_unlock_iothread(); 3237 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; 3238 } 3239 
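    /*
     * Report the (possibly just refreshed) numbers the same way as
     * ram_state_pending_estimate() above does.
     */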
3240 if (migrate_postcopy_ram()) { 3241 /* We can do postcopy, and all the data is postcopiable */ 3242 *can_postcopy += remaining_size; 3243 } else { 3244 *must_precopy += remaining_size; 3245 } 3246 } 3247 3248 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host) 3249 { 3250 unsigned int xh_len; 3251 int xh_flags; 3252 uint8_t *loaded_data; 3253 3254 /* extract RLE header */ 3255 xh_flags = qemu_get_byte(f); 3256 xh_len = qemu_get_be16(f); 3257 3258 if (xh_flags != ENCODING_FLAG_XBZRLE) { 3259 error_report("Failed to load XBZRLE page - wrong compression!"); 3260 return -1; 3261 } 3262 3263 if (xh_len > TARGET_PAGE_SIZE) { 3264 error_report("Failed to load XBZRLE page - len overflow!"); 3265 return -1; 3266 } 3267 loaded_data = XBZRLE.decoded_buf; 3268 /* load data and decode */ 3269 /* it can change loaded_data to point to an internal buffer */ 3270 qemu_get_buffer_in_place(f, &loaded_data, xh_len); 3271 3272 /* decode RLE */ 3273 if (xbzrle_decode_buffer(loaded_data, xh_len, host, 3274 TARGET_PAGE_SIZE) == -1) { 3275 error_report("Failed to load XBZRLE page - decode error!"); 3276 return -1; 3277 } 3278 3279 return 0; 3280 } 3281 3282 /** 3283 * ram_block_from_stream: read a RAMBlock id from the migration stream 3284 * 3285 * Must be called from within a rcu critical section. 3286 * 3287 * Returns a pointer from within the RCU-protected ram_list. 3288 * 3289 * @mis: the migration incoming state pointer 3290 * @f: QEMUFile where to read the data from 3291 * @flags: Page flags (mostly to see if it's a continuation of previous block) 3292 * @channel: the channel we're using 3293 */ 3294 static inline RAMBlock *ram_block_from_stream(MigrationIncomingState *mis, 3295 QEMUFile *f, int flags, 3296 int channel) 3297 { 3298 RAMBlock *block = mis->last_recv_block[channel]; 3299 char id[256]; 3300 uint8_t len; 3301 3302 if (flags & RAM_SAVE_FLAG_CONTINUE) { 3303 if (!block) { 3304 error_report("Ack, bad migration stream!"); 3305 return NULL; 3306 } 3307 return block; 3308 } 3309 3310 len = qemu_get_byte(f); 3311 qemu_get_buffer(f, (uint8_t *)id, len); 3312 id[len] = 0; 3313 3314 block = qemu_ram_block_by_name(id); 3315 if (!block) { 3316 error_report("Can't find block %s", id); 3317 return NULL; 3318 } 3319 3320 if (migrate_ram_is_ignored(block)) { 3321 error_report("block %s should not be migrated !", id); 3322 return NULL; 3323 } 3324 3325 mis->last_recv_block[channel] = block; 3326 3327 return block; 3328 } 3329 3330 static inline void *host_from_ram_block_offset(RAMBlock *block, 3331 ram_addr_t offset) 3332 { 3333 if (!offset_in_ramblock(block, offset)) { 3334 return NULL; 3335 } 3336 3337 return block->host + offset; 3338 } 3339 3340 static void *host_page_from_ram_block_offset(RAMBlock *block, 3341 ram_addr_t offset) 3342 { 3343 /* Note: Explicitly no check against offset_in_ramblock(). 
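     * Postcopy may legitimately place pages beyond used_length (e.g. after
     * the RAMBlock was shrunk while in postcopy), so callers validate
     * against postcopy_length instead; see the comment in
     * ram_load_postcopy().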
     */
    return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block->host + offset),
                                   block->page_size);
}

static ram_addr_t host_page_offset_from_ram_block_offset(RAMBlock *block,
                                                         ram_addr_t offset)
{
    return ((uintptr_t)block->host + offset) & (block->page_size - 1);
}

void colo_record_bitmap(RAMBlock *block, ram_addr_t *normal, uint32_t pages)
{
    qemu_mutex_lock(&ram_state->bitmap_mutex);
    for (int i = 0; i < pages; i++) {
        ram_addr_t offset = normal[i];
        ram_state->migration_dirty_pages += !test_and_set_bit(
                                                offset >> TARGET_PAGE_BITS,
                                                block->bmap);
    }
    qemu_mutex_unlock(&ram_state->bitmap_mutex);
}

static inline void *colo_cache_from_block_offset(RAMBlock *block,
                             ram_addr_t offset, bool record_bitmap)
{
    if (!offset_in_ramblock(block, offset)) {
        return NULL;
    }
    if (!block->colo_cache) {
        error_report("%s: colo_cache is NULL in block: %s",
                     __func__, block->idstr);
        return NULL;
    }

    /*
     * During colo checkpoint, we need a bitmap of these migrated pages.
     * It helps us decide which pages in the ram cache should be flushed
     * into the VM's RAM later.
     */
    if (record_bitmap) {
        colo_record_bitmap(block, &offset, 1);
    }
    return block->colo_cache + offset;
}

/**
 * ram_handle_zero: handle the zero page case
 *
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 *
 * @host: host address for the zero page
 * @size: size of the zero page
 */
void ram_handle_zero(void *host, uint64_t size)
{
    if (!buffer_is_zero(host, size)) {
        memset(host, 0, size);
    }
}

static void colo_init_ram_state(void)
{
    ram_state_init(&ram_state);
}

/*
 * colo cache: this is for the secondary VM, we cache the whole
 * memory of the secondary VM; the global lock needs to be held
 * to call this helper.
 */
int colo_init_ram_cache(void)
{
    RAMBlock *block;

    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            block->colo_cache = qemu_anon_ram_alloc(block->used_length,
                                                    NULL, false, false);
            if (!block->colo_cache) {
                error_report("%s: Can't alloc memory for COLO cache of block %s,"
                             "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
                             block->used_length);
                RAMBLOCK_FOREACH_NOT_IGNORED(block) {
                    if (block->colo_cache) {
                        qemu_anon_ram_free(block->colo_cache, block->used_length);
                        block->colo_cache = NULL;
                    }
                }
                return -errno;
            }
            if (!machine_dump_guest_core(current_machine)) {
                qemu_madvise(block->colo_cache, block->used_length,
                             QEMU_MADV_DONTDUMP);
            }
        }
    }

    /*
     * Record the dirty pages that are sent by the PVM; we use this dirty
     * bitmap to decide which pages in the cache should be flushed into the
     * SVM's RAM. Here we use the same name 'ram_bitmap' as for migration.
     */
    if (ram_bytes_total()) {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
            block->bmap = bitmap_new(pages);
        }
    }

    colo_init_ram_state();
    return 0;
}

/* TODO: duplicated with ram_init_bitmaps */
void colo_incoming_start_dirty_log(void)
{
    RAMBlock *block = NULL;
    /* For memory_global_dirty_log_start below.
*/ 3464 qemu_mutex_lock_iothread(); 3465 qemu_mutex_lock_ramlist(); 3466 3467 memory_global_dirty_log_sync(false); 3468 WITH_RCU_READ_LOCK_GUARD() { 3469 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3470 ramblock_sync_dirty_bitmap(ram_state, block); 3471 /* Discard this dirty bitmap record */ 3472 bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS); 3473 } 3474 memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION); 3475 } 3476 ram_state->migration_dirty_pages = 0; 3477 qemu_mutex_unlock_ramlist(); 3478 qemu_mutex_unlock_iothread(); 3479 } 3480 3481 /* It is need to hold the global lock to call this helper */ 3482 void colo_release_ram_cache(void) 3483 { 3484 RAMBlock *block; 3485 3486 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION); 3487 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3488 g_free(block->bmap); 3489 block->bmap = NULL; 3490 } 3491 3492 WITH_RCU_READ_LOCK_GUARD() { 3493 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3494 if (block->colo_cache) { 3495 qemu_anon_ram_free(block->colo_cache, block->used_length); 3496 block->colo_cache = NULL; 3497 } 3498 } 3499 } 3500 ram_state_cleanup(&ram_state); 3501 } 3502 3503 /** 3504 * ram_load_setup: Setup RAM for migration incoming side 3505 * 3506 * Returns zero to indicate success and negative for error 3507 * 3508 * @f: QEMUFile where to receive the data 3509 * @opaque: RAMState pointer 3510 */ 3511 static int ram_load_setup(QEMUFile *f, void *opaque) 3512 { 3513 xbzrle_load_setup(); 3514 ramblock_recv_map_init(); 3515 3516 return 0; 3517 } 3518 3519 static int ram_load_cleanup(void *opaque) 3520 { 3521 RAMBlock *rb; 3522 3523 RAMBLOCK_FOREACH_NOT_IGNORED(rb) { 3524 qemu_ram_block_writeback(rb); 3525 } 3526 3527 xbzrle_load_cleanup(); 3528 3529 RAMBLOCK_FOREACH_NOT_IGNORED(rb) { 3530 g_free(rb->receivedmap); 3531 rb->receivedmap = NULL; 3532 } 3533 3534 return 0; 3535 } 3536 3537 /** 3538 * ram_postcopy_incoming_init: allocate postcopy data structures 3539 * 3540 * Returns 0 for success and negative if there was one error 3541 * 3542 * @mis: current migration incoming state 3543 * 3544 * Allocate data structures etc needed by incoming migration with 3545 * postcopy-ram. postcopy-ram's similarly names 3546 * postcopy_ram_incoming_init does the work. 3547 */ 3548 int ram_postcopy_incoming_init(MigrationIncomingState *mis) 3549 { 3550 return postcopy_ram_incoming_init(mis); 3551 } 3552 3553 /** 3554 * ram_load_postcopy: load a page in postcopy case 3555 * 3556 * Returns 0 for success or -errno in case of error 3557 * 3558 * Called in postcopy mode by ram_load(). 3559 * rcu_read_lock is taken prior to this being called. 
3560 * 3561 * @f: QEMUFile where to send the data 3562 * @channel: the channel to use for loading 3563 */ 3564 int ram_load_postcopy(QEMUFile *f, int channel) 3565 { 3566 int flags = 0, ret = 0; 3567 bool place_needed = false; 3568 bool matches_target_page_size = false; 3569 MigrationIncomingState *mis = migration_incoming_get_current(); 3570 PostcopyTmpPage *tmp_page = &mis->postcopy_tmp_pages[channel]; 3571 3572 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) { 3573 ram_addr_t addr; 3574 void *page_buffer = NULL; 3575 void *place_source = NULL; 3576 RAMBlock *block = NULL; 3577 uint8_t ch; 3578 int len; 3579 3580 addr = qemu_get_be64(f); 3581 3582 /* 3583 * If qemu file error, we should stop here, and then "addr" 3584 * may be invalid 3585 */ 3586 ret = qemu_file_get_error(f); 3587 if (ret) { 3588 break; 3589 } 3590 3591 flags = addr & ~TARGET_PAGE_MASK; 3592 addr &= TARGET_PAGE_MASK; 3593 3594 trace_ram_load_postcopy_loop(channel, (uint64_t)addr, flags); 3595 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE | 3596 RAM_SAVE_FLAG_COMPRESS_PAGE)) { 3597 block = ram_block_from_stream(mis, f, flags, channel); 3598 if (!block) { 3599 ret = -EINVAL; 3600 break; 3601 } 3602 3603 /* 3604 * Relying on used_length is racy and can result in false positives. 3605 * We might place pages beyond used_length in case RAM was shrunk 3606 * while in postcopy, which is fine - trying to place via 3607 * UFFDIO_COPY/UFFDIO_ZEROPAGE will never segfault. 3608 */ 3609 if (!block->host || addr >= block->postcopy_length) { 3610 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr); 3611 ret = -EINVAL; 3612 break; 3613 } 3614 tmp_page->target_pages++; 3615 matches_target_page_size = block->page_size == TARGET_PAGE_SIZE; 3616 /* 3617 * Postcopy requires that we place whole host pages atomically; 3618 * these may be huge pages for RAMBlocks that are backed by 3619 * hugetlbfs. 3620 * To make it atomic, the data is read into a temporary page 3621 * that's moved into place later. 3622 * The migration protocol uses, possibly smaller, target-pages 3623 * however the source ensures it always sends all the components 3624 * of a host page in one chunk. 3625 */ 3626 page_buffer = tmp_page->tmp_huge_page + 3627 host_page_offset_from_ram_block_offset(block, addr); 3628 /* If all TP are zero then we can optimise the place */ 3629 if (tmp_page->target_pages == 1) { 3630 tmp_page->host_addr = 3631 host_page_from_ram_block_offset(block, addr); 3632 } else if (tmp_page->host_addr != 3633 host_page_from_ram_block_offset(block, addr)) { 3634 /* not the 1st TP within the HP */ 3635 error_report("Non-same host page detected on channel %d: " 3636 "Target host page %p, received host page %p " 3637 "(rb %s offset 0x"RAM_ADDR_FMT" target_pages %d)", 3638 channel, tmp_page->host_addr, 3639 host_page_from_ram_block_offset(block, addr), 3640 block->idstr, addr, tmp_page->target_pages); 3641 ret = -EINVAL; 3642 break; 3643 } 3644 3645 /* 3646 * If it's the last part of a host page then we place the host 3647 * page 3648 */ 3649 if (tmp_page->target_pages == 3650 (block->page_size / TARGET_PAGE_SIZE)) { 3651 place_needed = true; 3652 } 3653 place_source = tmp_page->tmp_huge_page; 3654 } 3655 3656 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) { 3657 case RAM_SAVE_FLAG_ZERO: 3658 ch = qemu_get_byte(f); 3659 if (ch != 0) { 3660 error_report("Found a zero page with value %d", ch); 3661 ret = -EINVAL; 3662 break; 3663 } 3664 /* 3665 * Can skip to set page_buffer when 3666 * this is a zero page and (block->page_size == TARGET_PAGE_SIZE). 
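             * In that case tmp_page->all_zero stays true and the whole host
             * page is placed with postcopy_place_page_zero() further below,
             * so the temporary buffer is never read.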
3667 */ 3668 if (!matches_target_page_size) { 3669 memset(page_buffer, ch, TARGET_PAGE_SIZE); 3670 } 3671 break; 3672 3673 case RAM_SAVE_FLAG_PAGE: 3674 tmp_page->all_zero = false; 3675 if (!matches_target_page_size) { 3676 /* For huge pages, we always use temporary buffer */ 3677 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE); 3678 } else { 3679 /* 3680 * For small pages that matches target page size, we 3681 * avoid the qemu_file copy. Instead we directly use 3682 * the buffer of QEMUFile to place the page. Note: we 3683 * cannot do any QEMUFile operation before using that 3684 * buffer to make sure the buffer is valid when 3685 * placing the page. 3686 */ 3687 qemu_get_buffer_in_place(f, (uint8_t **)&place_source, 3688 TARGET_PAGE_SIZE); 3689 } 3690 break; 3691 case RAM_SAVE_FLAG_COMPRESS_PAGE: 3692 tmp_page->all_zero = false; 3693 len = qemu_get_be32(f); 3694 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) { 3695 error_report("Invalid compressed data length: %d", len); 3696 ret = -EINVAL; 3697 break; 3698 } 3699 decompress_data_with_multi_threads(f, page_buffer, len); 3700 break; 3701 case RAM_SAVE_FLAG_MULTIFD_FLUSH: 3702 multifd_recv_sync_main(); 3703 break; 3704 case RAM_SAVE_FLAG_EOS: 3705 /* normal exit */ 3706 if (migrate_multifd() && 3707 migrate_multifd_flush_after_each_section()) { 3708 multifd_recv_sync_main(); 3709 } 3710 break; 3711 default: 3712 error_report("Unknown combination of migration flags: 0x%x" 3713 " (postcopy mode)", flags); 3714 ret = -EINVAL; 3715 break; 3716 } 3717 3718 /* Got the whole host page, wait for decompress before placing. */ 3719 if (place_needed) { 3720 ret |= wait_for_decompress_done(); 3721 } 3722 3723 /* Detect for any possible file errors */ 3724 if (!ret && qemu_file_get_error(f)) { 3725 ret = qemu_file_get_error(f); 3726 } 3727 3728 if (!ret && place_needed) { 3729 if (tmp_page->all_zero) { 3730 ret = postcopy_place_page_zero(mis, tmp_page->host_addr, block); 3731 } else { 3732 ret = postcopy_place_page(mis, tmp_page->host_addr, 3733 place_source, block); 3734 } 3735 place_needed = false; 3736 postcopy_temp_page_reset(tmp_page); 3737 } 3738 } 3739 3740 return ret; 3741 } 3742 3743 static bool postcopy_is_running(void) 3744 { 3745 PostcopyState ps = postcopy_state_get(); 3746 return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END; 3747 } 3748 3749 /* 3750 * Flush content of RAM cache into SVM's memory. 3751 * Only flush the pages that be dirtied by PVM or SVM or both. 
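 *
 * One iteration of the loop below (a sketch): colo_bitmap_find_dirty()
 * returns a run of `num' dirty pages, each bit of the run is cleared,
 * and the whole run is memcpy'd from colo_cache back into the SVM's
 * RAM in one go.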
3752 */ 3753 void colo_flush_ram_cache(void) 3754 { 3755 RAMBlock *block = NULL; 3756 void *dst_host; 3757 void *src_host; 3758 unsigned long offset = 0; 3759 3760 memory_global_dirty_log_sync(false); 3761 qemu_mutex_lock(&ram_state->bitmap_mutex); 3762 WITH_RCU_READ_LOCK_GUARD() { 3763 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3764 ramblock_sync_dirty_bitmap(ram_state, block); 3765 } 3766 } 3767 3768 trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages); 3769 WITH_RCU_READ_LOCK_GUARD() { 3770 block = QLIST_FIRST_RCU(&ram_list.blocks); 3771 3772 while (block) { 3773 unsigned long num = 0; 3774 3775 offset = colo_bitmap_find_dirty(ram_state, block, offset, &num); 3776 if (!offset_in_ramblock(block, 3777 ((ram_addr_t)offset) << TARGET_PAGE_BITS)) { 3778 offset = 0; 3779 num = 0; 3780 block = QLIST_NEXT_RCU(block, next); 3781 } else { 3782 unsigned long i = 0; 3783 3784 for (i = 0; i < num; i++) { 3785 migration_bitmap_clear_dirty(ram_state, block, offset + i); 3786 } 3787 dst_host = block->host 3788 + (((ram_addr_t)offset) << TARGET_PAGE_BITS); 3789 src_host = block->colo_cache 3790 + (((ram_addr_t)offset) << TARGET_PAGE_BITS); 3791 memcpy(dst_host, src_host, TARGET_PAGE_SIZE * num); 3792 offset += num; 3793 } 3794 } 3795 } 3796 qemu_mutex_unlock(&ram_state->bitmap_mutex); 3797 trace_colo_flush_ram_cache_end(); 3798 } 3799 3800 static int parse_ramblock(QEMUFile *f, RAMBlock *block, ram_addr_t length) 3801 { 3802 int ret = 0; 3803 /* ADVISE is earlier, it shows the source has the postcopy capability on */ 3804 bool postcopy_advised = migration_incoming_postcopy_advised(); 3805 3806 assert(block); 3807 3808 if (!qemu_ram_is_migratable(block)) { 3809 error_report("block %s should not be migrated !", block->idstr); 3810 return -EINVAL; 3811 } 3812 3813 if (length != block->used_length) { 3814 Error *local_err = NULL; 3815 3816 ret = qemu_ram_resize(block, length, &local_err); 3817 if (local_err) { 3818 error_report_err(local_err); 3819 return ret; 3820 } 3821 } 3822 /* For postcopy we need to check hugepage sizes match */ 3823 if (postcopy_advised && migrate_postcopy_ram() && 3824 block->page_size != qemu_host_page_size) { 3825 uint64_t remote_page_size = qemu_get_be64(f); 3826 if (remote_page_size != block->page_size) { 3827 error_report("Mismatched RAM page size %s " 3828 "(local) %zd != %" PRId64, block->idstr, 3829 block->page_size, remote_page_size); 3830 return -EINVAL; 3831 } 3832 } 3833 if (migrate_ignore_shared()) { 3834 hwaddr addr = qemu_get_be64(f); 3835 if (migrate_ram_is_ignored(block) && 3836 block->mr->addr != addr) { 3837 error_report("Mismatched GPAs for block %s " 3838 "%" PRId64 "!= %" PRId64, block->idstr, 3839 (uint64_t)addr, (uint64_t)block->mr->addr); 3840 return -EINVAL; 3841 } 3842 } 3843 ret = rdma_block_notification_handle(f, block->idstr); 3844 if (ret < 0) { 3845 qemu_file_set_error(f, ret); 3846 } 3847 3848 return ret; 3849 } 3850 3851 static int parse_ramblocks(QEMUFile *f, ram_addr_t total_ram_bytes) 3852 { 3853 int ret = 0; 3854 3855 /* Synchronize RAM block list */ 3856 while (!ret && total_ram_bytes) { 3857 RAMBlock *block; 3858 char id[256]; 3859 ram_addr_t length; 3860 int len = qemu_get_byte(f); 3861 3862 qemu_get_buffer(f, (uint8_t *)id, len); 3863 id[len] = 0; 3864 length = qemu_get_be64(f); 3865 3866 block = qemu_ram_block_by_name(id); 3867 if (block) { 3868 ret = parse_ramblock(f, block, length); 3869 } else { 3870 error_report("Unknown ramblock \"%s\", cannot accept " 3871 "migration", id); 3872 ret = -EINVAL; 3873 } 3874 total_ram_bytes -= 
length; 3875 } 3876 3877 return ret; 3878 } 3879 3880 /** 3881 * ram_load_precopy: load pages in precopy case 3882 * 3883 * Returns 0 for success or -errno in case of error 3884 * 3885 * Called in precopy mode by ram_load(). 3886 * rcu_read_lock is taken prior to this being called. 3887 * 3888 * @f: QEMUFile where to send the data 3889 */ 3890 static int ram_load_precopy(QEMUFile *f) 3891 { 3892 MigrationIncomingState *mis = migration_incoming_get_current(); 3893 int flags = 0, ret = 0, invalid_flags = 0, len = 0, i = 0; 3894 3895 if (!migrate_compress()) { 3896 invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE; 3897 } 3898 3899 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) { 3900 ram_addr_t addr; 3901 void *host = NULL, *host_bak = NULL; 3902 uint8_t ch; 3903 3904 /* 3905 * Yield periodically to let main loop run, but an iteration of 3906 * the main loop is expensive, so do it each some iterations 3907 */ 3908 if ((i & 32767) == 0 && qemu_in_coroutine()) { 3909 aio_co_schedule(qemu_get_current_aio_context(), 3910 qemu_coroutine_self()); 3911 qemu_coroutine_yield(); 3912 } 3913 i++; 3914 3915 addr = qemu_get_be64(f); 3916 flags = addr & ~TARGET_PAGE_MASK; 3917 addr &= TARGET_PAGE_MASK; 3918 3919 if (flags & invalid_flags) { 3920 if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) { 3921 error_report("Received an unexpected compressed page"); 3922 } 3923 3924 ret = -EINVAL; 3925 break; 3926 } 3927 3928 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE | 3929 RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) { 3930 RAMBlock *block = ram_block_from_stream(mis, f, flags, 3931 RAM_CHANNEL_PRECOPY); 3932 3933 host = host_from_ram_block_offset(block, addr); 3934 /* 3935 * After going into COLO stage, we should not load the page 3936 * into SVM's memory directly, we put them into colo_cache firstly. 3937 * NOTE: We need to keep a copy of SVM's ram in colo_cache. 3938 * Previously, we copied all these memory in preparing stage of COLO 3939 * while we need to stop VM, which is a time-consuming process. 3940 * Here we optimize it by a trick, back-up every page while in 3941 * migration process while COLO is enabled, though it affects the 3942 * speed of the migration, but it obviously reduce the downtime of 3943 * back-up all SVM'S memory in COLO preparing stage. 3944 */ 3945 if (migration_incoming_colo_enabled()) { 3946 if (migration_incoming_in_colo_state()) { 3947 /* In COLO stage, put all pages into cache temporarily */ 3948 host = colo_cache_from_block_offset(block, addr, true); 3949 } else { 3950 /* 3951 * In migration stage but before COLO stage, 3952 * Put all pages into both cache and SVM's memory. 
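                 * (`host' keeps pointing at the SVM's RAM; `host_bak' is
                 * the cache copy that is filled in with a memcpy() at the
                 * bottom of the loop, once the page has actually been
                 * loaded.)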
/**
 * ram_load_precopy: load pages in precopy case
 *
 * Returns 0 for success or -errno in case of error
 *
 * Called in precopy mode by ram_load().
 * rcu_read_lock is taken prior to this being called.
 *
 * @f: QEMUFile to receive the data from
 */
static int ram_load_precopy(QEMUFile *f)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    int flags = 0, ret = 0, invalid_flags = 0, len = 0, i = 0;

    if (!migrate_compress()) {
        invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
    }

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr;
        void *host = NULL, *host_bak = NULL;
        uint8_t ch;

        /*
         * Yield periodically to let the main loop run, but an iteration
         * of the main loop is expensive, so only do it once in a while.
         */
        if ((i & 32767) == 0 && qemu_in_coroutine()) {
            aio_co_schedule(qemu_get_current_aio_context(),
                            qemu_coroutine_self());
            qemu_coroutine_yield();
        }
        i++;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & invalid_flags) {
            if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
                error_report("Received an unexpected compressed page");
            }

            ret = -EINVAL;
            break;
        }

        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
            RAMBlock *block = ram_block_from_stream(mis, f, flags,
                                                    RAM_CHANNEL_PRECOPY);

            host = host_from_ram_block_offset(block, addr);
            /*
             * After entering the COLO stage we must not load pages into
             * the SVM's memory directly; they go into colo_cache first.
             * NOTE: we need to keep a copy of the SVM's RAM in
             * colo_cache. We used to copy all of that memory in the
             * COLO preparation stage, with the VM stopped, which was
             * time-consuming. Instead, we back up every page as it
             * arrives during migration while COLO is enabled. This
             * slows the migration down a little, but it clearly reduces
             * the downtime that backing up all of the SVM's memory in
             * the COLO preparation stage would cause.
             */
            if (migration_incoming_colo_enabled()) {
                if (migration_incoming_in_colo_state()) {
                    /* In COLO stage, put all pages into cache temporarily */
                    host = colo_cache_from_block_offset(block, addr, true);
                } else {
                    /*
                     * In the migration stage but before the COLO stage,
                     * put all pages into both the cache and the SVM's
                     * memory.
                     */
                    host_bak = colo_cache_from_block_offset(block, addr, false);
                }
            }
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            if (!migration_incoming_in_colo_state()) {
                ramblock_recv_bitmap_set(block, host);
            }

            trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
        }

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            ret = parse_ramblocks(f, addr);
            break;

        case RAM_SAVE_FLAG_ZERO:
            ch = qemu_get_byte(f);
            if (ch != 0) {
                error_report("Found a zero page with value %d", ch);
                ret = -EINVAL;
                break;
            }
            ram_handle_zero(host, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_PAGE:
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
            decompress_data_with_multi_threads(f, host, len);
            break;

        case RAM_SAVE_FLAG_XBZRLE:
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            break;
        case RAM_SAVE_FLAG_MULTIFD_FLUSH:
            multifd_recv_sync_main();
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            if (migrate_multifd() &&
                migrate_multifd_flush_after_each_section()) {
                multifd_recv_sync_main();
            }
            break;
        case RAM_SAVE_FLAG_HOOK:
            ret = rdma_registration_handle(f);
            if (ret < 0) {
                qemu_file_set_error(f, ret);
            }
            break;
        default:
            error_report("Unknown combination of migration flags: 0x%x", flags);
            ret = -EINVAL;
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
        if (!ret && host_bak) {
            memcpy(host_bak, host, TARGET_PAGE_SIZE);
        }
    }

    ret |= wait_for_decompress_done();
    return ret;
}
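/*
 * Illustrative sketch, standalone and not called anywhere: the
 * "(i & 32767) == 0" test in ram_load_precopy() above is the usual
 * power-of-two modulo trick -- masking with (2^n - 1) keeps the low n
 * bits, so the comparison fires once every 32768 iterations without a
 * division. A minimal version of the same idea:
 */
static bool G_GNUC_UNUSED example_every_nth_iteration(uint64_t i)
{
    const uint64_t period = 32768;   /* must be a power of two */

    return (i & (period - 1)) == 0;  /* same as i % period == 0 */
}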
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int ret = 0;
    static uint64_t seq_iter;
    /*
     * If the system is running in postcopy mode, page inserts to host
     * memory must be atomic
     */
    bool postcopy_running = postcopy_is_running();

    seq_iter++;

    if (version_id != 4) {
        return -EINVAL;
    }

    /*
     * This RCU critical section can be very long running.
     * If RCU reclamations triggered from this code become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
    WITH_RCU_READ_LOCK_GUARD() {
        if (postcopy_running) {
            /*
             * Note! Here RAM_CHANNEL_PRECOPY is the precopy channel of
             * postcopy migration; we have another RAM_CHANNEL_POSTCOPY to
             * service fast page faults.
             */
            ret = ram_load_postcopy(f, RAM_CHANNEL_PRECOPY);
        } else {
            ret = ram_load_precopy(f);
        }
    }
    trace_ram_load_complete(ret, seq_iter);

    return ret;
}

static bool ram_has_postcopy(void *opaque)
{
    RAMBlock *rb;
    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        if (ramblock_is_pmem(rb)) {
            info_report("Block: %s, host: %p is an nvdimm memory, postcopy "
                        "is not supported now!", rb->idstr, rb->host);
            return false;
        }
    }

    return migrate_postcopy_ram();
}

/* Sync all the dirty bitmaps with the destination VM. */
static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
{
    RAMBlock *block;
    QEMUFile *file = s->to_dst_file;

    trace_ram_dirty_bitmap_sync_start();

    qatomic_set(&rs->postcopy_bmap_sync_requested, 0);
    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        qemu_savevm_send_recv_bitmap(file, block->idstr);
        trace_ram_dirty_bitmap_request(block->idstr);
        qatomic_inc(&rs->postcopy_bmap_sync_requested);
    }

    trace_ram_dirty_bitmap_sync_wait();

    /* Wait until all the ramblocks' dirty bitmaps are synced */
    while (qatomic_read(&rs->postcopy_bmap_sync_requested)) {
        migration_rp_wait(s);
    }

    trace_ram_dirty_bitmap_sync_complete();

    return 0;
}
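/*
 * Illustrative sketch, standalone and not called anywhere: the bitmap
 * sizes that ram_dirty_bitmap_reload() below checks against are derived
 * from the block's used length -- one bit per target page, rounded up
 * to whole bytes and then padded to a multiple of 8 bytes so the
 * little-endian conversion can work on whole 64-bit words. Assuming
 * 4 KiB target pages for the example:
 */
static uint64_t G_GNUC_UNUSED example_recv_bitmap_size(uint64_t used_length)
{
    const unsigned example_page_bits = 12;             /* 4 KiB pages assumed */
    uint64_t nbits = used_length >> example_page_bits; /* one bit per page */
    uint64_t bytes = DIV_ROUND_UP(nbits, 8);           /* bits -> bytes */

    return ROUND_UP(bytes, 8); /* pad to whole 64-bit words */
}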
/*
 * Read the received bitmap and invert it to serve as the initial dirty
 * bitmap. This is only used when a postcopy migration is paused but
 * wants to resume from a middle point.
 */
int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
{
    int ret = -EINVAL;
    /* from_dst_file is always valid because we're within rp_thread */
    QEMUFile *file = s->rp_state.from_dst_file;
    g_autofree unsigned long *le_bitmap = NULL;
    unsigned long nbits = block->used_length >> TARGET_PAGE_BITS;
    uint64_t local_size = DIV_ROUND_UP(nbits, 8);
    uint64_t size, end_mark;
    RAMState *rs = ram_state;

    trace_ram_dirty_bitmap_reload_begin(block->idstr);

    if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: incorrect state %s", __func__,
                     MigrationStatus_str(s->state));
        return -EINVAL;
    }

    /*
     * Note: see comments in ramblock_recv_bitmap_send() on why we
     * need the endianness conversion, and the paddings.
     */
    local_size = ROUND_UP(local_size, 8);

    /* Add paddings */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    size = qemu_get_be64(file);

    /* The size of the bitmap should match our ramblock */
    if (size != local_size) {
        error_report("%s: ramblock '%s' bitmap size mismatch "
                     "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
                     block->idstr, size, local_size);
        return -EINVAL;
    }

    size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
    end_mark = qemu_get_be64(file);

    ret = qemu_file_get_error(file);
    if (ret || size != local_size) {
        error_report("%s: read bitmap failed for ramblock '%s': %d"
                     " (size 0x%"PRIx64", got: 0x%"PRIx64")",
                     __func__, block->idstr, ret, local_size, size);
        return -EIO;
    }

    if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
        error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIx64,
                     __func__, block->idstr, end_mark);
        return -EINVAL;
    }

    /*
     * Endianness conversion. We are in the middle of postcopy (though
     * paused), so the dirty bitmap won't change; we can modify it in place.
     */
    bitmap_from_le(block->bmap, le_bitmap, nbits);

    /*
     * What we received is the "received bitmap". Complement it to
     * obtain the initial dirty bitmap for this ramblock.
     */
    bitmap_complement(block->bmap, block->bmap, nbits);

    /* Clear dirty bits of discarded ranges that we don't want to migrate. */
    ramblock_dirty_bitmap_clear_discarded_pages(block);

    /* We'll recalculate migration_dirty_pages in ram_state_resume_prepare(). */
    trace_ram_dirty_bitmap_reload_complete(block->idstr);

    qatomic_dec(&rs->postcopy_bmap_sync_requested);

    /*
     * We have successfully synced the bitmap for the current ramblock.
     * Always kick the migration thread to check whether all requested
     * bitmaps are reloaded. NOTE: it's racy to kick only when
     * requested == 0, because we don't know whether the migration
     * thread may still be increasing it.
     */
    migration_rp_kick(s);

    return 0;
}
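/*
 * Illustrative sketch with a hypothetical 8-page block, not used by the
 * code above: the complement step in ram_dirty_bitmap_reload() turns
 * "pages the destination already received" into "pages the source must
 * still send" -- every bit that was 1 (received) becomes 0 (clean) and
 * vice versa.
 */
static unsigned long G_GNUC_UNUSED example_received_to_dirty(unsigned long recv)
{
    const unsigned long nbits = 8;            /* assume an 8-page block */
    unsigned long mask = (1UL << nbits) - 1;  /* keep only the valid bits */

    /* e.g. recv == 0b00001111 (first 4 pages received) -> 0b11110000 dirty */
    return ~recv & mask;
}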
static int ram_resume_prepare(MigrationState *s, void *opaque)
{
    RAMState *rs = *(RAMState **)opaque;
    int ret;

    ret = ram_dirty_bitmap_sync_all(s, rs);
    if (ret) {
        return ret;
    }

    ram_state_resume_prepare(rs, s->to_dst_file);

    return 0;
}

void postcopy_preempt_shutdown_file(MigrationState *s)
{
    qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS);
    qemu_fflush(s->postcopy_qemufile_src);
}

static SaveVMHandlers savevm_ram_handlers = {
    .save_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .has_postcopy = ram_has_postcopy,
    .state_pending_exact = ram_state_pending_exact,
    .state_pending_estimate = ram_state_pending_estimate,
    .load_state = ram_load,
    .save_cleanup = ram_save_cleanup,
    .load_setup = ram_load_setup,
    .load_cleanup = ram_load_cleanup,
    .resume_prepare = ram_resume_prepare,
};

static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
                                      size_t old_size, size_t new_size)
{
    PostcopyState ps = postcopy_state_get();
    ram_addr_t offset;
    RAMBlock *rb = qemu_ram_block_from_host(host, false, &offset);
    Error *err = NULL;

    if (!rb) {
        error_report("RAM block not found");
        return;
    }

    if (migrate_ram_is_ignored(rb)) {
        return;
    }

    if (!migration_is_idle()) {
        /*
         * Precopy code on the source cannot deal with the size of RAM blocks
         * changing at random points in time - especially after sending the
         * RAM block sizes in the migration stream, they must no longer change.
         * Abort and indicate a proper reason.
         */
        error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr);
        migration_cancel(err);
        error_free(err);
    }

    switch (ps) {
    case POSTCOPY_INCOMING_ADVISE:
        /*
         * Update what ram_postcopy_incoming_init()->init_range() does at the
         * time postcopy was advised. Syncing RAM blocks with the source will
         * result in RAM resizes.
         */
        if (old_size < new_size) {
            if (ram_discard_range(rb->idstr, old_size, new_size - old_size)) {
                error_report("RAM block '%s' discard of resized RAM failed",
                             rb->idstr);
            }
        }
        rb->postcopy_length = new_size;
        break;
    case POSTCOPY_INCOMING_NONE:
    case POSTCOPY_INCOMING_RUNNING:
    case POSTCOPY_INCOMING_END:
        /*
         * Once our guest is running, postcopy no longer cares about
         * resizes. When growing, the new memory was not available on the
         * source, so no handler is needed.
         */
        break;
    default:
        error_report("RAM block '%s' resized during postcopy state: %d",
                     rb->idstr, ps);
        exit(-1);
    }
}

static RAMBlockNotifier ram_mig_ram_notifier = {
    .ram_block_resized = ram_mig_ram_block_resized,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
    ram_block_notifier_add(&ram_mig_ram_notifier);
}
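/*
 * Illustrative sketch, hypothetical and not registered anywhere: any
 * subsystem that needs to observe RAM block resizes can follow the same
 * pattern as ram_mig_init() above -- fill in a RAMBlockNotifier with the
 * callbacks it cares about and hand it to ram_block_notifier_add().
 */
static void G_GNUC_UNUSED example_block_resized(RAMBlockNotifier *n, void *host,
                                                size_t old_size, size_t new_size)
{
    /* react to the resize here */
}

static G_GNUC_UNUSED RAMBlockNotifier example_notifier = {
    .ram_block_resized = example_block_resized,
};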