1 /* 2 * QEMU System Emulator 3 * 4 * Copyright (c) 2003-2008 Fabrice Bellard 5 * Copyright (c) 2011-2015 Red Hat Inc 6 * 7 * Authors: 8 * Juan Quintela <quintela@redhat.com> 9 * 10 * Permission is hereby granted, free of charge, to any person obtaining a copy 11 * of this software and associated documentation files (the "Software"), to deal 12 * in the Software without restriction, including without limitation the rights 13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 14 * copies of the Software, and to permit persons to whom the Software is 15 * furnished to do so, subject to the following conditions: 16 * 17 * The above copyright notice and this permission notice shall be included in 18 * all copies or substantial portions of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 26 * THE SOFTWARE. 27 */ 28 29 #include "qemu/osdep.h" 30 #include "qemu/cutils.h" 31 #include "qemu/bitops.h" 32 #include "qemu/bitmap.h" 33 #include "qemu/madvise.h" 34 #include "qemu/main-loop.h" 35 #include "xbzrle.h" 36 #include "ram-compress.h" 37 #include "ram.h" 38 #include "migration.h" 39 #include "migration-stats.h" 40 #include "migration/register.h" 41 #include "migration/misc.h" 42 #include "qemu-file.h" 43 #include "postcopy-ram.h" 44 #include "page_cache.h" 45 #include "qemu/error-report.h" 46 #include "qapi/error.h" 47 #include "qapi/qapi-types-migration.h" 48 #include "qapi/qapi-events-migration.h" 49 #include "qapi/qapi-commands-migration.h" 50 #include "qapi/qmp/qerror.h" 51 #include "trace.h" 52 #include "exec/ram_addr.h" 53 #include "exec/target_page.h" 54 #include "qemu/rcu_queue.h" 55 #include "migration/colo.h" 56 #include "block.h" 57 #include "sysemu/cpu-throttle.h" 58 #include "savevm.h" 59 #include "qemu/iov.h" 60 #include "multifd.h" 61 #include "sysemu/runstate.h" 62 #include "rdma.h" 63 #include "options.h" 64 #include "sysemu/dirtylimit.h" 65 #include "sysemu/kvm.h" 66 67 #include "hw/boards.h" /* for machine_dump_guest_core() */ 68 69 #if defined(__linux__) 70 #include "qemu/userfaultfd.h" 71 #endif /* defined(__linux__) */ 72 73 /***********************************************************/ 74 /* ram save/restore */ 75 76 /* 77 * RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it 78 * worked for pages that were filled with the same char. We switched 79 * it to only search for the zero value. And to avoid confusion with 80 * RAM_SAVE_FLAG_COMPRESS_PAGE just rename it. 
81 */ 82 /* 83 * RAM_SAVE_FLAG_FULL was obsoleted in 2009, it can be reused now 84 */ 85 #define RAM_SAVE_FLAG_FULL 0x01 86 #define RAM_SAVE_FLAG_ZERO 0x02 87 #define RAM_SAVE_FLAG_MEM_SIZE 0x04 88 #define RAM_SAVE_FLAG_PAGE 0x08 89 #define RAM_SAVE_FLAG_EOS 0x10 90 #define RAM_SAVE_FLAG_CONTINUE 0x20 91 #define RAM_SAVE_FLAG_XBZRLE 0x40 92 /* 0x80 is reserved in rdma.h for RAM_SAVE_FLAG_HOOK */ 93 #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100 94 #define RAM_SAVE_FLAG_MULTIFD_FLUSH 0x200 95 /* We can't use any flag that is bigger than 0x200 */ 96 97 /* 98 * mapped-ram migration supports O_DIRECT, so we need to make sure the 99 * userspace buffer, the IO operation size and the file offset are 100 * aligned according to the underlying device's block size. The first 101 * two are already aligned to page size, but we need to add padding to 102 * the file to align the offset. We cannot read the block size 103 * dynamically because the migration file can be moved between 104 * different systems, so use 1M to cover most block sizes and to keep 105 * the file offset aligned at page size as well. 106 */ 107 #define MAPPED_RAM_FILE_OFFSET_ALIGNMENT 0x100000 108 109 /* 110 * When doing mapped-ram migration, this is the amount we read from 111 * the pages region in the migration file at a time. 112 */ 113 #define MAPPED_RAM_LOAD_BUF_SIZE 0x100000 114 115 XBZRLECacheStats xbzrle_counters; 116 117 /* used by the search for pages to send */ 118 struct PageSearchStatus { 119 /* The migration channel used for a specific host page */ 120 QEMUFile *pss_channel; 121 /* Last block from where we have sent data */ 122 RAMBlock *last_sent_block; 123 /* Current block being searched */ 124 RAMBlock *block; 125 /* Current page to search from */ 126 unsigned long page; 127 /* Set once we wrap around */ 128 bool complete_round; 129 /* Whether we're sending a host page */ 130 bool host_page_sending; 131 /* The start/end of current host page. Invalid if host_page_sending==false */ 132 unsigned long host_page_start; 133 unsigned long host_page_end; 134 }; 135 typedef struct PageSearchStatus PageSearchStatus; 136 137 /* struct contains XBZRLE cache and a static page 138 used by the compression */ 139 static struct { 140 /* buffer used for XBZRLE encoding */ 141 uint8_t *encoded_buf; 142 /* buffer for storing page content */ 143 uint8_t *current_buf; 144 /* Cache for XBZRLE, Protected by lock. */ 145 PageCache *cache; 146 QemuMutex lock; 147 /* it will store a page full of zeros */ 148 uint8_t *zero_target_page; 149 /* buffer used for XBZRLE decoding */ 150 uint8_t *decoded_buf; 151 } XBZRLE; 152 153 static void XBZRLE_cache_lock(void) 154 { 155 if (migrate_xbzrle()) { 156 qemu_mutex_lock(&XBZRLE.lock); 157 } 158 } 159 160 static void XBZRLE_cache_unlock(void) 161 { 162 if (migrate_xbzrle()) { 163 qemu_mutex_unlock(&XBZRLE.lock); 164 } 165 } 166 167 /** 168 * xbzrle_cache_resize: resize the xbzrle cache 169 * 170 * This function is called from migrate_params_apply in main 171 * thread, possibly while a migration is in progress. A running 172 * migration may be using the cache and might finish during this call, 173 * hence changes to the cache are protected by XBZRLE.lock(). 
174 * 175 * Returns 0 for success or -1 for error 176 * 177 * @new_size: new cache size 178 * @errp: set *errp if the check failed, with reason 179 */ 180 int xbzrle_cache_resize(uint64_t new_size, Error **errp) 181 { 182 PageCache *new_cache; 183 int64_t ret = 0; 184 185 /* Check for truncation */ 186 if (new_size != (size_t)new_size) { 187 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size", 188 "exceeding address space"); 189 return -1; 190 } 191 192 if (new_size == migrate_xbzrle_cache_size()) { 193 /* nothing to do */ 194 return 0; 195 } 196 197 XBZRLE_cache_lock(); 198 199 if (XBZRLE.cache != NULL) { 200 new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp); 201 if (!new_cache) { 202 ret = -1; 203 goto out; 204 } 205 206 cache_fini(XBZRLE.cache); 207 XBZRLE.cache = new_cache; 208 } 209 out: 210 XBZRLE_cache_unlock(); 211 return ret; 212 } 213 214 static bool postcopy_preempt_active(void) 215 { 216 return migrate_postcopy_preempt() && migration_in_postcopy(); 217 } 218 219 bool migrate_ram_is_ignored(RAMBlock *block) 220 { 221 return !qemu_ram_is_migratable(block) || 222 (migrate_ignore_shared() && qemu_ram_is_shared(block) 223 && qemu_ram_is_named_file(block)); 224 } 225 226 #undef RAMBLOCK_FOREACH 227 228 int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque) 229 { 230 RAMBlock *block; 231 int ret = 0; 232 233 RCU_READ_LOCK_GUARD(); 234 235 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 236 ret = func(block, opaque); 237 if (ret) { 238 break; 239 } 240 } 241 return ret; 242 } 243 244 static void ramblock_recv_map_init(void) 245 { 246 RAMBlock *rb; 247 248 RAMBLOCK_FOREACH_NOT_IGNORED(rb) { 249 assert(!rb->receivedmap); 250 rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits()); 251 } 252 } 253 254 int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr) 255 { 256 return test_bit(ramblock_recv_bitmap_offset(host_addr, rb), 257 rb->receivedmap); 258 } 259 260 bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset) 261 { 262 return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap); 263 } 264 265 void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr) 266 { 267 set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap); 268 } 269 270 void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr, 271 size_t nr) 272 { 273 bitmap_set_atomic(rb->receivedmap, 274 ramblock_recv_bitmap_offset(host_addr, rb), 275 nr); 276 } 277 278 #define RAMBLOCK_RECV_BITMAP_ENDING (0x0123456789abcdefULL) 279 280 /* 281 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes). 282 * 283 * Returns >0 if success with sent bytes, or <0 if error. 284 */ 285 int64_t ramblock_recv_bitmap_send(QEMUFile *file, 286 const char *block_name) 287 { 288 RAMBlock *block = qemu_ram_block_by_name(block_name); 289 unsigned long *le_bitmap, nbits; 290 uint64_t size; 291 292 if (!block) { 293 error_report("%s: invalid block name: %s", __func__, block_name); 294 return -1; 295 } 296 297 nbits = block->postcopy_length >> TARGET_PAGE_BITS; 298 299 /* 300 * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit 301 * machines we may need 4 more bytes for padding (see below 302 * comment). So extend it a bit before hand. 303 */ 304 le_bitmap = bitmap_new(nbits + BITS_PER_LONG); 305 306 /* 307 * Always use little endian when sending the bitmap. This is 308 * required that when source and destination VMs are not using the 309 * same endianness. (Note: big endian won't work.) 
310 */ 311 bitmap_to_le(le_bitmap, block->receivedmap, nbits); 312 313 /* Size of the bitmap, in bytes */ 314 size = DIV_ROUND_UP(nbits, 8); 315 316 /* 317 * size is always aligned to 8 bytes for 64bit machines, but it 318 * may not be true for 32bit machines. We need this padding to 319 * make sure the migration can survive even between 32bit and 320 * 64bit machines. 321 */ 322 size = ROUND_UP(size, 8); 323 324 qemu_put_be64(file, size); 325 qemu_put_buffer(file, (const uint8_t *)le_bitmap, size); 326 g_free(le_bitmap); 327 /* 328 * Mark as an end, in case the middle part is screwed up due to 329 * some "mysterious" reason. 330 */ 331 qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING); 332 int ret = qemu_fflush(file); 333 if (ret) { 334 return ret; 335 } 336 337 return size + sizeof(size); 338 } 339 340 /* 341 * An outstanding page request, on the source, having been received 342 * and queued 343 */ 344 struct RAMSrcPageRequest { 345 RAMBlock *rb; 346 hwaddr offset; 347 hwaddr len; 348 349 QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req; 350 }; 351 352 /* State of RAM for migration */ 353 struct RAMState { 354 /* 355 * PageSearchStatus structures for the channels when send pages. 356 * Protected by the bitmap_mutex. 357 */ 358 PageSearchStatus pss[RAM_CHANNEL_MAX]; 359 /* UFFD file descriptor, used in 'write-tracking' migration */ 360 int uffdio_fd; 361 /* total ram size in bytes */ 362 uint64_t ram_bytes_total; 363 /* Last block that we have visited searching for dirty pages */ 364 RAMBlock *last_seen_block; 365 /* Last dirty target page we have sent */ 366 ram_addr_t last_page; 367 /* last ram version we have seen */ 368 uint32_t last_version; 369 /* How many times we have dirty too many pages */ 370 int dirty_rate_high_cnt; 371 /* these variables are used for bitmap sync */ 372 /* last time we did a full bitmap_sync */ 373 int64_t time_last_bitmap_sync; 374 /* bytes transferred at start_time */ 375 uint64_t bytes_xfer_prev; 376 /* number of dirty pages since start_time */ 377 uint64_t num_dirty_pages_period; 378 /* xbzrle misses since the beginning of the period */ 379 uint64_t xbzrle_cache_miss_prev; 380 /* Amount of xbzrle pages since the beginning of the period */ 381 uint64_t xbzrle_pages_prev; 382 /* Amount of xbzrle encoded bytes since the beginning of the period */ 383 uint64_t xbzrle_bytes_prev; 384 /* Are we really using XBZRLE (e.g., after the first round). */ 385 bool xbzrle_started; 386 /* Are we on the last stage of migration */ 387 bool last_stage; 388 389 /* total handled target pages at the beginning of period */ 390 uint64_t target_page_count_prev; 391 /* total handled target pages since start */ 392 uint64_t target_page_count; 393 /* number of dirty bits in the bitmap */ 394 uint64_t migration_dirty_pages; 395 /* 396 * Protects: 397 * - dirty/clear bitmap 398 * - migration_dirty_pages 399 * - pss structures 400 */ 401 QemuMutex bitmap_mutex; 402 /* The RAMBlock used in the last src_page_requests */ 403 RAMBlock *last_req_rb; 404 /* Queue of outstanding page requests from the destination */ 405 QemuMutex src_page_req_mutex; 406 QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests; 407 408 /* 409 * This is only used when postcopy is in recovery phase, to communicate 410 * between the migration thread and the return path thread on dirty 411 * bitmap synchronizations. This field is unused in other stages of 412 * RAM migration. 
413 */ 414 unsigned int postcopy_bmap_sync_requested; 415 }; 416 typedef struct RAMState RAMState; 417 418 static RAMState *ram_state; 419 420 static NotifierWithReturnList precopy_notifier_list; 421 422 /* Whether postcopy has queued requests? */ 423 static bool postcopy_has_request(RAMState *rs) 424 { 425 return !QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests); 426 } 427 428 void precopy_infrastructure_init(void) 429 { 430 notifier_with_return_list_init(&precopy_notifier_list); 431 } 432 433 void precopy_add_notifier(NotifierWithReturn *n) 434 { 435 notifier_with_return_list_add(&precopy_notifier_list, n); 436 } 437 438 void precopy_remove_notifier(NotifierWithReturn *n) 439 { 440 notifier_with_return_remove(n); 441 } 442 443 int precopy_notify(PrecopyNotifyReason reason, Error **errp) 444 { 445 PrecopyNotifyData pnd; 446 pnd.reason = reason; 447 448 return notifier_with_return_list_notify(&precopy_notifier_list, &pnd, errp); 449 } 450 451 uint64_t ram_bytes_remaining(void) 452 { 453 return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) : 454 0; 455 } 456 457 void ram_transferred_add(uint64_t bytes) 458 { 459 if (runstate_is_running()) { 460 stat64_add(&mig_stats.precopy_bytes, bytes); 461 } else if (migration_in_postcopy()) { 462 stat64_add(&mig_stats.postcopy_bytes, bytes); 463 } else { 464 stat64_add(&mig_stats.downtime_bytes, bytes); 465 } 466 } 467 468 struct MigrationOps { 469 int (*ram_save_target_page)(RAMState *rs, PageSearchStatus *pss); 470 }; 471 typedef struct MigrationOps MigrationOps; 472 473 MigrationOps *migration_ops; 474 475 static int ram_save_host_page_urgent(PageSearchStatus *pss); 476 477 /* NOTE: page is the PFN not real ram_addr_t. */ 478 static void pss_init(PageSearchStatus *pss, RAMBlock *rb, ram_addr_t page) 479 { 480 pss->block = rb; 481 pss->page = page; 482 pss->complete_round = false; 483 } 484 485 /* 486 * Check whether two PSSs are actively sending the same page. Return true 487 * if it is, false otherwise. 488 */ 489 static bool pss_overlap(PageSearchStatus *pss1, PageSearchStatus *pss2) 490 { 491 return pss1->host_page_sending && pss2->host_page_sending && 492 (pss1->host_page_start == pss2->host_page_start); 493 } 494 495 /** 496 * save_page_header: write page header to wire 497 * 498 * If this is the 1st block, it also writes the block identification 499 * 500 * Returns the number of bytes written 501 * 502 * @pss: current PSS channel status 503 * @block: block that contains the page we want to send 504 * @offset: offset inside the block for the page 505 * in the lower bits, it contains flags 506 */ 507 static size_t save_page_header(PageSearchStatus *pss, QEMUFile *f, 508 RAMBlock *block, ram_addr_t offset) 509 { 510 size_t size, len; 511 bool same_block = (block == pss->last_sent_block); 512 513 if (same_block) { 514 offset |= RAM_SAVE_FLAG_CONTINUE; 515 } 516 qemu_put_be64(f, offset); 517 size = 8; 518 519 if (!same_block) { 520 len = strlen(block->idstr); 521 qemu_put_byte(f, len); 522 qemu_put_buffer(f, (uint8_t *)block->idstr, len); 523 size += 1 + len; 524 pss->last_sent_block = block; 525 } 526 return size; 527 } 528 529 /** 530 * mig_throttle_guest_down: throttle down the guest 531 * 532 * Reduce amount of guest cpu execution to hopefully slow down memory 533 * writes. If guest dirty memory rate is reduced below the rate at 534 * which we can transfer pages to the destination then we should be 535 * able to complete migration. 
Some workloads dirty memory way too 536 * fast and will not effectively converge, even with auto-converge. 537 */ 538 static void mig_throttle_guest_down(uint64_t bytes_dirty_period, 539 uint64_t bytes_dirty_threshold) 540 { 541 uint64_t pct_initial = migrate_cpu_throttle_initial(); 542 uint64_t pct_increment = migrate_cpu_throttle_increment(); 543 bool pct_tailslow = migrate_cpu_throttle_tailslow(); 544 int pct_max = migrate_max_cpu_throttle(); 545 546 uint64_t throttle_now = cpu_throttle_get_percentage(); 547 uint64_t cpu_now, cpu_ideal, throttle_inc; 548 549 /* We have not started throttling yet. Let's start it. */ 550 if (!cpu_throttle_active()) { 551 cpu_throttle_set(pct_initial); 552 } else { 553 /* Throttling already on, just increase the rate */ 554 if (!pct_tailslow) { 555 throttle_inc = pct_increment; 556 } else { 557 /* Compute the ideal CPU percentage used by Guest, which may 558 * make the dirty rate match the dirty rate threshold. */ 559 cpu_now = 100 - throttle_now; 560 cpu_ideal = cpu_now * (bytes_dirty_threshold * 1.0 / 561 bytes_dirty_period); 562 throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment); 563 } 564 cpu_throttle_set(MIN(throttle_now + throttle_inc, pct_max)); 565 } 566 } 567 568 void mig_throttle_counter_reset(void) 569 { 570 RAMState *rs = ram_state; 571 572 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 573 rs->num_dirty_pages_period = 0; 574 rs->bytes_xfer_prev = migration_transferred_bytes(); 575 } 576 577 /** 578 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache 579 * 580 * @current_addr: address for the zero page 581 * 582 * Update the xbzrle cache to reflect a page that's been sent as all 0. 583 * The important thing is that a stale (not-yet-0'd) page be replaced 584 * by the new data. 585 * As a bonus, if the page wasn't in the cache it gets added so that 586 * when a small write is made into the 0'd page it gets XBZRLE sent. 
587 */ 588 static void xbzrle_cache_zero_page(ram_addr_t current_addr) 589 { 590 /* We don't care if this fails to allocate a new cache page 591 * as long as it updated an old one */ 592 cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page, 593 stat64_get(&mig_stats.dirty_sync_count)); 594 } 595 596 #define ENCODING_FLAG_XBZRLE 0x1 597 598 /** 599 * save_xbzrle_page: compress and send current page 600 * 601 * Returns: 1 means that we wrote the page 602 * 0 means that page is identical to the one already sent 603 * -1 means that xbzrle would be longer than normal 604 * 605 * @rs: current RAM state 606 * @pss: current PSS channel 607 * @current_data: pointer to the address of the page contents 608 * @current_addr: addr of the page 609 * @block: block that contains the page we want to send 610 * @offset: offset inside the block for the page 611 */ 612 static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss, 613 uint8_t **current_data, ram_addr_t current_addr, 614 RAMBlock *block, ram_addr_t offset) 615 { 616 int encoded_len = 0, bytes_xbzrle; 617 uint8_t *prev_cached_page; 618 QEMUFile *file = pss->pss_channel; 619 uint64_t generation = stat64_get(&mig_stats.dirty_sync_count); 620 621 if (!cache_is_cached(XBZRLE.cache, current_addr, generation)) { 622 xbzrle_counters.cache_miss++; 623 if (!rs->last_stage) { 624 if (cache_insert(XBZRLE.cache, current_addr, *current_data, 625 generation) == -1) { 626 return -1; 627 } else { 628 /* update *current_data when the page has been 629 inserted into cache */ 630 *current_data = get_cached_data(XBZRLE.cache, current_addr); 631 } 632 } 633 return -1; 634 } 635 636 /* 637 * Reaching here means the page has hit the xbzrle cache, no matter what 638 * encoding result it is (normal encoding, overflow or skipping the page), 639 * count the page as encoded. This is used to calculate the encoding rate. 640 * 641 * Example: 2 pages (8KB) being encoded, first page encoding generates 2KB, 642 * 2nd page turns out to be skipped (i.e. no new bytes written to the 643 * page), the overall encoding rate will be 8KB / 2KB = 4, which has the 644 * skipped page included. In this way, the encoding rate can tell if the 645 * guest page is good for xbzrle encoding. 646 */ 647 xbzrle_counters.pages++; 648 prev_cached_page = get_cached_data(XBZRLE.cache, current_addr); 649 650 /* save current buffer into memory */ 651 memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE); 652 653 /* XBZRLE encoding (if there is no overflow) */ 654 encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf, 655 TARGET_PAGE_SIZE, XBZRLE.encoded_buf, 656 TARGET_PAGE_SIZE); 657 658 /* 659 * Update the cache contents, so that it corresponds to the data 660 * sent, in all cases except where we skip the page. 661 */ 662 if (!rs->last_stage && encoded_len != 0) { 663 memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE); 664 /* 665 * In the case where we couldn't compress, ensure that the caller 666 * sends the data from the cache, since the guest might have 667 * changed the RAM since we copied it. 
         */
        *current_data = prev_cached_page;
    }

    if (encoded_len == 0) {
        trace_save_xbzrle_page_skipping();
        return 0;
    } else if (encoded_len == -1) {
        trace_save_xbzrle_page_overflow();
        xbzrle_counters.overflow++;
        xbzrle_counters.bytes += TARGET_PAGE_SIZE;
        return -1;
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(pss, pss->pss_channel, block,
                                    offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(file, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(file, encoded_len);
    qemu_put_buffer(file, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    /*
     * Like compressed_size (please see update_compress_thread_counts),
     * the xbzrle encoded bytes don't count the 8 byte header with
     * RAM_SAVE_FLAG_CONTINUE.
     */
    xbzrle_counters.bytes += bytes_xbzrle - 8;
    ram_transferred_add(bytes_xbzrle);

    return 1;
}

/**
 * pss_find_next_dirty: find the next dirty page of current ramblock
 *
 * This function updates pss->page to point to the next dirty page index
 * within the ramblock to migrate, or the end of the ramblock when nothing is
 * found. Note that when pss->host_page_sending==true we are in the middle
 * of sending a host page, so we won't look for dirty pages outside the
 * host page boundary.
 *
 * @pss: the current page search status
 */
static void pss_find_next_dirty(PageSearchStatus *pss)
{
    RAMBlock *rb = pss->block;
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;

    if (migrate_ram_is_ignored(rb)) {
        /* Points directly to the end, so we know no dirty page */
        pss->page = size;
        return;
    }

    /*
     * If during sending a host page, only look for dirty pages within the
     * current host page being sent.
     */
    if (pss->host_page_sending) {
        assert(pss->host_page_end);
        size = MIN(size, pss->host_page_end);
    }

    pss->page = find_next_bit(bitmap, size, pss->page);
}
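
/*
 * rb->clear_bmap tracks which chunks of (1 << clear_bmap_shift) pages still
 * need their dirty bits cleared in the memory core (and, through it, in the
 * kernel's dirty log) before any page of the chunk goes out on the wire.
 * The helper below performs that deferred clear lazily, one whole chunk at
 * a time.
 */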
static void migration_clear_memory_region_dirty_bitmap(RAMBlock *rb,
                                                       unsigned long page)
{
    uint8_t shift;
    hwaddr size, start;

    if (!rb->clear_bmap || !clear_bmap_test_and_clear(rb, page)) {
        return;
    }

    shift = rb->clear_bmap_shift;
    /*
     * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
     * can make things easier sometimes since then start address
     * of the small chunk will always be 64 pages aligned so the
     * bitmap will always be aligned to unsigned long. We should
     * even be able to remove this restriction but I'm simply
     * keeping it.
     */
    assert(shift >= 6);

    size = 1ULL << (TARGET_PAGE_BITS + shift);
    start = QEMU_ALIGN_DOWN((ram_addr_t)page << TARGET_PAGE_BITS, size);
    trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
    memory_region_clear_dirty_bitmap(rb->mr, start, size);
}

static void
migration_clear_memory_region_dirty_bitmap_range(RAMBlock *rb,
                                                 unsigned long start,
                                                 unsigned long npages)
{
    unsigned long i, chunk_pages = 1UL << rb->clear_bmap_shift;
    unsigned long chunk_start = QEMU_ALIGN_DOWN(start, chunk_pages);
    unsigned long chunk_end = QEMU_ALIGN_UP(start + npages, chunk_pages);

    /*
     * Clear pages from start to start + npages - 1, so the end boundary is
     * exclusive.
     */
    for (i = chunk_start; i < chunk_end; i += chunk_pages) {
        migration_clear_memory_region_dirty_bitmap(rb, i);
    }
}

/*
 * colo_bitmap_find_dirty: find contiguous dirty pages from start
 *
 * Returns the page offset within memory region of the start of the contiguous
 * dirty page
 *
 * @rs: current RAM state
 * @rb: RAMBlock where to search for dirty pages
 * @start: page where we start the search
 * @num: the number of contiguous dirty pages
 */
static inline
unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                     unsigned long start, unsigned long *num)
{
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;
    unsigned long first, next;

    *num = 0;

    if (migrate_ram_is_ignored(rb)) {
        return size;
    }

    first = find_next_bit(bitmap, size, start);
    if (first >= size) {
        return first;
    }
    next = find_next_zero_bit(bitmap, size, first + 1);
    assert(next >= first);
    *num = next - first;
    return first;
}

static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                RAMBlock *rb,
                                                unsigned long page)
{
    bool ret;

    /*
     * Clear dirty bitmap if needed. This _must_ be called before we
     * send any of the page in the chunk because we need to make sure
     * we can capture further page content changes when we sync dirty
     * log the next time. So as long as we are going to send any of
     * the page in the chunk we clear the remote dirty bitmap for all.
     * Clearing it earlier won't be a problem, but too late will.
     */
    migration_clear_memory_region_dirty_bitmap(rb, page);

    ret = test_and_clear_bit(page, rb->bmap);
    if (ret) {
        rs->migration_dirty_pages--;
    }

    return ret;
}

static void dirty_bitmap_clear_section(MemoryRegionSection *section,
                                       void *opaque)
{
    const hwaddr offset = section->offset_within_region;
    const hwaddr size = int128_get64(section->size);
    const unsigned long start = offset >> TARGET_PAGE_BITS;
    const unsigned long npages = size >> TARGET_PAGE_BITS;
    RAMBlock *rb = section->mr->ram_block;
    uint64_t *cleared_bits = opaque;

    /*
     * We don't grab ram_state->bitmap_mutex because we expect to run
     * only when starting migration or during postcopy recovery where
     * we don't have concurrent access.
     */
    if (!migration_in_postcopy() && !migrate_background_snapshot()) {
        migration_clear_memory_region_dirty_bitmap_range(rb, start, npages);
    }
    *cleared_bits += bitmap_count_one_with_offset(rb->bmap, start, npages);
    bitmap_clear(rb->bmap, start, npages);
}

/*
 * Exclude all dirty pages from migration that fall into a discarded range as
 * managed by a RamDiscardManager responsible for the mapped memory region of
 * the RAMBlock. Clear the corresponding bits in the dirty bitmaps.
 *
 * Discarded pages ("logically unplugged") have undefined content and must
 * not get migrated, because even reading these pages for migration might
 * result in undesired behavior.
 *
 * Returns the number of cleared bits in the RAMBlock dirty bitmap.
 *
 * Note: The result is only stable while migrating (precopy/postcopy).
 */
static uint64_t ramblock_dirty_bitmap_clear_discarded_pages(RAMBlock *rb)
{
    uint64_t cleared_bits = 0;

    if (rb->mr && rb->bmap && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = 0,
            .size = int128_make64(qemu_ram_get_used_length(rb)),
        };

        ram_discard_manager_replay_discarded(rdm, &section,
                                             dirty_bitmap_clear_section,
                                             &cleared_bits);
    }
    return cleared_bits;
}

/*
 * Check if a host-page aligned page falls into a discarded range as managed by
 * a RamDiscardManager responsible for the mapped memory region of the RAMBlock.
 *
 * Note: The result is only stable while migrating (precopy/postcopy).
 */
bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start)
{
    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = start,
            .size = int128_make64(qemu_ram_pagesize(rb)),
        };

        return !ram_discard_manager_is_populated(rdm, &section);
    }
    return false;
}

/* Called with RCU critical section */
static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
{
    uint64_t new_dirty_pages =
        cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length);

    rs->migration_dirty_pages += new_dirty_pages;
    rs->num_dirty_pages_period += new_dirty_pages;
}

/**
 * ram_pagesize_summary: calculate all the pagesizes of a VM
 *
 * Returns a summary bitmap of the page sizes of all RAMBlocks
 *
 * For VMs with just normal pages this is equivalent to the host page
 * size. If it's got some huge pages then it's the OR of all the
 * different page sizes.
932 */ 933 uint64_t ram_pagesize_summary(void) 934 { 935 RAMBlock *block; 936 uint64_t summary = 0; 937 938 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 939 summary |= block->page_size; 940 } 941 942 return summary; 943 } 944 945 uint64_t ram_get_total_transferred_pages(void) 946 { 947 return stat64_get(&mig_stats.normal_pages) + 948 stat64_get(&mig_stats.zero_pages) + 949 compress_ram_pages() + xbzrle_counters.pages; 950 } 951 952 static void migration_update_rates(RAMState *rs, int64_t end_time) 953 { 954 uint64_t page_count = rs->target_page_count - rs->target_page_count_prev; 955 956 /* calculate period counters */ 957 stat64_set(&mig_stats.dirty_pages_rate, 958 rs->num_dirty_pages_period * 1000 / 959 (end_time - rs->time_last_bitmap_sync)); 960 961 if (!page_count) { 962 return; 963 } 964 965 if (migrate_xbzrle()) { 966 double encoded_size, unencoded_size; 967 968 xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss - 969 rs->xbzrle_cache_miss_prev) / page_count; 970 rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss; 971 unencoded_size = (xbzrle_counters.pages - rs->xbzrle_pages_prev) * 972 TARGET_PAGE_SIZE; 973 encoded_size = xbzrle_counters.bytes - rs->xbzrle_bytes_prev; 974 if (xbzrle_counters.pages == rs->xbzrle_pages_prev || !encoded_size) { 975 xbzrle_counters.encoding_rate = 0; 976 } else { 977 xbzrle_counters.encoding_rate = unencoded_size / encoded_size; 978 } 979 rs->xbzrle_pages_prev = xbzrle_counters.pages; 980 rs->xbzrle_bytes_prev = xbzrle_counters.bytes; 981 } 982 compress_update_rates(page_count); 983 } 984 985 /* 986 * Enable dirty-limit to throttle down the guest 987 */ 988 static void migration_dirty_limit_guest(void) 989 { 990 /* 991 * dirty page rate quota for all vCPUs fetched from 992 * migration parameter 'vcpu_dirty_limit' 993 */ 994 static int64_t quota_dirtyrate; 995 MigrationState *s = migrate_get_current(); 996 997 /* 998 * If dirty limit already enabled and migration parameter 999 * vcpu-dirty-limit untouched. 1000 */ 1001 if (dirtylimit_in_service() && 1002 quota_dirtyrate == s->parameters.vcpu_dirty_limit) { 1003 return; 1004 } 1005 1006 quota_dirtyrate = s->parameters.vcpu_dirty_limit; 1007 1008 /* 1009 * Set all vCPU a quota dirtyrate, note that the second 1010 * parameter will be ignored if setting all vCPU for the vm 1011 */ 1012 qmp_set_vcpu_dirty_limit(false, -1, quota_dirtyrate, NULL); 1013 trace_migration_dirty_limit_guest(quota_dirtyrate); 1014 } 1015 1016 static void migration_trigger_throttle(RAMState *rs) 1017 { 1018 uint64_t threshold = migrate_throttle_trigger_threshold(); 1019 uint64_t bytes_xfer_period = 1020 migration_transferred_bytes() - rs->bytes_xfer_prev; 1021 uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE; 1022 uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100; 1023 1024 /* During block migration the auto-converge logic incorrectly detects 1025 * that ram migration makes no progress. Avoid this by disabling the 1026 * throttling logic during the bulk phase of block migration. */ 1027 if (blk_mig_bulk_active()) { 1028 return; 1029 } 1030 1031 /* 1032 * The following detection logic can be refined later. For now: 1033 * Check to see if the ratio between dirtied bytes and the approx. 1034 * amount of bytes that just got transferred since the last time 1035 * we were in this routine reaches the threshold. If that happens 1036 * twice, start or increase throttling. 
     */
    if ((bytes_dirty_period > bytes_dirty_threshold) &&
        (++rs->dirty_rate_high_cnt >= 2)) {
        rs->dirty_rate_high_cnt = 0;
        if (migrate_auto_converge()) {
            trace_migration_throttle();
            mig_throttle_guest_down(bytes_dirty_period,
                                    bytes_dirty_threshold);
        } else if (migrate_dirty_limit()) {
            migration_dirty_limit_guest();
        }
    }
}

static void migration_bitmap_sync(RAMState *rs, bool last_stage)
{
    RAMBlock *block;
    int64_t end_time;

    stat64_add(&mig_stats.dirty_sync_count, 1);

    if (!rs->time_last_bitmap_sync) {
        rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    memory_global_dirty_log_sync(last_stage);

    qemu_mutex_lock(&rs->bitmap_mutex);
    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            ramblock_sync_dirty_bitmap(rs, block);
        }
        stat64_set(&mig_stats.dirty_bytes_last_sync, ram_bytes_remaining());
    }
    qemu_mutex_unlock(&rs->bitmap_mutex);

    memory_global_after_dirty_log_sync();
    trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);

    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > rs->time_last_bitmap_sync + 1000) {
        migration_trigger_throttle(rs);

        migration_update_rates(rs, end_time);

        rs->target_page_count_prev = rs->target_page_count;

        /* reset period counters */
        rs->time_last_bitmap_sync = end_time;
        rs->num_dirty_pages_period = 0;
        rs->bytes_xfer_prev = migration_transferred_bytes();
    }
    if (migrate_events()) {
        uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);
        qapi_event_send_migration_pass(generation);
    }
}

static void migration_bitmap_sync_precopy(RAMState *rs, bool last_stage)
{
    Error *local_err = NULL;

    /*
     * The current notifier usage is just an optimization to migration, so we
     * don't stop the normal migration process in the error case.
     */
    if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
        error_report_err(local_err);
        local_err = NULL;
    }

    migration_bitmap_sync(rs, last_stage);

    if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
        error_report_err(local_err);
    }
}
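
/*
 * With the release-ram capability, a page that has already been sent during
 * postcopy is discarded on the source right away (ram_discard_range() below),
 * keeping the source's memory footprint low while the destination takes over.
 */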
void ram_release_page(const char *rbname, uint64_t offset)
{
    if (!migrate_release_ram() || !migration_in_postcopy()) {
        return;
    }

    ram_discard_range(rbname, offset, TARGET_PAGE_SIZE);
}

/**
 * save_zero_page: send the zero page to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @pss: current PSS channel
 * @offset: offset inside the block for the page
 */
static int save_zero_page(RAMState *rs, PageSearchStatus *pss,
                          ram_addr_t offset)
{
    uint8_t *p = pss->block->host + offset;
    QEMUFile *file = pss->pss_channel;
    int len = 0;

    if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_NONE) {
        return 0;
    }

    if (!buffer_is_zero(p, TARGET_PAGE_SIZE)) {
        return 0;
    }

    stat64_add(&mig_stats.zero_pages, 1);

    if (migrate_mapped_ram()) {
        /* zero pages are not transferred with mapped-ram */
        clear_bit_atomic(offset >> TARGET_PAGE_BITS, pss->block->file_bmap);
        return 1;
    }

    len += save_page_header(pss, file, pss->block, offset | RAM_SAVE_FLAG_ZERO);
    qemu_put_byte(file, 0);
    len += 1;
    ram_release_page(pss->block->idstr, offset);
    ram_transferred_add(len);

    /*
     * Must let xbzrle know, otherwise a previous (now 0'd) cached
     * page would be stale.
     */
    if (rs->xbzrle_started) {
        XBZRLE_cache_lock();
        xbzrle_cache_zero_page(pss->block->offset + offset);
        XBZRLE_cache_unlock();
    }

    return len;
}

/*
 * @pages: the number of pages written by the control path,
 *         < 0 - error
 *         > 0 - number of pages written
 *
 * Returns true if the page has been saved, otherwise false is returned.
 */
static bool control_save_page(PageSearchStatus *pss,
                              ram_addr_t offset, int *pages)
{
    int ret;

    ret = rdma_control_save_page(pss->pss_channel, pss->block->offset, offset,
                                 TARGET_PAGE_SIZE);
    if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
        return false;
    }

    if (ret == RAM_SAVE_CONTROL_DELAYED) {
        *pages = 1;
        return true;
    }
    *pages = ret;
    return true;
}

/*
 * directly send the page to the stream
 *
 * Returns the number of pages written.
 *
 * @pss: current PSS channel
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @buf: the page to be sent
 * @async: send the page asynchronously
 */
static int save_normal_page(PageSearchStatus *pss, RAMBlock *block,
                            ram_addr_t offset, uint8_t *buf, bool async)
{
    QEMUFile *file = pss->pss_channel;

    if (migrate_mapped_ram()) {
        qemu_put_buffer_at(file, buf, TARGET_PAGE_SIZE,
                           block->pages_offset + offset);
        set_bit(offset >> TARGET_PAGE_BITS, block->file_bmap);
    } else {
        ram_transferred_add(save_page_header(pss, pss->pss_channel, block,
                                             offset | RAM_SAVE_FLAG_PAGE));
        if (async) {
            qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE,
                                  migrate_release_ram() &&
                                  migration_in_postcopy());
        } else {
            qemu_put_buffer(file, buf, TARGET_PAGE_SIZE);
        }
    }
    ram_transferred_add(TARGET_PAGE_SIZE);
    stat64_add(&mig_stats.normal_pages, 1);
    return 1;
}

/**
 * ram_save_page: send the given page to the stream
 *
 * Returns the number of pages written.
 *          < 0 - error
 *          >=0 - Number of pages written - this might legally be 0
 *                if xbzrle noticed the page was the same.
1247 * 1248 * @rs: current RAM state 1249 * @block: block that contains the page we want to send 1250 * @offset: offset inside the block for the page 1251 */ 1252 static int ram_save_page(RAMState *rs, PageSearchStatus *pss) 1253 { 1254 int pages = -1; 1255 uint8_t *p; 1256 bool send_async = true; 1257 RAMBlock *block = pss->block; 1258 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; 1259 ram_addr_t current_addr = block->offset + offset; 1260 1261 p = block->host + offset; 1262 trace_ram_save_page(block->idstr, (uint64_t)offset, p); 1263 1264 XBZRLE_cache_lock(); 1265 if (rs->xbzrle_started && !migration_in_postcopy()) { 1266 pages = save_xbzrle_page(rs, pss, &p, current_addr, 1267 block, offset); 1268 if (!rs->last_stage) { 1269 /* Can't send this cached data async, since the cache page 1270 * might get updated before it gets to the wire 1271 */ 1272 send_async = false; 1273 } 1274 } 1275 1276 /* XBZRLE overflow or normal page */ 1277 if (pages == -1) { 1278 pages = save_normal_page(pss, block, offset, p, send_async); 1279 } 1280 1281 XBZRLE_cache_unlock(); 1282 1283 return pages; 1284 } 1285 1286 static int ram_save_multifd_page(RAMBlock *block, ram_addr_t offset) 1287 { 1288 if (!multifd_queue_page(block, offset)) { 1289 return -1; 1290 } 1291 stat64_add(&mig_stats.normal_pages, 1); 1292 1293 return 1; 1294 } 1295 1296 int compress_send_queued_data(CompressParam *param) 1297 { 1298 PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_PRECOPY]; 1299 MigrationState *ms = migrate_get_current(); 1300 QEMUFile *file = ms->to_dst_file; 1301 int len = 0; 1302 1303 RAMBlock *block = param->block; 1304 ram_addr_t offset = param->offset; 1305 1306 if (param->result == RES_NONE) { 1307 return 0; 1308 } 1309 1310 assert(block == pss->last_sent_block); 1311 1312 if (param->result == RES_ZEROPAGE) { 1313 assert(qemu_file_buffer_empty(param->file)); 1314 len += save_page_header(pss, file, block, offset | RAM_SAVE_FLAG_ZERO); 1315 qemu_put_byte(file, 0); 1316 len += 1; 1317 ram_release_page(block->idstr, offset); 1318 } else if (param->result == RES_COMPRESS) { 1319 assert(!qemu_file_buffer_empty(param->file)); 1320 len += save_page_header(pss, file, block, 1321 offset | RAM_SAVE_FLAG_COMPRESS_PAGE); 1322 len += qemu_put_qemu_file(file, param->file); 1323 } else { 1324 abort(); 1325 } 1326 1327 update_compress_thread_counts(param, len); 1328 1329 return len; 1330 } 1331 1332 #define PAGE_ALL_CLEAN 0 1333 #define PAGE_TRY_AGAIN 1 1334 #define PAGE_DIRTY_FOUND 2 1335 /** 1336 * find_dirty_block: find the next dirty page and update any state 1337 * associated with the search process. 1338 * 1339 * Returns: 1340 * <0: An error happened 1341 * PAGE_ALL_CLEAN: no dirty page found, give up 1342 * PAGE_TRY_AGAIN: no dirty page found, retry for next block 1343 * PAGE_DIRTY_FOUND: dirty page found 1344 * 1345 * @rs: current RAM state 1346 * @pss: data about the state of the current dirty page scan 1347 * @again: set to false if the search has scanned the whole of RAM 1348 */ 1349 static int find_dirty_block(RAMState *rs, PageSearchStatus *pss) 1350 { 1351 /* Update pss->page for the next dirty bit in ramblock */ 1352 pss_find_next_dirty(pss); 1353 1354 if (pss->complete_round && pss->block == rs->last_seen_block && 1355 pss->page >= rs->last_page) { 1356 /* 1357 * We've been once around the RAM and haven't found anything. 1358 * Give up. 
         */
        return PAGE_ALL_CLEAN;
    }
    if (!offset_in_ramblock(pss->block,
                            ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)) {
        /* Didn't find anything in this RAM Block */
        pss->page = 0;
        pss->block = QLIST_NEXT_RCU(pss->block, next);
        if (!pss->block) {
            if (migrate_multifd() &&
                (!migrate_multifd_flush_after_each_section() ||
                 migrate_mapped_ram())) {
                QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
                int ret = multifd_send_sync_main();
                if (ret < 0) {
                    return ret;
                }

                if (!migrate_mapped_ram()) {
                    qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
                    qemu_fflush(f);
                }
            }
            /*
             * If memory migration starts over, we will meet a dirtied page
             * which may still exist in the compression threads' ring, so we
             * should flush the compressed data to make sure the new page
             * is not overwritten by the old one in the destination.
             *
             * Also, if xbzrle is on, stop using the data compression at this
             * point. In theory, xbzrle can do better than compression.
             */
            compress_flush_data();

            /* Hit the end of the list */
            pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
            /* Flag that we've looped */
            pss->complete_round = true;
            /* After the first round, enable XBZRLE. */
            if (migrate_xbzrle()) {
                rs->xbzrle_started = true;
            }
        }
        /* Didn't find anything this time, but try again on the new block */
        return PAGE_TRY_AGAIN;
    } else {
        /* We've found something */
        return PAGE_DIRTY_FOUND;
    }
}

/**
 * unqueue_page: gets a page of the queue
 *
 * Helper for 'get_queued_page' - gets a page off the queue
 *
 * Returns the block of the page (or NULL if none available)
 *
 * @rs: current RAM state
 * @offset: used to return the offset within the RAMBlock
 */
static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
{
    struct RAMSrcPageRequest *entry;
    RAMBlock *block = NULL;

    if (!postcopy_has_request(rs)) {
        return NULL;
    }

    QEMU_LOCK_GUARD(&rs->src_page_req_mutex);

    /*
     * This should _never_ change even after we take the lock, because no one
     * should be taking anything off the request list other than us.
1434 */ 1435 assert(postcopy_has_request(rs)); 1436 1437 entry = QSIMPLEQ_FIRST(&rs->src_page_requests); 1438 block = entry->rb; 1439 *offset = entry->offset; 1440 1441 if (entry->len > TARGET_PAGE_SIZE) { 1442 entry->len -= TARGET_PAGE_SIZE; 1443 entry->offset += TARGET_PAGE_SIZE; 1444 } else { 1445 memory_region_unref(block->mr); 1446 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req); 1447 g_free(entry); 1448 migration_consume_urgent_request(); 1449 } 1450 1451 return block; 1452 } 1453 1454 #if defined(__linux__) 1455 /** 1456 * poll_fault_page: try to get next UFFD write fault page and, if pending fault 1457 * is found, return RAM block pointer and page offset 1458 * 1459 * Returns pointer to the RAMBlock containing faulting page, 1460 * NULL if no write faults are pending 1461 * 1462 * @rs: current RAM state 1463 * @offset: page offset from the beginning of the block 1464 */ 1465 static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset) 1466 { 1467 struct uffd_msg uffd_msg; 1468 void *page_address; 1469 RAMBlock *block; 1470 int res; 1471 1472 if (!migrate_background_snapshot()) { 1473 return NULL; 1474 } 1475 1476 res = uffd_read_events(rs->uffdio_fd, &uffd_msg, 1); 1477 if (res <= 0) { 1478 return NULL; 1479 } 1480 1481 page_address = (void *)(uintptr_t) uffd_msg.arg.pagefault.address; 1482 block = qemu_ram_block_from_host(page_address, false, offset); 1483 assert(block && (block->flags & RAM_UF_WRITEPROTECT) != 0); 1484 return block; 1485 } 1486 1487 /** 1488 * ram_save_release_protection: release UFFD write protection after 1489 * a range of pages has been saved 1490 * 1491 * @rs: current RAM state 1492 * @pss: page-search-status structure 1493 * @start_page: index of the first page in the range relative to pss->block 1494 * 1495 * Returns 0 on success, negative value in case of an error 1496 */ 1497 static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss, 1498 unsigned long start_page) 1499 { 1500 int res = 0; 1501 1502 /* Check if page is from UFFD-managed region. */ 1503 if (pss->block->flags & RAM_UF_WRITEPROTECT) { 1504 void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS); 1505 uint64_t run_length = (pss->page - start_page) << TARGET_PAGE_BITS; 1506 1507 /* Flush async buffers before un-protect. */ 1508 qemu_fflush(pss->pss_channel); 1509 /* Un-protect memory range. 
*/
        res = uffd_change_protection(rs->uffdio_fd, page_address, run_length,
                                     false, false);
    }

    return res;
}

/* ram_write_tracking_available: check if kernel supports required UFFD features
 *
 * Returns true if supported, false otherwise
 */
bool ram_write_tracking_available(void)
{
    uint64_t uffd_features;
    int res;

    res = uffd_query_features(&uffd_features);
    return (res == 0 &&
            (uffd_features & UFFD_FEATURE_PAGEFAULT_FLAG_WP) != 0);
}

/* ram_write_tracking_compatible: check if guest configuration is
 * compatible with 'write-tracking'
 *
 * Returns true if compatible, false otherwise
 */
bool ram_write_tracking_compatible(void)
{
    const uint64_t uffd_ioctls_mask = BIT(_UFFDIO_WRITEPROTECT);
    int uffd_fd;
    RAMBlock *block;
    bool ret = false;

    /* Open UFFD file descriptor */
    uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, false);
    if (uffd_fd < 0) {
        return false;
    }

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        uint64_t uffd_ioctls;

        /* Nothing to do with read-only and MMIO-writable regions */
        if (block->mr->readonly || block->mr->rom_device) {
            continue;
        }
        /* Try to register block memory via UFFD-IO to track writes */
        if (uffd_register_memory(uffd_fd, block->host, block->max_length,
                                 UFFDIO_REGISTER_MODE_WP, &uffd_ioctls)) {
            goto out;
        }
        if ((uffd_ioctls & uffd_ioctls_mask) != uffd_ioctls_mask) {
            goto out;
        }
    }
    ret = true;

out:
    uffd_close_fd(uffd_fd);
    return ret;
}

static inline void populate_read_range(RAMBlock *block, ram_addr_t offset,
                                       ram_addr_t size)
{
    const ram_addr_t end = offset + size;

    /*
     * We read one byte of each page; this will preallocate page tables if
     * required and populate the shared zeropage on MAP_PRIVATE anonymous memory
     * where no page was populated yet. This might require adaptation when
     * supporting other mappings, like shmem.
     */
    for (; offset < end; offset += block->page_size) {
        char tmp = *((char *)block->host + offset);

        /* Don't optimize the read out */
        asm volatile("" : "+r" (tmp));
    }
}

static inline int populate_read_section(MemoryRegionSection *section,
                                        void *opaque)
{
    const hwaddr size = int128_get64(section->size);
    hwaddr offset = section->offset_within_region;
    RAMBlock *block = section->mr->ram_block;

    populate_read_range(block, offset, size);
    return 0;
}

/*
 * ram_block_populate_read: preallocate page tables and populate pages in the
 *   RAM block by reading a byte of each page.
 *
 * Since it's solely used for userfault_fd WP feature, here we just
 * hardcode page size to qemu_real_host_page_size.
 *
 * @block: RAM block to populate
 */
static void ram_block_populate_read(RAMBlock *rb)
{
    /*
     * Skip populating all pages that fall into a discarded range as managed by
     * a RamDiscardManager responsible for the mapped memory region of the
     * RAMBlock. Such discarded ("logically unplugged") parts of a RAMBlock
     * must not get populated automatically. We don't have to track
     * modifications via userfaultfd WP reliably, because these pages will
     * not be part of the migration stream either way -- see
     * ramblock_dirty_bitmap_clear_discarded_pages().
     *
     * Note: The result is only stable while migrating (precopy/postcopy).
     */
    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = 0,
            .size = rb->mr->size,
        };

        ram_discard_manager_replay_populated(rdm, &section,
                                             populate_read_section, NULL);
    } else {
        populate_read_range(rb, 0, rb->used_length);
    }
}

/*
 * ram_write_tracking_prepare: prepare for UFFD-WP memory tracking
 */
void ram_write_tracking_prepare(void)
{
    RAMBlock *block;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        /* Nothing to do with read-only and MMIO-writable regions */
        if (block->mr->readonly || block->mr->rom_device) {
            continue;
        }

        /*
         * Populate pages of the RAM block before enabling userfault_fd
         * write protection.
         *
         * This stage is required since ioctl(UFFDIO_WRITEPROTECT) with
         * UFFDIO_WRITEPROTECT_MODE_WP mode setting would silently skip
         * pages with pte_none() entries in page table.
         */
        ram_block_populate_read(block);
    }
}

static inline int uffd_protect_section(MemoryRegionSection *section,
                                       void *opaque)
{
    const hwaddr size = int128_get64(section->size);
    const hwaddr offset = section->offset_within_region;
    RAMBlock *rb = section->mr->ram_block;
    int uffd_fd = (uintptr_t)opaque;

    return uffd_change_protection(uffd_fd, rb->host + offset, size, true,
                                  false);
}

static int ram_block_uffd_protect(RAMBlock *rb, int uffd_fd)
{
    assert(rb->flags & RAM_UF_WRITEPROTECT);

    /* See ram_block_populate_read() */
    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = 0,
            .size = rb->mr->size,
        };

        return ram_discard_manager_replay_populated(rdm, &section,
                                                    uffd_protect_section,
                                                    (void *)(uintptr_t)uffd_fd);
    }
    return uffd_change_protection(uffd_fd, rb->host,
                                  rb->used_length, true, false);
}

/*
 * ram_write_tracking_start: start UFFD-WP memory tracking
 *
 * Returns 0 for success or negative value in case of error
 */
int ram_write_tracking_start(void)
{
    int uffd_fd;
    RAMState *rs = ram_state;
    RAMBlock *block;

    /* Open UFFD file descriptor */
    uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, true);
    if (uffd_fd < 0) {
        return uffd_fd;
    }
    rs->uffdio_fd = uffd_fd;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        /* Nothing to do with read-only and MMIO-writable regions */
        if (block->mr->readonly || block->mr->rom_device) {
            continue;
        }

        /* Register block memory with UFFD to track writes */
        if (uffd_register_memory(rs->uffdio_fd, block->host,
                                 block->max_length, UFFDIO_REGISTER_MODE_WP, NULL)) {
            goto fail;
        }
        block->flags |= RAM_UF_WRITEPROTECT;
        memory_region_ref(block->mr);

        /* Apply
UFFD write protection to the block memory range */ 1736 if (ram_block_uffd_protect(block, uffd_fd)) { 1737 goto fail; 1738 } 1739 1740 trace_ram_write_tracking_ramblock_start(block->idstr, block->page_size, 1741 block->host, block->max_length); 1742 } 1743 1744 return 0; 1745 1746 fail: 1747 error_report("ram_write_tracking_start() failed: restoring initial memory state"); 1748 1749 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 1750 if ((block->flags & RAM_UF_WRITEPROTECT) == 0) { 1751 continue; 1752 } 1753 uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length); 1754 /* Cleanup flags and remove reference */ 1755 block->flags &= ~RAM_UF_WRITEPROTECT; 1756 memory_region_unref(block->mr); 1757 } 1758 1759 uffd_close_fd(uffd_fd); 1760 rs->uffdio_fd = -1; 1761 return -1; 1762 } 1763 1764 /** 1765 * ram_write_tracking_stop: stop UFFD-WP memory tracking and remove protection 1766 */ 1767 void ram_write_tracking_stop(void) 1768 { 1769 RAMState *rs = ram_state; 1770 RAMBlock *block; 1771 1772 RCU_READ_LOCK_GUARD(); 1773 1774 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 1775 if ((block->flags & RAM_UF_WRITEPROTECT) == 0) { 1776 continue; 1777 } 1778 uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length); 1779 1780 trace_ram_write_tracking_ramblock_stop(block->idstr, block->page_size, 1781 block->host, block->max_length); 1782 1783 /* Cleanup flags and remove reference */ 1784 block->flags &= ~RAM_UF_WRITEPROTECT; 1785 memory_region_unref(block->mr); 1786 } 1787 1788 /* Finally close UFFD file descriptor */ 1789 uffd_close_fd(rs->uffdio_fd); 1790 rs->uffdio_fd = -1; 1791 } 1792 1793 #else 1794 /* No target OS support, stubs just fail or ignore */ 1795 1796 static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset) 1797 { 1798 (void) rs; 1799 (void) offset; 1800 1801 return NULL; 1802 } 1803 1804 static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss, 1805 unsigned long start_page) 1806 { 1807 (void) rs; 1808 (void) pss; 1809 (void) start_page; 1810 1811 return 0; 1812 } 1813 1814 bool ram_write_tracking_available(void) 1815 { 1816 return false; 1817 } 1818 1819 bool ram_write_tracking_compatible(void) 1820 { 1821 assert(0); 1822 return false; 1823 } 1824 1825 int ram_write_tracking_start(void) 1826 { 1827 assert(0); 1828 return -1; 1829 } 1830 1831 void ram_write_tracking_stop(void) 1832 { 1833 assert(0); 1834 } 1835 #endif /* defined(__linux__) */ 1836 1837 /** 1838 * get_queued_page: unqueue a page from the postcopy requests 1839 * 1840 * Skips pages that are already sent (!dirty) 1841 * 1842 * Returns true if a queued page is found 1843 * 1844 * @rs: current RAM state 1845 * @pss: data about the state of the current dirty page scan 1846 */ 1847 static bool get_queued_page(RAMState *rs, PageSearchStatus *pss) 1848 { 1849 RAMBlock *block; 1850 ram_addr_t offset; 1851 bool dirty; 1852 1853 do { 1854 block = unqueue_page(rs, &offset); 1855 /* 1856 * We're sending this page, and since it's postcopy nothing else 1857 * will dirty it, and we must make sure it doesn't get sent again 1858 * even if this queue request was received after the background 1859 * search already sent it. 
1860 */ 1861 if (block) { 1862 unsigned long page; 1863 1864 page = offset >> TARGET_PAGE_BITS; 1865 dirty = test_bit(page, block->bmap); 1866 if (!dirty) { 1867 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset, 1868 page); 1869 } else { 1870 trace_get_queued_page(block->idstr, (uint64_t)offset, page); 1871 } 1872 } 1873 1874 } while (block && !dirty); 1875 1876 if (!block) { 1877 /* 1878 * Poll write faults too if background snapshot is enabled; that's 1879 * when we have vcpus got blocked by the write protected pages. 1880 */ 1881 block = poll_fault_page(rs, &offset); 1882 } 1883 1884 if (block) { 1885 /* 1886 * We want the background search to continue from the queued page 1887 * since the guest is likely to want other pages near to the page 1888 * it just requested. 1889 */ 1890 pss->block = block; 1891 pss->page = offset >> TARGET_PAGE_BITS; 1892 1893 /* 1894 * This unqueued page would break the "one round" check, even is 1895 * really rare. 1896 */ 1897 pss->complete_round = false; 1898 } 1899 1900 return !!block; 1901 } 1902 1903 /** 1904 * migration_page_queue_free: drop any remaining pages in the ram 1905 * request queue 1906 * 1907 * It should be empty at the end anyway, but in error cases there may 1908 * be some left. in case that there is any page left, we drop it. 1909 * 1910 */ 1911 static void migration_page_queue_free(RAMState *rs) 1912 { 1913 struct RAMSrcPageRequest *mspr, *next_mspr; 1914 /* This queue generally should be empty - but in the case of a failed 1915 * migration might have some droppings in. 1916 */ 1917 RCU_READ_LOCK_GUARD(); 1918 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) { 1919 memory_region_unref(mspr->rb->mr); 1920 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req); 1921 g_free(mspr); 1922 } 1923 } 1924 1925 /** 1926 * ram_save_queue_pages: queue the page for transmission 1927 * 1928 * A request from postcopy destination for example. 1929 * 1930 * Returns zero on success or negative on error 1931 * 1932 * @rbname: Name of the RAMBLock of the request. NULL means the 1933 * same that last one. 1934 * @start: starting address from the start of the RAMBlock 1935 * @len: length (in bytes) to send 1936 */ 1937 int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len, 1938 Error **errp) 1939 { 1940 RAMBlock *ramblock; 1941 RAMState *rs = ram_state; 1942 1943 stat64_add(&mig_stats.postcopy_requests, 1); 1944 RCU_READ_LOCK_GUARD(); 1945 1946 if (!rbname) { 1947 /* Reuse last RAMBlock */ 1948 ramblock = rs->last_req_rb; 1949 1950 if (!ramblock) { 1951 /* 1952 * Shouldn't happen, we can't reuse the last RAMBlock if 1953 * it's the 1st request. 1954 */ 1955 error_setg(errp, "MIG_RP_MSG_REQ_PAGES has no previous block"); 1956 return -1; 1957 } 1958 } else { 1959 ramblock = qemu_ram_block_by_name(rbname); 1960 1961 if (!ramblock) { 1962 /* We shouldn't be asked for a non-existent RAMBlock */ 1963 error_setg(errp, "MIG_RP_MSG_REQ_PAGES has no block '%s'", rbname); 1964 return -1; 1965 } 1966 rs->last_req_rb = ramblock; 1967 } 1968 trace_ram_save_queue_pages(ramblock->idstr, start, len); 1969 if (!offset_in_ramblock(ramblock, start + len - 1)) { 1970 error_setg(errp, "MIG_RP_MSG_REQ_PAGES request overrun, " 1971 "start=" RAM_ADDR_FMT " len=" 1972 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT, 1973 start, len, ramblock->used_length); 1974 return -1; 1975 } 1976 1977 /* 1978 * When with postcopy preempt, we send back the page directly in the 1979 * rp-return thread. 
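 * Otherwise (no preempt), the request is only queued on
 * rs->src_page_requests further below and picked up later by
 * get_queued_page() in the migration thread.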
1980 */ 1981 if (postcopy_preempt_active()) { 1982 ram_addr_t page_start = start >> TARGET_PAGE_BITS; 1983 size_t page_size = qemu_ram_pagesize(ramblock); 1984 PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_POSTCOPY]; 1985 int ret = 0; 1986 1987 qemu_mutex_lock(&rs->bitmap_mutex); 1988 1989 pss_init(pss, ramblock, page_start); 1990 /* 1991 * Always use the preempt channel, and make sure it's there. It's 1992 * safe to access without lock, because when the rp-thread is running 1993 * we should be the only one operating on the qemufile 1994 */ 1995 pss->pss_channel = migrate_get_current()->postcopy_qemufile_src; 1996 assert(pss->pss_channel); 1997 1998 /* 1999 * The length must be one or a multiple of the host page size. Just 2000 * assert; if something is wrong we're mostly split-brain anyway. 2001 */ 2002 assert(len % page_size == 0); 2003 while (len) { 2004 if (ram_save_host_page_urgent(pss)) { 2005 error_setg(errp, "ram_save_host_page_urgent() failed: " 2006 "ramblock=%s, start_addr=0x"RAM_ADDR_FMT, 2007 ramblock->idstr, start); 2008 ret = -1; 2009 break; 2010 } 2011 /* 2012 * NOTE: after ram_save_host_page_urgent() succeeded, pss->page 2013 * will automatically be moved and point to the next host page 2014 * we're going to send, so no need to update here. 2015 * 2016 * Normally QEMU never sends >1 host page in a request, so 2017 * logically the loop should only run once, but keep it for 2018 * consistency. 2019 */ 2020 len -= page_size; 2021 } 2022 qemu_mutex_unlock(&rs->bitmap_mutex); 2023 2024 return ret; 2025 } 2026 2027 struct RAMSrcPageRequest *new_entry = 2028 g_new0(struct RAMSrcPageRequest, 1); 2029 new_entry->rb = ramblock; 2030 new_entry->offset = start; 2031 new_entry->len = len; 2032 2033 memory_region_ref(ramblock->mr); 2034 qemu_mutex_lock(&rs->src_page_req_mutex); 2035 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req); 2036 migration_make_urgent_request(); 2037 qemu_mutex_unlock(&rs->src_page_req_mutex); 2038 2039 return 0; 2040 } 2041 2042 /* 2043 * Try to compress the page before posting it out; return true if the page 2044 * has been properly handled by compression, otherwise other paths 2045 * need to handle it 2046 */ 2047 static bool save_compress_page(RAMState *rs, PageSearchStatus *pss, 2048 ram_addr_t offset) 2049 { 2050 if (!migrate_compress()) { 2051 return false; 2052 } 2053 2054 /* 2055 * When starting a new block, the first page of 2056 * the block should be sent out before other pages in the same 2057 * block, and all the pages in the last block should have been sent 2058 * out. Keeping this order is important, because the 'cont' flag 2059 * is used to avoid resending the block name. 2060 * 2061 * We post the first page as a normal page as compression will take 2062 * much CPU resource.
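 * Returning false for that first page therefore lets the caller fall
 * back to the regular, uncompressed path after any queued compressed
 * data has been flushed.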
2063 */ 2064 if (pss->block != pss->last_sent_block) { 2065 compress_flush_data(); 2066 return false; 2067 } 2068 2069 return compress_page_with_multi_thread(pss->block, offset, 2070 compress_send_queued_data); 2071 } 2072 2073 /** 2074 * ram_save_target_page_legacy: save one target page 2075 * 2076 * Returns the number of pages written 2077 * 2078 * @rs: current RAM state 2079 * @pss: data about the page we want to send 2080 */ 2081 static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss) 2082 { 2083 RAMBlock *block = pss->block; 2084 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; 2085 int res; 2086 2087 if (control_save_page(pss, offset, &res)) { 2088 return res; 2089 } 2090 2091 if (save_compress_page(rs, pss, offset)) { 2092 return 1; 2093 } 2094 2095 if (save_zero_page(rs, pss, offset)) { 2096 return 1; 2097 } 2098 2099 /* 2100 * Do not use multifd in postcopy as one whole host page should be 2101 * placed. Meanwhile postcopy requires atomic update of pages, so even 2102 * if host page size == guest page size the dest guest during run may 2103 * still see partially copied pages which is data corruption. 2104 */ 2105 if (migrate_multifd() && !migration_in_postcopy()) { 2106 return ram_save_multifd_page(block, offset); 2107 } 2108 2109 return ram_save_page(rs, pss); 2110 } 2111 2112 /* Should be called before sending a host page */ 2113 static void pss_host_page_prepare(PageSearchStatus *pss) 2114 { 2115 /* How many guest pages are there in one host page? */ 2116 size_t guest_pfns = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS; 2117 2118 pss->host_page_sending = true; 2119 if (guest_pfns <= 1) { 2120 /* 2121 * This covers both when guest psize == host psize, or when guest 2122 * has larger psize than the host (guest_pfns==0). 2123 * 2124 * For the latter, we always send one whole guest page per 2125 * iteration of the host page (example: an Alpha VM on x86 host 2126 * will have guest psize 8K while host psize 4K). 2127 */ 2128 pss->host_page_start = pss->page; 2129 pss->host_page_end = pss->page + 1; 2130 } else { 2131 /* 2132 * The host page spans over multiple guest pages, we send them 2133 * within the same host page iteration. 2134 */ 2135 pss->host_page_start = ROUND_DOWN(pss->page, guest_pfns); 2136 pss->host_page_end = ROUND_UP(pss->page + 1, guest_pfns); 2137 } 2138 } 2139 2140 /* 2141 * Whether the page pointed by PSS is within the host page being sent. 2142 * Must be called after a previous pss_host_page_prepare(). 2143 */ 2144 static bool pss_within_range(PageSearchStatus *pss) 2145 { 2146 ram_addr_t ram_addr; 2147 2148 assert(pss->host_page_sending); 2149 2150 /* Over host-page boundary? */ 2151 if (pss->page >= pss->host_page_end) { 2152 return false; 2153 } 2154 2155 ram_addr = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; 2156 2157 return offset_in_ramblock(pss->block, ram_addr); 2158 } 2159 2160 static void pss_host_page_finish(PageSearchStatus *pss) 2161 { 2162 pss->host_page_sending = false; 2163 /* This is not needed, but just to reset it */ 2164 pss->host_page_start = pss->host_page_end = 0; 2165 } 2166 2167 /* 2168 * Send an urgent host page specified by `pss'. Need to be called with 2169 * bitmap_mutex held. 2170 * 2171 * Returns 0 if save host page succeeded, false otherwise. 
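 * (With the current implementation that means 0 on success and -1 if
 * ram_save_target_page() fails for any dirty page in the range.)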
2172 */ 2173 static int ram_save_host_page_urgent(PageSearchStatus *pss) 2174 { 2175 bool page_dirty, sent = false; 2176 RAMState *rs = ram_state; 2177 int ret = 0; 2178 2179 trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page); 2180 pss_host_page_prepare(pss); 2181 2182 /* 2183 * If precopy is sending the same page, let it be done in precopy, or 2184 * we could send the same page in two channels and none of them will 2185 * receive the whole page. 2186 */ 2187 if (pss_overlap(pss, &ram_state->pss[RAM_CHANNEL_PRECOPY])) { 2188 trace_postcopy_preempt_hit(pss->block->idstr, 2189 pss->page << TARGET_PAGE_BITS); 2190 return 0; 2191 } 2192 2193 do { 2194 page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page); 2195 2196 if (page_dirty) { 2197 /* Be strict to return code; it must be 1, or what else? */ 2198 if (migration_ops->ram_save_target_page(rs, pss) != 1) { 2199 error_report_once("%s: ram_save_target_page failed", __func__); 2200 ret = -1; 2201 goto out; 2202 } 2203 sent = true; 2204 } 2205 pss_find_next_dirty(pss); 2206 } while (pss_within_range(pss)); 2207 out: 2208 pss_host_page_finish(pss); 2209 /* For urgent requests, flush immediately if sent */ 2210 if (sent) { 2211 qemu_fflush(pss->pss_channel); 2212 } 2213 return ret; 2214 } 2215 2216 /** 2217 * ram_save_host_page: save a whole host page 2218 * 2219 * Starting at *offset send pages up to the end of the current host 2220 * page. It's valid for the initial offset to point into the middle of 2221 * a host page in which case the remainder of the hostpage is sent. 2222 * Only dirty target pages are sent. Note that the host page size may 2223 * be a huge page for this block. 2224 * 2225 * The saving stops at the boundary of the used_length of the block 2226 * if the RAMBlock isn't a multiple of the host page size. 2227 * 2228 * The caller must be with ram_state.bitmap_mutex held to call this 2229 * function. Note that this function can temporarily release the lock, but 2230 * when the function is returned it'll make sure the lock is still held. 2231 * 2232 * Returns the number of pages written or negative on error 2233 * 2234 * @rs: current RAM state 2235 * @pss: data about the page we want to send 2236 */ 2237 static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss) 2238 { 2239 bool page_dirty, preempt_active = postcopy_preempt_active(); 2240 int tmppages, pages = 0; 2241 size_t pagesize_bits = 2242 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS; 2243 unsigned long start_page = pss->page; 2244 int res; 2245 2246 if (migrate_ram_is_ignored(pss->block)) { 2247 error_report("block %s should not be migrated !", pss->block->idstr); 2248 return 0; 2249 } 2250 2251 /* Update host page boundary information */ 2252 pss_host_page_prepare(pss); 2253 2254 do { 2255 page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page); 2256 2257 /* Check the pages is dirty and if it is send it */ 2258 if (page_dirty) { 2259 /* 2260 * Properly yield the lock only in postcopy preempt mode 2261 * because both migration thread and rp-return thread can 2262 * operate on the bitmaps. 2263 */ 2264 if (preempt_active) { 2265 qemu_mutex_unlock(&rs->bitmap_mutex); 2266 } 2267 tmppages = migration_ops->ram_save_target_page(rs, pss); 2268 if (tmppages >= 0) { 2269 pages += tmppages; 2270 /* 2271 * Allow rate limiting to happen in the middle of huge pages if 2272 * something is sent in the current iteration. 
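 * E.g. with 4K target pages, a 2M hugepage gives pagesize_bits == 512,
 * so without this call a single host page could run far past the rate
 * limit before it is checked again.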
2273 */ 2274 if (pagesize_bits > 1 && tmppages > 0) { 2275 migration_rate_limit(); 2276 } 2277 } 2278 if (preempt_active) { 2279 qemu_mutex_lock(&rs->bitmap_mutex); 2280 } 2281 } else { 2282 tmppages = 0; 2283 } 2284 2285 if (tmppages < 0) { 2286 pss_host_page_finish(pss); 2287 return tmppages; 2288 } 2289 2290 pss_find_next_dirty(pss); 2291 } while (pss_within_range(pss)); 2292 2293 pss_host_page_finish(pss); 2294 2295 res = ram_save_release_protection(rs, pss, start_page); 2296 return (res < 0 ? res : pages); 2297 } 2298 2299 /** 2300 * ram_find_and_save_block: finds a dirty page and sends it to f 2301 * 2302 * Called within an RCU critical section. 2303 * 2304 * Returns the number of pages written where zero means no dirty pages, 2305 * or negative on error 2306 * 2307 * @rs: current RAM state 2308 * 2309 * On systems where host-page-size > target-page-size it will send all the 2310 * pages in a host page that are dirty. 2311 */ 2312 static int ram_find_and_save_block(RAMState *rs) 2313 { 2314 PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY]; 2315 int pages = 0; 2316 2317 /* No dirty page as there is zero RAM */ 2318 if (!rs->ram_bytes_total) { 2319 return pages; 2320 } 2321 2322 /* 2323 * Always keep last_seen_block/last_page valid during this procedure, 2324 * because find_dirty_block() relies on these values (e.g., we compare 2325 * last_seen_block with pss.block to see whether we searched all the 2326 * ramblocks) to detect the completion of migration. Having NULL value 2327 * of last_seen_block can conditionally cause below loop to run forever. 2328 */ 2329 if (!rs->last_seen_block) { 2330 rs->last_seen_block = QLIST_FIRST_RCU(&ram_list.blocks); 2331 rs->last_page = 0; 2332 } 2333 2334 pss_init(pss, rs->last_seen_block, rs->last_page); 2335 2336 while (true){ 2337 if (!get_queued_page(rs, pss)) { 2338 /* priority queue empty, so just search for something dirty */ 2339 int res = find_dirty_block(rs, pss); 2340 if (res != PAGE_DIRTY_FOUND) { 2341 if (res == PAGE_ALL_CLEAN) { 2342 break; 2343 } else if (res == PAGE_TRY_AGAIN) { 2344 continue; 2345 } else if (res < 0) { 2346 pages = res; 2347 break; 2348 } 2349 } 2350 } 2351 pages = ram_save_host_page(rs, pss); 2352 if (pages) { 2353 break; 2354 } 2355 } 2356 2357 rs->last_seen_block = pss->block; 2358 rs->last_page = pss->page; 2359 2360 return pages; 2361 } 2362 2363 static uint64_t ram_bytes_total_with_ignored(void) 2364 { 2365 RAMBlock *block; 2366 uint64_t total = 0; 2367 2368 RCU_READ_LOCK_GUARD(); 2369 2370 RAMBLOCK_FOREACH_MIGRATABLE(block) { 2371 total += block->used_length; 2372 } 2373 return total; 2374 } 2375 2376 uint64_t ram_bytes_total(void) 2377 { 2378 RAMBlock *block; 2379 uint64_t total = 0; 2380 2381 RCU_READ_LOCK_GUARD(); 2382 2383 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2384 total += block->used_length; 2385 } 2386 return total; 2387 } 2388 2389 static void xbzrle_load_setup(void) 2390 { 2391 XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE); 2392 } 2393 2394 static void xbzrle_load_cleanup(void) 2395 { 2396 g_free(XBZRLE.decoded_buf); 2397 XBZRLE.decoded_buf = NULL; 2398 } 2399 2400 static void ram_state_cleanup(RAMState **rsp) 2401 { 2402 if (*rsp) { 2403 migration_page_queue_free(*rsp); 2404 qemu_mutex_destroy(&(*rsp)->bitmap_mutex); 2405 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex); 2406 g_free(*rsp); 2407 *rsp = NULL; 2408 } 2409 } 2410 2411 static void xbzrle_cleanup(void) 2412 { 2413 XBZRLE_cache_lock(); 2414 if (XBZRLE.cache) { 2415 cache_fini(XBZRLE.cache); 2416 g_free(XBZRLE.encoded_buf); 2417 
g_free(XBZRLE.current_buf); 2418 g_free(XBZRLE.zero_target_page); 2419 XBZRLE.cache = NULL; 2420 XBZRLE.encoded_buf = NULL; 2421 XBZRLE.current_buf = NULL; 2422 XBZRLE.zero_target_page = NULL; 2423 } 2424 XBZRLE_cache_unlock(); 2425 } 2426 2427 static void ram_save_cleanup(void *opaque) 2428 { 2429 RAMState **rsp = opaque; 2430 RAMBlock *block; 2431 2432 /* We don't use dirty log with background snapshots */ 2433 if (!migrate_background_snapshot()) { 2434 /* caller have hold BQL or is in a bh, so there is 2435 * no writing race against the migration bitmap 2436 */ 2437 if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) { 2438 /* 2439 * do not stop dirty log without starting it, since 2440 * memory_global_dirty_log_stop will assert that 2441 * memory_global_dirty_log_start/stop used in pairs 2442 */ 2443 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION); 2444 } 2445 } 2446 2447 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2448 g_free(block->clear_bmap); 2449 block->clear_bmap = NULL; 2450 g_free(block->bmap); 2451 block->bmap = NULL; 2452 } 2453 2454 xbzrle_cleanup(); 2455 compress_threads_save_cleanup(); 2456 ram_state_cleanup(rsp); 2457 g_free(migration_ops); 2458 migration_ops = NULL; 2459 } 2460 2461 static void ram_state_reset(RAMState *rs) 2462 { 2463 int i; 2464 2465 for (i = 0; i < RAM_CHANNEL_MAX; i++) { 2466 rs->pss[i].last_sent_block = NULL; 2467 } 2468 2469 rs->last_seen_block = NULL; 2470 rs->last_page = 0; 2471 rs->last_version = ram_list.version; 2472 rs->xbzrle_started = false; 2473 } 2474 2475 #define MAX_WAIT 50 /* ms, half buffered_file limit */ 2476 2477 /* **** functions for postcopy ***** */ 2478 2479 void ram_postcopy_migrated_memory_release(MigrationState *ms) 2480 { 2481 struct RAMBlock *block; 2482 2483 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2484 unsigned long *bitmap = block->bmap; 2485 unsigned long range = block->used_length >> TARGET_PAGE_BITS; 2486 unsigned long run_start = find_next_zero_bit(bitmap, range, 0); 2487 2488 while (run_start < range) { 2489 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1); 2490 ram_discard_range(block->idstr, 2491 ((ram_addr_t)run_start) << TARGET_PAGE_BITS, 2492 ((ram_addr_t)(run_end - run_start)) 2493 << TARGET_PAGE_BITS); 2494 run_start = find_next_zero_bit(bitmap, range, run_end + 1); 2495 } 2496 } 2497 } 2498 2499 /** 2500 * postcopy_send_discard_bm_ram: discard a RAMBlock 2501 * 2502 * Callback from postcopy_each_ram_send_discard for each RAMBlock 2503 * 2504 * @ms: current migration state 2505 * @block: RAMBlock to discard 2506 */ 2507 static void postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block) 2508 { 2509 unsigned long end = block->used_length >> TARGET_PAGE_BITS; 2510 unsigned long current; 2511 unsigned long *bitmap = block->bmap; 2512 2513 for (current = 0; current < end; ) { 2514 unsigned long one = find_next_bit(bitmap, end, current); 2515 unsigned long zero, discard_length; 2516 2517 if (one >= end) { 2518 break; 2519 } 2520 2521 zero = find_next_zero_bit(bitmap, end, one + 1); 2522 2523 if (zero >= end) { 2524 discard_length = end - one; 2525 } else { 2526 discard_length = zero - one; 2527 } 2528 postcopy_discard_send_range(ms, one, discard_length); 2529 current = one + discard_length; 2530 } 2531 } 2532 2533 static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block); 2534 2535 /** 2536 * postcopy_each_ram_send_discard: discard all RAMBlocks 2537 * 2538 * Utility for the outgoing postcopy code. 
2539 * Calls postcopy_send_discard_bm_ram for each RAMBlock 2540 * passing it bitmap indexes and name. 2541 * (qemu_ram_foreach_block ends up passing unscaled lengths 2542 * which would mean postcopy code would have to deal with target page) 2543 * 2544 * @ms: current migration state 2545 */ 2546 static void postcopy_each_ram_send_discard(MigrationState *ms) 2547 { 2548 struct RAMBlock *block; 2549 2550 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2551 postcopy_discard_send_init(ms, block->idstr); 2552 2553 /* 2554 * Deal with TPS != HPS and huge pages. It discard any partially sent 2555 * host-page size chunks, mark any partially dirty host-page size 2556 * chunks as all dirty. In this case the host-page is the host-page 2557 * for the particular RAMBlock, i.e. it might be a huge page. 2558 */ 2559 postcopy_chunk_hostpages_pass(ms, block); 2560 2561 /* 2562 * Postcopy sends chunks of bitmap over the wire, but it 2563 * just needs indexes at this point, avoids it having 2564 * target page specific code. 2565 */ 2566 postcopy_send_discard_bm_ram(ms, block); 2567 postcopy_discard_send_finish(ms); 2568 } 2569 } 2570 2571 /** 2572 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages 2573 * 2574 * Helper for postcopy_chunk_hostpages; it's called twice to 2575 * canonicalize the two bitmaps, that are similar, but one is 2576 * inverted. 2577 * 2578 * Postcopy requires that all target pages in a hostpage are dirty or 2579 * clean, not a mix. This function canonicalizes the bitmaps. 2580 * 2581 * @ms: current migration state 2582 * @block: block that contains the page we want to canonicalize 2583 */ 2584 static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block) 2585 { 2586 RAMState *rs = ram_state; 2587 unsigned long *bitmap = block->bmap; 2588 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE; 2589 unsigned long pages = block->used_length >> TARGET_PAGE_BITS; 2590 unsigned long run_start; 2591 2592 if (block->page_size == TARGET_PAGE_SIZE) { 2593 /* Easy case - TPS==HPS for a non-huge page RAMBlock */ 2594 return; 2595 } 2596 2597 /* Find a dirty page */ 2598 run_start = find_next_bit(bitmap, pages, 0); 2599 2600 while (run_start < pages) { 2601 2602 /* 2603 * If the start of this run of pages is in the middle of a host 2604 * page, then we need to fixup this host page. 2605 */ 2606 if (QEMU_IS_ALIGNED(run_start, host_ratio)) { 2607 /* Find the end of this run */ 2608 run_start = find_next_zero_bit(bitmap, pages, run_start + 1); 2609 /* 2610 * If the end isn't at the start of a host page, then the 2611 * run doesn't finish at the end of a host page 2612 * and we need to discard. 2613 */ 2614 } 2615 2616 if (!QEMU_IS_ALIGNED(run_start, host_ratio)) { 2617 unsigned long page; 2618 unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start, 2619 host_ratio); 2620 run_start = QEMU_ALIGN_UP(run_start, host_ratio); 2621 2622 /* Clean up the bitmap */ 2623 for (page = fixup_start_addr; 2624 page < fixup_start_addr + host_ratio; page++) { 2625 /* 2626 * Remark them as dirty, updating the count for any pages 2627 * that weren't previously dirty. 
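 * (test_and_set_bit() returns the previous value of the bit, so the
 * addition below only counts pages that actually flip from clean to
 * dirty.)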
2628 */ 2629 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap); 2630 } 2631 } 2632 2633 /* Find the next dirty page for the next iteration */ 2634 run_start = find_next_bit(bitmap, pages, run_start); 2635 } 2636 } 2637 2638 /** 2639 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap 2640 * 2641 * Transmit the set of pages to be discarded after precopy to the target 2642 * these are pages that: 2643 * a) Have been previously transmitted but are now dirty again 2644 * b) Pages that have never been transmitted, this ensures that 2645 * any pages on the destination that have been mapped by background 2646 * tasks get discarded (transparent huge pages is the specific concern) 2647 * Hopefully this is pretty sparse 2648 * 2649 * @ms: current migration state 2650 */ 2651 void ram_postcopy_send_discard_bitmap(MigrationState *ms) 2652 { 2653 RAMState *rs = ram_state; 2654 2655 RCU_READ_LOCK_GUARD(); 2656 2657 /* This should be our last sync, the src is now paused */ 2658 migration_bitmap_sync(rs, false); 2659 2660 /* Easiest way to make sure we don't resume in the middle of a host-page */ 2661 rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL; 2662 rs->last_seen_block = NULL; 2663 rs->last_page = 0; 2664 2665 postcopy_each_ram_send_discard(ms); 2666 2667 trace_ram_postcopy_send_discard_bitmap(); 2668 } 2669 2670 /** 2671 * ram_discard_range: discard dirtied pages at the beginning of postcopy 2672 * 2673 * Returns zero on success 2674 * 2675 * @rbname: name of the RAMBlock of the request. NULL means the 2676 * same that last one. 2677 * @start: RAMBlock starting page 2678 * @length: RAMBlock size 2679 */ 2680 int ram_discard_range(const char *rbname, uint64_t start, size_t length) 2681 { 2682 trace_ram_discard_range(rbname, start, length); 2683 2684 RCU_READ_LOCK_GUARD(); 2685 RAMBlock *rb = qemu_ram_block_by_name(rbname); 2686 2687 if (!rb) { 2688 error_report("ram_discard_range: Failed to find block '%s'", rbname); 2689 return -1; 2690 } 2691 2692 /* 2693 * On source VM, we don't need to update the received bitmap since 2694 * we don't even have one. 2695 */ 2696 if (rb->receivedmap) { 2697 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(), 2698 length >> qemu_target_page_bits()); 2699 } 2700 2701 return ram_block_discard_range(rb, start, length); 2702 } 2703 2704 /* 2705 * For every allocation, we will try not to crash the VM if the 2706 * allocation failed. 
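 * That is why the g_try_malloc*() variants are used below: they return
 * NULL on failure instead of aborting, and the error path unwinds
 * whatever was already allocated before returning -ENOMEM.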
2707 */ 2708 static int xbzrle_init(void) 2709 { 2710 Error *local_err = NULL; 2711 2712 if (!migrate_xbzrle()) { 2713 return 0; 2714 } 2715 2716 XBZRLE_cache_lock(); 2717 2718 XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE); 2719 if (!XBZRLE.zero_target_page) { 2720 error_report("%s: Error allocating zero page", __func__); 2721 goto err_out; 2722 } 2723 2724 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(), 2725 TARGET_PAGE_SIZE, &local_err); 2726 if (!XBZRLE.cache) { 2727 error_report_err(local_err); 2728 goto free_zero_page; 2729 } 2730 2731 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE); 2732 if (!XBZRLE.encoded_buf) { 2733 error_report("%s: Error allocating encoded_buf", __func__); 2734 goto free_cache; 2735 } 2736 2737 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE); 2738 if (!XBZRLE.current_buf) { 2739 error_report("%s: Error allocating current_buf", __func__); 2740 goto free_encoded_buf; 2741 } 2742 2743 /* We are all good */ 2744 XBZRLE_cache_unlock(); 2745 return 0; 2746 2747 free_encoded_buf: 2748 g_free(XBZRLE.encoded_buf); 2749 XBZRLE.encoded_buf = NULL; 2750 free_cache: 2751 cache_fini(XBZRLE.cache); 2752 XBZRLE.cache = NULL; 2753 free_zero_page: 2754 g_free(XBZRLE.zero_target_page); 2755 XBZRLE.zero_target_page = NULL; 2756 err_out: 2757 XBZRLE_cache_unlock(); 2758 return -ENOMEM; 2759 } 2760 2761 static int ram_state_init(RAMState **rsp) 2762 { 2763 *rsp = g_try_new0(RAMState, 1); 2764 2765 if (!*rsp) { 2766 error_report("%s: Init ramstate fail", __func__); 2767 return -1; 2768 } 2769 2770 qemu_mutex_init(&(*rsp)->bitmap_mutex); 2771 qemu_mutex_init(&(*rsp)->src_page_req_mutex); 2772 QSIMPLEQ_INIT(&(*rsp)->src_page_requests); 2773 (*rsp)->ram_bytes_total = ram_bytes_total(); 2774 2775 /* 2776 * Count the total number of pages used by ram blocks not including any 2777 * gaps due to alignment or unplugs. 2778 * This must match with the initial values of dirty bitmap. 2779 */ 2780 (*rsp)->migration_dirty_pages = (*rsp)->ram_bytes_total >> TARGET_PAGE_BITS; 2781 ram_state_reset(*rsp); 2782 2783 return 0; 2784 } 2785 2786 static void ram_list_init_bitmaps(void) 2787 { 2788 MigrationState *ms = migrate_get_current(); 2789 RAMBlock *block; 2790 unsigned long pages; 2791 uint8_t shift; 2792 2793 /* Skip setting bitmap if there is no RAM */ 2794 if (ram_bytes_total()) { 2795 shift = ms->clear_bitmap_shift; 2796 if (shift > CLEAR_BITMAP_SHIFT_MAX) { 2797 error_report("clear_bitmap_shift (%u) too big, using " 2798 "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX); 2799 shift = CLEAR_BITMAP_SHIFT_MAX; 2800 } else if (shift < CLEAR_BITMAP_SHIFT_MIN) { 2801 error_report("clear_bitmap_shift (%u) too small, using " 2802 "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN); 2803 shift = CLEAR_BITMAP_SHIFT_MIN; 2804 } 2805 2806 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2807 pages = block->max_length >> TARGET_PAGE_BITS; 2808 /* 2809 * The initial dirty bitmap for migration must be set with all 2810 * ones to make sure we'll migrate every guest RAM page to 2811 * destination. 2812 * Here we set RAMBlock.bmap all to 1 because when rebegin a 2813 * new migration after a failed migration, ram_list. 2814 * dirty_memory[DIRTY_MEMORY_MIGRATION] don't include the whole 2815 * guest memory. 
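 * (The discarded-page fixup in migration_bitmap_clear_discarded_pages(),
 * run right after the first bitmap sync, then trims these all-ones
 * bitmaps back down.)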
2816 */ 2817 block->bmap = bitmap_new(pages); 2818 bitmap_set(block->bmap, 0, pages); 2819 if (migrate_mapped_ram()) { 2820 block->file_bmap = bitmap_new(pages); 2821 } 2822 block->clear_bmap_shift = shift; 2823 block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift)); 2824 } 2825 } 2826 } 2827 2828 static void migration_bitmap_clear_discarded_pages(RAMState *rs) 2829 { 2830 unsigned long pages; 2831 RAMBlock *rb; 2832 2833 RCU_READ_LOCK_GUARD(); 2834 2835 RAMBLOCK_FOREACH_NOT_IGNORED(rb) { 2836 pages = ramblock_dirty_bitmap_clear_discarded_pages(rb); 2837 rs->migration_dirty_pages -= pages; 2838 } 2839 } 2840 2841 static void ram_init_bitmaps(RAMState *rs) 2842 { 2843 qemu_mutex_lock_ramlist(); 2844 2845 WITH_RCU_READ_LOCK_GUARD() { 2846 ram_list_init_bitmaps(); 2847 /* We don't use dirty log with background snapshots */ 2848 if (!migrate_background_snapshot()) { 2849 memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION); 2850 migration_bitmap_sync_precopy(rs, false); 2851 } 2852 } 2853 qemu_mutex_unlock_ramlist(); 2854 2855 /* 2856 * After an eventual first bitmap sync, fixup the initial bitmap 2857 * containing all 1s to exclude any discarded pages from migration. 2858 */ 2859 migration_bitmap_clear_discarded_pages(rs); 2860 } 2861 2862 static int ram_init_all(RAMState **rsp) 2863 { 2864 if (ram_state_init(rsp)) { 2865 return -1; 2866 } 2867 2868 if (xbzrle_init()) { 2869 ram_state_cleanup(rsp); 2870 return -1; 2871 } 2872 2873 ram_init_bitmaps(*rsp); 2874 2875 return 0; 2876 } 2877 2878 static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out) 2879 { 2880 RAMBlock *block; 2881 uint64_t pages = 0; 2882 2883 /* 2884 * Postcopy is not using xbzrle/compression, so no need for that. 2885 * Also, since source are already halted, we don't need to care 2886 * about dirty page logging as well. 2887 */ 2888 2889 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2890 pages += bitmap_count_one(block->bmap, 2891 block->used_length >> TARGET_PAGE_BITS); 2892 } 2893 2894 /* This may not be aligned with current bitmaps. Recalculate. */ 2895 rs->migration_dirty_pages = pages; 2896 2897 ram_state_reset(rs); 2898 2899 /* Update RAMState cache of output QEMUFile */ 2900 rs->pss[RAM_CHANNEL_PRECOPY].pss_channel = out; 2901 2902 trace_ram_state_resume_prepare(pages); 2903 } 2904 2905 /* 2906 * This function clears bits of the free pages reported by the caller from the 2907 * migration dirty bitmap. @addr is the host address corresponding to the 2908 * start of the continuous guest free pages, and @len is the total bytes of 2909 * those pages. 2910 */ 2911 void qemu_guest_free_page_hint(void *addr, size_t len) 2912 { 2913 RAMBlock *block; 2914 ram_addr_t offset; 2915 size_t used_len, start, npages; 2916 2917 /* This function is currently expected to be used during live migration */ 2918 if (!migration_is_setup_or_active()) { 2919 return; 2920 } 2921 2922 for (; len > 0; len -= used_len, addr += used_len) { 2923 block = qemu_ram_block_from_host(addr, false, &offset); 2924 if (unlikely(!block || offset >= block->used_length)) { 2925 /* 2926 * The implementation might not support RAMBlock resize during 2927 * live migration, but it could happen in theory with future 2928 * updates. So we add a check here to capture that case. 
*/ 2930 error_report_once("%s unexpected error", __func__); 2931 return; 2932 } 2933 2934 if (len <= block->used_length - offset) { 2935 used_len = len; 2936 } else { 2937 used_len = block->used_length - offset; 2938 } 2939 2940 start = offset >> TARGET_PAGE_BITS; 2941 npages = used_len >> TARGET_PAGE_BITS; 2942 2943 qemu_mutex_lock(&ram_state->bitmap_mutex); 2944 /* 2945 * The skipped free pages are equivalent to having been sent from clear_bmap's 2946 * perspective, so clear the bits from the memory region bitmap which 2947 * are initially set. Otherwise those skipped pages will be sent in 2948 * the next round after syncing from the memory region bitmap. 2949 */ 2950 migration_clear_memory_region_dirty_bitmap_range(block, start, npages); 2951 ram_state->migration_dirty_pages -= 2952 bitmap_count_one_with_offset(block->bmap, start, npages); 2953 bitmap_clear(block->bmap, start, npages); 2954 qemu_mutex_unlock(&ram_state->bitmap_mutex); 2955 } 2956 } 2957 2958 #define MAPPED_RAM_HDR_VERSION 1 2959 struct MappedRamHeader { 2960 uint32_t version; 2961 /* 2962 * The target's page size, so we know how many pages are in the 2963 * bitmap. 2964 */ 2965 uint64_t page_size; 2966 /* 2967 * The offset in the migration file where the pages bitmap is 2968 * stored. 2969 */ 2970 uint64_t bitmap_offset; 2971 /* 2972 * The offset in the migration file where the actual pages (data) 2973 * are stored. 2974 */ 2975 uint64_t pages_offset; 2976 } QEMU_PACKED; 2977 typedef struct MappedRamHeader MappedRamHeader; 2978 2979 static void mapped_ram_setup_ramblock(QEMUFile *file, RAMBlock *block) 2980 { 2981 g_autofree MappedRamHeader *header = NULL; 2982 size_t header_size, bitmap_size; 2983 long num_pages; 2984 2985 header = g_new0(MappedRamHeader, 1); 2986 header_size = sizeof(MappedRamHeader); 2987 2988 num_pages = block->used_length >> TARGET_PAGE_BITS; 2989 bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long); 2990 2991 /* 2992 * Save the file offsets of where the bitmap and the pages should 2993 * go as they are written at the end of migration and during the 2994 * iterative phase, respectively.
2995 */ 2996 block->bitmap_offset = qemu_get_offset(file) + header_size; 2997 block->pages_offset = ROUND_UP(block->bitmap_offset + 2998 bitmap_size, 2999 MAPPED_RAM_FILE_OFFSET_ALIGNMENT); 3000 3001 header->version = cpu_to_be32(MAPPED_RAM_HDR_VERSION); 3002 header->page_size = cpu_to_be64(TARGET_PAGE_SIZE); 3003 header->bitmap_offset = cpu_to_be64(block->bitmap_offset); 3004 header->pages_offset = cpu_to_be64(block->pages_offset); 3005 3006 qemu_put_buffer(file, (uint8_t *) header, header_size); 3007 3008 /* prepare offset for next ramblock */ 3009 qemu_set_offset(file, block->pages_offset + block->used_length, SEEK_SET); 3010 } 3011 3012 static bool mapped_ram_read_header(QEMUFile *file, MappedRamHeader *header, 3013 Error **errp) 3014 { 3015 size_t ret, header_size = sizeof(MappedRamHeader); 3016 3017 ret = qemu_get_buffer(file, (uint8_t *)header, header_size); 3018 if (ret != header_size) { 3019 error_setg(errp, "Could not read whole mapped-ram migration header " 3020 "(expected %zd, got %zd bytes)", header_size, ret); 3021 return false; 3022 } 3023 3024 /* migration stream is big-endian */ 3025 header->version = be32_to_cpu(header->version); 3026 3027 if (header->version > MAPPED_RAM_HDR_VERSION) { 3028 error_setg(errp, "Migration mapped-ram capability version not " 3029 "supported (expected <= %d, got %d)", MAPPED_RAM_HDR_VERSION, 3030 header->version); 3031 return false; 3032 } 3033 3034 header->page_size = be64_to_cpu(header->page_size); 3035 header->bitmap_offset = be64_to_cpu(header->bitmap_offset); 3036 header->pages_offset = be64_to_cpu(header->pages_offset); 3037 3038 return true; 3039 } 3040 3041 /* 3042 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has 3043 * long-running RCU critical section. When rcu-reclaims in the code 3044 * start to become numerous it will be necessary to reduce the 3045 * granularity of these critical sections. 3046 */ 3047 3048 /** 3049 * ram_save_setup: Setup RAM for migration 3050 * 3051 * Returns zero to indicate success and negative for error 3052 * 3053 * @f: QEMUFile where to send the data 3054 * @opaque: RAMState pointer 3055 */ 3056 static int ram_save_setup(QEMUFile *f, void *opaque) 3057 { 3058 RAMState **rsp = opaque; 3059 RAMBlock *block; 3060 int ret, max_hg_page_size; 3061 3062 if (compress_threads_save_setup()) { 3063 return -1; 3064 } 3065 3066 /* migration has already setup the bitmap, reuse it. */ 3067 if (!migration_in_colo_state()) { 3068 if (ram_init_all(rsp) != 0) { 3069 compress_threads_save_cleanup(); 3070 return -1; 3071 } 3072 } 3073 (*rsp)->pss[RAM_CHANNEL_PRECOPY].pss_channel = f; 3074 3075 /* 3076 * ??? Mirrors the previous value of qemu_host_page_size, 3077 * but is this really what was intended for the migration? 
3078 */ 3079 max_hg_page_size = MAX(qemu_real_host_page_size(), TARGET_PAGE_SIZE); 3080 3081 WITH_RCU_READ_LOCK_GUARD() { 3082 qemu_put_be64(f, ram_bytes_total_with_ignored() 3083 | RAM_SAVE_FLAG_MEM_SIZE); 3084 3085 RAMBLOCK_FOREACH_MIGRATABLE(block) { 3086 qemu_put_byte(f, strlen(block->idstr)); 3087 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr)); 3088 qemu_put_be64(f, block->used_length); 3089 if (migrate_postcopy_ram() && 3090 block->page_size != max_hg_page_size) { 3091 qemu_put_be64(f, block->page_size); 3092 } 3093 if (migrate_ignore_shared()) { 3094 qemu_put_be64(f, block->mr->addr); 3095 } 3096 3097 if (migrate_mapped_ram()) { 3098 mapped_ram_setup_ramblock(f, block); 3099 } 3100 } 3101 } 3102 3103 ret = rdma_registration_start(f, RAM_CONTROL_SETUP); 3104 if (ret < 0) { 3105 qemu_file_set_error(f, ret); 3106 return ret; 3107 } 3108 3109 ret = rdma_registration_stop(f, RAM_CONTROL_SETUP); 3110 if (ret < 0) { 3111 qemu_file_set_error(f, ret); 3112 return ret; 3113 } 3114 3115 migration_ops = g_malloc0(sizeof(MigrationOps)); 3116 migration_ops->ram_save_target_page = ram_save_target_page_legacy; 3117 3118 bql_unlock(); 3119 ret = multifd_send_sync_main(); 3120 bql_lock(); 3121 if (ret < 0) { 3122 return ret; 3123 } 3124 3125 if (migrate_multifd() && !migrate_multifd_flush_after_each_section() 3126 && !migrate_mapped_ram()) { 3127 qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); 3128 } 3129 3130 qemu_put_be64(f, RAM_SAVE_FLAG_EOS); 3131 return qemu_fflush(f); 3132 } 3133 3134 static void ram_save_file_bmap(QEMUFile *f) 3135 { 3136 RAMBlock *block; 3137 3138 RAMBLOCK_FOREACH_MIGRATABLE(block) { 3139 long num_pages = block->used_length >> TARGET_PAGE_BITS; 3140 long bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long); 3141 3142 qemu_put_buffer_at(f, (uint8_t *)block->file_bmap, bitmap_size, 3143 block->bitmap_offset); 3144 ram_transferred_add(bitmap_size); 3145 3146 /* 3147 * Free the bitmap here to catch any synchronization issues 3148 * with multifd channels. No channels should be sending pages 3149 * after we've written the bitmap to file. 3150 */ 3151 g_free(block->file_bmap); 3152 block->file_bmap = NULL; 3153 } 3154 } 3155 3156 void ramblock_set_file_bmap_atomic(RAMBlock *block, ram_addr_t offset, bool set) 3157 { 3158 if (set) { 3159 set_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap); 3160 } else { 3161 clear_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap); 3162 } 3163 } 3164 3165 /** 3166 * ram_save_iterate: iterative stage for migration 3167 * 3168 * Returns zero to indicate success and negative for error 3169 * 3170 * @f: QEMUFile where to send the data 3171 * @opaque: RAMState pointer 3172 */ 3173 static int ram_save_iterate(QEMUFile *f, void *opaque) 3174 { 3175 RAMState **temp = opaque; 3176 RAMState *rs = *temp; 3177 int ret = 0; 3178 int i; 3179 int64_t t0; 3180 int done = 0; 3181 3182 if (blk_mig_bulk_active()) { 3183 /* Avoid transferring ram during bulk phase of block migration as 3184 * the bulk phase will usually take a long time and transferring 3185 * ram updates during that time is pointless. */ 3186 goto out; 3187 } 3188 3189 /* 3190 * We'll take this lock a little bit long, but it's okay for two reasons. 3191 * Firstly, the only possible other thread to take it is who calls 3192 * qemu_guest_free_page_hint(), which should be rare; secondly, see 3193 * MAX_WAIT (if curious, further see commit 4508bd9ed8053ce) below, which 3194 * guarantees that we'll at least released it in a regular basis. 
3195 */ 3196 WITH_QEMU_LOCK_GUARD(&rs->bitmap_mutex) { 3197 WITH_RCU_READ_LOCK_GUARD() { 3198 if (ram_list.version != rs->last_version) { 3199 ram_state_reset(rs); 3200 } 3201 3202 /* Read version before ram_list.blocks */ 3203 smp_rmb(); 3204 3205 ret = rdma_registration_start(f, RAM_CONTROL_ROUND); 3206 if (ret < 0) { 3207 qemu_file_set_error(f, ret); 3208 goto out; 3209 } 3210 3211 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 3212 i = 0; 3213 while ((ret = migration_rate_exceeded(f)) == 0 || 3214 postcopy_has_request(rs)) { 3215 int pages; 3216 3217 if (qemu_file_get_error(f)) { 3218 break; 3219 } 3220 3221 pages = ram_find_and_save_block(rs); 3222 /* no more pages to sent */ 3223 if (pages == 0) { 3224 done = 1; 3225 break; 3226 } 3227 3228 if (pages < 0) { 3229 qemu_file_set_error(f, pages); 3230 break; 3231 } 3232 3233 rs->target_page_count += pages; 3234 3235 /* 3236 * During postcopy, it is necessary to make sure one whole host 3237 * page is sent in one chunk. 3238 */ 3239 if (migrate_postcopy_ram()) { 3240 compress_flush_data(); 3241 } 3242 3243 /* 3244 * we want to check in the 1st loop, just in case it was the 1st 3245 * time and we had to sync the dirty bitmap. 3246 * qemu_clock_get_ns() is a bit expensive, so we only check each 3247 * some iterations 3248 */ 3249 if ((i & 63) == 0) { 3250 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 3251 1000000; 3252 if (t1 > MAX_WAIT) { 3253 trace_ram_save_iterate_big_wait(t1, i); 3254 break; 3255 } 3256 } 3257 i++; 3258 } 3259 } 3260 } 3261 3262 /* 3263 * Must occur before EOS (or any QEMUFile operation) 3264 * because of RDMA protocol. 3265 */ 3266 ret = rdma_registration_stop(f, RAM_CONTROL_ROUND); 3267 if (ret < 0) { 3268 qemu_file_set_error(f, ret); 3269 } 3270 3271 out: 3272 if (ret >= 0 3273 && migration_is_setup_or_active()) { 3274 if (migrate_multifd() && migrate_multifd_flush_after_each_section() && 3275 !migrate_mapped_ram()) { 3276 ret = multifd_send_sync_main(); 3277 if (ret < 0) { 3278 return ret; 3279 } 3280 } 3281 3282 qemu_put_be64(f, RAM_SAVE_FLAG_EOS); 3283 ram_transferred_add(8); 3284 ret = qemu_fflush(f); 3285 } 3286 if (ret < 0) { 3287 return ret; 3288 } 3289 3290 return done; 3291 } 3292 3293 /** 3294 * ram_save_complete: function called to send the remaining amount of ram 3295 * 3296 * Returns zero to indicate success or negative on error 3297 * 3298 * Called with the BQL 3299 * 3300 * @f: QEMUFile where to send the data 3301 * @opaque: RAMState pointer 3302 */ 3303 static int ram_save_complete(QEMUFile *f, void *opaque) 3304 { 3305 RAMState **temp = opaque; 3306 RAMState *rs = *temp; 3307 int ret = 0; 3308 3309 rs->last_stage = !migration_in_colo_state(); 3310 3311 WITH_RCU_READ_LOCK_GUARD() { 3312 if (!migration_in_postcopy()) { 3313 migration_bitmap_sync_precopy(rs, true); 3314 } 3315 3316 ret = rdma_registration_start(f, RAM_CONTROL_FINISH); 3317 if (ret < 0) { 3318 qemu_file_set_error(f, ret); 3319 return ret; 3320 } 3321 3322 /* try transferring iterative blocks of memory */ 3323 3324 /* flush all remaining blocks regardless of rate limiting */ 3325 qemu_mutex_lock(&rs->bitmap_mutex); 3326 while (true) { 3327 int pages; 3328 3329 pages = ram_find_and_save_block(rs); 3330 /* no more blocks to sent */ 3331 if (pages == 0) { 3332 break; 3333 } 3334 if (pages < 0) { 3335 qemu_mutex_unlock(&rs->bitmap_mutex); 3336 return pages; 3337 } 3338 } 3339 qemu_mutex_unlock(&rs->bitmap_mutex); 3340 3341 compress_flush_data(); 3342 3343 ret = rdma_registration_stop(f, RAM_CONTROL_FINISH); 3344 if (ret < 0) { 
3345 qemu_file_set_error(f, ret); 3346 return ret; 3347 } 3348 } 3349 3350 ret = multifd_send_sync_main(); 3351 if (ret < 0) { 3352 return ret; 3353 } 3354 3355 if (migrate_mapped_ram()) { 3356 ram_save_file_bmap(f); 3357 3358 if (qemu_file_get_error(f)) { 3359 Error *local_err = NULL; 3360 int err = qemu_file_get_error_obj(f, &local_err); 3361 3362 error_reportf_err(local_err, "Failed to write bitmap to file: "); 3363 return -err; 3364 } 3365 } 3366 3367 if (migrate_multifd() && !migrate_multifd_flush_after_each_section() && 3368 !migrate_mapped_ram()) { 3369 qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); 3370 } 3371 qemu_put_be64(f, RAM_SAVE_FLAG_EOS); 3372 return qemu_fflush(f); 3373 } 3374 3375 static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy, 3376 uint64_t *can_postcopy) 3377 { 3378 RAMState **temp = opaque; 3379 RAMState *rs = *temp; 3380 3381 uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; 3382 3383 if (migrate_postcopy_ram()) { 3384 /* We can do postcopy, and all the data is postcopiable */ 3385 *can_postcopy += remaining_size; 3386 } else { 3387 *must_precopy += remaining_size; 3388 } 3389 } 3390 3391 static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy, 3392 uint64_t *can_postcopy) 3393 { 3394 RAMState **temp = opaque; 3395 RAMState *rs = *temp; 3396 uint64_t remaining_size; 3397 3398 if (!migration_in_postcopy()) { 3399 bql_lock(); 3400 WITH_RCU_READ_LOCK_GUARD() { 3401 migration_bitmap_sync_precopy(rs, false); 3402 } 3403 bql_unlock(); 3404 } 3405 3406 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; 3407 3408 if (migrate_postcopy_ram()) { 3409 /* We can do postcopy, and all the data is postcopiable */ 3410 *can_postcopy += remaining_size; 3411 } else { 3412 *must_precopy += remaining_size; 3413 } 3414 } 3415 3416 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host) 3417 { 3418 unsigned int xh_len; 3419 int xh_flags; 3420 uint8_t *loaded_data; 3421 3422 /* extract RLE header */ 3423 xh_flags = qemu_get_byte(f); 3424 xh_len = qemu_get_be16(f); 3425 3426 if (xh_flags != ENCODING_FLAG_XBZRLE) { 3427 error_report("Failed to load XBZRLE page - wrong compression!"); 3428 return -1; 3429 } 3430 3431 if (xh_len > TARGET_PAGE_SIZE) { 3432 error_report("Failed to load XBZRLE page - len overflow!"); 3433 return -1; 3434 } 3435 loaded_data = XBZRLE.decoded_buf; 3436 /* load data and decode */ 3437 /* it can change loaded_data to point to an internal buffer */ 3438 qemu_get_buffer_in_place(f, &loaded_data, xh_len); 3439 3440 /* decode RLE */ 3441 if (xbzrle_decode_buffer(loaded_data, xh_len, host, 3442 TARGET_PAGE_SIZE) == -1) { 3443 error_report("Failed to load XBZRLE page - decode error!"); 3444 return -1; 3445 } 3446 3447 return 0; 3448 } 3449 3450 /** 3451 * ram_block_from_stream: read a RAMBlock id from the migration stream 3452 * 3453 * Must be called from within a rcu critical section. 3454 * 3455 * Returns a pointer from within the RCU-protected ram_list. 
3456 * 3457 * @mis: the migration incoming state pointer 3458 * @f: QEMUFile where to read the data from 3459 * @flags: Page flags (mostly to see if it's a continuation of previous block) 3460 * @channel: the channel we're using 3461 */ 3462 static inline RAMBlock *ram_block_from_stream(MigrationIncomingState *mis, 3463 QEMUFile *f, int flags, 3464 int channel) 3465 { 3466 RAMBlock *block = mis->last_recv_block[channel]; 3467 char id[256]; 3468 uint8_t len; 3469 3470 if (flags & RAM_SAVE_FLAG_CONTINUE) { 3471 if (!block) { 3472 error_report("Ack, bad migration stream!"); 3473 return NULL; 3474 } 3475 return block; 3476 } 3477 3478 len = qemu_get_byte(f); 3479 qemu_get_buffer(f, (uint8_t *)id, len); 3480 id[len] = 0; 3481 3482 block = qemu_ram_block_by_name(id); 3483 if (!block) { 3484 error_report("Can't find block %s", id); 3485 return NULL; 3486 } 3487 3488 if (migrate_ram_is_ignored(block)) { 3489 error_report("block %s should not be migrated !", id); 3490 return NULL; 3491 } 3492 3493 mis->last_recv_block[channel] = block; 3494 3495 return block; 3496 } 3497 3498 static inline void *host_from_ram_block_offset(RAMBlock *block, 3499 ram_addr_t offset) 3500 { 3501 if (!offset_in_ramblock(block, offset)) { 3502 return NULL; 3503 } 3504 3505 return block->host + offset; 3506 } 3507 3508 static void *host_page_from_ram_block_offset(RAMBlock *block, 3509 ram_addr_t offset) 3510 { 3511 /* Note: Explicitly no check against offset_in_ramblock(). */ 3512 return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block->host + offset), 3513 block->page_size); 3514 } 3515 3516 static ram_addr_t host_page_offset_from_ram_block_offset(RAMBlock *block, 3517 ram_addr_t offset) 3518 { 3519 return ((uintptr_t)block->host + offset) & (block->page_size - 1); 3520 } 3521 3522 void colo_record_bitmap(RAMBlock *block, ram_addr_t *normal, uint32_t pages) 3523 { 3524 qemu_mutex_lock(&ram_state->bitmap_mutex); 3525 for (int i = 0; i < pages; i++) { 3526 ram_addr_t offset = normal[i]; 3527 ram_state->migration_dirty_pages += !test_and_set_bit( 3528 offset >> TARGET_PAGE_BITS, 3529 block->bmap); 3530 } 3531 qemu_mutex_unlock(&ram_state->bitmap_mutex); 3532 } 3533 3534 static inline void *colo_cache_from_block_offset(RAMBlock *block, 3535 ram_addr_t offset, bool record_bitmap) 3536 { 3537 if (!offset_in_ramblock(block, offset)) { 3538 return NULL; 3539 } 3540 if (!block->colo_cache) { 3541 error_report("%s: colo_cache is NULL in block :%s", 3542 __func__, block->idstr); 3543 return NULL; 3544 } 3545 3546 /* 3547 * During colo checkpoint, we need bitmap of these migrated pages. 3548 * It help us to decide which pages in ram cache should be flushed 3549 * into VM's RAM later. 3550 */ 3551 if (record_bitmap) { 3552 colo_record_bitmap(block, &offset, 1); 3553 } 3554 return block->colo_cache + offset; 3555 } 3556 3557 /** 3558 * ram_handle_zero: handle the zero page case 3559 * 3560 * If a page (or a whole RDMA chunk) has been 3561 * determined to be zero, then zap it. 3562 * 3563 * @host: host address for the zero page 3564 * @ch: what the page is filled from. We only support zero 3565 * @size: size of the zero page 3566 */ 3567 void ram_handle_zero(void *host, uint64_t size) 3568 { 3569 if (!buffer_is_zero(host, size)) { 3570 memset(host, 0, size); 3571 } 3572 } 3573 3574 static void colo_init_ram_state(void) 3575 { 3576 ram_state_init(&ram_state); 3577 } 3578 3579 /* 3580 * colo cache: this is for secondary VM, we cache the whole 3581 * memory of the secondary VM, it is need to hold the global lock 3582 * to call this helper. 
3583 */ 3584 int colo_init_ram_cache(void) 3585 { 3586 RAMBlock *block; 3587 3588 WITH_RCU_READ_LOCK_GUARD() { 3589 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3590 block->colo_cache = qemu_anon_ram_alloc(block->used_length, 3591 NULL, false, false); 3592 if (!block->colo_cache) { 3593 error_report("%s: Can't alloc memory for COLO cache of block %s," 3594 "size 0x" RAM_ADDR_FMT, __func__, block->idstr, 3595 block->used_length); 3596 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3597 if (block->colo_cache) { 3598 qemu_anon_ram_free(block->colo_cache, block->used_length); 3599 block->colo_cache = NULL; 3600 } 3601 } 3602 return -errno; 3603 } 3604 if (!machine_dump_guest_core(current_machine)) { 3605 qemu_madvise(block->colo_cache, block->used_length, 3606 QEMU_MADV_DONTDUMP); 3607 } 3608 } 3609 } 3610 3611 /* 3612 * Record the dirty pages that sent by PVM, we use this dirty bitmap together 3613 * with to decide which page in cache should be flushed into SVM's RAM. Here 3614 * we use the same name 'ram_bitmap' as for migration. 3615 */ 3616 if (ram_bytes_total()) { 3617 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3618 unsigned long pages = block->max_length >> TARGET_PAGE_BITS; 3619 block->bmap = bitmap_new(pages); 3620 } 3621 } 3622 3623 colo_init_ram_state(); 3624 return 0; 3625 } 3626 3627 /* TODO: duplicated with ram_init_bitmaps */ 3628 void colo_incoming_start_dirty_log(void) 3629 { 3630 RAMBlock *block = NULL; 3631 /* For memory_global_dirty_log_start below. */ 3632 bql_lock(); 3633 qemu_mutex_lock_ramlist(); 3634 3635 memory_global_dirty_log_sync(false); 3636 WITH_RCU_READ_LOCK_GUARD() { 3637 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3638 ramblock_sync_dirty_bitmap(ram_state, block); 3639 /* Discard this dirty bitmap record */ 3640 bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS); 3641 } 3642 memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION); 3643 } 3644 ram_state->migration_dirty_pages = 0; 3645 qemu_mutex_unlock_ramlist(); 3646 bql_unlock(); 3647 } 3648 3649 /* It is need to hold the global lock to call this helper */ 3650 void colo_release_ram_cache(void) 3651 { 3652 RAMBlock *block; 3653 3654 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION); 3655 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3656 g_free(block->bmap); 3657 block->bmap = NULL; 3658 } 3659 3660 WITH_RCU_READ_LOCK_GUARD() { 3661 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3662 if (block->colo_cache) { 3663 qemu_anon_ram_free(block->colo_cache, block->used_length); 3664 block->colo_cache = NULL; 3665 } 3666 } 3667 } 3668 ram_state_cleanup(&ram_state); 3669 } 3670 3671 /** 3672 * ram_load_setup: Setup RAM for migration incoming side 3673 * 3674 * Returns zero to indicate success and negative for error 3675 * 3676 * @f: QEMUFile where to receive the data 3677 * @opaque: RAMState pointer 3678 */ 3679 static int ram_load_setup(QEMUFile *f, void *opaque) 3680 { 3681 xbzrle_load_setup(); 3682 ramblock_recv_map_init(); 3683 3684 return 0; 3685 } 3686 3687 static int ram_load_cleanup(void *opaque) 3688 { 3689 RAMBlock *rb; 3690 3691 RAMBLOCK_FOREACH_NOT_IGNORED(rb) { 3692 qemu_ram_block_writeback(rb); 3693 } 3694 3695 xbzrle_load_cleanup(); 3696 3697 RAMBLOCK_FOREACH_NOT_IGNORED(rb) { 3698 g_free(rb->receivedmap); 3699 rb->receivedmap = NULL; 3700 } 3701 3702 return 0; 3703 } 3704 3705 /** 3706 * ram_postcopy_incoming_init: allocate postcopy data structures 3707 * 3708 * Returns 0 for success and negative if there was one error 3709 * 3710 * @mis: current migration incoming state 3711 * 3712 * Allocate data structures etc needed by incoming 
migration with 3713 * postcopy-ram. postcopy-ram's similarly names 3714 * postcopy_ram_incoming_init does the work. 3715 */ 3716 int ram_postcopy_incoming_init(MigrationIncomingState *mis) 3717 { 3718 return postcopy_ram_incoming_init(mis); 3719 } 3720 3721 /** 3722 * ram_load_postcopy: load a page in postcopy case 3723 * 3724 * Returns 0 for success or -errno in case of error 3725 * 3726 * Called in postcopy mode by ram_load(). 3727 * rcu_read_lock is taken prior to this being called. 3728 * 3729 * @f: QEMUFile where to send the data 3730 * @channel: the channel to use for loading 3731 */ 3732 int ram_load_postcopy(QEMUFile *f, int channel) 3733 { 3734 int flags = 0, ret = 0; 3735 bool place_needed = false; 3736 bool matches_target_page_size = false; 3737 MigrationIncomingState *mis = migration_incoming_get_current(); 3738 PostcopyTmpPage *tmp_page = &mis->postcopy_tmp_pages[channel]; 3739 3740 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) { 3741 ram_addr_t addr; 3742 void *page_buffer = NULL; 3743 void *place_source = NULL; 3744 RAMBlock *block = NULL; 3745 uint8_t ch; 3746 int len; 3747 3748 addr = qemu_get_be64(f); 3749 3750 /* 3751 * If qemu file error, we should stop here, and then "addr" 3752 * may be invalid 3753 */ 3754 ret = qemu_file_get_error(f); 3755 if (ret) { 3756 break; 3757 } 3758 3759 flags = addr & ~TARGET_PAGE_MASK; 3760 addr &= TARGET_PAGE_MASK; 3761 3762 trace_ram_load_postcopy_loop(channel, (uint64_t)addr, flags); 3763 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE | 3764 RAM_SAVE_FLAG_COMPRESS_PAGE)) { 3765 block = ram_block_from_stream(mis, f, flags, channel); 3766 if (!block) { 3767 ret = -EINVAL; 3768 break; 3769 } 3770 3771 /* 3772 * Relying on used_length is racy and can result in false positives. 3773 * We might place pages beyond used_length in case RAM was shrunk 3774 * while in postcopy, which is fine - trying to place via 3775 * UFFDIO_COPY/UFFDIO_ZEROPAGE will never segfault. 3776 */ 3777 if (!block->host || addr >= block->postcopy_length) { 3778 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr); 3779 ret = -EINVAL; 3780 break; 3781 } 3782 tmp_page->target_pages++; 3783 matches_target_page_size = block->page_size == TARGET_PAGE_SIZE; 3784 /* 3785 * Postcopy requires that we place whole host pages atomically; 3786 * these may be huge pages for RAMBlocks that are backed by 3787 * hugetlbfs. 3788 * To make it atomic, the data is read into a temporary page 3789 * that's moved into place later. 3790 * The migration protocol uses, possibly smaller, target-pages 3791 * however the source ensures it always sends all the components 3792 * of a host page in one chunk. 
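 * For example, with a 2M hugetlbfs-backed block and 4K target pages,
 * 512 consecutive target pages are accumulated in
 * tmp_page->tmp_huge_page and only then placed as one atomic unit via
 * postcopy_place_page() below.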
            page_buffer = tmp_page->tmp_huge_page +
                          host_page_offset_from_ram_block_offset(block, addr);
            /* If all TP are zero then we can optimise the place */
            if (tmp_page->target_pages == 1) {
                tmp_page->host_addr =
                    host_page_from_ram_block_offset(block, addr);
            } else if (tmp_page->host_addr !=
                       host_page_from_ram_block_offset(block, addr)) {
                /* not the 1st TP within the HP */
                error_report("Non-same host page detected on channel %d: "
                             "Target host page %p, received host page %p "
                             "(rb %s offset 0x"RAM_ADDR_FMT" target_pages %d)",
                             channel, tmp_page->host_addr,
                             host_page_from_ram_block_offset(block, addr),
                             block->idstr, addr, tmp_page->target_pages);
                ret = -EINVAL;
                break;
            }

            /*
             * If it's the last part of a host page then we place the host
             * page
             */
            if (tmp_page->target_pages ==
                (block->page_size / TARGET_PAGE_SIZE)) {
                place_needed = true;
            }
            place_source = tmp_page->tmp_huge_page;
        }

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_ZERO:
            ch = qemu_get_byte(f);
            if (ch != 0) {
                error_report("Found a zero page with value %d", ch);
                ret = -EINVAL;
                break;
            }
            /*
             * We can skip setting page_buffer when this is a zero page
             * and (block->page_size == TARGET_PAGE_SIZE).
             */
            if (!matches_target_page_size) {
                memset(page_buffer, ch, TARGET_PAGE_SIZE);
            }
            break;

        case RAM_SAVE_FLAG_PAGE:
            tmp_page->all_zero = false;
            if (!matches_target_page_size) {
                /* For huge pages, we always use temporary buffer */
                qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
            } else {
                /*
                 * For small pages that match the target page size, we
                 * avoid the qemu_file copy.  Instead we directly use
                 * the buffer of QEMUFile to place the page.  Note: we
                 * cannot do any QEMUFile operation before using that
                 * buffer to make sure the buffer is valid when
                 * placing the page.
                 */
                qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
                                         TARGET_PAGE_SIZE);
            }
            break;
        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            tmp_page->all_zero = false;
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
            decompress_data_with_multi_threads(f, page_buffer, len);
            break;
        case RAM_SAVE_FLAG_MULTIFD_FLUSH:
            multifd_recv_sync_main();
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            if (migrate_multifd() &&
                migrate_multifd_flush_after_each_section()) {
                multifd_recv_sync_main();
            }
            break;
        default:
            error_report("Unknown combination of migration flags: 0x%x"
                         " (postcopy mode)", flags);
            ret = -EINVAL;
            break;
        }

        /* Got the whole host page, wait for decompress before placing. */
        if (place_needed) {
            ret |= wait_for_decompress_done();
        }

        /* Detect any possible file errors */
        if (!ret && qemu_file_get_error(f)) {
            ret = qemu_file_get_error(f);
        }

        if (!ret && place_needed) {
            if (tmp_page->all_zero) {
                ret = postcopy_place_page_zero(mis, tmp_page->host_addr, block);
            } else {
                ret = postcopy_place_page(mis, tmp_page->host_addr,
                                          place_source, block);
            }
            place_needed = false;
            postcopy_temp_page_reset(tmp_page);
        }
    }

    return ret;
}

static bool postcopy_is_running(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
}

/*
 * Flush content of RAM cache into SVM's memory.
 * Only flush the pages that have been dirtied by the PVM, the SVM, or both.
 */
void colo_flush_ram_cache(void)
{
    RAMBlock *block = NULL;
    void *dst_host;
    void *src_host;
    unsigned long offset = 0;

    memory_global_dirty_log_sync(false);
    qemu_mutex_lock(&ram_state->bitmap_mutex);
    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            ramblock_sync_dirty_bitmap(ram_state, block);
        }
    }

    trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
    WITH_RCU_READ_LOCK_GUARD() {
        block = QLIST_FIRST_RCU(&ram_list.blocks);

        while (block) {
            unsigned long num = 0;

            offset = colo_bitmap_find_dirty(ram_state, block, offset, &num);
            if (!offset_in_ramblock(block,
                                    ((ram_addr_t)offset) << TARGET_PAGE_BITS)) {
                offset = 0;
                num = 0;
                block = QLIST_NEXT_RCU(block, next);
            } else {
                unsigned long i = 0;

                for (i = 0; i < num; i++) {
                    migration_bitmap_clear_dirty(ram_state, block, offset + i);
                }
                dst_host = block->host
                         + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
                src_host = block->colo_cache
                         + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
                memcpy(dst_host, src_host, TARGET_PAGE_SIZE * num);
                offset += num;
            }
        }
    }
    qemu_mutex_unlock(&ram_state->bitmap_mutex);
    trace_colo_flush_ram_cache_end();
}

static size_t ram_load_multifd_pages(void *host_addr, size_t size,
                                     uint64_t offset)
{
    MultiFDRecvData *data = multifd_get_recv_data();

    data->opaque = host_addr;
    data->file_offset = offset;
    data->size = size;

    if (!multifd_recv()) {
        return 0;
    }

    return size;
}
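
/*
 * Sketch of how the mapped-ram dirty bitmap translates into file reads
 * below (purely illustrative numbers): with 4 KiB target pages, a run of
 * set bits covering pages [8, 12) means 4 * 4 KiB = 16 KiB of data located
 * at block->pages_offset + (8 << TARGET_PAGE_BITS) in the migration file.
 * read_ramblock_mapped_ram() walks such runs with
 * find_first_bit()/find_next_zero_bit() and reads each one in chunks of at
 * most MAPPED_RAM_LOAD_BUF_SIZE, either directly or through the multifd
 * threads.
 */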
static bool read_ramblock_mapped_ram(QEMUFile *f, RAMBlock *block,
                                     long num_pages, unsigned long *bitmap,
                                     Error **errp)
{
    ERRP_GUARD();
    unsigned long set_bit_idx, clear_bit_idx;
    ram_addr_t offset;
    void *host;
    size_t read, unread, size;

    for (set_bit_idx = find_first_bit(bitmap, num_pages);
         set_bit_idx < num_pages;
         set_bit_idx = find_next_bit(bitmap, num_pages, clear_bit_idx + 1)) {

        clear_bit_idx = find_next_zero_bit(bitmap, num_pages, set_bit_idx + 1);

        unread = TARGET_PAGE_SIZE * (clear_bit_idx - set_bit_idx);
        offset = set_bit_idx << TARGET_PAGE_BITS;

        while (unread > 0) {
            host = host_from_ram_block_offset(block, offset);
            if (!host) {
                error_setg(errp, "page outside of ramblock %s range",
                           block->idstr);
                return false;
            }

            size = MIN(unread, MAPPED_RAM_LOAD_BUF_SIZE);

            if (migrate_multifd()) {
                read = ram_load_multifd_pages(host, size,
                                              block->pages_offset + offset);
            } else {
                read = qemu_get_buffer_at(f, host, size,
                                          block->pages_offset + offset);
            }

            if (!read) {
                goto err;
            }
            offset += read;
            unread -= read;
        }
    }

    return true;

err:
    qemu_file_get_error_obj(f, errp);
    error_prepend(errp, "(%s) failed to read page " RAM_ADDR_FMT
                  " from file offset %" PRIx64 ": ", block->idstr, offset,
                  block->pages_offset + offset);
    return false;
}

static void parse_ramblock_mapped_ram(QEMUFile *f, RAMBlock *block,
                                      ram_addr_t length, Error **errp)
{
    g_autofree unsigned long *bitmap = NULL;
    MappedRamHeader header;
    size_t bitmap_size;
    long num_pages;

    if (!mapped_ram_read_header(f, &header, errp)) {
        return;
    }

    block->pages_offset = header.pages_offset;

    /*
     * Check the alignment of the file region that contains pages.  We
     * don't enforce MAPPED_RAM_FILE_OFFSET_ALIGNMENT to allow that
     * value to change in the future.  Do only a sanity check with page
     * size alignment.
     */
    if (!QEMU_IS_ALIGNED(block->pages_offset, TARGET_PAGE_SIZE)) {
        error_setg(errp,
                   "Error reading ramblock %s pages, region has bad alignment",
                   block->idstr);
        return;
    }

    num_pages = length / header.page_size;
    bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long);

    bitmap = g_malloc0(bitmap_size);
    if (qemu_get_buffer_at(f, (uint8_t *)bitmap, bitmap_size,
                           header.bitmap_offset) != bitmap_size) {
        error_setg(errp, "Error reading dirty bitmap");
        return;
    }

    if (!read_ramblock_mapped_ram(f, block, num_pages, bitmap, errp)) {
        return;
    }

    /* Skip pages array */
    qemu_set_offset(f, block->pages_offset + length, SEEK_SET);
}
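
/*
 * For the traditional (non-mapped-ram) stream, the per-ramblock data
 * parsed below may carry optional trailing fields depending on the
 * configuration: a be64 remote page size when postcopy has been advised,
 * postcopy-ram is enabled and the block's page size differs from the
 * normal host page size, and a be64 GPA when the ignore-shared capability
 * is in use.  Both are only sanity-checked against the local values.
 */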
static int parse_ramblock(QEMUFile *f, RAMBlock *block, ram_addr_t length)
{
    int ret = 0;
    /* ADVISE is earlier, it shows the source has the postcopy capability on */
    bool postcopy_advised = migration_incoming_postcopy_advised();
    int max_hg_page_size;
    Error *local_err = NULL;

    assert(block);

    if (migrate_mapped_ram()) {
        parse_ramblock_mapped_ram(f, block, length, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -EINVAL;
        }
        return 0;
    }

    if (!qemu_ram_is_migratable(block)) {
        error_report("block %s should not be migrated!", block->idstr);
        return -EINVAL;
    }

    if (length != block->used_length) {
        ret = qemu_ram_resize(block, length, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return ret;
        }
    }

    /*
     * ??? Mirrors the previous value of qemu_host_page_size,
     * but is this really what was intended for the migration?
     */
    max_hg_page_size = MAX(qemu_real_host_page_size(), TARGET_PAGE_SIZE);

    /* For postcopy we need to check hugepage sizes match */
    if (postcopy_advised && migrate_postcopy_ram() &&
        block->page_size != max_hg_page_size) {
        uint64_t remote_page_size = qemu_get_be64(f);
        if (remote_page_size != block->page_size) {
            error_report("Mismatched RAM page size %s "
                         "(local) %zd != %" PRId64, block->idstr,
                         block->page_size, remote_page_size);
            return -EINVAL;
        }
    }
    if (migrate_ignore_shared()) {
        hwaddr addr = qemu_get_be64(f);
        if (migrate_ram_is_ignored(block) &&
            block->mr->addr != addr) {
            error_report("Mismatched GPAs for block %s "
                         "%" PRId64 " != %" PRId64, block->idstr,
                         (uint64_t)addr, (uint64_t)block->mr->addr);
            return -EINVAL;
        }
    }
    ret = rdma_block_notification_handle(f, block->idstr);
    if (ret < 0) {
        qemu_file_set_error(f, ret);
    }

    return ret;
}

static int parse_ramblocks(QEMUFile *f, ram_addr_t total_ram_bytes)
{
    int ret = 0;

    /* Synchronize RAM block list */
    while (!ret && total_ram_bytes) {
        RAMBlock *block;
        char id[256];
        ram_addr_t length;
        int len = qemu_get_byte(f);

        qemu_get_buffer(f, (uint8_t *)id, len);
        id[len] = 0;
        length = qemu_get_be64(f);

        block = qemu_ram_block_by_name(id);
        if (block) {
            ret = parse_ramblock(f, block, length);
        } else {
            error_report("Unknown ramblock \"%s\", cannot accept "
                         "migration", id);
            ret = -EINVAL;
        }
        total_ram_bytes -= length;
    }

    return ret;
}
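
/*
 * Rough shape of one precopy stream record, as consumed by
 * ram_load_precopy() below (illustrative, not a formal grammar): a be64
 * value whose upper bits are the page offset within a ramblock and whose
 * low bits (below TARGET_PAGE_MASK) carry the RAM_SAVE_FLAG_* bits, e.g.
 * 0x3000 | RAM_SAVE_FLAG_PAGE for a raw page at offset 0x3000.  A
 * RAM_SAVE_FLAG_MEM_SIZE record instead carries the total RAM size and is
 * followed by one (id length, id string, be64 length) triple per ramblock,
 * which is what parse_ramblocks() above unpacks.
 */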
/**
 * ram_load_precopy: load pages in precopy case
 *
 * Returns 0 for success or -errno in case of error
 *
 * Called in precopy mode by ram_load().
 * rcu_read_lock is taken prior to this being called.
 *
 * @f: QEMUFile where to receive the data
 */
static int ram_load_precopy(QEMUFile *f)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    int flags = 0, ret = 0, invalid_flags = 0, len = 0, i = 0;

    if (!migrate_compress()) {
        invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
    }

    if (migrate_mapped_ram()) {
        invalid_flags |= (RAM_SAVE_FLAG_HOOK | RAM_SAVE_FLAG_MULTIFD_FLUSH |
                          RAM_SAVE_FLAG_PAGE | RAM_SAVE_FLAG_XBZRLE |
                          RAM_SAVE_FLAG_ZERO);
    }

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr;
        void *host = NULL, *host_bak = NULL;
        uint8_t ch;

        /*
         * Yield periodically to let the main loop run, but an iteration
         * of the main loop is expensive, so only do it every so many
         * iterations.
         */
        if ((i & 32767) == 0 && qemu_in_coroutine()) {
            aio_co_schedule(qemu_get_current_aio_context(),
                            qemu_coroutine_self());
            qemu_coroutine_yield();
        }
        i++;

        addr = qemu_get_be64(f);
        ret = qemu_file_get_error(f);
        if (ret) {
            error_report("Getting RAM address failed");
            break;
        }

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & invalid_flags) {
            error_report("Unexpected RAM flags: %d", flags & invalid_flags);

            if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
                error_report("Received an unexpected compressed page");
            }

            ret = -EINVAL;
            break;
        }

        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
            RAMBlock *block = ram_block_from_stream(mis, f, flags,
                                                    RAM_CHANNEL_PRECOPY);

            host = host_from_ram_block_offset(block, addr);
            /*
             * After entering the COLO stage, we should not load pages
             * into the SVM's memory directly; we put them into colo_cache
             * first.
             * NOTE: We need to keep a copy of the SVM's RAM in colo_cache.
             * Previously, all of this memory was copied during the COLO
             * preparing stage while the VM was stopped, which is
             * time-consuming.  Here we optimize it by backing up every
             * page during the migration process while COLO is enabled.
             * Although this affects the speed of the migration, it clearly
             * reduces the downtime caused by backing up all of the SVM's
             * memory in the COLO preparing stage.
             */
            if (migration_incoming_colo_enabled()) {
                if (migration_incoming_in_colo_state()) {
                    /* In COLO stage, put all pages into cache temporarily */
                    host = colo_cache_from_block_offset(block, addr, true);
                } else {
                    /*
                     * In migration stage but before COLO stage,
                     * put all pages into both cache and SVM's memory.
                     */
4269 */ 4270 host_bak = colo_cache_from_block_offset(block, addr, false); 4271 } 4272 } 4273 if (!host) { 4274 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr); 4275 ret = -EINVAL; 4276 break; 4277 } 4278 if (!migration_incoming_in_colo_state()) { 4279 ramblock_recv_bitmap_set(block, host); 4280 } 4281 4282 trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host); 4283 } 4284 4285 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) { 4286 case RAM_SAVE_FLAG_MEM_SIZE: 4287 ret = parse_ramblocks(f, addr); 4288 /* 4289 * For mapped-ram migration (to a file) using multifd, we sync 4290 * once and for all here to make sure all tasks we queued to 4291 * multifd threads are completed, so that all the ramblocks 4292 * (including all the guest memory pages within) are fully 4293 * loaded after this sync returns. 4294 */ 4295 if (migrate_mapped_ram()) { 4296 multifd_recv_sync_main(); 4297 } 4298 break; 4299 4300 case RAM_SAVE_FLAG_ZERO: 4301 ch = qemu_get_byte(f); 4302 if (ch != 0) { 4303 error_report("Found a zero page with value %d", ch); 4304 ret = -EINVAL; 4305 break; 4306 } 4307 ram_handle_zero(host, TARGET_PAGE_SIZE); 4308 break; 4309 4310 case RAM_SAVE_FLAG_PAGE: 4311 qemu_get_buffer(f, host, TARGET_PAGE_SIZE); 4312 break; 4313 4314 case RAM_SAVE_FLAG_COMPRESS_PAGE: 4315 len = qemu_get_be32(f); 4316 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) { 4317 error_report("Invalid compressed data length: %d", len); 4318 ret = -EINVAL; 4319 break; 4320 } 4321 decompress_data_with_multi_threads(f, host, len); 4322 break; 4323 4324 case RAM_SAVE_FLAG_XBZRLE: 4325 if (load_xbzrle(f, addr, host) < 0) { 4326 error_report("Failed to decompress XBZRLE page at " 4327 RAM_ADDR_FMT, addr); 4328 ret = -EINVAL; 4329 break; 4330 } 4331 break; 4332 case RAM_SAVE_FLAG_MULTIFD_FLUSH: 4333 multifd_recv_sync_main(); 4334 break; 4335 case RAM_SAVE_FLAG_EOS: 4336 /* normal exit */ 4337 if (migrate_multifd() && 4338 migrate_multifd_flush_after_each_section() && 4339 /* 4340 * Mapped-ram migration flushes once and for all after 4341 * parsing ramblocks. Always ignore EOS for it. 4342 */ 4343 !migrate_mapped_ram()) { 4344 multifd_recv_sync_main(); 4345 } 4346 break; 4347 case RAM_SAVE_FLAG_HOOK: 4348 ret = rdma_registration_handle(f); 4349 if (ret < 0) { 4350 qemu_file_set_error(f, ret); 4351 } 4352 break; 4353 default: 4354 error_report("Unknown combination of migration flags: 0x%x", flags); 4355 ret = -EINVAL; 4356 } 4357 if (!ret) { 4358 ret = qemu_file_get_error(f); 4359 } 4360 if (!ret && host_bak) { 4361 memcpy(host_bak, host, TARGET_PAGE_SIZE); 4362 } 4363 } 4364 4365 ret |= wait_for_decompress_done(); 4366 return ret; 4367 } 4368 4369 static int ram_load(QEMUFile *f, void *opaque, int version_id) 4370 { 4371 int ret = 0; 4372 static uint64_t seq_iter; 4373 /* 4374 * If system is running in postcopy mode, page inserts to host memory must 4375 * be atomic 4376 */ 4377 bool postcopy_running = postcopy_is_running(); 4378 4379 seq_iter++; 4380 4381 if (version_id != 4) { 4382 return -EINVAL; 4383 } 4384 4385 /* 4386 * This RCU critical section can be very long running. 4387 * When RCU reclaims in the code start to become numerous, 4388 * it will be necessary to reduce the granularity of this 4389 * critical section. 4390 */ 4391 WITH_RCU_READ_LOCK_GUARD() { 4392 if (postcopy_running) { 4393 /* 4394 * Note! Here RAM_CHANNEL_PRECOPY is the precopy channel of 4395 * postcopy migration, we have another RAM_CHANNEL_POSTCOPY to 4396 * service fast page faults. 
             */
            ret = ram_load_postcopy(f, RAM_CHANNEL_PRECOPY);
        } else {
            ret = ram_load_precopy(f);
        }
    }
    trace_ram_load_complete(ret, seq_iter);

    return ret;
}

static bool ram_has_postcopy(void *opaque)
{
    RAMBlock *rb;
    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        if (ramblock_is_pmem(rb)) {
            info_report("Block: %s, host: %p is nvdimm memory, postcopy "
                        "is not supported now!", rb->idstr, rb->host);
            return false;
        }
    }

    return migrate_postcopy_ram();
}

/* Sync all the dirty bitmap with destination VM. */
static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
{
    RAMBlock *block;
    QEMUFile *file = s->to_dst_file;

    trace_ram_dirty_bitmap_sync_start();

    qatomic_set(&rs->postcopy_bmap_sync_requested, 0);
    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        qemu_savevm_send_recv_bitmap(file, block->idstr);
        trace_ram_dirty_bitmap_request(block->idstr);
        qatomic_inc(&rs->postcopy_bmap_sync_requested);
    }

    trace_ram_dirty_bitmap_sync_wait();

    /* Wait until all the ramblocks' dirty bitmap synced */
    while (qatomic_read(&rs->postcopy_bmap_sync_requested)) {
        if (migration_rp_wait(s)) {
            return -1;
        }
    }

    trace_ram_dirty_bitmap_sync_complete();

    return 0;
}

/*
 * Read the received bitmap, revert it as the initial dirty bitmap.
 * This is only used when the postcopy migration is paused but wants
 * to resume from a middle point.
 *
 * Returns true if succeeded, false for errors.
 */
bool ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block, Error **errp)
{
    /* from_dst_file is always valid because we're within rp_thread */
    QEMUFile *file = s->rp_state.from_dst_file;
    g_autofree unsigned long *le_bitmap = NULL;
    unsigned long nbits = block->used_length >> TARGET_PAGE_BITS;
    uint64_t local_size = DIV_ROUND_UP(nbits, 8);
    uint64_t size, end_mark;
    RAMState *rs = ram_state;

    trace_ram_dirty_bitmap_reload_begin(block->idstr);

    if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_setg(errp, "Reload bitmap in incorrect state %s",
                   MigrationStatus_str(s->state));
        return false;
    }

    /*
     * Note: see comments in ramblock_recv_bitmap_send() on why we
     * need the endianness conversion, and the padding.
     */
    local_size = ROUND_UP(local_size, 8);

    /* Add paddings */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    size = qemu_get_be64(file);

    /* The size of the bitmap should match our ramblock */
    if (size != local_size) {
        error_setg(errp, "ramblock '%s' bitmap size mismatch (0x%"PRIx64
                   " != 0x%"PRIx64")", block->idstr, size, local_size);
        return false;
    }

    size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
    end_mark = qemu_get_be64(file);

    if (qemu_file_get_error(file) || size != local_size) {
        error_setg(errp, "read bitmap failed for ramblock '%s': "
                   "(size 0x%"PRIx64", got: 0x%"PRIx64")",
                   block->idstr, local_size, size);
        return false;
    }

    if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
        error_setg(errp, "ramblock '%s' end mark incorrect: 0x%"PRIx64,
                   block->idstr, end_mark);
        return false;
    }
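
    /*
     * At this point the whole record has been consumed and validated.  As
     * a rough illustration, what ramblock_recv_bitmap_send() puts on the
     * return path for one ramblock is:
     *
     *   be64 size | bitmap data (little-endian longs, size padded to 8)
     *   | be64 RAMBLOCK_RECV_BITMAP_ENDING
     *
     * which matches the reads above.
     */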
    /*
     * Endianness conversion. We are during postcopy (though paused).
     * The dirty bitmap won't change. We can directly modify it.
     */
    bitmap_from_le(block->bmap, le_bitmap, nbits);

    /*
     * What we received is "received bitmap". Revert it as the initial
     * dirty bitmap for this ramblock.
     */
    bitmap_complement(block->bmap, block->bmap, nbits);

    /* Clear dirty bits of discarded ranges that we don't want to migrate. */
    ramblock_dirty_bitmap_clear_discarded_pages(block);

    /* We'll recalculate migration_dirty_pages in ram_state_resume_prepare(). */
    trace_ram_dirty_bitmap_reload_complete(block->idstr);

    qatomic_dec(&rs->postcopy_bmap_sync_requested);

    /*
     * We succeeded in syncing the bitmap for the current ramblock. Always
     * kick the migration thread to check whether all requested bitmaps are
     * reloaded.  NOTE: it's racy to only kick when requested==0, because
     * we don't know whether the migration thread may still be increasing
     * it.
     */
    migration_rp_kick(s);

    return true;
}

static int ram_resume_prepare(MigrationState *s, void *opaque)
{
    RAMState *rs = *(RAMState **)opaque;
    int ret;

    ret = ram_dirty_bitmap_sync_all(s, rs);
    if (ret) {
        return ret;
    }

    ram_state_resume_prepare(rs, s->to_dst_file);

    return 0;
}

void postcopy_preempt_shutdown_file(MigrationState *s)
{
    qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS);
    qemu_fflush(s->postcopy_qemufile_src);
}

static SaveVMHandlers savevm_ram_handlers = {
    .save_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .has_postcopy = ram_has_postcopy,
    .state_pending_exact = ram_state_pending_exact,
    .state_pending_estimate = ram_state_pending_estimate,
    .load_state = ram_load,
    .save_cleanup = ram_save_cleanup,
    .load_setup = ram_load_setup,
    .load_cleanup = ram_load_cleanup,
    .resume_prepare = ram_resume_prepare,
};
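
/*
 * A brief note on how the pieces above fit together (informational only):
 * ram_mig_init() below registers these handlers under the "ram" section
 * with version 4, which is the only version_id ram_load() accepts.  When a
 * paused postcopy is resumed, .resume_prepare ends up calling
 * ram_dirty_bitmap_sync_all(), and each bitmap that comes back on the
 * return path is applied by ram_dirty_bitmap_reload(), which kicks the
 * migration thread once per ramblock.
 */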
static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
                                      size_t old_size, size_t new_size)
{
    PostcopyState ps = postcopy_state_get();
    ram_addr_t offset;
    RAMBlock *rb = qemu_ram_block_from_host(host, false, &offset);
    Error *err = NULL;

    if (!rb) {
        error_report("RAM block not found");
        return;
    }

    if (migrate_ram_is_ignored(rb)) {
        return;
    }

    if (!migration_is_idle()) {
        /*
         * Precopy code on the source cannot deal with the size of RAM blocks
         * changing at random points in time - especially after sending the
         * RAM block sizes in the migration stream, they must no longer change.
         * Abort and indicate a proper reason.
         */
        error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr);
        migration_cancel(err);
        error_free(err);
    }

    switch (ps) {
    case POSTCOPY_INCOMING_ADVISE:
        /*
         * Update what ram_postcopy_incoming_init()->init_range() does at the
         * time postcopy was advised. Syncing RAM blocks with the source will
         * result in RAM resizes.
         */
        if (old_size < new_size) {
            if (ram_discard_range(rb->idstr, old_size, new_size - old_size)) {
                error_report("RAM block '%s' discard of resized RAM failed",
                             rb->idstr);
            }
        }
        rb->postcopy_length = new_size;
        break;
    case POSTCOPY_INCOMING_NONE:
    case POSTCOPY_INCOMING_RUNNING:
    case POSTCOPY_INCOMING_END:
        /*
         * Once our guest is running, postcopy no longer cares about
         * resizes. When growing, the new memory was not available on the
         * source, so no handler is needed.
         */
        break;
    default:
        error_report("RAM block '%s' resized during postcopy state: %d",
                     rb->idstr, ps);
        exit(-1);
    }
}

static RAMBlockNotifier ram_mig_ram_notifier = {
    .ram_block_resized = ram_mig_ram_block_resized,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
    ram_block_notifier_add(&ram_mig_ram_notifier);
}