/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/madvise.h"
#include "qemu/main-loop.h"
#include "xbzrle.h"
#include "ram-compress.h"
#include "ram.h"
#include "migration.h"
#include "migration-stats.h"
#include "migration/register.h"
#include "migration/misc.h"
#include "qemu-file.h"
#include "postcopy-ram.h"
#include "page_cache.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-types-migration.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/qmp/qerror.h"
#include "trace.h"
#include "exec/ram_addr.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "migration/colo.h"
#include "sysemu/cpu-throttle.h"
#include "savevm.h"
#include "qemu/iov.h"
#include "multifd.h"
#include "sysemu/runstate.h"
#include "rdma.h"
#include "options.h"
#include "sysemu/dirtylimit.h"
#include "sysemu/kvm.h"

#include "hw/boards.h" /* for machine_dump_guest_core() */

#if defined(__linux__)
#include "qemu/userfaultfd.h"
#endif /* defined(__linux__) */

/***********************************************************/
/* ram save/restore */

/*
 * RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS; it
 * worked for pages that were filled with the same char. We switched
 * it to only search for the zero value, and renamed it to avoid
 * confusion with RAM_SAVE_FLAG_COMPRESS_PAGE.
 */
/*
 * RAM_SAVE_FLAG_FULL was obsoleted in 2009, it can be reused now
 */
#define RAM_SAVE_FLAG_FULL     0x01
#define RAM_SAVE_FLAG_ZERO     0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in rdma.h for RAM_SAVE_FLAG_HOOK */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
#define RAM_SAVE_FLAG_MULTIFD_FLUSH    0x200
/* We can't use any flag that is bigger than 0x200 */
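
/*
 * Editor's note (illustration, not part of the original file): these
 * flags travel in the low bits of the page offset word written by
 * save_page_header(). Offsets are target-page aligned, so the low bits
 * are always zero and free to carry flags; 0x200 is the largest usable
 * bit assuming the smallest supported target page size is 1 KiB (0x400).
 * A header word might be assembled like this:
 *
 *     ram_addr_t offset = 42 * TARGET_PAGE_SIZE;
 *     uint64_t header = offset | RAM_SAVE_FLAG_PAGE | RAM_SAVE_FLAG_CONTINUE;
 *     qemu_put_be64(f, header);
 */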

/*
 * mapped-ram migration supports O_DIRECT, so we need to make sure the
 * userspace buffer, the IO operation size and the file offset are
 * aligned according to the underlying device's block size. The first
 * two are already aligned to page size, but we need to add padding to
 * the file to align the offset. We cannot read the block size
 * dynamically because the migration file can be moved between
 * different systems, so use 1M to cover most block sizes and to keep
 * the file offset aligned at page size as well.
 */
#define MAPPED_RAM_FILE_OFFSET_ALIGNMENT 0x100000

/*
 * When doing mapped-ram migration, this is the amount we read from
 * the pages region in the migration file at a time.
 */
#define MAPPED_RAM_LOAD_BUF_SIZE 0x100000

XBZRLECacheStats xbzrle_counters;

/* used by the search for pages to send */
struct PageSearchStatus {
    /* The migration channel used for a specific host page */
    QEMUFile *pss_channel;
    /* Last block from where we have sent data */
    RAMBlock *last_sent_block;
    /* Current block being searched */
    RAMBlock *block;
    /* Current page to search from */
    unsigned long page;
    /* Set once we wrap around */
    bool complete_round;
    /* Whether we're sending a host page */
    bool host_page_sending;
    /* The start/end of current host page.  Invalid if host_page_sending==false */
    unsigned long host_page_start;
    unsigned long host_page_end;
};
typedef struct PageSearchStatus PageSearchStatus;

/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
    /* it will store a page full of zeros */
    uint8_t *zero_target_page;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
} XBZRLE;

static void XBZRLE_cache_lock(void)
{
    if (migrate_xbzrle()) {
        qemu_mutex_lock(&XBZRLE.lock);
    }
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_xbzrle()) {
        qemu_mutex_unlock(&XBZRLE.lock);
    }
}

/**
 * xbzrle_cache_resize: resize the xbzrle cache
 *
 * This function is called from migrate_params_apply in the main
 * thread, possibly while a migration is in progress. A running
 * migration may be using the cache and might finish during this call,
 * hence changes to the cache are protected by XBZRLE.lock.
 *
 * Returns 0 for success or -1 for error
 *
 * @new_size: new cache size
 * @errp: set *errp if the check failed, with reason
 */
int xbzrle_cache_resize(uint64_t new_size, Error **errp)
{
    PageCache *new_cache;
    int64_t ret = 0;

    /* Check for truncation */
    if (new_size != (size_t)new_size) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return -1;
    }

    if (new_size == migrate_xbzrle_cache_size()) {
        /* nothing to do */
        return 0;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
        if (!new_cache) {
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }
out:
    XBZRLE_cache_unlock();
    return ret;
}
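
/*
 * Editor's usage sketch (hypothetical caller, not part of the original
 * file): resizing the cache from a parameter-setting path and reporting
 * the error the QAPI way.
 *
 *     Error *err = NULL;
 *
 *     if (xbzrle_cache_resize(64 * MiB, &err) < 0) {
 *         error_report_err(err);   // prints and frees err
 *     }
 */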

static bool postcopy_preempt_active(void)
{
    return migrate_postcopy_preempt() && migration_in_postcopy();
}

bool migrate_ram_is_ignored(RAMBlock *block)
{
    return !qemu_ram_is_migratable(block) ||
           (migrate_ignore_shared() && qemu_ram_is_shared(block)
            && qemu_ram_is_named_file(block));
}

#undef RAMBLOCK_FOREACH

int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        ret = func(block, opaque);
        if (ret) {
            break;
        }
    }
    return ret;
}

static void ramblock_recv_map_init(void)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        assert(!rb->receivedmap);
        rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
    }
}

int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
{
    return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
                    rb->receivedmap);
}

bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
{
    return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
}

void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
{
    set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
}

void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
                                    size_t nr)
{
    bitmap_set_atomic(rb->receivedmap,
                      ramblock_recv_bitmap_offset(host_addr, rb),
                      nr);
}

void ramblock_recv_bitmap_set_offset(RAMBlock *rb, uint64_t byte_offset)
{
    set_bit_atomic(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
}

#define RAMBLOCK_RECV_BITMAP_ENDING (0x0123456789abcdefULL)

/*
 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
 *
 * Returns the number of bytes sent (>0) on success, or <0 on error.
 */
int64_t ramblock_recv_bitmap_send(QEMUFile *file,
                                  const char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);
    unsigned long *le_bitmap, nbits;
    uint64_t size;

    if (!block) {
        error_report("%s: invalid block name: %s", __func__, block_name);
        return -1;
    }

    nbits = block->postcopy_length >> TARGET_PAGE_BITS;

    /*
     * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
     * machines we may need 4 more bytes for padding (see below
     * comment). So extend it a bit beforehand.
     */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    /*
     * Always use little endian when sending the bitmap. This is
     * required so that the stream stays valid when the source and
     * destination VMs do not use the same endianness. (Note: big
     * endian won't work.)
     */
    bitmap_to_le(le_bitmap, block->receivedmap, nbits);

    /* Size of the bitmap, in bytes */
    size = DIV_ROUND_UP(nbits, 8);

    /*
     * size is always aligned to 8 bytes for 64bit machines, but it
     * may not be true for 32bit machines. We need this padding to
     * make sure the migration can survive even between 32bit and
     * 64bit machines.
     */
    size = ROUND_UP(size, 8);

    qemu_put_be64(file, size);
    qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
    g_free(le_bitmap);
    /*
     * Mark as an end, in case the middle part is screwed up due to
     * some "mysterious" reason.
     */
    qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
    int ret = qemu_fflush(file);
    if (ret) {
        return ret;
    }

    return size + sizeof(size);
}
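
/*
 * Editor's illustration (hypothetical receiver, mirroring the format
 * documented above; the real loading code lives elsewhere):
 *
 *     uint64_t size = qemu_get_be64(file);       // padded bitmap size
 *     g_autofree uint8_t *buf = g_malloc(size);
 *
 *     qemu_get_buffer(file, buf, size);          // little-endian bitmap
 *     if (qemu_get_be64(file) != RAMBLOCK_RECV_BITMAP_ENDING) {
 *         // stream corrupted in the middle; bail out
 *     }
 */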

/*
 * An outstanding page request, on the source, having been received
 * and queued
 */
struct RAMSrcPageRequest {
    RAMBlock *rb;
    hwaddr    offset;
    hwaddr    len;

    QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
};

/* State of RAM for migration */
struct RAMState {
    /*
     * PageSearchStatus structures for the channels when sending pages.
     * Protected by the bitmap_mutex.
     */
    PageSearchStatus pss[RAM_CHANNEL_MAX];
    /* UFFD file descriptor, used in 'write-tracking' migration */
    int uffdio_fd;
    /* total ram size in bytes */
    uint64_t ram_bytes_total;
    /* Last block that we have visited searching for dirty pages */
    RAMBlock *last_seen_block;
    /* Last dirty target page we have sent */
    ram_addr_t last_page;
    /* last ram version we have seen */
    uint32_t last_version;
    /* How many times we have dirtied too many pages */
    int dirty_rate_high_cnt;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
    /* bytes transferred at start_time */
    uint64_t bytes_xfer_prev;
    /* number of dirty pages since start_time */
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;
    /* Amount of xbzrle pages since the beginning of the period */
    uint64_t xbzrle_pages_prev;
    /* Amount of xbzrle encoded bytes since the beginning of the period */
    uint64_t xbzrle_bytes_prev;
    /* Are we really using XBZRLE (e.g., after the first round). */
    bool xbzrle_started;
    /* Are we on the last stage of migration */
    bool last_stage;

    /* total handled target pages at the beginning of period */
    uint64_t target_page_count_prev;
    /* total handled target pages since start */
    uint64_t target_page_count;
    /* number of dirty bits in the bitmap */
    uint64_t migration_dirty_pages;
    /*
     * Protects:
     * - dirty/clear bitmap
     * - migration_dirty_pages
     * - pss structures
     */
    QemuMutex bitmap_mutex;
    /* The RAMBlock used in the last src_page_requests */
    RAMBlock *last_req_rb;
    /* Queue of outstanding page requests from the destination */
    QemuMutex src_page_req_mutex;
    QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;

    /*
     * This is only used when postcopy is in recovery phase, to communicate
     * between the migration thread and the return path thread on dirty
     * bitmap synchronizations. This field is unused in other stages of
     * RAM migration.
     */
    unsigned int postcopy_bmap_sync_requested;
};
typedef struct RAMState RAMState;

static RAMState *ram_state;

static NotifierWithReturnList precopy_notifier_list;

/* Whether postcopy has queued requests? */
static bool postcopy_has_request(RAMState *rs)
{
    return !QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests);
}

void precopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&precopy_notifier_list);
}

void precopy_add_notifier(NotifierWithReturn *n)
{
    notifier_with_return_list_add(&precopy_notifier_list, n);
}

void precopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int precopy_notify(PrecopyNotifyReason reason, Error **errp)
{
    PrecopyNotifyData pnd;
    pnd.reason = reason;

    return notifier_with_return_list_notify(&precopy_notifier_list, &pnd, errp);
}

uint64_t ram_bytes_remaining(void)
{
    return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
                       0;
}

void ram_transferred_add(uint64_t bytes)
{
    if (runstate_is_running()) {
        stat64_add(&mig_stats.precopy_bytes, bytes);
    } else if (migration_in_postcopy()) {
        stat64_add(&mig_stats.postcopy_bytes, bytes);
    } else {
        stat64_add(&mig_stats.downtime_bytes, bytes);
    }
}

struct MigrationOps {
    int (*ram_save_target_page)(RAMState *rs, PageSearchStatus *pss);
};
typedef struct MigrationOps MigrationOps;

MigrationOps *migration_ops;

static int ram_save_host_page_urgent(PageSearchStatus *pss);

/* NOTE: page is the PFN not real ram_addr_t. */
static void pss_init(PageSearchStatus *pss, RAMBlock *rb, ram_addr_t page)
{
    pss->block = rb;
    pss->page = page;
    pss->complete_round = false;
}

/*
 * Check whether two PSSs are actively sending the same page.  Return true
 * if it is, false otherwise.
 */
static bool pss_overlap(PageSearchStatus *pss1, PageSearchStatus *pss2)
{
    return pss1->host_page_sending && pss2->host_page_sending &&
           (pss1->host_page_start == pss2->host_page_start);
}

/**
 * save_page_header: write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns the number of bytes written
 *
 * @pss: current PSS channel status
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(PageSearchStatus *pss, QEMUFile *f,
                               RAMBlock *block, ram_addr_t offset)
{
    size_t size, len;
    bool same_block = (block == pss->last_sent_block);

    if (same_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    qemu_put_be64(f, offset);
    size = 8;

    if (!same_block) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
        pss->last_sent_block = block;
    }
    return size;
}
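
/*
 * Editor's note (illustration, not part of the original file): on the
 * wire, a page header produced by save_page_header() has one of two
 * shapes:
 *
 *     new block:   be64(offset | flags) + u8 idstr_len + idstr bytes
 *     same block:  be64(offset | flags | RAM_SAVE_FLAG_CONTINUE)
 *
 * The RAM_SAVE_FLAG_CONTINUE trick is what lets the stream avoid
 * resending the block name for every page of the same RAMBlock.
 */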

/**
 * mig_throttle_guest_down: throttle down the guest
 *
 * Reduce the amount of guest CPU execution to hopefully slow down memory
 * writes. If the guest dirty memory rate is reduced below the rate at
 * which we can transfer pages to the destination then we should be
 * able to complete migration. Some workloads dirty memory way too
 * fast and will not effectively converge, even with auto-converge.
 */
static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
                                    uint64_t bytes_dirty_threshold)
{
    uint64_t pct_initial = migrate_cpu_throttle_initial();
    uint64_t pct_increment = migrate_cpu_throttle_increment();
    bool pct_tailslow = migrate_cpu_throttle_tailslow();
    int pct_max = migrate_max_cpu_throttle();

    uint64_t throttle_now = cpu_throttle_get_percentage();
    uint64_t cpu_now, cpu_ideal, throttle_inc;

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        if (!pct_tailslow) {
            throttle_inc = pct_increment;
        } else {
            /* Compute the ideal CPU percentage used by the guest, which may
             * make the dirty rate match the dirty rate threshold. */
            cpu_now = 100 - throttle_now;
            cpu_ideal = cpu_now * (bytes_dirty_threshold * 1.0 /
                                   bytes_dirty_period);
            throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment);
        }
        cpu_throttle_set(MIN(throttle_now + throttle_inc, pct_max));
    }
}
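
/*
 * Editor's worked example (illustration, numbers made up): with
 * tailslow enabled, suppose the guest currently runs under
 * throttle_now = 40%, so cpu_now = 60. If it dirtied 200 MB in the
 * last period against a threshold of 150 MB, then
 *
 *     cpu_ideal    = 60 * (150.0 / 200) = 45
 *     throttle_inc = MIN(60 - 45, pct_increment) = MIN(15, pct_increment)
 *
 * i.e. the throttle is raised only as far as the dirty-rate overshoot
 * suggests, instead of by the full fixed increment.
 */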

void mig_throttle_counter_reset(void)
{
    RAMState *rs = ram_state;

    rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    rs->num_dirty_pages_period = 0;
    rs->bytes_xfer_prev = migration_transferred_bytes();
}

/**
 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
 *
 * @current_addr: address for the zero page
 *
 * Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
                 stat64_get(&mig_stats.dirty_sync_count));
}

#define ENCODING_FLAG_XBZRLE 0x1

/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @rs: current RAM state
 * @pss: current PSS channel
 * @current_data: pointer to the address of the page contents
 * @current_addr: addr of the page
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss,
                            uint8_t **current_data, ram_addr_t current_addr,
                            RAMBlock *block, ram_addr_t offset)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;
    QEMUFile *file = pss->pss_channel;
    uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);

    if (!cache_is_cached(XBZRLE.cache, current_addr, generation)) {
        xbzrle_counters.cache_miss++;
        if (!rs->last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             generation) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    /*
     * Reaching here means the page has hit the xbzrle cache, no matter what
     * encoding result it is (normal encoding, overflow or skipping the page),
     * count the page as encoded. This is used to calculate the encoding rate.
     *
     * Example: 2 pages (8KB) being encoded, first page encoding generates 2KB,
     * 2nd page turns out to be skipped (i.e. no new bytes written to the
     * page), the overall encoding rate will be 8KB / 2KB = 4, which has the
     * skipped page included. In this way, the encoding rate can tell if the
     * guest page is good for xbzrle encoding.
     */
    xbzrle_counters.pages++;
    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);

    /*
     * Update the cache contents, so that it corresponds to the data
     * sent, in all cases except where we skip the page.
     */
    if (!rs->last_stage && encoded_len != 0) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
        /*
         * In the case where we couldn't compress, ensure that the caller
         * sends the data from the cache, since the guest might have
         * changed the RAM since we copied it.
         */
        *current_data = prev_cached_page;
    }

    if (encoded_len == 0) {
        trace_save_xbzrle_page_skipping();
        return 0;
    } else if (encoded_len == -1) {
        trace_save_xbzrle_page_overflow();
        xbzrle_counters.overflow++;
        xbzrle_counters.bytes += TARGET_PAGE_SIZE;
        return -1;
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(pss, pss->pss_channel, block,
                                    offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(file, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(file, encoded_len);
    qemu_put_buffer(file, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    /*
     * Like compressed_size (please see update_compress_thread_counts),
     * the xbzrle encoded bytes don't count the 8 byte header with
     * RAM_SAVE_FLAG_CONTINUE.
     */
    xbzrle_counters.bytes += bytes_xbzrle - 8;
    ram_transferred_add(bytes_xbzrle);

    return 1;
}
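
/*
 * Editor's note (illustration, not part of the original file): the
 * XBZRLE record emitted above is laid out as
 *
 *     page header (be64 offset | RAM_SAVE_FLAG_XBZRLE [, block idstr])
 *     u8   ENCODING_FLAG_XBZRLE
 *     be16 encoded_len
 *     encoded_len bytes of xbzrle-encoded delta
 *
 * which is why the accounting adds "encoded_len + 1 + 2" on top of the
 * header size returned by save_page_header().
 */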

/**
 * pss_find_next_dirty: find the next dirty page of current ramblock
 *
 * This function updates pss->page to point to the next dirty page index
 * within the ramblock to migrate, or the end of ramblock when nothing
 * found. Note that when pss->host_page_sending==true it means we're in
 * the middle of sending a host page, so we won't look for dirty pages
 * outside the host page boundary.
 *
 * @pss: the current page search status
 */
static void pss_find_next_dirty(PageSearchStatus *pss)
{
    RAMBlock *rb = pss->block;
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;

    if (migrate_ram_is_ignored(rb)) {
        /* Points directly to the end, so we know no dirty page */
        pss->page = size;
        return;
    }

    /*
     * If during sending a host page, only look for dirty pages within the
     * current host page being sent.
     */
    if (pss->host_page_sending) {
        assert(pss->host_page_end);
        size = MIN(size, pss->host_page_end);
    }

    pss->page = find_next_bit(bitmap, size, pss->page);
}

static void migration_clear_memory_region_dirty_bitmap(RAMBlock *rb,
                                                       unsigned long page)
{
    uint8_t shift;
    hwaddr size, start;

    if (!rb->clear_bmap || !clear_bmap_test_and_clear(rb, page)) {
        return;
    }

    shift = rb->clear_bmap_shift;
    /*
     * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
     * can make things easier sometimes since then start address
     * of the small chunk will always be 64 pages aligned so the
     * bitmap will always be aligned to unsigned long. We should
     * even be able to remove this restriction but I'm simply
     * keeping it.
     */
    assert(shift >= 6);

    size = 1ULL << (TARGET_PAGE_BITS + shift);
    start = QEMU_ALIGN_DOWN((ram_addr_t)page << TARGET_PAGE_BITS, size);
    trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
    memory_region_clear_dirty_bitmap(rb->mr, start, size);
}

static void
migration_clear_memory_region_dirty_bitmap_range(RAMBlock *rb,
                                                 unsigned long start,
                                                 unsigned long npages)
{
    unsigned long i, chunk_pages = 1UL << rb->clear_bmap_shift;
    unsigned long chunk_start = QEMU_ALIGN_DOWN(start, chunk_pages);
    unsigned long chunk_end = QEMU_ALIGN_UP(start + npages, chunk_pages);

    /*
     * Clear pages from start to start + npages - 1, so the end boundary is
     * exclusive.
     */
    for (i = chunk_start; i < chunk_end; i += chunk_pages) {
        migration_clear_memory_region_dirty_bitmap(rb, i);
    }
}
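
/*
 * Editor's worked example (illustration, numbers made up): with 4 KiB
 * target pages and clear_bmap_shift = 6, one clear-bitmap chunk covers
 *
 *     chunk_pages = 1 << 6 = 64 pages    (256 KiB)
 *
 * so clearing pages [100, 200) touches the chunks starting at pages 64,
 * 128 and 192 (chunk_start = ALIGN_DOWN(100, 64) = 64, chunk_end =
 * ALIGN_UP(200, 64) = 256), and each chunk is cleared at most once
 * thanks to clear_bmap_test_and_clear().
 */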

/*
 * colo_bitmap_find_dirty: find contiguous dirty pages from start
 *
 * Returns the page offset within memory region of the start of the contiguous
 * dirty pages
 *
 * @rs: current RAM state
 * @rb: RAMBlock where to search for dirty pages
 * @start: page where we start the search
 * @num: the number of contiguous dirty pages
 */
static inline
unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                     unsigned long start, unsigned long *num)
{
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;
    unsigned long first, next;

    *num = 0;

    if (migrate_ram_is_ignored(rb)) {
        return size;
    }

    first = find_next_bit(bitmap, size, start);
    if (first >= size) {
        return first;
    }
    next = find_next_zero_bit(bitmap, size, first + 1);
    assert(next >= first);
    *num = next - first;
    return first;
}

static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                RAMBlock *rb,
                                                unsigned long page)
{
    bool ret;

    /*
     * Clear dirty bitmap if needed. This _must_ be called before we
     * send any of the pages in the chunk because we need to make sure
     * we can capture further page content changes when we sync dirty
     * log the next time. So as long as we are going to send any of
     * the pages in the chunk we clear the remote dirty bitmap for all.
     * Clearing it earlier won't be a problem, but too late will.
     */
    migration_clear_memory_region_dirty_bitmap(rb, page);

    ret = test_and_clear_bit(page, rb->bmap);
    if (ret) {
        rs->migration_dirty_pages--;
    }

    return ret;
}

static void dirty_bitmap_clear_section(MemoryRegionSection *section,
                                       void *opaque)
{
    const hwaddr offset = section->offset_within_region;
    const hwaddr size = int128_get64(section->size);
    const unsigned long start = offset >> TARGET_PAGE_BITS;
    const unsigned long npages = size >> TARGET_PAGE_BITS;
    RAMBlock *rb = section->mr->ram_block;
    uint64_t *cleared_bits = opaque;

    /*
     * We don't grab ram_state->bitmap_mutex because we expect to run
     * only when starting migration or during postcopy recovery where
     * we don't have concurrent access.
     */
    if (!migration_in_postcopy() && !migrate_background_snapshot()) {
        migration_clear_memory_region_dirty_bitmap_range(rb, start, npages);
    }
    *cleared_bits += bitmap_count_one_with_offset(rb->bmap, start, npages);
    bitmap_clear(rb->bmap, start, npages);
}

/*
 * Exclude all dirty pages from migration that fall into a discarded range as
 * managed by a RamDiscardManager responsible for the mapped memory region of
 * the RAMBlock. Clear the corresponding bits in the dirty bitmaps.
 *
 * Discarded pages ("logically unplugged") have undefined content and must
 * not get migrated, because even reading these pages for migration might
 * result in undesired behavior.
 *
 * Returns the number of cleared bits in the RAMBlock dirty bitmap.
 *
 * Note: The result is only stable while migrating (precopy/postcopy).
 */
static uint64_t ramblock_dirty_bitmap_clear_discarded_pages(RAMBlock *rb)
{
    uint64_t cleared_bits = 0;

    if (rb->mr && rb->bmap && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = 0,
            .size = int128_make64(qemu_ram_get_used_length(rb)),
        };

        ram_discard_manager_replay_discarded(rdm, &section,
                                             dirty_bitmap_clear_section,
                                             &cleared_bits);
    }
    return cleared_bits;
}

/*
 * Check if a host-page aligned page falls into a discarded range as managed by
 * a RamDiscardManager responsible for the mapped memory region of the RAMBlock.
 *
 * Note: The result is only stable while migrating (precopy/postcopy).
 */
bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start)
{
    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = start,
            .size = int128_make64(qemu_ram_pagesize(rb)),
        };

        return !ram_discard_manager_is_populated(rdm, &section);
    }
    return false;
}

/* Called with RCU critical section */
static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
{
    uint64_t new_dirty_pages =
        cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length);

    rs->migration_dirty_pages += new_dirty_pages;
    rs->num_dirty_pages_period += new_dirty_pages;
}

/**
 * ram_pagesize_summary: calculate all the pagesizes of a VM
 *
 * Returns a summary bitmap of the page sizes of all RAMBlocks
 *
 * For VMs with just normal pages this is equivalent to the host page
 * size. If it's got some huge pages then it's the OR of all the
 * different page sizes.
 */
uint64_t ram_pagesize_summary(void)
{
    RAMBlock *block;
    uint64_t summary = 0;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        summary |= block->page_size;
    }

    return summary;
}
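
/*
 * Editor's example (illustration): a VM backed by normal 4 KiB pages
 * plus a 2 MiB hugetlbfs RAMBlock would yield
 *
 *     summary = 0x1000 | 0x200000 = 0x201000
 *
 * i.e. one bit per distinct page size in use.
 */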

uint64_t ram_get_total_transferred_pages(void)
{
    return stat64_get(&mig_stats.normal_pages) +
        stat64_get(&mig_stats.zero_pages) +
        compress_ram_pages() + xbzrle_counters.pages;
}

static void migration_update_rates(RAMState *rs, int64_t end_time)
{
    uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;

    /* calculate period counters */
    stat64_set(&mig_stats.dirty_pages_rate,
               rs->num_dirty_pages_period * 1000 /
               (end_time - rs->time_last_bitmap_sync));

    if (!page_count) {
        return;
    }

    if (migrate_xbzrle()) {
        double encoded_size, unencoded_size;

        xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
            rs->xbzrle_cache_miss_prev) / page_count;
        rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
        unencoded_size = (xbzrle_counters.pages - rs->xbzrle_pages_prev) *
                         TARGET_PAGE_SIZE;
        encoded_size = xbzrle_counters.bytes - rs->xbzrle_bytes_prev;
        if (xbzrle_counters.pages == rs->xbzrle_pages_prev || !encoded_size) {
            xbzrle_counters.encoding_rate = 0;
        } else {
            xbzrle_counters.encoding_rate = unencoded_size / encoded_size;
        }
        rs->xbzrle_pages_prev = xbzrle_counters.pages;
        rs->xbzrle_bytes_prev = xbzrle_counters.bytes;
    }
    compress_update_rates(page_count);
}

/*
 * Enable dirty-limit to throttle down the guest
 */
static void migration_dirty_limit_guest(void)
{
    /*
     * dirty page rate quota for all vCPUs fetched from
     * migration parameter 'vcpu_dirty_limit'
     */
    static int64_t quota_dirtyrate;
    MigrationState *s = migrate_get_current();

    /*
     * If the dirty limit is already enabled and the migration parameter
     * vcpu-dirty-limit is untouched, there is nothing to do.
     */
    if (dirtylimit_in_service() &&
        quota_dirtyrate == s->parameters.vcpu_dirty_limit) {
        return;
    }

    quota_dirtyrate = s->parameters.vcpu_dirty_limit;

    /*
     * Set a quota dirty rate for all vCPUs; note that the second
     * parameter is ignored when setting all vCPUs of the VM.
     */
    qmp_set_vcpu_dirty_limit(false, -1, quota_dirtyrate, NULL);
    trace_migration_dirty_limit_guest(quota_dirtyrate);
}

static void migration_trigger_throttle(RAMState *rs)
{
    uint64_t threshold = migrate_throttle_trigger_threshold();
    uint64_t bytes_xfer_period =
        migration_transferred_bytes() - rs->bytes_xfer_prev;
    uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
    uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;

    /*
     * The following detection logic can be refined later. For now:
     * Check to see if the ratio between dirtied bytes and the approx.
     * amount of bytes that just got transferred since the last time
     * we were in this routine reaches the threshold. If that happens
     * twice, start or increase throttling.
     */
    if ((bytes_dirty_period > bytes_dirty_threshold) &&
        (++rs->dirty_rate_high_cnt >= 2)) {
        rs->dirty_rate_high_cnt = 0;
        if (migrate_auto_converge()) {
            trace_migration_throttle();
            mig_throttle_guest_down(bytes_dirty_period,
                                    bytes_dirty_threshold);
        } else if (migrate_dirty_limit()) {
            migration_dirty_limit_guest();
        }
    }
}
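
/*
 * Editor's worked example (illustration, numbers made up): with a
 * threshold of 50, transferring 1 GiB during the period sets
 * bytes_dirty_threshold = 512 MiB. If the guest also dirtied more than
 * 512 MiB in that period, in two consecutive periods, throttling kicks
 * in (auto-converge) or the dirty limit is applied.
 */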

static void migration_bitmap_sync(RAMState *rs, bool last_stage)
{
    RAMBlock *block;
    int64_t end_time;

    stat64_add(&mig_stats.dirty_sync_count, 1);

    if (!rs->time_last_bitmap_sync) {
        rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    memory_global_dirty_log_sync(last_stage);

    WITH_QEMU_LOCK_GUARD(&rs->bitmap_mutex) {
        WITH_RCU_READ_LOCK_GUARD() {
            RAMBLOCK_FOREACH_NOT_IGNORED(block) {
                ramblock_sync_dirty_bitmap(rs, block);
            }
            stat64_set(&mig_stats.dirty_bytes_last_sync, ram_bytes_remaining());
        }
    }

    memory_global_after_dirty_log_sync();
    trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);

    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > rs->time_last_bitmap_sync + 1000) {
        migration_trigger_throttle(rs);

        migration_update_rates(rs, end_time);

        rs->target_page_count_prev = rs->target_page_count;

        /* reset period counters */
        rs->time_last_bitmap_sync = end_time;
        rs->num_dirty_pages_period = 0;
        rs->bytes_xfer_prev = migration_transferred_bytes();
    }
    if (migrate_events()) {
        uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);
        qapi_event_send_migration_pass(generation);
    }
}

static void migration_bitmap_sync_precopy(RAMState *rs, bool last_stage)
{
    Error *local_err = NULL;

    /*
     * The current notifier usage is just an optimization to migration, so we
     * don't stop the normal migration process in the error case.
     */
    if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
        error_report_err(local_err);
        local_err = NULL;
    }

    migration_bitmap_sync(rs, last_stage);

    if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
        error_report_err(local_err);
    }
}

void ram_release_page(const char *rbname, uint64_t offset)
{
    if (!migrate_release_ram() || !migration_in_postcopy()) {
        return;
    }

    ram_discard_range(rbname, offset, TARGET_PAGE_SIZE);
}

/**
 * save_zero_page: send the zero page to the stream
 *
 * Returns a positive value if the page was handled as a zero page,
 * 0 if it is not a zero page.
 *
 * @rs: current RAM state
 * @pss: current PSS channel
 * @offset: offset inside the block for the page
 */
static int save_zero_page(RAMState *rs, PageSearchStatus *pss,
                          ram_addr_t offset)
{
    uint8_t *p = pss->block->host + offset;
    QEMUFile *file = pss->pss_channel;
    int len = 0;

    if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_NONE) {
        return 0;
    }

    if (!buffer_is_zero(p, TARGET_PAGE_SIZE)) {
        return 0;
    }

    stat64_add(&mig_stats.zero_pages, 1);

    if (migrate_mapped_ram()) {
        /* zero pages are not transferred with mapped-ram */
        clear_bit_atomic(offset >> TARGET_PAGE_BITS, pss->block->file_bmap);
        return 1;
    }

    len += save_page_header(pss, file, pss->block, offset | RAM_SAVE_FLAG_ZERO);
    qemu_put_byte(file, 0);
    len += 1;
    ram_release_page(pss->block->idstr, offset);
    ram_transferred_add(len);

    /*
     * Must let xbzrle know, otherwise a previous (now 0'd) cached
     * page would be stale.
     */
    if (rs->xbzrle_started) {
        XBZRLE_cache_lock();
        xbzrle_cache_zero_page(pss->block->offset + offset);
        XBZRLE_cache_unlock();
    }

    return len;
}

/*
 * @pages: the number of pages written by the control path,
 *        < 0 - error
 *        > 0 - number of pages written
 *
 * Return true if the page has been saved, otherwise false is returned.
 */
static bool control_save_page(PageSearchStatus *pss,
                              ram_addr_t offset, int *pages)
{
    int ret;

    ret = rdma_control_save_page(pss->pss_channel, pss->block->offset, offset,
                                 TARGET_PAGE_SIZE);
    if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
        return false;
    }

    if (ret == RAM_SAVE_CONTROL_DELAYED) {
        *pages = 1;
        return true;
    }
    *pages = ret;
    return true;
}

/*
 * directly send the page to the stream
 *
 * Returns the number of pages written.
 *
 * @pss: current PSS channel
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @buf: the page to be sent
 * @async: send the page asynchronously
 */
static int save_normal_page(PageSearchStatus *pss, RAMBlock *block,
                            ram_addr_t offset, uint8_t *buf, bool async)
{
    QEMUFile *file = pss->pss_channel;

    if (migrate_mapped_ram()) {
        qemu_put_buffer_at(file, buf, TARGET_PAGE_SIZE,
                           block->pages_offset + offset);
        set_bit(offset >> TARGET_PAGE_BITS, block->file_bmap);
    } else {
        ram_transferred_add(save_page_header(pss, pss->pss_channel, block,
                                             offset | RAM_SAVE_FLAG_PAGE));
        if (async) {
            qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE,
                                  migrate_release_ram() &&
                                  migration_in_postcopy());
        } else {
            qemu_put_buffer(file, buf, TARGET_PAGE_SIZE);
        }
    }
    ram_transferred_add(TARGET_PAGE_SIZE);
    stat64_add(&mig_stats.normal_pages, 1);
    return 1;
}

/**
 * ram_save_page: send the given page to the stream
 *
 * Returns the number of pages written.
 *          < 0 - error
 *          >=0 - Number of pages written - this might legally be 0
 *                if xbzrle noticed the page was the same.
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
{
    int pages = -1;
    uint8_t *p;
    bool send_async = true;
    RAMBlock *block = pss->block;
    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
    ram_addr_t current_addr = block->offset + offset;

    p = block->host + offset;
    trace_ram_save_page(block->idstr, (uint64_t)offset, p);

    XBZRLE_cache_lock();
    if (rs->xbzrle_started && !migration_in_postcopy()) {
        pages = save_xbzrle_page(rs, pss, &p, current_addr,
                                 block, offset);
        if (!rs->last_stage) {
            /* Can't send this cached data async, since the cache page
             * might get updated before it gets to the wire
             */
            send_async = false;
        }
    }

    /* XBZRLE overflow or normal page */
    if (pages == -1) {
        pages = save_normal_page(pss, block, offset, p, send_async);
    }

    XBZRLE_cache_unlock();

    return pages;
}

static int ram_save_multifd_page(RAMBlock *block, ram_addr_t offset)
{
    if (!multifd_queue_page(block, offset)) {
        return -1;
    }

    return 1;
}

int compress_send_queued_data(CompressParam *param)
{
    PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_PRECOPY];
    MigrationState *ms = migrate_get_current();
    QEMUFile *file = ms->to_dst_file;
    int len = 0;

    RAMBlock *block = param->block;
    ram_addr_t offset = param->offset;

    if (param->result == RES_NONE) {
        return 0;
    }

    assert(block == pss->last_sent_block);

    if (param->result == RES_ZEROPAGE) {
        assert(qemu_file_buffer_empty(param->file));
        len += save_page_header(pss, file, block, offset | RAM_SAVE_FLAG_ZERO);
        qemu_put_byte(file, 0);
        len += 1;
        ram_release_page(block->idstr, offset);
    } else if (param->result == RES_COMPRESS) {
        assert(!qemu_file_buffer_empty(param->file));
        len += save_page_header(pss, file, block,
                                offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
        len += qemu_put_qemu_file(file, param->file);
    } else {
        abort();
    }

    update_compress_thread_counts(param, len);

    return len;
}

#define PAGE_ALL_CLEAN 0
#define PAGE_TRY_AGAIN 1
#define PAGE_DIRTY_FOUND 2
/**
 * find_dirty_block: find the next dirty page and update any state
 * associated with the search process.
 *
 * Returns:
 *         <0: An error happened
 *         PAGE_ALL_CLEAN: no dirty page found, give up
 *         PAGE_TRY_AGAIN: no dirty page found, retry for next block
 *         PAGE_DIRTY_FOUND: dirty page found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 */
static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
{
    /* Update pss->page for the next dirty bit in ramblock */
    pss_find_next_dirty(pss);

    if (pss->complete_round && pss->block == rs->last_seen_block &&
        pss->page >= rs->last_page) {
        /*
         * We've been once around the RAM and haven't found anything.
         * Give up.
         */
        return PAGE_ALL_CLEAN;
    }
    if (!offset_in_ramblock(pss->block,
                            ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)) {
        /* Didn't find anything in this RAM Block */
        pss->page = 0;
        pss->block = QLIST_NEXT_RCU(pss->block, next);
        if (!pss->block) {
            if (migrate_multifd() &&
                (!migrate_multifd_flush_after_each_section() ||
                 migrate_mapped_ram())) {
                QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
                int ret = multifd_send_sync_main();
                if (ret < 0) {
                    return ret;
                }

                if (!migrate_mapped_ram()) {
                    qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
                    qemu_fflush(f);
                }
            }
            /*
             * If memory migration starts over, we will meet a dirtied page
             * which may still exist in the compression threads' ring, so we
             * should flush the compressed data to make sure the new page
             * is not overwritten by the old one in the destination.
             *
             * Also, if xbzrle is on, stop using the data compression at this
             * point. In theory, xbzrle can do better than compression.
             */
            compress_flush_data();

            /* Hit the end of the list */
            pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
            /* Flag that we've looped */
            pss->complete_round = true;
            /* After the first round, enable XBZRLE. */
            if (migrate_xbzrle()) {
                rs->xbzrle_started = true;
            }
        }
        /* Didn't find anything this time, but try again on the new block */
        return PAGE_TRY_AGAIN;
    } else {
        /* We've found something */
        return PAGE_DIRTY_FOUND;
    }
}
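
/*
 * Editor's usage sketch (hypothetical caller; the real scan loop lives
 * in the ram save path): the tri-state return is meant to drive a loop
 * like
 *
 *     int res;
 *
 *     do {
 *         res = find_dirty_block(rs, pss);
 *         if (res == PAGE_DIRTY_FOUND) {
 *             // send the page at pss->block / pss->page
 *         }
 *     } while (res == PAGE_TRY_AGAIN);
 *     // res < 0: error; PAGE_ALL_CLEAN: nothing left this round
 */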

/**
 * unqueue_page: gets a page of the queue
 *
 * Helper for 'get_queued_page' - gets a page off the queue
 *
 * Returns the block of the page (or NULL if none available)
 *
 * @rs: current RAM state
 * @offset: used to return the offset within the RAMBlock
 */
static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
{
    struct RAMSrcPageRequest *entry;
    RAMBlock *block = NULL;

    if (!postcopy_has_request(rs)) {
        return NULL;
    }

    QEMU_LOCK_GUARD(&rs->src_page_req_mutex);

    /*
     * This should _never_ change even after we take the lock, because no one
     * should be taking anything off the request list other than us.
     */
    assert(postcopy_has_request(rs));

    entry = QSIMPLEQ_FIRST(&rs->src_page_requests);
    block = entry->rb;
    *offset = entry->offset;

    if (entry->len > TARGET_PAGE_SIZE) {
        entry->len -= TARGET_PAGE_SIZE;
        entry->offset += TARGET_PAGE_SIZE;
    } else {
        memory_region_unref(block->mr);
        QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
        g_free(entry);
        migration_consume_urgent_request();
    }

    return block;
}

#if defined(__linux__)
/**
 * poll_fault_page: try to get next UFFD write fault page and, if pending fault
 *   is found, return RAM block pointer and page offset
 *
 * Returns pointer to the RAMBlock containing faulting page,
 *   NULL if no write faults are pending
 *
 * @rs: current RAM state
 * @offset: page offset from the beginning of the block
 */
static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
{
    struct uffd_msg uffd_msg;
    void *page_address;
    RAMBlock *block;
    int res;

    if (!migrate_background_snapshot()) {
        return NULL;
    }

    res = uffd_read_events(rs->uffdio_fd, &uffd_msg, 1);
    if (res <= 0) {
        return NULL;
    }

    page_address = (void *)(uintptr_t) uffd_msg.arg.pagefault.address;
    block = qemu_ram_block_from_host(page_address, false, offset);
    assert(block && (block->flags & RAM_UF_WRITEPROTECT) != 0);
    return block;
}

/**
 * ram_save_release_protection: release UFFD write protection after
 *   a range of pages has been saved
 *
 * @rs: current RAM state
 * @pss: page-search-status structure
 * @start_page: index of the first page in the range relative to pss->block
 *
 * Returns 0 on success, negative value in case of an error
 */
static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
                                       unsigned long start_page)
{
    int res = 0;

    /* Check if page is from UFFD-managed region. */
    if (pss->block->flags & RAM_UF_WRITEPROTECT) {
        void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS);
        uint64_t run_length = (pss->page - start_page) << TARGET_PAGE_BITS;

        /* Flush async buffers before un-protect. */
        qemu_fflush(pss->pss_channel);
        /* Un-protect memory range. */
        res = uffd_change_protection(rs->uffdio_fd, page_address, run_length,
                                     false, false);
    }

    return res;
}

/* ram_write_tracking_available: check if kernel supports required UFFD features
 *
 * Returns true if supported, false otherwise
 */
bool ram_write_tracking_available(void)
{
    uint64_t uffd_features;
    int res;

    res = uffd_query_features(&uffd_features);
    return (res == 0 &&
            (uffd_features & UFFD_FEATURE_PAGEFAULT_FLAG_WP) != 0);
}

/* ram_write_tracking_compatible: check if guest configuration is
 *   compatible with 'write-tracking'
 *
 * Returns true if compatible, false otherwise
 */
bool ram_write_tracking_compatible(void)
{
    const uint64_t uffd_ioctls_mask = BIT(_UFFDIO_WRITEPROTECT);
    int uffd_fd;
    RAMBlock *block;
    bool ret = false;

    /* Open UFFD file descriptor */
    uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, false);
    if (uffd_fd < 0) {
        return false;
    }

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        uint64_t uffd_ioctls;

        /* Nothing to do with read-only and MMIO-writable regions */
        if (block->mr->readonly || block->mr->rom_device) {
            continue;
        }
        /* Try to register block memory via UFFD-IO to track writes */
        if (uffd_register_memory(uffd_fd, block->host, block->max_length,
                                 UFFDIO_REGISTER_MODE_WP, &uffd_ioctls)) {
            goto out;
        }
        if ((uffd_ioctls & uffd_ioctls_mask) != uffd_ioctls_mask) {
            goto out;
        }
    }
    ret = true;

out:
    uffd_close_fd(uffd_fd);
    return ret;
}

static inline void populate_read_range(RAMBlock *block, ram_addr_t offset,
                                       ram_addr_t size)
{
    const ram_addr_t end = offset + size;

    /*
     * We read one byte of each page; this will preallocate page tables if
     * required and populate the shared zeropage on MAP_PRIVATE anonymous memory
     * where no page was populated yet. This might require adaptation when
     * supporting other mappings, like shmem.
     */
    for (; offset < end; offset += block->page_size) {
        char tmp = *((char *)block->host + offset);

        /* Don't optimize the read out */
        asm volatile("" : "+r" (tmp));
    }
}

static inline int populate_read_section(MemoryRegionSection *section,
                                        void *opaque)
{
    const hwaddr size = int128_get64(section->size);
    hwaddr offset = section->offset_within_region;
    RAMBlock *block = section->mr->ram_block;

    populate_read_range(block, offset, size);
    return 0;
}

/*
 * ram_block_populate_read: preallocate page tables and populate pages in the
 *   RAM block by reading a byte of each page.
 *
 * Since it's solely used for userfault_fd WP feature, here we just
 *   hardcode page size to qemu_real_host_page_size.
 *
 * @rb: RAM block to populate
 */
static void ram_block_populate_read(RAMBlock *rb)
{
    /*
     * Skip populating all pages that fall into a discarded range as managed by
     * a RamDiscardManager responsible for the mapped memory region of the
     * RAMBlock. Such discarded ("logically unplugged") parts of a RAMBlock
     * must not get populated automatically. We don't have to track
     * modifications via userfaultfd WP reliably, because these pages will
     * not be part of the migration stream either way -- see
     * ramblock_dirty_bitmap_clear_discarded_pages().
     *
     * Note: The result is only stable while migrating (precopy/postcopy).
     */
    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = 0,
            .size = rb->mr->size,
        };

        ram_discard_manager_replay_populated(rdm, &section,
                                             populate_read_section, NULL);
    } else {
        populate_read_range(rb, 0, rb->used_length);
    }
}

/*
 * ram_write_tracking_prepare: prepare for UFFD-WP memory tracking
 */
void ram_write_tracking_prepare(void)
{
    RAMBlock *block;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        /* Nothing to do with read-only and MMIO-writable regions */
        if (block->mr->readonly || block->mr->rom_device) {
            continue;
        }

        /*
         * Populate pages of the RAM block before enabling userfault_fd
         * write protection.
         *
         * This stage is required since ioctl(UFFDIO_WRITEPROTECT) with
         * UFFDIO_WRITEPROTECT_MODE_WP mode setting would silently skip
         * pages with pte_none() entries in page table.
         */
        ram_block_populate_read(block);
    }
}

static inline int uffd_protect_section(MemoryRegionSection *section,
                                       void *opaque)
{
    const hwaddr size = int128_get64(section->size);
    const hwaddr offset = section->offset_within_region;
    RAMBlock *rb = section->mr->ram_block;
    int uffd_fd = (uintptr_t)opaque;

    return uffd_change_protection(uffd_fd, rb->host + offset, size, true,
                                  false);
}

static int ram_block_uffd_protect(RAMBlock *rb, int uffd_fd)
{
    assert(rb->flags & RAM_UF_WRITEPROTECT);

    /* See ram_block_populate_read() */
    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = 0,
            .size = rb->mr->size,
        };

        return ram_discard_manager_replay_populated(rdm, &section,
                                                    uffd_protect_section,
                                                    (void *)(uintptr_t)uffd_fd);
    }
    return uffd_change_protection(uffd_fd, rb->host,
                                  rb->used_length, true, false);
}

/*
 * ram_write_tracking_start: start UFFD-WP memory tracking
 *
 * Returns 0 for success or negative value in case of error
 */
int ram_write_tracking_start(void)
{
    int uffd_fd;
    RAMState *rs = ram_state;
    RAMBlock *block;

    /* Open UFFD file descriptor */
    uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, true);
    if (uffd_fd < 0) {
        return uffd_fd;
    }
    rs->uffdio_fd = uffd_fd;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        /* Nothing to do with read-only and MMIO-writable regions */
        if (block->mr->readonly || block->mr->rom_device) {
            continue;
        }

        /* Register block memory with UFFD to track writes */
        if (uffd_register_memory(rs->uffdio_fd, block->host,
                                 block->max_length, UFFDIO_REGISTER_MODE_WP,
                                 NULL)) {
            goto fail;
        }
        block->flags |= RAM_UF_WRITEPROTECT;
        memory_region_ref(block->mr);

        /* Apply UFFD write protection to the block memory range */
        if (ram_block_uffd_protect(block, uffd_fd)) {
            goto fail;
        }

        trace_ram_write_tracking_ramblock_start(block->idstr, block->page_size,
                                                block->host, block->max_length);
    }

    return 0;

fail:
    error_report("ram_write_tracking_start() failed: restoring initial memory state");

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
            continue;
        }
        uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);
        /* Cleanup flags and remove reference */
        block->flags &= ~RAM_UF_WRITEPROTECT;
        memory_region_unref(block->mr);
    }

    uffd_close_fd(uffd_fd);
    rs->uffdio_fd = -1;
    return -1;
}

/**
 * ram_write_tracking_stop: stop UFFD-WP memory tracking and remove protection
 */
void ram_write_tracking_stop(void)
{
    RAMState *rs = ram_state;
    RAMBlock *block;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
            continue;
        }
        uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);

        trace_ram_write_tracking_ramblock_stop(block->idstr, block->page_size,
                                               block->host, block->max_length);

        /* Cleanup flags and remove reference */
        block->flags &= ~RAM_UF_WRITEPROTECT;
        memory_region_unref(block->mr);
    }

    /* Finally close UFFD file descriptor */
    uffd_close_fd(rs->uffdio_fd);
    rs->uffdio_fd = -1;
}
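
/*
 * Editor's note (illustration, not part of the original file): the
 * write-tracking API above is used roughly in this order by the
 * background snapshot code:
 *
 *     if (ram_write_tracking_available() &&
 *         ram_write_tracking_compatible()) {
 *         ram_write_tracking_prepare();   // touch pages, avoid pte_none()
 *         ram_write_tracking_start();     // register + write-protect
 *         // ... save RAM, un-protecting ranges as they are written out
 *         //     via ram_save_release_protection() ...
 *         ram_write_tracking_stop();      // unregister + cleanup
 *     }
 */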

#else
/* No target OS support, stubs just fail or ignore */

static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
{
    (void) rs;
    (void) offset;

    return NULL;
}

static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
                                       unsigned long start_page)
{
    (void) rs;
    (void) pss;
    (void) start_page;

    return 0;
}

bool ram_write_tracking_available(void)
{
    return false;
}

bool ram_write_tracking_compatible(void)
{
    assert(0);
    return false;
}

int ram_write_tracking_start(void)
{
    assert(0);
    return -1;
}

void ram_write_tracking_stop(void)
{
    assert(0);
}
#endif /* defined(__linux__) */

/**
 * get_queued_page: unqueue a page from the postcopy requests
 *
 * Skips pages that are already sent (!dirty)
 *
 * Returns true if a queued page is found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 */
static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
{
    RAMBlock *block;
    ram_addr_t offset;
    bool dirty;

    do {
        block = unqueue_page(rs, &offset);
        /*
         * We're sending this page, and since it's postcopy nothing else
         * will dirty it, and we must make sure it doesn't get sent again
         * even if this queue request was received after the background
         * search already sent it.
         */
        if (block) {
            unsigned long page;

            page = offset >> TARGET_PAGE_BITS;
            dirty = test_bit(page, block->bmap);
            if (!dirty) {
                trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
                                                page);
            } else {
                trace_get_queued_page(block->idstr, (uint64_t)offset, page);
            }
        }

    } while (block && !dirty);

    if (!block) {
        /*
         * Poll write faults too if background snapshot is enabled; that's
         * when we have vCPUs blocked by write-protected pages.
         */
        block = poll_fault_page(rs, &offset);
    }

    if (block) {
        /*
         * We want the background search to continue from the queued page
         * since the guest is likely to want other pages near to the page
         * it just requested.
         */
        pss->block = block;
        pss->page = offset >> TARGET_PAGE_BITS;

        /*
         * This unqueued page would break the "one round" check, even if
         * it's really rare.
         */
        pss->complete_round = false;
    }

    return !!block;
}

/**
 * migration_page_queue_free: drop any remaining pages in the ram
 *   request queue
 *
 * It should be empty at the end anyway, but in error cases there may
 * be some left; in that case we drop them.
 *
 */
static void migration_page_queue_free(RAMState *rs)
{
    struct RAMSrcPageRequest *mspr, *next_mspr;
    /* This queue generally should be empty - but in the case of a failed
     * migration might have some droppings in.
     */
    RCU_READ_LOCK_GUARD();
    QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
        memory_region_unref(mspr->rb->mr);
        QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
        g_free(mspr);
    }
}
1855 */ 1856 if (block) { 1857 unsigned long page; 1858 1859 page = offset >> TARGET_PAGE_BITS; 1860 dirty = test_bit(page, block->bmap); 1861 if (!dirty) { 1862 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset, 1863 page); 1864 } else { 1865 trace_get_queued_page(block->idstr, (uint64_t)offset, page); 1866 } 1867 } 1868 1869 } while (block && !dirty); 1870 1871 if (!block) { 1872 /* 1873 * Poll write faults too if background snapshot is enabled; that's 1874 * when vCPUs get blocked by write-protected pages. 1875 */ 1876 block = poll_fault_page(rs, &offset); 1877 } 1878 1879 if (block) { 1880 /* 1881 * We want the background search to continue from the queued page 1882 * since the guest is likely to want other pages near to the page 1883 * it just requested. 1884 */ 1885 pss->block = block; 1886 pss->page = offset >> TARGET_PAGE_BITS; 1887 1888 /* 1889 * This unqueued page would break the "one round" check, even if it 1890 * is really rare. 1891 */ 1892 pss->complete_round = false; 1893 } 1894 1895 return !!block; 1896 } 1897 1898 /** 1899 * migration_page_queue_free: drop any remaining pages in the ram 1900 * request queue 1901 * 1902 * It should be empty at the end anyway, but in error cases there may 1903 * be some left. If any pages are left, we drop them. 1904 * 1905 */ 1906 static void migration_page_queue_free(RAMState *rs) 1907 { 1908 struct RAMSrcPageRequest *mspr, *next_mspr; 1909 /* This queue generally should be empty - but in the case of a failed 1910 * migration it might have some droppings in. 1911 */ 1912 RCU_READ_LOCK_GUARD(); 1913 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) { 1914 memory_region_unref(mspr->rb->mr); 1915 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req); 1916 g_free(mspr); 1917 } 1918 } 1919 1920 /** 1921 * ram_save_queue_pages: queue the page for transmission 1922 * 1923 * A request from postcopy destination for example. 1924 * 1925 * Returns zero on success or negative on error 1926 * 1927 * @rbname: Name of the RAMBlock of the request. NULL means the 1928 * same as the last one. 1929 * @start: starting address from the start of the RAMBlock 1930 * @len: length (in bytes) to send 1931 */ 1932 int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len, 1933 Error **errp) 1934 { 1935 RAMBlock *ramblock; 1936 RAMState *rs = ram_state; 1937 1938 stat64_add(&mig_stats.postcopy_requests, 1); 1939 RCU_READ_LOCK_GUARD(); 1940 1941 if (!rbname) { 1942 /* Reuse last RAMBlock */ 1943 ramblock = rs->last_req_rb; 1944 1945 if (!ramblock) { 1946 /* 1947 * Shouldn't happen, we can't reuse the last RAMBlock if 1948 * it's the 1st request. 1949 */ 1950 error_setg(errp, "MIG_RP_MSG_REQ_PAGES has no previous block"); 1951 return -1; 1952 } 1953 } else { 1954 ramblock = qemu_ram_block_by_name(rbname); 1955 1956 if (!ramblock) { 1957 /* We shouldn't be asked for a non-existent RAMBlock */ 1958 error_setg(errp, "MIG_RP_MSG_REQ_PAGES has no block '%s'", rbname); 1959 return -1; 1960 } 1961 rs->last_req_rb = ramblock; 1962 } 1963 trace_ram_save_queue_pages(ramblock->idstr, start, len); 1964 if (!offset_in_ramblock(ramblock, start + len - 1)) { 1965 error_setg(errp, "MIG_RP_MSG_REQ_PAGES request overrun, " 1966 "start=" RAM_ADDR_FMT " len=" 1967 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT, 1968 start, len, ramblock->used_length); 1969 return -1; 1970 } 1971 1972 /* 1973 * With postcopy preempt, we send back the page directly in the 1974 * rp-return thread.
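 *
 * A sketch of the two paths implemented below (illustration only):
 *
 *   if (postcopy_preempt_active())
 *       ram_save_host_page_urgent(pss);      // sent right here, on the
 *                                            // preempt channel
 *   else
 *       QSIMPLEQ_INSERT_TAIL(&src_page_requests, ...);  // picked up
 *                                            // later by the migration
 *                                            // thread via unqueue_page()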
1975 */ 1976 if (postcopy_preempt_active()) { 1977 ram_addr_t page_start = start >> TARGET_PAGE_BITS; 1978 size_t page_size = qemu_ram_pagesize(ramblock); 1979 PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_POSTCOPY]; 1980 int ret = 0; 1981 1982 qemu_mutex_lock(&rs->bitmap_mutex); 1983 1984 pss_init(pss, ramblock, page_start); 1985 /* 1986 * Always use the preempt channel, and make sure it's there. It's 1987 * safe to access without lock, because when rp-thread is running 1988 * we should be the only one who operates on the qemufile 1989 */ 1990 pss->pss_channel = migrate_get_current()->postcopy_qemufile_src; 1991 assert(pss->pss_channel); 1992 1993 /* 1994 * It must be one or a multiple of the host page size. Just 1995 * assert; if something is wrong we're mostly split-brain anyway. 1996 */ 1997 assert(len % page_size == 0); 1998 while (len) { 1999 if (ram_save_host_page_urgent(pss)) { 2000 error_setg(errp, "ram_save_host_page_urgent() failed: " 2001 "ramblock=%s, start_addr=0x"RAM_ADDR_FMT, 2002 ramblock->idstr, start); 2003 ret = -1; 2004 break; 2005 } 2006 /* 2007 * NOTE: after ram_save_host_page_urgent() succeeded, pss->page 2008 * will automatically be moved and point to the next host page 2009 * we're going to send, so no need to update here. 2010 * 2011 * Normally QEMU never sends >1 host page in requests, so 2012 * logically we don't even need that as the loop should only 2013 * run once, but keep it for consistency. 2014 */ 2015 len -= page_size; 2016 } 2017 qemu_mutex_unlock(&rs->bitmap_mutex); 2018 2019 return ret; 2020 } 2021 2022 struct RAMSrcPageRequest *new_entry = 2023 g_new0(struct RAMSrcPageRequest, 1); 2024 new_entry->rb = ramblock; 2025 new_entry->offset = start; 2026 new_entry->len = len; 2027 2028 memory_region_ref(ramblock->mr); 2029 qemu_mutex_lock(&rs->src_page_req_mutex); 2030 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req); 2031 migration_make_urgent_request(); 2032 qemu_mutex_unlock(&rs->src_page_req_mutex); 2033 2034 return 0; 2035 } 2036 2037 /* 2038 * try to compress the page before posting it out, return true if the page 2039 * has been properly handled by compression, otherwise it needs other 2040 * paths to handle it 2041 */ 2042 static bool save_compress_page(RAMState *rs, PageSearchStatus *pss, 2043 ram_addr_t offset) 2044 { 2045 if (!migrate_compress()) { 2046 return false; 2047 } 2048 2049 /* 2050 * When starting the process of a new block, the first page of 2051 * the block should be sent out before other pages in the same 2052 * block, and all the pages in the last block should have been sent 2053 * out; keeping this order is important, because the 'cont' flag 2054 * is used to avoid resending the block name. 2055 * 2056 * We post the first page as a normal page as compression will take 2057 * much CPU resource.
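 *
 * (If a compressed page raced ahead of the first page of its block it
 * would go out without a preceding block header, and the destination
 * would attribute it to the wrong RAMBlock; hence the flush below
 * whenever pss->block changes.)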
2058 */ 2059 if (pss->block != pss->last_sent_block) { 2060 compress_flush_data(); 2061 return false; 2062 } 2063 2064 return compress_page_with_multi_thread(pss->block, offset, 2065 compress_send_queued_data); 2066 } 2067 2068 /** 2069 * ram_save_target_page_legacy: save one target page 2070 * 2071 * Returns the number of pages written 2072 * 2073 * @rs: current RAM state 2074 * @pss: data about the page we want to send 2075 */ 2076 static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss) 2077 { 2078 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; 2079 int res; 2080 2081 if (control_save_page(pss, offset, &res)) { 2082 return res; 2083 } 2084 2085 if (save_compress_page(rs, pss, offset)) { 2086 return 1; 2087 } 2088 2089 if (save_zero_page(rs, pss, offset)) { 2090 return 1; 2091 } 2092 2093 return ram_save_page(rs, pss); 2094 } 2095 2096 /** 2097 * ram_save_target_page_multifd: send one target page to multifd workers 2098 * 2099 * Returns 1 if the page was queued, -1 otherwise. 2100 * 2101 * @rs: current RAM state 2102 * @pss: data about the page we want to send 2103 */ 2104 static int ram_save_target_page_multifd(RAMState *rs, PageSearchStatus *pss) 2105 { 2106 RAMBlock *block = pss->block; 2107 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; 2108 2109 /* 2110 * While using multifd live migration, we still need to handle zero 2111 * page checking on the migration main thread. 2112 */ 2113 if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) { 2114 if (save_zero_page(rs, pss, offset)) { 2115 return 1; 2116 } 2117 } 2118 2119 return ram_save_multifd_page(block, offset); 2120 } 2121 2122 /* Should be called before sending a host page */ 2123 static void pss_host_page_prepare(PageSearchStatus *pss) 2124 { 2125 /* How many guest pages are there in one host page? */ 2126 size_t guest_pfns = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS; 2127 2128 pss->host_page_sending = true; 2129 if (guest_pfns <= 1) { 2130 /* 2131 * This covers both when guest psize == host psize, or when guest 2132 * has larger psize than the host (guest_pfns==0). 2133 * 2134 * For the latter, we always send one whole guest page per 2135 * iteration of the host page (example: an Alpha VM on x86 host 2136 * will have guest psize 8K while host psize 4K). 2137 */ 2138 pss->host_page_start = pss->page; 2139 pss->host_page_end = pss->page + 1; 2140 } else { 2141 /* 2142 * The host page spans multiple guest pages; we send them 2143 * within the same host page iteration. 2144 */ 2145 pss->host_page_start = ROUND_DOWN(pss->page, guest_pfns); 2146 pss->host_page_end = ROUND_UP(pss->page + 1, guest_pfns); 2147 } 2148 } 2149 2150 /* 2151 * Whether the page pointed to by PSS is within the host page being sent. 2152 * Must be called after a previous pss_host_page_prepare(). 2153 */ 2154 static bool pss_within_range(PageSearchStatus *pss) 2155 { 2156 ram_addr_t ram_addr; 2157 2158 assert(pss->host_page_sending); 2159 2160 /* Over host-page boundary? */ 2161 if (pss->page >= pss->host_page_end) { 2162 return false; 2163 } 2164 2165 ram_addr = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; 2166 2167 return offset_in_ramblock(pss->block, ram_addr); 2168 } 2169 2170 static void pss_host_page_finish(PageSearchStatus *pss) 2171 { 2172 pss->host_page_sending = false; 2173 /* This is not needed, but just to reset it */ 2174 pss->host_page_start = pss->host_page_end = 0; 2175 } 2176 2177 /* 2178 * Send an urgent host page specified by `pss'.
Needs to be called with 2179 * bitmap_mutex held. 2180 * 2181 * Returns 0 if saving the host page succeeded, negative otherwise. 2182 */ 2183 static int ram_save_host_page_urgent(PageSearchStatus *pss) 2184 { 2185 bool page_dirty, sent = false; 2186 RAMState *rs = ram_state; 2187 int ret = 0; 2188 2189 trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page); 2190 pss_host_page_prepare(pss); 2191 2192 /* 2193 * If precopy is sending the same page, let it be done in precopy, or 2194 * we could send the same page in two channels and none of them will 2195 * receive the whole page. 2196 */ 2197 if (pss_overlap(pss, &ram_state->pss[RAM_CHANNEL_PRECOPY])) { 2198 trace_postcopy_preempt_hit(pss->block->idstr, 2199 pss->page << TARGET_PAGE_BITS); 2200 return 0; 2201 } 2202 2203 do { 2204 page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page); 2205 2206 if (page_dirty) { 2207 /* Be strict about the return code; it must be 1 */ 2208 if (migration_ops->ram_save_target_page(rs, pss) != 1) { 2209 error_report_once("%s: ram_save_target_page failed", __func__); 2210 ret = -1; 2211 goto out; 2212 } 2213 sent = true; 2214 } 2215 pss_find_next_dirty(pss); 2216 } while (pss_within_range(pss)); 2217 out: 2218 pss_host_page_finish(pss); 2219 /* For urgent requests, flush immediately if sent */ 2220 if (sent) { 2221 qemu_fflush(pss->pss_channel); 2222 } 2223 return ret; 2224 } 2225 2226 /** 2227 * ram_save_host_page: save a whole host page 2228 * 2229 * Starting at *offset send pages up to the end of the current host 2230 * page. It's valid for the initial offset to point into the middle of 2231 * a host page in which case the remainder of the host page is sent. 2232 * Only dirty target pages are sent. Note that the host page size may 2233 * be a huge page for this block. 2234 * 2235 * The saving stops at the boundary of the used_length of the block 2236 * if the RAMBlock isn't a multiple of the host page size. 2237 * 2238 * The caller must hold ram_state.bitmap_mutex when calling this 2239 * function. Note that this function can temporarily release the lock, but 2240 * it makes sure the lock is held again before it returns. 2241 * 2242 * Returns the number of pages written or negative on error 2243 * 2244 * @rs: current RAM state 2245 * @pss: data about the page we want to send 2246 */ 2247 static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss) 2248 { 2249 bool page_dirty, preempt_active = postcopy_preempt_active(); 2250 int tmppages, pages = 0; 2251 size_t pagesize_bits = 2252 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS; 2253 unsigned long start_page = pss->page; 2254 int res; 2255 2256 if (migrate_ram_is_ignored(pss->block)) { 2257 error_report("block %s should not be migrated !", pss->block->idstr); 2258 return 0; 2259 } 2260 2261 /* Update host page boundary information */ 2262 pss_host_page_prepare(pss); 2263 2264 do { 2265 page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page); 2266 2267 /* Check if the page is dirty and if so, send it */ 2268 if (page_dirty) { 2269 /* 2270 * Properly yield the lock only in postcopy preempt mode 2271 * because both the migration thread and the rp-return thread can 2272 * operate on the bitmaps.
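 *
 * (Without preemption only the migration thread walks the bitmap
 * here, so dropping and retaking bitmap_mutex for every page would
 * be pure overhead.)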
2273 */ 2274 if (preempt_active) { 2275 qemu_mutex_unlock(&rs->bitmap_mutex); 2276 } 2277 tmppages = migration_ops->ram_save_target_page(rs, pss); 2278 if (tmppages >= 0) { 2279 pages += tmppages; 2280 /* 2281 * Allow rate limiting to happen in the middle of huge pages if 2282 * something is sent in the current iteration. 2283 */ 2284 if (pagesize_bits > 1 && tmppages > 0) { 2285 migration_rate_limit(); 2286 } 2287 } 2288 if (preempt_active) { 2289 qemu_mutex_lock(&rs->bitmap_mutex); 2290 } 2291 } else { 2292 tmppages = 0; 2293 } 2294 2295 if (tmppages < 0) { 2296 pss_host_page_finish(pss); 2297 return tmppages; 2298 } 2299 2300 pss_find_next_dirty(pss); 2301 } while (pss_within_range(pss)); 2302 2303 pss_host_page_finish(pss); 2304 2305 res = ram_save_release_protection(rs, pss, start_page); 2306 return (res < 0 ? res : pages); 2307 } 2308 2309 /** 2310 * ram_find_and_save_block: finds a dirty page and sends it to f 2311 * 2312 * Called within an RCU critical section. 2313 * 2314 * Returns the number of pages written where zero means no dirty pages, 2315 * or negative on error 2316 * 2317 * @rs: current RAM state 2318 * 2319 * On systems where host-page-size > target-page-size it will send all the 2320 * pages in a host page that are dirty. 2321 */ 2322 static int ram_find_and_save_block(RAMState *rs) 2323 { 2324 PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY]; 2325 int pages = 0; 2326 2327 /* No dirty page as there is zero RAM */ 2328 if (!rs->ram_bytes_total) { 2329 return pages; 2330 } 2331 2332 /* 2333 * Always keep last_seen_block/last_page valid during this procedure, 2334 * because find_dirty_block() relies on these values (e.g., we compare 2335 * last_seen_block with pss.block to see whether we searched all the 2336 * ramblocks) to detect the completion of migration. A NULL value 2337 * of last_seen_block can cause the loop below to run forever.
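 *
 * (Roughly: find_dirty_block() reports PAGE_ALL_CLEAN only once the
 * scan wraps back around to last_seen_block, and a NULL value would
 * never be matched by that comparison.)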
2338 */ 2339 if (!rs->last_seen_block) { 2340 rs->last_seen_block = QLIST_FIRST_RCU(&ram_list.blocks); 2341 rs->last_page = 0; 2342 } 2343 2344 pss_init(pss, rs->last_seen_block, rs->last_page); 2345 2346 while (true) { 2347 if (!get_queued_page(rs, pss)) { 2348 /* priority queue empty, so just search for something dirty */ 2349 int res = find_dirty_block(rs, pss); 2350 if (res != PAGE_DIRTY_FOUND) { 2351 if (res == PAGE_ALL_CLEAN) { 2352 break; 2353 } else if (res == PAGE_TRY_AGAIN) { 2354 continue; 2355 } else if (res < 0) { 2356 pages = res; 2357 break; 2358 } 2359 } 2360 } 2361 pages = ram_save_host_page(rs, pss); 2362 if (pages) { 2363 break; 2364 } 2365 } 2366 2367 rs->last_seen_block = pss->block; 2368 rs->last_page = pss->page; 2369 2370 return pages; 2371 } 2372 2373 static uint64_t ram_bytes_total_with_ignored(void) 2374 { 2375 RAMBlock *block; 2376 uint64_t total = 0; 2377 2378 RCU_READ_LOCK_GUARD(); 2379 2380 RAMBLOCK_FOREACH_MIGRATABLE(block) { 2381 total += block->used_length; 2382 } 2383 return total; 2384 } 2385 2386 uint64_t ram_bytes_total(void) 2387 { 2388 RAMBlock *block; 2389 uint64_t total = 0; 2390 2391 RCU_READ_LOCK_GUARD(); 2392 2393 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2394 total += block->used_length; 2395 } 2396 return total; 2397 } 2398 2399 static void xbzrle_load_setup(void) 2400 { 2401 XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE); 2402 } 2403 2404 static void xbzrle_load_cleanup(void) 2405 { 2406 g_free(XBZRLE.decoded_buf); 2407 XBZRLE.decoded_buf = NULL; 2408 } 2409 2410 static void ram_state_cleanup(RAMState **rsp) 2411 { 2412 if (*rsp) { 2413 migration_page_queue_free(*rsp); 2414 qemu_mutex_destroy(&(*rsp)->bitmap_mutex); 2415 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex); 2416 g_free(*rsp); 2417 *rsp = NULL; 2418 } 2419 } 2420 2421 static void xbzrle_cleanup(void) 2422 { 2423 XBZRLE_cache_lock(); 2424 if (XBZRLE.cache) { 2425 cache_fini(XBZRLE.cache); 2426 g_free(XBZRLE.encoded_buf); 2427 g_free(XBZRLE.current_buf); 2428 g_free(XBZRLE.zero_target_page); 2429 XBZRLE.cache = NULL; 2430 XBZRLE.encoded_buf = NULL; 2431 XBZRLE.current_buf = NULL; 2432 XBZRLE.zero_target_page = NULL; 2433 } 2434 XBZRLE_cache_unlock(); 2435 } 2436 2437 static void ram_bitmaps_destroy(void) 2438 { 2439 RAMBlock *block; 2440 2441 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2442 g_free(block->clear_bmap); 2443 block->clear_bmap = NULL; 2444 g_free(block->bmap); 2445 block->bmap = NULL; 2446 g_free(block->file_bmap); 2447 block->file_bmap = NULL; 2448 } 2449 } 2450 2451 static void ram_save_cleanup(void *opaque) 2452 { 2453 RAMState **rsp = opaque; 2454 2455 /* We don't use dirty log with background snapshots */ 2456 if (!migrate_background_snapshot()) { 2457 /* the caller holds the BQL or is in a BH, so there is 2458 * no write race against the migration bitmap 2459 */ 2460 if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) { 2461 /* 2462 * do not stop dirty log without starting it, since 2463 * memory_global_dirty_log_stop will assert that 2464 * memory_global_dirty_log_start/stop are used in pairs 2465 */ 2466 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION); 2467 } 2468 } 2469 2470 ram_bitmaps_destroy(); 2471 2472 xbzrle_cleanup(); 2473 compress_threads_save_cleanup(); 2474 ram_state_cleanup(rsp); 2475 g_free(migration_ops); 2476 migration_ops = NULL; 2477 } 2478 2479 static void ram_state_reset(RAMState *rs) 2480 { 2481 int i; 2482 2483 for (i = 0; i < RAM_CHANNEL_MAX; i++) { 2484 rs->pss[i].last_sent_block = NULL; 2485 } 2486 2487 rs->last_seen_block = NULL; 2488 rs->last_page
= 0; 2489 rs->last_version = ram_list.version; 2490 rs->xbzrle_started = false; 2491 } 2492 2493 #define MAX_WAIT 50 /* ms, half buffered_file limit */ 2494 2495 /* **** functions for postcopy ***** */ 2496 2497 void ram_postcopy_migrated_memory_release(MigrationState *ms) 2498 { 2499 struct RAMBlock *block; 2500 2501 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2502 unsigned long *bitmap = block->bmap; 2503 unsigned long range = block->used_length >> TARGET_PAGE_BITS; 2504 unsigned long run_start = find_next_zero_bit(bitmap, range, 0); 2505 2506 while (run_start < range) { 2507 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1); 2508 ram_discard_range(block->idstr, 2509 ((ram_addr_t)run_start) << TARGET_PAGE_BITS, 2510 ((ram_addr_t)(run_end - run_start)) 2511 << TARGET_PAGE_BITS); 2512 run_start = find_next_zero_bit(bitmap, range, run_end + 1); 2513 } 2514 } 2515 } 2516 2517 /** 2518 * postcopy_send_discard_bm_ram: discard a RAMBlock 2519 * 2520 * Callback from postcopy_each_ram_send_discard for each RAMBlock 2521 * 2522 * @ms: current migration state 2523 * @block: RAMBlock to discard 2524 */ 2525 static void postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block) 2526 { 2527 unsigned long end = block->used_length >> TARGET_PAGE_BITS; 2528 unsigned long current; 2529 unsigned long *bitmap = block->bmap; 2530 2531 for (current = 0; current < end; ) { 2532 unsigned long one = find_next_bit(bitmap, end, current); 2533 unsigned long zero, discard_length; 2534 2535 if (one >= end) { 2536 break; 2537 } 2538 2539 zero = find_next_zero_bit(bitmap, end, one + 1); 2540 2541 if (zero >= end) { 2542 discard_length = end - one; 2543 } else { 2544 discard_length = zero - one; 2545 } 2546 postcopy_discard_send_range(ms, one, discard_length); 2547 current = one + discard_length; 2548 } 2549 } 2550 2551 static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block); 2552 2553 /** 2554 * postcopy_each_ram_send_discard: discard all RAMBlocks 2555 * 2556 * Utility for the outgoing postcopy code. 2557 * Calls postcopy_send_discard_bm_ram for each RAMBlock 2558 * passing it bitmap indexes and name. 2559 * (qemu_ram_foreach_block ends up passing unscaled lengths 2560 * which would mean postcopy code would have to deal with target page) 2561 * 2562 * @ms: current migration state 2563 */ 2564 static void postcopy_each_ram_send_discard(MigrationState *ms) 2565 { 2566 struct RAMBlock *block; 2567 2568 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2569 postcopy_discard_send_init(ms, block->idstr); 2570 2571 /* 2572 * Deal with TPS != HPS and huge pages. It discards any partially sent 2573 * host-page size chunks and marks any partially dirty host-page size 2574 * chunks as all dirty. In this case the host-page is the host-page 2575 * for the particular RAMBlock, i.e. it might be a huge page. 2576 */ 2577 postcopy_chunk_hostpages_pass(ms, block); 2578 2579 /* 2580 * Postcopy sends chunks of bitmap over the wire, but it 2581 * just needs indexes at this point; this avoids it having 2582 * target page specific code. 2583 */ 2584 postcopy_send_discard_bm_ram(ms, block); 2585 postcopy_discard_send_finish(ms); 2586 } 2587 } 2588 2589 /** 2590 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages 2591 * 2592 * Helper for postcopy_each_ram_send_discard; called for each RAMBlock 2593 * to canonicalize the dirty bitmap so that each host page is either 2594 * fully dirty or fully clean. 2595 * 2596 * Postcopy requires that all target pages in a hostpage are dirty or 2597 * clean, not a mix. This function canonicalizes the bitmaps.
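 *
 * Worked example with illustrative numbers: for a 2M hugepage RAMBlock
 * and 4K target pages, host_ratio is 512. A dirty run covering only
 * target pages 600..699 starts unaligned, so the fixup below re-marks
 * the whole host page (pages 512..1023) as dirty and the scan resumes
 * at page 1024.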
2598 * 2599 * @ms: current migration state 2600 * @block: block that contains the page we want to canonicalize 2601 */ 2602 static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block) 2603 { 2604 RAMState *rs = ram_state; 2605 unsigned long *bitmap = block->bmap; 2606 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE; 2607 unsigned long pages = block->used_length >> TARGET_PAGE_BITS; 2608 unsigned long run_start; 2609 2610 if (block->page_size == TARGET_PAGE_SIZE) { 2611 /* Easy case - TPS==HPS for a non-huge page RAMBlock */ 2612 return; 2613 } 2614 2615 /* Find a dirty page */ 2616 run_start = find_next_bit(bitmap, pages, 0); 2617 2618 while (run_start < pages) { 2619 2620 /* 2621 * If the start of this run of pages is in the middle of a host 2622 * page, then we need to fixup this host page. 2623 */ 2624 if (QEMU_IS_ALIGNED(run_start, host_ratio)) { 2625 /* Find the end of this run */ 2626 run_start = find_next_zero_bit(bitmap, pages, run_start + 1); 2627 /* 2628 * If the end isn't at the start of a host page, then the 2629 * run doesn't finish at the end of a host page 2630 * and we need to discard. 2631 */ 2632 } 2633 2634 if (!QEMU_IS_ALIGNED(run_start, host_ratio)) { 2635 unsigned long page; 2636 unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start, 2637 host_ratio); 2638 run_start = QEMU_ALIGN_UP(run_start, host_ratio); 2639 2640 /* Clean up the bitmap */ 2641 for (page = fixup_start_addr; 2642 page < fixup_start_addr + host_ratio; page++) { 2643 /* 2644 * Re-mark them as dirty, updating the count for any pages 2645 * that weren't previously dirty. 2646 */ 2647 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap); 2648 } 2649 } 2650 2651 /* Find the next dirty page for the next iteration */ 2652 run_start = find_next_bit(bitmap, pages, run_start); 2653 } 2654 } 2655 2656 /** 2657 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap 2658 * 2659 * Transmit the set of pages to be discarded after precopy to the target. 2660 * These are pages that: 2661 * a) Have been previously transmitted but are now dirty again 2662 * b) Have never been transmitted; this ensures that 2663 * any pages on the destination that have been mapped by background 2664 * tasks get discarded (transparent huge pages are the specific concern) 2665 * Hopefully this is pretty sparse 2666 * 2667 * @ms: current migration state 2668 */ 2669 void ram_postcopy_send_discard_bitmap(MigrationState *ms) 2670 { 2671 RAMState *rs = ram_state; 2672 2673 RCU_READ_LOCK_GUARD(); 2674 2675 /* This should be our last sync, the src is now paused */ 2676 migration_bitmap_sync(rs, false); 2677 2678 /* Easiest way to make sure we don't resume in the middle of a host-page */ 2679 rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL; 2680 rs->last_seen_block = NULL; 2681 rs->last_page = 0; 2682 2683 postcopy_each_ram_send_discard(ms); 2684 2685 trace_ram_postcopy_send_discard_bitmap(); 2686 } 2687 2688 /** 2689 * ram_discard_range: discard dirtied pages at the beginning of postcopy 2690 * 2691 * Returns zero on success 2692 * 2693 * @rbname: name of the RAMBlock of the request. NULL means the 2694 * same as the last one.
2695 * @start: RAMBlock starting page 2696 * @length: RAMBlock size 2697 */ 2698 int ram_discard_range(const char *rbname, uint64_t start, size_t length) 2699 { 2700 trace_ram_discard_range(rbname, start, length); 2701 2702 RCU_READ_LOCK_GUARD(); 2703 RAMBlock *rb = qemu_ram_block_by_name(rbname); 2704 2705 if (!rb) { 2706 error_report("ram_discard_range: Failed to find block '%s'", rbname); 2707 return -1; 2708 } 2709 2710 /* 2711 * On the source VM, we don't need to update the received bitmap since 2712 * we don't even have one. 2713 */ 2714 if (rb->receivedmap) { 2715 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(), 2716 length >> qemu_target_page_bits()); 2717 } 2718 2719 return ram_block_discard_range(rb, start, length); 2720 } 2721 2722 /* 2723 * For every allocation, we will try not to crash the VM if the 2724 * allocation fails. 2725 */ 2726 static bool xbzrle_init(Error **errp) 2727 { 2728 if (!migrate_xbzrle()) { 2729 return true; 2730 } 2731 2732 XBZRLE_cache_lock(); 2733 2734 XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE); 2735 if (!XBZRLE.zero_target_page) { 2736 error_setg(errp, "%s: Error allocating zero page", __func__); 2737 goto err_out; 2738 } 2739 2740 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(), 2741 TARGET_PAGE_SIZE, errp); 2742 if (!XBZRLE.cache) { 2743 goto free_zero_page; 2744 } 2745 2746 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE); 2747 if (!XBZRLE.encoded_buf) { 2748 error_setg(errp, "%s: Error allocating encoded_buf", __func__); 2749 goto free_cache; 2750 } 2751 2752 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE); 2753 if (!XBZRLE.current_buf) { 2754 error_setg(errp, "%s: Error allocating current_buf", __func__); 2755 goto free_encoded_buf; 2756 } 2757 2758 /* We are all good */ 2759 XBZRLE_cache_unlock(); 2760 return true; 2761 2762 free_encoded_buf: 2763 g_free(XBZRLE.encoded_buf); 2764 XBZRLE.encoded_buf = NULL; 2765 free_cache: 2766 cache_fini(XBZRLE.cache); 2767 XBZRLE.cache = NULL; 2768 free_zero_page: 2769 g_free(XBZRLE.zero_target_page); 2770 XBZRLE.zero_target_page = NULL; 2771 err_out: 2772 XBZRLE_cache_unlock(); 2773 return false; 2774 } 2775 2776 static bool ram_state_init(RAMState **rsp, Error **errp) 2777 { 2778 *rsp = g_try_new0(RAMState, 1); 2779 2780 if (!*rsp) { 2781 error_setg(errp, "%s: Init ramstate fail", __func__); 2782 return false; 2783 } 2784 2785 qemu_mutex_init(&(*rsp)->bitmap_mutex); 2786 qemu_mutex_init(&(*rsp)->src_page_req_mutex); 2787 QSIMPLEQ_INIT(&(*rsp)->src_page_requests); 2788 (*rsp)->ram_bytes_total = ram_bytes_total(); 2789 2790 /* 2791 * Count the total number of pages used by ram blocks not including any 2792 * gaps due to alignment or unplugs. 2793 * This must match the initial values of the dirty bitmap.
2794 */ 2795 (*rsp)->migration_dirty_pages = (*rsp)->ram_bytes_total >> TARGET_PAGE_BITS; 2796 ram_state_reset(*rsp); 2797 2798 return true; 2799 } 2800 2801 static void ram_list_init_bitmaps(void) 2802 { 2803 MigrationState *ms = migrate_get_current(); 2804 RAMBlock *block; 2805 unsigned long pages; 2806 uint8_t shift; 2807 2808 /* Skip setting bitmap if there is no RAM */ 2809 if (ram_bytes_total()) { 2810 shift = ms->clear_bitmap_shift; 2811 if (shift > CLEAR_BITMAP_SHIFT_MAX) { 2812 error_report("clear_bitmap_shift (%u) too big, using " 2813 "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX); 2814 shift = CLEAR_BITMAP_SHIFT_MAX; 2815 } else if (shift < CLEAR_BITMAP_SHIFT_MIN) { 2816 error_report("clear_bitmap_shift (%u) too small, using " 2817 "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN); 2818 shift = CLEAR_BITMAP_SHIFT_MIN; 2819 } 2820 2821 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2822 pages = block->max_length >> TARGET_PAGE_BITS; 2823 /* 2824 * The initial dirty bitmap for migration must be set with all 2825 * ones to make sure we'll migrate every guest RAM page to 2826 * the destination. 2827 * Here we set RAMBlock.bmap all to 1 because when we restart a 2828 * new migration after a failed one, 2829 * ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION] may not include 2830 * the whole guest memory. 2831 */ 2832 block->bmap = bitmap_new(pages); 2833 bitmap_set(block->bmap, 0, pages); 2834 if (migrate_mapped_ram()) { 2835 block->file_bmap = bitmap_new(pages); 2836 } 2837 block->clear_bmap_shift = shift; 2838 block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift)); 2839 } 2840 } 2841 } 2842 2843 static void migration_bitmap_clear_discarded_pages(RAMState *rs) 2844 { 2845 unsigned long pages; 2846 RAMBlock *rb; 2847 2848 RCU_READ_LOCK_GUARD(); 2849 2850 RAMBLOCK_FOREACH_NOT_IGNORED(rb) { 2851 pages = ramblock_dirty_bitmap_clear_discarded_pages(rb); 2852 rs->migration_dirty_pages -= pages; 2853 } 2854 } 2855 2856 static bool ram_init_bitmaps(RAMState *rs, Error **errp) 2857 { 2858 bool ret = true; 2859 2860 qemu_mutex_lock_ramlist(); 2861 2862 WITH_RCU_READ_LOCK_GUARD() { 2863 ram_list_init_bitmaps(); 2864 /* We don't use dirty log with background snapshots */ 2865 if (!migrate_background_snapshot()) { 2866 ret = memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION, errp); 2867 if (!ret) { 2868 goto out_unlock; 2869 } 2870 migration_bitmap_sync_precopy(rs, false); 2871 } 2872 } 2873 out_unlock: 2874 qemu_mutex_unlock_ramlist(); 2875 2876 if (!ret) { 2877 ram_bitmaps_destroy(); 2878 return false; 2879 } 2880 2881 /* 2882 * After an eventual first bitmap sync, fixup the initial bitmap 2883 * containing all 1s to exclude any discarded pages from migration. 2884 */ 2885 migration_bitmap_clear_discarded_pages(rs); 2886 return true; 2887 } 2888 2889 static int ram_init_all(RAMState **rsp, Error **errp) 2890 { 2891 if (!ram_state_init(rsp, errp)) { 2892 return -1; 2893 } 2894 2895 if (!xbzrle_init(errp)) { 2896 ram_state_cleanup(rsp); 2897 return -1; 2898 } 2899 2900 if (!ram_init_bitmaps(*rsp, errp)) { 2901 return -1; 2902 } 2903 2904 return 0; 2905 } 2906 2907 static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out) 2908 { 2909 RAMBlock *block; 2910 uint64_t pages = 0; 2911 2912 /* 2913 * Postcopy is not using xbzrle/compression, so no need for that. 2914 * Also, since the source is already halted, we don't need to care 2915 * about dirty page logging either.
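 *
 * (This path is used when preparing to resume a paused postcopy
 * migration: the dirty bitmap has just been re-synchronized with the
 * destination, so the recount below restores an accurate
 * migration_dirty_pages value.)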
2916 */ 2917 2918 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2919 pages += bitmap_count_one(block->bmap, 2920 block->used_length >> TARGET_PAGE_BITS); 2921 } 2922 2923 /* This may not be aligned with current bitmaps. Recalculate. */ 2924 rs->migration_dirty_pages = pages; 2925 2926 ram_state_reset(rs); 2927 2928 /* Update RAMState cache of output QEMUFile */ 2929 rs->pss[RAM_CHANNEL_PRECOPY].pss_channel = out; 2930 2931 trace_ram_state_resume_prepare(pages); 2932 } 2933 2934 /* 2935 * This function clears bits of the free pages reported by the caller from the 2936 * migration dirty bitmap. @addr is the host address corresponding to the 2937 * start of the continuous guest free pages, and @len is the total bytes of 2938 * those pages. 2939 */ 2940 void qemu_guest_free_page_hint(void *addr, size_t len) 2941 { 2942 RAMBlock *block; 2943 ram_addr_t offset; 2944 size_t used_len, start, npages; 2945 2946 /* This function is currently expected to be used during live migration */ 2947 if (!migration_is_setup_or_active()) { 2948 return; 2949 } 2950 2951 for (; len > 0; len -= used_len, addr += used_len) { 2952 block = qemu_ram_block_from_host(addr, false, &offset); 2953 if (unlikely(!block || offset >= block->used_length)) { 2954 /* 2955 * The implementation might not support RAMBlock resize during 2956 * live migration, but it could happen in theory with future 2957 * updates. So we add a check here to capture that case. 2958 */ 2959 error_report_once("%s unexpected error", __func__); 2960 return; 2961 } 2962 2963 if (len <= block->used_length - offset) { 2964 used_len = len; 2965 } else { 2966 used_len = block->used_length - offset; 2967 } 2968 2969 start = offset >> TARGET_PAGE_BITS; 2970 npages = used_len >> TARGET_PAGE_BITS; 2971 2972 qemu_mutex_lock(&ram_state->bitmap_mutex); 2973 /* 2974 * The skipped free pages are equivalent to having been sent, from 2975 * clear_bmap's perspective, so clear the bits from the memory region 2976 * bitmap which are initially set. Otherwise those skipped pages will 2977 * be sent in the next round after syncing from the memory region 2978 * bitmap. 2979 */ 2980 migration_clear_memory_region_dirty_bitmap_range(block, start, npages); 2981 ram_state->migration_dirty_pages -= 2982 bitmap_count_one_with_offset(block->bmap, start, npages); 2983 bitmap_clear(block->bmap, start, npages); 2984 qemu_mutex_unlock(&ram_state->bitmap_mutex); 2985 } 2986 } 2987 2988 #define MAPPED_RAM_HDR_VERSION 1 2989 struct MappedRamHeader { 2990 uint32_t version; 2991 /* 2992 * The target's page size, so we know how many pages are in the 2993 * bitmap. 2994 */ 2995 uint64_t page_size; 2996 /* 2997 * The offset in the migration file where the pages bitmap is 2998 * stored. 2999 */ 3000 uint64_t bitmap_offset; 3001 /* 3002 * The offset in the migration file where the actual pages (data) 3003 * are stored. 3004 */ 3005 uint64_t pages_offset; 3006 } QEMU_PACKED; 3007 typedef struct MappedRamHeader MappedRamHeader; 3008 3009 static void mapped_ram_setup_ramblock(QEMUFile *file, RAMBlock *block) 3010 { 3011 g_autofree MappedRamHeader *header = NULL; 3012 size_t header_size, bitmap_size; 3013 long num_pages; 3014 3015 header = g_new0(MappedRamHeader, 1); 3016 header_size = sizeof(MappedRamHeader); 3017 3018 num_pages = block->used_length >> TARGET_PAGE_BITS; 3019 bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long); 3020 3021 /* 3022 * Save the file offsets of where the bitmap and the pages should 3023 * go as they are written at the end of migration and during the 3024 * iterative phase, respectively.
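 *
 * Resulting per-RAMBlock layout in the migration file (sketch, not to
 * scale):
 *
 *    +--------+--------------+-- padding --+----------------------+
 *    | header | dirty bitmap |             | page data            |
 *    +--------+--------------+-- padding --+----------------------+
 *    ^        ^                            ^
 *    offset   bitmap_offset                pages_offset, aligned to
 *                                          MAPPED_RAM_FILE_OFFSET_ALIGNMENT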
3024 */ 3025 block->bitmap_offset = qemu_get_offset(file) + header_size; 3026 block->pages_offset = ROUND_UP(block->bitmap_offset + 3027 bitmap_size, 3028 MAPPED_RAM_FILE_OFFSET_ALIGNMENT); 3029 3030 header->version = cpu_to_be32(MAPPED_RAM_HDR_VERSION); 3031 header->page_size = cpu_to_be64(TARGET_PAGE_SIZE); 3032 header->bitmap_offset = cpu_to_be64(block->bitmap_offset); 3033 header->pages_offset = cpu_to_be64(block->pages_offset); 3034 3035 qemu_put_buffer(file, (uint8_t *) header, header_size); 3036 3037 /* prepare offset for next ramblock */ 3038 qemu_set_offset(file, block->pages_offset + block->used_length, SEEK_SET); 3039 } 3040 3041 static bool mapped_ram_read_header(QEMUFile *file, MappedRamHeader *header, 3042 Error **errp) 3043 { 3044 size_t ret, header_size = sizeof(MappedRamHeader); 3045 3046 ret = qemu_get_buffer(file, (uint8_t *)header, header_size); 3047 if (ret != header_size) { 3048 error_setg(errp, "Could not read whole mapped-ram migration header " 3049 "(expected %zu, got %zu bytes)", header_size, ret); 3050 return false; 3051 } 3052 3053 /* migration stream is big-endian */ 3054 header->version = be32_to_cpu(header->version); 3055 3056 if (header->version > MAPPED_RAM_HDR_VERSION) { 3057 error_setg(errp, "Migration mapped-ram capability version not " 3058 "supported (expected <= %d, got %d)", MAPPED_RAM_HDR_VERSION, 3059 header->version); 3060 return false; 3061 } 3062 3063 header->page_size = be64_to_cpu(header->page_size); 3064 header->bitmap_offset = be64_to_cpu(header->bitmap_offset); 3065 header->pages_offset = be64_to_cpu(header->pages_offset); 3066 3067 return true; 3068 } 3069 3070 /* 3071 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has a 3072 * long-running RCU critical section. When rcu-reclaims in the code 3073 * start to become numerous it will be necessary to reduce the 3074 * granularity of these critical sections. 3075 */ 3076 3077 /** 3078 * ram_save_setup: Setup RAM for migration 3079 * 3080 * Returns zero to indicate success and negative for error 3081 * 3082 * @f: QEMUFile where to send the data 3083 * @opaque: RAMState pointer 3084 * @errp: pointer to Error*, to store an error if it happens. 3085 */ 3086 static int ram_save_setup(QEMUFile *f, void *opaque, Error **errp) 3087 { 3088 RAMState **rsp = opaque; 3089 RAMBlock *block; 3090 int ret, max_hg_page_size; 3091 3092 if (compress_threads_save_setup()) { 3093 error_setg(errp, "%s: failed to start compress threads", __func__); 3094 return -1; 3095 } 3096 3097 /* migration has already set up the bitmap, reuse it. */ 3098 if (!migration_in_colo_state()) { 3099 if (ram_init_all(rsp, errp) != 0) { 3100 compress_threads_save_cleanup(); 3101 return -1; 3102 } 3103 } 3104 (*rsp)->pss[RAM_CHANNEL_PRECOPY].pss_channel = f; 3105 3106 /* 3107 * ??? Mirrors the previous value of qemu_host_page_size, 3108 * but is this really what was intended for the migration?
3109 */ 3110 max_hg_page_size = MAX(qemu_real_host_page_size(), TARGET_PAGE_SIZE); 3111 3112 WITH_RCU_READ_LOCK_GUARD() { 3113 qemu_put_be64(f, ram_bytes_total_with_ignored() 3114 | RAM_SAVE_FLAG_MEM_SIZE); 3115 3116 RAMBLOCK_FOREACH_MIGRATABLE(block) { 3117 qemu_put_byte(f, strlen(block->idstr)); 3118 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr)); 3119 qemu_put_be64(f, block->used_length); 3120 if (migrate_postcopy_ram() && 3121 block->page_size != max_hg_page_size) { 3122 qemu_put_be64(f, block->page_size); 3123 } 3124 if (migrate_ignore_shared()) { 3125 qemu_put_be64(f, block->mr->addr); 3126 } 3127 3128 if (migrate_mapped_ram()) { 3129 mapped_ram_setup_ramblock(f, block); 3130 } 3131 } 3132 } 3133 3134 ret = rdma_registration_start(f, RAM_CONTROL_SETUP); 3135 if (ret < 0) { 3136 error_setg(errp, "%s: failed to start RDMA registration", __func__); 3137 qemu_file_set_error(f, ret); 3138 return ret; 3139 } 3140 3141 ret = rdma_registration_stop(f, RAM_CONTROL_SETUP); 3142 if (ret < 0) { 3143 error_setg(errp, "%s: failed to stop RDMA registration", __func__); 3144 qemu_file_set_error(f, ret); 3145 return ret; 3146 } 3147 3148 migration_ops = g_malloc0(sizeof(MigrationOps)); 3149 3150 if (migrate_multifd()) { 3151 migration_ops->ram_save_target_page = ram_save_target_page_multifd; 3152 } else { 3153 migration_ops->ram_save_target_page = ram_save_target_page_legacy; 3154 } 3155 3156 bql_unlock(); 3157 ret = multifd_send_sync_main(); 3158 bql_lock(); 3159 if (ret < 0) { 3160 error_setg(errp, "%s: multifd synchronization failed", __func__); 3161 return ret; 3162 } 3163 3164 if (migrate_multifd() && !migrate_multifd_flush_after_each_section() 3165 && !migrate_mapped_ram()) { 3166 qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); 3167 } 3168 3169 qemu_put_be64(f, RAM_SAVE_FLAG_EOS); 3170 ret = qemu_fflush(f); 3171 if (ret < 0) { 3172 error_setg_errno(errp, -ret, "%s failed", __func__); 3173 } 3174 return ret; 3175 } 3176 3177 static void ram_save_file_bmap(QEMUFile *f) 3178 { 3179 RAMBlock *block; 3180 3181 RAMBLOCK_FOREACH_MIGRATABLE(block) { 3182 long num_pages = block->used_length >> TARGET_PAGE_BITS; 3183 long bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long); 3184 3185 qemu_put_buffer_at(f, (uint8_t *)block->file_bmap, bitmap_size, 3186 block->bitmap_offset); 3187 ram_transferred_add(bitmap_size); 3188 3189 /* 3190 * Free the bitmap here to catch any synchronization issues 3191 * with multifd channels. No channels should be sending pages 3192 * after we've written the bitmap to file. 3193 */ 3194 g_free(block->file_bmap); 3195 block->file_bmap = NULL; 3196 } 3197 } 3198 3199 void ramblock_set_file_bmap_atomic(RAMBlock *block, ram_addr_t offset, bool set) 3200 { 3201 if (set) { 3202 set_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap); 3203 } else { 3204 clear_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap); 3205 } 3206 } 3207 3208 /** 3209 * ram_save_iterate: iterative stage for migration 3210 * 3211 * Returns zero to indicate success and negative for error 3212 * 3213 * @f: QEMUFile where to send the data 3214 * @opaque: RAMState pointer 3215 */ 3216 static int ram_save_iterate(QEMUFile *f, void *opaque) 3217 { 3218 RAMState **temp = opaque; 3219 RAMState *rs = *temp; 3220 int ret = 0; 3221 int i; 3222 int64_t t0; 3223 int done = 0; 3224 3225 /* 3226 * We'll hold this lock for a while, but it's okay for two reasons.
* Firstly, the only other thread that could take it is the one that calls 3228 * qemu_guest_free_page_hint(), which should be rare; secondly, see 3229 * MAX_WAIT (if curious, further see commit 4508bd9ed8053ce) below, which 3230 * guarantees that we'll release it on a regular basis. 3231 */ 3232 WITH_QEMU_LOCK_GUARD(&rs->bitmap_mutex) { 3233 WITH_RCU_READ_LOCK_GUARD() { 3234 if (ram_list.version != rs->last_version) { 3235 ram_state_reset(rs); 3236 } 3237 3238 /* Read version before ram_list.blocks */ 3239 smp_rmb(); 3240 3241 ret = rdma_registration_start(f, RAM_CONTROL_ROUND); 3242 if (ret < 0) { 3243 qemu_file_set_error(f, ret); 3244 goto out; 3245 } 3246 3247 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 3248 i = 0; 3249 while ((ret = migration_rate_exceeded(f)) == 0 || 3250 postcopy_has_request(rs)) { 3251 int pages; 3252 3253 if (qemu_file_get_error(f)) { 3254 break; 3255 } 3256 3257 pages = ram_find_and_save_block(rs); 3258 /* no more pages to send */ 3259 if (pages == 0) { 3260 done = 1; 3261 break; 3262 } 3263 3264 if (pages < 0) { 3265 qemu_file_set_error(f, pages); 3266 break; 3267 } 3268 3269 rs->target_page_count += pages; 3270 3271 /* 3272 * During postcopy, it is necessary to make sure one whole host 3273 * page is sent in one chunk. 3274 */ 3275 if (migrate_postcopy_ram()) { 3276 compress_flush_data(); 3277 } 3278 3279 /* 3280 * We want to check in the 1st loop, just in case it was the 1st 3281 * time and we had to sync the dirty bitmap. 3282 * qemu_clock_get_ns() is a bit expensive, so we only check once 3283 * every few iterations. 3284 */ 3285 if ((i & 63) == 0) { 3286 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 3287 1000000; 3288 if (t1 > MAX_WAIT) { 3289 trace_ram_save_iterate_big_wait(t1, i); 3290 break; 3291 } 3292 } 3293 i++; 3294 } 3295 } 3296 } 3297 3298 /* 3299 * Must occur before EOS (or any QEMUFile operation) 3300 * because of RDMA protocol.
3301 */ 3302 ret = rdma_registration_stop(f, RAM_CONTROL_ROUND); 3303 if (ret < 0) { 3304 qemu_file_set_error(f, ret); 3305 } 3306 3307 out: 3308 if (ret >= 0 3309 && migration_is_setup_or_active()) { 3310 if (migrate_multifd() && migrate_multifd_flush_after_each_section() && 3311 !migrate_mapped_ram()) { 3312 ret = multifd_send_sync_main(); 3313 if (ret < 0) { 3314 return ret; 3315 } 3316 } 3317 3318 qemu_put_be64(f, RAM_SAVE_FLAG_EOS); 3319 ram_transferred_add(8); 3320 ret = qemu_fflush(f); 3321 } 3322 if (ret < 0) { 3323 return ret; 3324 } 3325 3326 return done; 3327 } 3328 3329 /** 3330 * ram_save_complete: function called to send the remaining amount of RAM 3331 * 3332 * Returns zero to indicate success or negative on error 3333 * 3334 * Called with the BQL 3335 * 3336 * @f: QEMUFile where to send the data 3337 * @opaque: RAMState pointer 3338 */ 3339 static int ram_save_complete(QEMUFile *f, void *opaque) 3340 { 3341 RAMState **temp = opaque; 3342 RAMState *rs = *temp; 3343 int ret = 0; 3344 3345 rs->last_stage = !migration_in_colo_state(); 3346 3347 WITH_RCU_READ_LOCK_GUARD() { 3348 if (!migration_in_postcopy()) { 3349 migration_bitmap_sync_precopy(rs, true); 3350 } 3351 3352 ret = rdma_registration_start(f, RAM_CONTROL_FINISH); 3353 if (ret < 0) { 3354 qemu_file_set_error(f, ret); 3355 return ret; 3356 } 3357 3358 /* try transferring iterative blocks of memory */ 3359 3360 /* flush all remaining blocks regardless of rate limiting */ 3361 qemu_mutex_lock(&rs->bitmap_mutex); 3362 while (true) { 3363 int pages; 3364 3365 pages = ram_find_and_save_block(rs); 3366 /* no more blocks to send */ 3367 if (pages == 0) { 3368 break; 3369 } 3370 if (pages < 0) { 3371 qemu_mutex_unlock(&rs->bitmap_mutex); 3372 return pages; 3373 } 3374 } 3375 qemu_mutex_unlock(&rs->bitmap_mutex); 3376 3377 compress_flush_data(); 3378 3379 ret = rdma_registration_stop(f, RAM_CONTROL_FINISH); 3380 if (ret < 0) { 3381 qemu_file_set_error(f, ret); 3382 return ret; 3383 } 3384 } 3385 3386 ret = multifd_send_sync_main(); 3387 if (ret < 0) { 3388 return ret; 3389 } 3390 3391 if (migrate_mapped_ram()) { 3392 ram_save_file_bmap(f); 3393 3394 if (qemu_file_get_error(f)) { 3395 Error *local_err = NULL; 3396 int err = qemu_file_get_error_obj(f, &local_err); 3397 3398 error_reportf_err(local_err, "Failed to write bitmap to file: "); 3399 return -err; 3400 } 3401 } 3402 3403 if (migrate_multifd() && !migrate_multifd_flush_after_each_section() && 3404 !migrate_mapped_ram()) { 3405 qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); 3406 } 3407 qemu_put_be64(f, RAM_SAVE_FLAG_EOS); 3408 return qemu_fflush(f); 3409 } 3410 3411 static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy, 3412 uint64_t *can_postcopy) 3413 { 3414 RAMState **temp = opaque; 3415 RAMState *rs = *temp; 3416 3417 uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; 3418 3419 if (migrate_postcopy_ram()) { 3420 /* We can do postcopy, and all the data is postcopiable */ 3421 *can_postcopy += remaining_size; 3422 } else { 3423 *must_precopy += remaining_size; 3424 } 3425 } 3426 3427 static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy, 3428 uint64_t *can_postcopy) 3429 { 3430 RAMState **temp = opaque; 3431 RAMState *rs = *temp; 3432 uint64_t remaining_size; 3433 3434 if (!migration_in_postcopy()) { 3435 bql_lock(); 3436 WITH_RCU_READ_LOCK_GUARD() { 3437 migration_bitmap_sync_precopy(rs, false); 3438 } 3439 bql_unlock(); 3440 } 3441 3442 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; 3443 3444
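    /*
     * Same split as in ram_state_pending_estimate() above: with
     * postcopy-ram enabled all remaining RAM is postcopiable, otherwise
     * everything must be sent before the switchover.
     */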
if (migrate_postcopy_ram()) { 3445 /* We can do postcopy, and all the data is postcopiable */ 3446 *can_postcopy += remaining_size; 3447 } else { 3448 *must_precopy += remaining_size; 3449 } 3450 } 3451 3452 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host) 3453 { 3454 unsigned int xh_len; 3455 int xh_flags; 3456 uint8_t *loaded_data; 3457 3458 /* extract RLE header */ 3459 xh_flags = qemu_get_byte(f); 3460 xh_len = qemu_get_be16(f); 3461 3462 if (xh_flags != ENCODING_FLAG_XBZRLE) { 3463 error_report("Failed to load XBZRLE page - wrong compression!"); 3464 return -1; 3465 } 3466 3467 if (xh_len > TARGET_PAGE_SIZE) { 3468 error_report("Failed to load XBZRLE page - len overflow!"); 3469 return -1; 3470 } 3471 loaded_data = XBZRLE.decoded_buf; 3472 /* load data and decode */ 3473 /* it can change loaded_data to point to an internal buffer */ 3474 qemu_get_buffer_in_place(f, &loaded_data, xh_len); 3475 3476 /* decode RLE */ 3477 if (xbzrle_decode_buffer(loaded_data, xh_len, host, 3478 TARGET_PAGE_SIZE) == -1) { 3479 error_report("Failed to load XBZRLE page - decode error!"); 3480 return -1; 3481 } 3482 3483 return 0; 3484 } 3485 3486 /** 3487 * ram_block_from_stream: read a RAMBlock id from the migration stream 3488 * 3489 * Must be called from within an RCU critical section. 3490 * 3491 * Returns a pointer from within the RCU-protected ram_list. 3492 * 3493 * @mis: the migration incoming state pointer 3494 * @f: QEMUFile where to read the data from 3495 * @flags: Page flags (mostly to see if it's a continuation of the previous block) 3496 * @channel: the channel we're using 3497 */ 3498 static inline RAMBlock *ram_block_from_stream(MigrationIncomingState *mis, 3499 QEMUFile *f, int flags, 3500 int channel) 3501 { 3502 RAMBlock *block = mis->last_recv_block[channel]; 3503 char id[256]; 3504 uint8_t len; 3505 3506 if (flags & RAM_SAVE_FLAG_CONTINUE) { 3507 if (!block) { 3508 error_report("Ack, bad migration stream!"); 3509 return NULL; 3510 } 3511 return block; 3512 } 3513 3514 len = qemu_get_byte(f); 3515 qemu_get_buffer(f, (uint8_t *)id, len); 3516 id[len] = 0; 3517 3518 block = qemu_ram_block_by_name(id); 3519 if (!block) { 3520 error_report("Can't find block %s", id); 3521 return NULL; 3522 } 3523 3524 if (migrate_ram_is_ignored(block)) { 3525 error_report("block %s should not be migrated !", id); 3526 return NULL; 3527 } 3528 3529 mis->last_recv_block[channel] = block; 3530 3531 return block; 3532 } 3533 3534 static inline void *host_from_ram_block_offset(RAMBlock *block, 3535 ram_addr_t offset) 3536 { 3537 if (!offset_in_ramblock(block, offset)) { 3538 return NULL; 3539 } 3540 3541 return block->host + offset; 3542 } 3543 3544 static void *host_page_from_ram_block_offset(RAMBlock *block, 3545 ram_addr_t offset) 3546 { 3547 /* Note: Explicitly no check against offset_in_ramblock().
*/ 3548 return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block->host + offset), 3549 block->page_size); 3550 } 3551 3552 static ram_addr_t host_page_offset_from_ram_block_offset(RAMBlock *block, 3553 ram_addr_t offset) 3554 { 3555 return ((uintptr_t)block->host + offset) & (block->page_size - 1); 3556 } 3557 3558 void colo_record_bitmap(RAMBlock *block, ram_addr_t *normal, uint32_t pages) 3559 { 3560 qemu_mutex_lock(&ram_state->bitmap_mutex); 3561 for (int i = 0; i < pages; i++) { 3562 ram_addr_t offset = normal[i]; 3563 ram_state->migration_dirty_pages += !test_and_set_bit( 3564 offset >> TARGET_PAGE_BITS, 3565 block->bmap); 3566 } 3567 qemu_mutex_unlock(&ram_state->bitmap_mutex); 3568 } 3569 3570 static inline void *colo_cache_from_block_offset(RAMBlock *block, 3571 ram_addr_t offset, bool record_bitmap) 3572 { 3573 if (!offset_in_ramblock(block, offset)) { 3574 return NULL; 3575 } 3576 if (!block->colo_cache) { 3577 error_report("%s: colo_cache is NULL in block :%s", 3578 __func__, block->idstr); 3579 return NULL; 3580 } 3581 3582 /* 3583 * During a COLO checkpoint, we need a bitmap of these migrated pages. 3584 * It helps us decide which pages in the RAM cache should be flushed 3585 * into the VM's RAM later. 3586 */ 3587 if (record_bitmap) { 3588 colo_record_bitmap(block, &offset, 1); 3589 } 3590 return block->colo_cache + offset; 3591 } 3592 3593 /** 3594 * ram_handle_zero: handle the zero page case 3595 * 3596 * If a page (or a whole RDMA chunk) has been 3597 * determined to be zero, then zap it. 3598 * 3599 * @host: host address for the zero page 3601 * @size: size of the zero page 3602 */ 3603 void ram_handle_zero(void *host, uint64_t size) 3604 { 3605 if (!buffer_is_zero(host, size)) { 3606 memset(host, 0, size); 3607 } 3608 } 3609 3610 static void colo_init_ram_state(void) 3611 { 3612 Error *local_err = NULL; 3613 3614 if (!ram_state_init(&ram_state, &local_err)) { 3615 error_report_err(local_err); 3616 } 3617 } 3618 3619 /* 3620 * COLO cache: this is for the secondary VM, we cache the whole 3621 * memory of the secondary VM. The global lock needs to be held 3622 * to call this helper. 3623 */ 3624 int colo_init_ram_cache(void) 3625 { 3626 RAMBlock *block; 3627 3628 WITH_RCU_READ_LOCK_GUARD() { 3629 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3630 block->colo_cache = qemu_anon_ram_alloc(block->used_length, 3631 NULL, false, false); 3632 if (!block->colo_cache) { 3633 error_report("%s: Can't alloc memory for COLO cache of block %s," 3634 "size 0x" RAM_ADDR_FMT, __func__, block->idstr, 3635 block->used_length); 3636 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3637 if (block->colo_cache) { 3638 qemu_anon_ram_free(block->colo_cache, block->used_length); 3639 block->colo_cache = NULL; 3640 } 3641 } 3642 return -errno; 3643 } 3644 if (!machine_dump_guest_core(current_machine)) { 3645 qemu_madvise(block->colo_cache, block->used_length, 3646 QEMU_MADV_DONTDUMP); 3647 } 3648 } 3649 } 3650 3651 /* 3652 * Record the dirty pages sent by the PVM; we use this dirty bitmap to 3653 * decide which pages in the cache should be flushed into the SVM's RAM. 3654 * Here we use the same name 'ram_bitmap' as for migration.
3655 */ 3656 if (ram_bytes_total()) { 3657 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3658 unsigned long pages = block->max_length >> TARGET_PAGE_BITS; 3659 block->bmap = bitmap_new(pages); 3660 } 3661 } 3662 3663 colo_init_ram_state(); 3664 return 0; 3665 } 3666 3667 /* TODO: duplicated with ram_init_bitmaps */ 3668 void colo_incoming_start_dirty_log(void) 3669 { 3670 RAMBlock *block = NULL; 3671 Error *local_err = NULL; 3672 3673 /* For memory_global_dirty_log_start below. */ 3674 bql_lock(); 3675 qemu_mutex_lock_ramlist(); 3676 3677 memory_global_dirty_log_sync(false); 3678 WITH_RCU_READ_LOCK_GUARD() { 3679 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3680 ramblock_sync_dirty_bitmap(ram_state, block); 3681 /* Discard this dirty bitmap record */ 3682 bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS); 3683 } 3684 if (!memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION, 3685 &local_err)) { 3686 error_report_err(local_err); 3687 } 3688 } 3689 ram_state->migration_dirty_pages = 0; 3690 qemu_mutex_unlock_ramlist(); 3691 bql_unlock(); 3692 } 3693 3694 /* The global lock needs to be held to call this helper */ 3695 void colo_release_ram_cache(void) 3696 { 3697 RAMBlock *block; 3698 3699 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION); 3700 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3701 g_free(block->bmap); 3702 block->bmap = NULL; 3703 } 3704 3705 WITH_RCU_READ_LOCK_GUARD() { 3706 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3707 if (block->colo_cache) { 3708 qemu_anon_ram_free(block->colo_cache, block->used_length); 3709 block->colo_cache = NULL; 3710 } 3711 } 3712 } 3713 ram_state_cleanup(&ram_state); 3714 } 3715 3716 /** 3717 * ram_load_setup: Setup RAM for migration incoming side 3718 * 3719 * Returns zero to indicate success and negative for error 3720 * 3721 * @f: QEMUFile where to receive the data 3722 * @opaque: RAMState pointer 3723 * @errp: pointer to Error*, to store an error if it happens. 3724 */ 3725 static int ram_load_setup(QEMUFile *f, void *opaque, Error **errp) 3726 { 3727 xbzrle_load_setup(); 3728 ramblock_recv_map_init(); 3729 3730 return 0; 3731 } 3732 3733 static int ram_load_cleanup(void *opaque) 3734 { 3735 RAMBlock *rb; 3736 3737 RAMBLOCK_FOREACH_NOT_IGNORED(rb) { 3738 qemu_ram_block_writeback(rb); 3739 } 3740 3741 xbzrle_load_cleanup(); 3742 3743 RAMBLOCK_FOREACH_NOT_IGNORED(rb) { 3744 g_free(rb->receivedmap); 3745 rb->receivedmap = NULL; 3746 } 3747 3748 return 0; 3749 } 3750 3751 /** 3752 * ram_postcopy_incoming_init: allocate postcopy data structures 3753 * 3754 * Returns 0 for success and negative if there was an error 3755 * 3756 * @mis: current migration incoming state 3757 * 3758 * Allocate data structures etc needed by incoming migration with 3759 * postcopy-ram. postcopy-ram's similarly named 3760 * postcopy_ram_incoming_init does the work. 3761 */ 3762 int ram_postcopy_incoming_init(MigrationIncomingState *mis) 3763 { 3764 return postcopy_ram_incoming_init(mis); 3765 } 3766 3767 /** 3768 * ram_load_postcopy: load a page in postcopy case 3769 * 3770 * Returns 0 for success or -errno in case of error 3771 * 3772 * Called in postcopy mode by ram_load(). 3773 * rcu_read_lock is taken prior to this being called.
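 *
 * Target pages are accumulated into a per-channel temporary host page
 * and only placed atomically (postcopy_place_page_zero() for the
 * all-zero case, postcopy_place_page() otherwise) once every target
 * page of that host page has arrived; see the tmp_page handling below.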
3774 * 3775 * @f: QEMUFile where to send the data 3776 * @channel: the channel to use for loading 3777 */ 3778 int ram_load_postcopy(QEMUFile *f, int channel) 3779 { 3780 int flags = 0, ret = 0; 3781 bool place_needed = false; 3782 bool matches_target_page_size = false; 3783 MigrationIncomingState *mis = migration_incoming_get_current(); 3784 PostcopyTmpPage *tmp_page = &mis->postcopy_tmp_pages[channel]; 3785 3786 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) { 3787 ram_addr_t addr; 3788 void *page_buffer = NULL; 3789 void *place_source = NULL; 3790 RAMBlock *block = NULL; 3791 uint8_t ch; 3792 int len; 3793 3794 addr = qemu_get_be64(f); 3795 3796 /* 3797 * If there is a QEMUFile error, we should stop here; "addr" 3798 * may be invalid. 3799 */ 3800 ret = qemu_file_get_error(f); 3801 if (ret) { 3802 break; 3803 } 3804 3805 flags = addr & ~TARGET_PAGE_MASK; 3806 addr &= TARGET_PAGE_MASK; 3807 3808 trace_ram_load_postcopy_loop(channel, (uint64_t)addr, flags); 3809 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE | 3810 RAM_SAVE_FLAG_COMPRESS_PAGE)) { 3811 block = ram_block_from_stream(mis, f, flags, channel); 3812 if (!block) { 3813 ret = -EINVAL; 3814 break; 3815 } 3816 3817 /* 3818 * Relying on used_length is racy and can result in false positives. 3819 * We might place pages beyond used_length in case RAM was shrunk 3820 * while in postcopy, which is fine - trying to place via 3821 * UFFDIO_COPY/UFFDIO_ZEROPAGE will never segfault. 3822 */ 3823 if (!block->host || addr >= block->postcopy_length) { 3824 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr); 3825 ret = -EINVAL; 3826 break; 3827 } 3828 tmp_page->target_pages++; 3829 matches_target_page_size = block->page_size == TARGET_PAGE_SIZE; 3830 /* 3831 * Postcopy requires that we place whole host pages atomically; 3832 * these may be huge pages for RAMBlocks that are backed by 3833 * hugetlbfs. 3834 * To make it atomic, the data is read into a temporary page 3835 * that's moved into place later. 3836 * The migration protocol uses, possibly smaller, target pages; 3837 * however, the source ensures it always sends all the components 3838 * of a host page in one chunk. 3839 */ 3840 page_buffer = tmp_page->tmp_huge_page + 3841 host_page_offset_from_ram_block_offset(block, addr); 3842 /* If all TP are zero then we can optimise the placement */ 3843 if (tmp_page->target_pages == 1) { 3844 tmp_page->host_addr = 3845 host_page_from_ram_block_offset(block, addr); 3846 } else if (tmp_page->host_addr != 3847 host_page_from_ram_block_offset(block, addr)) { 3848 /* not the 1st TP within the HP */ 3849 error_report("Non-same host page detected on channel %d: " 3850 "Target host page %p, received host page %p " 3851 "(rb %s offset 0x"RAM_ADDR_FMT" target_pages %d)", 3852 channel, tmp_page->host_addr, 3853 host_page_from_ram_block_offset(block, addr), 3854 block->idstr, addr, tmp_page->target_pages); 3855 ret = -EINVAL; 3856 break; 3857 } 3858 3859 /* 3860 * If it's the last part of a host page then we place the host 3861 * page 3862 */ 3863 if (tmp_page->target_pages == 3864 (block->page_size / TARGET_PAGE_SIZE)) { 3865 place_needed = true; 3866 } 3867 place_source = tmp_page->tmp_huge_page; 3868 } 3869 3870 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) { 3871 case RAM_SAVE_FLAG_ZERO: 3872 ch = qemu_get_byte(f); 3873 if (ch != 0) { 3874 error_report("Found a zero page with value %d", ch); 3875 ret = -EINVAL; 3876 break; 3877 } 3878 /* 3879 * We can skip setting page_buffer when this is a zero page 3880 * and block->page_size == TARGET_PAGE_SIZE.
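 *
 * (In that case tmp_page->all_zero stays true, so the whole host
 * page is later placed with postcopy_place_page_zero() and the
 * zero bytes are never copied at all.)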
3881              */
3882             if (!matches_target_page_size) {
3883                 memset(page_buffer, ch, TARGET_PAGE_SIZE);
3884             }
3885             break;
3886
3887         case RAM_SAVE_FLAG_PAGE:
3888             tmp_page->all_zero = false;
3889             if (!matches_target_page_size) {
3890                 /* For huge pages, we always use temporary buffer */
3891                 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
3892             } else {
3893                 /*
3894                  * For small pages that match the target page size, we
3895                  * avoid the qemu_file copy.  Instead we directly use
3896                  * the buffer of QEMUFile to place the page.  Note: we
3897                  * must not do any QEMUFile operation before using that
3898                  * buffer, to make sure the buffer is valid when
3899                  * placing the page.
3900                  */
3901                 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
3902                                          TARGET_PAGE_SIZE);
3903             }
3904             break;
3905         case RAM_SAVE_FLAG_COMPRESS_PAGE:
3906             tmp_page->all_zero = false;
3907             len = qemu_get_be32(f);
3908             if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
3909                 error_report("Invalid compressed data length: %d", len);
3910                 ret = -EINVAL;
3911                 break;
3912             }
3913             decompress_data_with_multi_threads(f, page_buffer, len);
3914             break;
3915         case RAM_SAVE_FLAG_MULTIFD_FLUSH:
3916             multifd_recv_sync_main();
3917             break;
3918         case RAM_SAVE_FLAG_EOS:
3919             /* normal exit */
3920             if (migrate_multifd() &&
3921                 migrate_multifd_flush_after_each_section()) {
3922                 multifd_recv_sync_main();
3923             }
3924             break;
3925         default:
3926             error_report("Unknown combination of migration flags: 0x%x"
3927                          " (postcopy mode)", flags);
3928             ret = -EINVAL;
3929             break;
3930         }
3931
3932         /* Got the whole host page, wait for decompress before placing. */
3933         if (place_needed) {
3934             ret |= wait_for_decompress_done();
3935         }
3936
3937         /* Check for any possible file errors */
3938         if (!ret && qemu_file_get_error(f)) {
3939             ret = qemu_file_get_error(f);
3940         }
3941
3942         if (!ret && place_needed) {
3943             if (tmp_page->all_zero) {
3944                 ret = postcopy_place_page_zero(mis, tmp_page->host_addr, block);
3945             } else {
3946                 ret = postcopy_place_page(mis, tmp_page->host_addr,
3947                                           place_source, block);
3948             }
3949             place_needed = false;
3950             postcopy_temp_page_reset(tmp_page);
3951         }
3952     }
3953
3954     return ret;
3955 }
3956
3957 static bool postcopy_is_running(void)
3958 {
3959     PostcopyState ps = postcopy_state_get();
3960     return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
3961 }
3962
3963 /*
3964  * Flush the content of the RAM cache into the SVM's memory.
3965  * Only flush pages that have been dirtied by the PVM, the SVM, or both.
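 * The flush below walks each block's dirty bitmap and copies runs of
 * dirty pages from block->colo_cache back into block->host, so a run of
 * num dirty pages becomes a single memcpy of num * TARGET_PAGE_SIZE bytes.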
3966  */
3967 void colo_flush_ram_cache(void)
3968 {
3969     RAMBlock *block = NULL;
3970     void *dst_host;
3971     void *src_host;
3972     unsigned long offset = 0;
3973
3974     memory_global_dirty_log_sync(false);
3975     qemu_mutex_lock(&ram_state->bitmap_mutex);
3976     WITH_RCU_READ_LOCK_GUARD() {
3977         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3978             ramblock_sync_dirty_bitmap(ram_state, block);
3979         }
3980     }
3981
3982     trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
3983     WITH_RCU_READ_LOCK_GUARD() {
3984         block = QLIST_FIRST_RCU(&ram_list.blocks);
3985
3986         while (block) {
3987             unsigned long num = 0;
3988
3989             offset = colo_bitmap_find_dirty(ram_state, block, offset, &num);
3990             if (!offset_in_ramblock(block,
3991                                     ((ram_addr_t)offset) << TARGET_PAGE_BITS)) {
3992                 offset = 0;
3993                 num = 0;
3994                 block = QLIST_NEXT_RCU(block, next);
3995             } else {
3996                 unsigned long i = 0;
3997
3998                 for (i = 0; i < num; i++) {
3999                     migration_bitmap_clear_dirty(ram_state, block, offset + i);
4000                 }
4001                 dst_host = block->host
4002                          + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
4003                 src_host = block->colo_cache
4004                          + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
4005                 memcpy(dst_host, src_host, TARGET_PAGE_SIZE * num);
4006                 offset += num;
4007             }
4008         }
4009     }
4010     qemu_mutex_unlock(&ram_state->bitmap_mutex);
4011     trace_colo_flush_ram_cache_end();
4012 }
4013
4014 static size_t ram_load_multifd_pages(void *host_addr, size_t size,
4015                                      uint64_t offset)
4016 {
4017     MultiFDRecvData *data = multifd_get_recv_data();
4018
4019     data->opaque = host_addr;
4020     data->file_offset = offset;
4021     data->size = size;
4022
4023     if (!multifd_recv()) {
4024         return 0;
4025     }
4026
4027     return size;
4028 }
4029
4030 static bool read_ramblock_mapped_ram(QEMUFile *f, RAMBlock *block,
4031                                      long num_pages, unsigned long *bitmap,
4032                                      Error **errp)
4033 {
4034     ERRP_GUARD();
4035     unsigned long set_bit_idx, clear_bit_idx;
4036     ram_addr_t offset;
4037     void *host;
4038     size_t read, unread, size;
4039
4040     for (set_bit_idx = find_first_bit(bitmap, num_pages);
4041          set_bit_idx < num_pages;
4042          set_bit_idx = find_next_bit(bitmap, num_pages, clear_bit_idx + 1)) {
4043
4044         clear_bit_idx = find_next_zero_bit(bitmap, num_pages, set_bit_idx + 1);
4045
4046         unread = TARGET_PAGE_SIZE * (clear_bit_idx - set_bit_idx);
4047         offset = set_bit_idx << TARGET_PAGE_BITS;
4048
4049         while (unread > 0) {
4050             host = host_from_ram_block_offset(block, offset);
4051             if (!host) {
4052                 error_setg(errp, "page outside of ramblock %s range",
4053                            block->idstr);
4054                 return false;
4055             }
4056
4057             size = MIN(unread, MAPPED_RAM_LOAD_BUF_SIZE);
4058
4059             if (migrate_multifd()) {
4060                 read = ram_load_multifd_pages(host, size,
4061                                               block->pages_offset + offset);
4062             } else {
4063                 read = qemu_get_buffer_at(f, host, size,
4064                                           block->pages_offset + offset);
4065             }
4066
4067             if (!read) {
4068                 goto err;
4069             }
4070             offset += read;
4071             unread -= read;
4072         }
4073     }
4074
4075     return true;
4076
4077 err:
4078     qemu_file_get_error_obj(f, errp);
4079     error_prepend(errp, "(%s) failed to read page " RAM_ADDR_FMT
4080                   " from file offset %" PRIx64 ": ", block->idstr, offset,
4081                   block->pages_offset + offset);
4082     return false;
4083 }
4084
4085 static void parse_ramblock_mapped_ram(QEMUFile *f, RAMBlock *block,
4086                                       ram_addr_t length, Error **errp)
4087 {
4088     g_autofree unsigned long *bitmap = NULL;
4089     MappedRamHeader header;
4090     size_t bitmap_size;
4091     long num_pages;
4092
4093     if (!mapped_ram_read_header(f, &header, errp)) {
4094         return;
4095     }
4096
4097     block->pages_offset = header.pages_offset;
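    /*
     * header was filled in by mapped_ram_read_header() above; its
     * pages_offset and bitmap_offset fields locate this block's pages
     * region and dirty bitmap within the migration file.
     */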
4098
4099     /*
4100      * Check the alignment of the file region that contains pages. We
4101      * don't enforce MAPPED_RAM_FILE_OFFSET_ALIGNMENT to allow that
4102      * value to change in the future. Only do a sanity check using
4103      * page-size alignment.
4104      */
4105     if (!QEMU_IS_ALIGNED(block->pages_offset, TARGET_PAGE_SIZE)) {
4106         error_setg(errp,
4107                    "Error reading ramblock %s pages, region has bad alignment",
4108                    block->idstr);
4109         return;
4110     }
4111
4112     num_pages = length / header.page_size;
4113     bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long);
4114
4115     bitmap = g_malloc0(bitmap_size);
4116     if (qemu_get_buffer_at(f, (uint8_t *)bitmap, bitmap_size,
4117                            header.bitmap_offset) != bitmap_size) {
4118         error_setg(errp, "Error reading dirty bitmap");
4119         return;
4120     }
4121
4122     if (!read_ramblock_mapped_ram(f, block, num_pages, bitmap, errp)) {
4123         return;
4124     }
4125
4126     /* Skip pages array */
4127     qemu_set_offset(f, block->pages_offset + length, SEEK_SET);
4128
4129     return;
4130 }
4131
4132 static int parse_ramblock(QEMUFile *f, RAMBlock *block, ram_addr_t length)
4133 {
4134     int ret = 0;
4135     /* ADVISE comes earlier; it shows the source has the postcopy capability enabled */
4136     bool postcopy_advised = migration_incoming_postcopy_advised();
4137     int max_hg_page_size;
4138     Error *local_err = NULL;
4139
4140     assert(block);
4141
4142     if (migrate_mapped_ram()) {
4143         parse_ramblock_mapped_ram(f, block, length, &local_err);
4144         if (local_err) {
4145             error_report_err(local_err);
4146             return -EINVAL;
4147         }
4148         return 0;
4149     }
4150
4151     if (!qemu_ram_is_migratable(block)) {
4152         error_report("block %s should not be migrated!", block->idstr);
4153         return -EINVAL;
4154     }
4155
4156     if (length != block->used_length) {
4157         ret = qemu_ram_resize(block, length, &local_err);
4158         if (local_err) {
4159             error_report_err(local_err);
4160             return ret;
4161         }
4162     }
4163
4164     /*
4165      * ??? Mirrors the previous value of qemu_host_page_size,
4166      * but is this really what was intended for the migration?
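     * (For example, on a host with 64k pages emulating a target with 4k
     * pages, max_hg_page_size below evaluates to 64k.)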
4167      */
4168     max_hg_page_size = MAX(qemu_real_host_page_size(), TARGET_PAGE_SIZE);
4169
4170     /* For postcopy we need to check hugepage sizes match */
4171     if (postcopy_advised && migrate_postcopy_ram() &&
4172         block->page_size != max_hg_page_size) {
4173         uint64_t remote_page_size = qemu_get_be64(f);
4174         if (remote_page_size != block->page_size) {
4175             error_report("Mismatched RAM page size %s "
4176                          "(local) %zd != %" PRId64, block->idstr,
4177                          block->page_size, remote_page_size);
4178             return -EINVAL;
4179         }
4180     }
4181     if (migrate_ignore_shared()) {
4182         hwaddr addr = qemu_get_be64(f);
4183         if (migrate_ram_is_ignored(block) &&
4184             block->mr->addr != addr) {
4185             error_report("Mismatched GPAs for block %s "
4186                          "%" PRId64 " != %" PRId64, block->idstr,
4187                          (uint64_t)addr, (uint64_t)block->mr->addr);
4188             return -EINVAL;
4189         }
4190     }
4191     ret = rdma_block_notification_handle(f, block->idstr);
4192     if (ret < 0) {
4193         qemu_file_set_error(f, ret);
4194     }
4195
4196     return ret;
4197 }
4198
4199 static int parse_ramblocks(QEMUFile *f, ram_addr_t total_ram_bytes)
4200 {
4201     int ret = 0;
4202
4203     /* Synchronize RAM block list */
4204     while (!ret && total_ram_bytes) {
4205         RAMBlock *block;
4206         char id[256];
4207         ram_addr_t length;
4208         int len = qemu_get_byte(f);
4209
4210         qemu_get_buffer(f, (uint8_t *)id, len);
4211         id[len] = 0;
4212         length = qemu_get_be64(f);
4213
4214         block = qemu_ram_block_by_name(id);
4215         if (block) {
4216             ret = parse_ramblock(f, block, length);
4217         } else {
4218             error_report("Unknown ramblock \"%s\", cannot accept "
4219                          "migration", id);
4220             ret = -EINVAL;
4221         }
4222         total_ram_bytes -= length;
4223     }
4224
4225     return ret;
4226 }
4227
4228 /**
4229  * ram_load_precopy: load pages in precopy case
4230  *
4231  * Returns 0 for success or -errno in case of error
4232  *
4233  * Called in precopy mode by ram_load().
4234  * rcu_read_lock is taken prior to this being called.
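 *
 * The loop below keeps consuming records until RAM_SAVE_FLAG_EOS is seen
 * or a stream error occurs; a RAM_SAVE_FLAG_MEM_SIZE record carries the
 * ramblock list (handled by parse_ramblocks()) rather than page data.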
4235  *
4236  * @f: QEMUFile where to receive the data
4237  */
4238 static int ram_load_precopy(QEMUFile *f)
4239 {
4240     MigrationIncomingState *mis = migration_incoming_get_current();
4241     int flags = 0, ret = 0, invalid_flags = 0, len = 0, i = 0;
4242
4243     if (!migrate_compress()) {
4244         invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
4245     }
4246
4247     if (migrate_mapped_ram()) {
4248         invalid_flags |= (RAM_SAVE_FLAG_HOOK | RAM_SAVE_FLAG_MULTIFD_FLUSH |
4249                           RAM_SAVE_FLAG_PAGE | RAM_SAVE_FLAG_XBZRLE |
4250                           RAM_SAVE_FLAG_ZERO);
4251     }
4252
4253     while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
4254         ram_addr_t addr;
4255         void *host = NULL, *host_bak = NULL;
4256         uint8_t ch;
4257
4258         /*
4259          * Yield periodically to let the main loop run, but an iteration of
4260          * the main loop is expensive, so only do it every so many
4261          * iterations.
4262          */
4263         if ((i & 32767) == 0 && qemu_in_coroutine()) {
4264             aio_co_schedule(qemu_get_current_aio_context(),
4265                             qemu_coroutine_self());
4266             qemu_coroutine_yield();
4267         }
4268         i++;
4269
4270         addr = qemu_get_be64(f);
4271         ret = qemu_file_get_error(f);
4272         if (ret) {
4273             error_report("Getting RAM address failed");
4274             break;
4275         }
4276
4277         flags = addr & ~TARGET_PAGE_MASK;
4278         addr &= TARGET_PAGE_MASK;
4279
4280         if (flags & invalid_flags) {
4281             error_report("Unexpected RAM flags: %d", flags & invalid_flags);
4282
4283             if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
4284                 error_report("Received an unexpected compressed page");
4285             }
4286
4287             ret = -EINVAL;
4288             break;
4289         }
4290
4291         if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
4292                      RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
4293             RAMBlock *block = ram_block_from_stream(mis, f, flags,
4294                                                     RAM_CHANNEL_PRECOPY);
4295
4296             host = host_from_ram_block_offset(block, addr);
4297             /*
4298              * After going into the COLO stage, we should not load the page
4299              * into the SVM's memory directly; we put it into colo_cache first.
4300              * NOTE: We need to keep a copy of the SVM's ram in colo_cache.
4301              * Previously, we copied all this memory in the COLO preparation
4302              * stage, while the VM was stopped, which is time-consuming.
4303              * Here we optimize it by backing up every page during the
4304              * migration process while COLO is enabled. Although this affects
4305              * migration speed, it clearly reduces the downtime of backing
4306              * up all the SVM's memory in the COLO preparation stage.
4307              */
4308             if (migration_incoming_colo_enabled()) {
4309                 if (migration_incoming_in_colo_state()) {
4310                     /* In COLO stage, put all pages into cache temporarily */
4311                     host = colo_cache_from_block_offset(block, addr, true);
4312                 } else {
4313                     /*
4314                      * In migration stage but before COLO stage,
4315                      * put all pages into both the cache and the SVM's memory.
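                     * host_bak then points at the colo_cache copy; it is
                     * filled in by the memcpy at the bottom of the loop once
                     * the page data has been loaded.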
                     */
4316                     host_bak = colo_cache_from_block_offset(block, addr, false);
4317                 }
4318             }
4319             if (!host) {
4320                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
4321                 ret = -EINVAL;
4322                 break;
4323             }
4324             if (!migration_incoming_in_colo_state()) {
4325                 ramblock_recv_bitmap_set(block, host);
4326             }
4327
4328             trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
4329         }
4330
4331         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
4332         case RAM_SAVE_FLAG_MEM_SIZE:
4333             ret = parse_ramblocks(f, addr);
4334             /*
4335              * For mapped-ram migration (to a file) using multifd, we sync
4336              * once and for all here to make sure all tasks we queued to
4337              * multifd threads are completed, so that all the ramblocks
4338              * (including all the guest memory pages within) are fully
4339              * loaded after this sync returns.
4340              */
4341             if (migrate_mapped_ram()) {
4342                 multifd_recv_sync_main();
4343             }
4344             break;
4345
4346         case RAM_SAVE_FLAG_ZERO:
4347             ch = qemu_get_byte(f);
4348             if (ch != 0) {
4349                 error_report("Found a zero page with value %d", ch);
4350                 ret = -EINVAL;
4351                 break;
4352             }
4353             ram_handle_zero(host, TARGET_PAGE_SIZE);
4354             break;
4355
4356         case RAM_SAVE_FLAG_PAGE:
4357             qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
4358             break;
4359
4360         case RAM_SAVE_FLAG_COMPRESS_PAGE:
4361             len = qemu_get_be32(f);
4362             if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
4363                 error_report("Invalid compressed data length: %d", len);
4364                 ret = -EINVAL;
4365                 break;
4366             }
4367             decompress_data_with_multi_threads(f, host, len);
4368             break;
4369
4370         case RAM_SAVE_FLAG_XBZRLE:
4371             if (load_xbzrle(f, addr, host) < 0) {
4372                 error_report("Failed to decompress XBZRLE page at "
4373                              RAM_ADDR_FMT, addr);
4374                 ret = -EINVAL;
4375                 break;
4376             }
4377             break;
4378         case RAM_SAVE_FLAG_MULTIFD_FLUSH:
4379             multifd_recv_sync_main();
4380             break;
4381         case RAM_SAVE_FLAG_EOS:
4382             /* normal exit */
4383             if (migrate_multifd() &&
4384                 migrate_multifd_flush_after_each_section() &&
4385                 /*
4386                  * Mapped-ram migration flushes once and for all after
4387                  * parsing ramblocks. Always ignore EOS for it.
4388                  */
4389                 !migrate_mapped_ram()) {
4390                 multifd_recv_sync_main();
4391             }
4392             break;
4393         case RAM_SAVE_FLAG_HOOK:
4394             ret = rdma_registration_handle(f);
4395             if (ret < 0) {
4396                 qemu_file_set_error(f, ret);
4397             }
4398             break;
4399         default:
4400             error_report("Unknown combination of migration flags: 0x%x", flags);
4401             ret = -EINVAL;
4402         }
4403         if (!ret) {
4404             ret = qemu_file_get_error(f);
4405         }
4406         if (!ret && host_bak) {
4407             memcpy(host_bak, host, TARGET_PAGE_SIZE);
4408         }
4409     }
4410
4411     ret |= wait_for_decompress_done();
4412     return ret;
4413 }
4414
4415 static int ram_load(QEMUFile *f, void *opaque, int version_id)
4416 {
4417     int ret = 0;
4418     static uint64_t seq_iter;
4419     /*
4420      * If the system is running in postcopy mode, page inserts into host
4421      * memory must be atomic.
4422      */
4423     bool postcopy_running = postcopy_is_running();
4424
4425     seq_iter++;
4426
4427     if (version_id != 4) {
4428         return -EINVAL;
4429     }
4430
4431     /*
4432      * This RCU critical section can be very long running.
4433      * When RCU reclaims in the code start to become numerous,
4434      * it will be necessary to reduce the granularity of this
4435      * critical section.
4436      */
4437     WITH_RCU_READ_LOCK_GUARD() {
4438         if (postcopy_running) {
4439             /*
4440              * Note! Here RAM_CHANNEL_PRECOPY is the precopy channel of
4441              * postcopy migration; we have another RAM_CHANNEL_POSTCOPY to
4442              * service fast page faults.
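             * (On the source side, postcopy_preempt_shutdown_file() below
             * sends the final RAM_SAVE_FLAG_EOS on that preempt channel.)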
4443              */
4444             ret = ram_load_postcopy(f, RAM_CHANNEL_PRECOPY);
4445         } else {
4446             ret = ram_load_precopy(f);
4447         }
4448     }
4449     trace_ram_load_complete(ret, seq_iter);
4450
4451     return ret;
4452 }
4453
4454 static bool ram_has_postcopy(void *opaque)
4455 {
4456     RAMBlock *rb;
4457     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
4458         if (ramblock_is_pmem(rb)) {
4459             info_report("Block: %s, host: %p is an nvdimm memory; postcopy"
4460                         " is not supported now!", rb->idstr, rb->host);
4461             return false;
4462         }
4463     }
4464
4465     return migrate_postcopy_ram();
4466 }
4467
4468 /* Sync all the dirty bitmaps with the destination VM. */
4469 static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
4470 {
4471     RAMBlock *block;
4472     QEMUFile *file = s->to_dst_file;
4473
4474     trace_ram_dirty_bitmap_sync_start();
4475
4476     qatomic_set(&rs->postcopy_bmap_sync_requested, 0);
4477     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4478         qemu_savevm_send_recv_bitmap(file, block->idstr);
4479         trace_ram_dirty_bitmap_request(block->idstr);
4480         qatomic_inc(&rs->postcopy_bmap_sync_requested);
4481     }
4482
4483     trace_ram_dirty_bitmap_sync_wait();
4484
4485     /* Wait until all the ramblocks' dirty bitmaps are synced */
4486     while (qatomic_read(&rs->postcopy_bmap_sync_requested)) {
4487         if (migration_rp_wait(s)) {
4488             return -1;
4489         }
4490     }
4491
4492     trace_ram_dirty_bitmap_sync_complete();
4493
4494     return 0;
4495 }
4496
4497 /*
4498  * Read the received bitmap, invert it into the initial dirty bitmap.
4499  * This is only used when the postcopy migration is paused but wants
4500  * to resume from a middle point.
4501  *
4502  * Returns true on success, false on error.
4503  */
4504 bool ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block, Error **errp)
4505 {
4506     /* from_dst_file is always valid because we're within rp_thread */
4507     QEMUFile *file = s->rp_state.from_dst_file;
4508     g_autofree unsigned long *le_bitmap = NULL;
4509     unsigned long nbits = block->used_length >> TARGET_PAGE_BITS;
4510     uint64_t local_size = DIV_ROUND_UP(nbits, 8);
4511     uint64_t size, end_mark;
4512     RAMState *rs = ram_state;
4513
4514     trace_ram_dirty_bitmap_reload_begin(block->idstr);
4515
4516     if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
4517         error_setg(errp, "Reload bitmap in incorrect state %s",
4518                    MigrationStatus_str(s->state));
4519         return false;
4520     }
4521
4522     /*
4523      * Note: see comments in ramblock_recv_bitmap_send() on why we
4524      * need the endianness conversion, and the padding.
4525      */
4526     local_size = ROUND_UP(local_size, 8);
4527
4528     /* Add padding */
4529     le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
4530
4531     size = qemu_get_be64(file);
4532
4533     /* The size of the bitmap should match our ramblock */
4534     if (size != local_size) {
4535         error_setg(errp, "ramblock '%s' bitmap size mismatch (0x%"PRIx64
4536                    " != 0x%"PRIx64")", block->idstr, size, local_size);
4537         return false;
4538     }
4539
4540     size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
4541     end_mark = qemu_get_be64(file);
4542
4543     if (qemu_file_get_error(file) || size != local_size) {
4544         error_setg(errp, "read bitmap failed for ramblock '%s': "
4545                    "(size 0x%"PRIx64", got: 0x%"PRIx64")",
4546                    block->idstr, local_size, size);
4547         return false;
4548     }
4549
4550     if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
4551         error_setg(errp, "ramblock '%s' end mark incorrect: 0x%"PRIx64,
4552                    block->idstr, end_mark);
4553         return false;
4554     }
4555
4556     /*
4557      * Endianness conversion. We are in postcopy (though paused).
4558      * The dirty bitmap won't change. We can directly modify it.
4559      */
4560     bitmap_from_le(block->bmap, le_bitmap, nbits);
4561
4562     /*
4563      * What we received is the "received bitmap". Invert it to get the
4564      * initial dirty bitmap for this ramblock.
4565      */
4566     bitmap_complement(block->bmap, block->bmap, nbits);
4567
4568     /* Clear dirty bits of discarded ranges that we don't want to migrate. */
4569     ramblock_dirty_bitmap_clear_discarded_pages(block);
4570
4571     /* We'll recalculate migration_dirty_pages in ram_state_resume_prepare(). */
4572     trace_ram_dirty_bitmap_reload_complete(block->idstr);
4573
4574     qatomic_dec(&rs->postcopy_bmap_sync_requested);
4575
4576     /*
4577      * We succeeded in syncing the bitmap for the current ramblock. Always
4578      * kick the migration thread to check whether all requested bitmaps are
4579      * reloaded.  NOTE: it's racy to only kick when requested==0, because
4580      * we don't know whether the migration thread may still be increasing
4581      * it.
4582      */
4583     migration_rp_kick(s);
4584
4585     return true;
4586 }
4587
4588 static int ram_resume_prepare(MigrationState *s, void *opaque)
4589 {
4590     RAMState *rs = *(RAMState **)opaque;
4591     int ret;
4592
4593     ret = ram_dirty_bitmap_sync_all(s, rs);
4594     if (ret) {
4595         return ret;
4596     }
4597
4598     ram_state_resume_prepare(rs, s->to_dst_file);
4599
4600     return 0;
4601 }
4602
4603 void postcopy_preempt_shutdown_file(MigrationState *s)
4604 {
4605     qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS);
4606     qemu_fflush(s->postcopy_qemufile_src);
4607 }
4608
4609 static SaveVMHandlers savevm_ram_handlers = {
4610     .save_setup = ram_save_setup,
4611     .save_live_iterate = ram_save_iterate,
4612     .save_live_complete_postcopy = ram_save_complete,
4613     .save_live_complete_precopy = ram_save_complete,
4614     .has_postcopy = ram_has_postcopy,
4615     .state_pending_exact = ram_state_pending_exact,
4616     .state_pending_estimate = ram_state_pending_estimate,
4617     .load_state = ram_load,
4618     .save_cleanup = ram_save_cleanup,
4619     .load_setup = ram_load_setup,
4620     .load_cleanup = ram_load_cleanup,
4621     .resume_prepare = ram_resume_prepare,
4622 };
4623
4624 static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
4625                                       size_t old_size, size_t new_size)
4626 {
4627     PostcopyState ps = postcopy_state_get();
4628     ram_addr_t offset;
4629     RAMBlock *rb = qemu_ram_block_from_host(host, false, &offset);
4630     Error *err = NULL;
4631
4632     if (!rb) {
4633         error_report("RAM block not found");
4634         return;
4635     }
4636
4637     if (migrate_ram_is_ignored(rb)) {
4638         return;
4639     }
4640
4641     if (!migration_is_idle()) {
4642         /*
4643          * Precopy code on the source cannot deal with the size of RAM blocks
4644          * changing at random points in time; especially after sending the
4645          * RAM block sizes in the migration stream, they must no longer change.
4646          * Abort and indicate a proper reason.
4647          */
4648         error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr);
4649         migration_cancel(err);
4650         error_free(err);
4651     }
4652
4653     switch (ps) {
4654     case POSTCOPY_INCOMING_ADVISE:
4655         /*
4656          * Update what ram_postcopy_incoming_init()->init_range() does at the
4657          * time postcopy was advised. Syncing RAM blocks with the source will
4658          * result in RAM resizes.
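         * E.g. when the block grows, the added tail [old_size, new_size) is
         * discarded below, matching what init_range() did for the rest of
         * the block at advise time.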
4659          */
4660         if (old_size < new_size) {
4661             if (ram_discard_range(rb->idstr, old_size, new_size - old_size)) {
4662                 error_report("RAM block '%s' discard of resized RAM failed",
4663                              rb->idstr);
4664             }
4665         }
4666         rb->postcopy_length = new_size;
4667         break;
4668     case POSTCOPY_INCOMING_NONE:
4669     case POSTCOPY_INCOMING_RUNNING:
4670     case POSTCOPY_INCOMING_END:
4671         /*
4672          * Once our guest is running, postcopy no longer cares about
4673          * resizes. When growing, the new memory was not available on the
4674          * source, so no handling is needed.
4675          */
4676         break;
4677     default:
4678         error_report("RAM block '%s' resized during postcopy state: %d",
4679                      rb->idstr, ps);
4680         exit(-1);
4681     }
4682 }
4683
4684 static RAMBlockNotifier ram_mig_ram_notifier = {
4685     .ram_block_resized = ram_mig_ram_block_resized,
4686 };
4687
4688 void ram_mig_init(void)
4689 {
4690     qemu_mutex_init(&XBZRLE.lock);
4691     register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
4692     ram_block_notifier_add(&ram_mig_ram_notifier);
4693 }
4694
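/*
 * Note: the version number 4 passed to register_savevm_live() above pairs
 * with the "version_id != 4" check in ram_load(); bumping one without the
 * other would make incoming streams unloadable.
 */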