1 /* 2 * QEMU System Emulator 3 * 4 * Copyright (c) 2003-2008 Fabrice Bellard 5 * Copyright (c) 2011-2015 Red Hat Inc 6 * 7 * Authors: 8 * Juan Quintela <quintela@redhat.com> 9 * 10 * Permission is hereby granted, free of charge, to any person obtaining a copy 11 * of this software and associated documentation files (the "Software"), to deal 12 * in the Software without restriction, including without limitation the rights 13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 14 * copies of the Software, and to permit persons to whom the Software is 15 * furnished to do so, subject to the following conditions: 16 * 17 * The above copyright notice and this permission notice shall be included in 18 * all copies or substantial portions of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 26 * THE SOFTWARE. 27 */ 28 29 #include "qemu/osdep.h" 30 #include "qemu/cutils.h" 31 #include "qemu/bitops.h" 32 #include "qemu/bitmap.h" 33 #include "qemu/madvise.h" 34 #include "qemu/main-loop.h" 35 #include "xbzrle.h" 36 #include "ram-compress.h" 37 #include "ram.h" 38 #include "migration.h" 39 #include "migration-stats.h" 40 #include "migration/register.h" 41 #include "migration/misc.h" 42 #include "qemu-file.h" 43 #include "postcopy-ram.h" 44 #include "page_cache.h" 45 #include "qemu/error-report.h" 46 #include "qapi/error.h" 47 #include "qapi/qapi-types-migration.h" 48 #include "qapi/qapi-events-migration.h" 49 #include "qapi/qapi-commands-migration.h" 50 #include "qapi/qmp/qerror.h" 51 #include "trace.h" 52 #include "exec/ram_addr.h" 53 #include "exec/target_page.h" 54 #include "qemu/rcu_queue.h" 55 #include "migration/colo.h" 56 #include "block.h" 57 #include "sysemu/cpu-throttle.h" 58 #include "savevm.h" 59 #include "qemu/iov.h" 60 #include "multifd.h" 61 #include "sysemu/runstate.h" 62 #include "options.h" 63 #include "sysemu/dirtylimit.h" 64 #include "sysemu/kvm.h" 65 66 #include "hw/boards.h" /* for machine_dump_guest_core() */ 67 68 #if defined(__linux__) 69 #include "qemu/userfaultfd.h" 70 #endif /* defined(__linux__) */ 71 72 /***********************************************************/ 73 /* ram save/restore */ 74 75 /* 76 * RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it 77 * worked for pages that were filled with the same char. We switched 78 * it to only search for the zero value. And to avoid confusion with 79 * RAM_SAVE_FLAG_COMPRESS_PAGE just rename it. 
80 */ 81 /* 82 * RAM_SAVE_FLAG_FULL was obsoleted in 2009, it can be reused now 83 */ 84 #define RAM_SAVE_FLAG_FULL 0x01 85 #define RAM_SAVE_FLAG_ZERO 0x02 86 #define RAM_SAVE_FLAG_MEM_SIZE 0x04 87 #define RAM_SAVE_FLAG_PAGE 0x08 88 #define RAM_SAVE_FLAG_EOS 0x10 89 #define RAM_SAVE_FLAG_CONTINUE 0x20 90 #define RAM_SAVE_FLAG_XBZRLE 0x40 91 /* 0x80 is reserved in qemu-file.h for RAM_SAVE_FLAG_HOOK */ 92 #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100 93 #define RAM_SAVE_FLAG_MULTIFD_FLUSH 0x200 94 /* We can't use any flag that is bigger than 0x200 */ 95 96 XBZRLECacheStats xbzrle_counters; 97 98 /* used by the search for pages to send */ 99 struct PageSearchStatus { 100 /* The migration channel used for a specific host page */ 101 QEMUFile *pss_channel; 102 /* Last block from where we have sent data */ 103 RAMBlock *last_sent_block; 104 /* Current block being searched */ 105 RAMBlock *block; 106 /* Current page to search from */ 107 unsigned long page; 108 /* Set once we wrap around */ 109 bool complete_round; 110 /* Whether we're sending a host page */ 111 bool host_page_sending; 112 /* The start/end of current host page. Invalid if host_page_sending==false */ 113 unsigned long host_page_start; 114 unsigned long host_page_end; 115 }; 116 typedef struct PageSearchStatus PageSearchStatus; 117 118 /* struct contains XBZRLE cache and a static page 119 used by the compression */ 120 static struct { 121 /* buffer used for XBZRLE encoding */ 122 uint8_t *encoded_buf; 123 /* buffer for storing page content */ 124 uint8_t *current_buf; 125 /* Cache for XBZRLE, Protected by lock. */ 126 PageCache *cache; 127 QemuMutex lock; 128 /* it will store a page full of zeros */ 129 uint8_t *zero_target_page; 130 /* buffer used for XBZRLE decoding */ 131 uint8_t *decoded_buf; 132 } XBZRLE; 133 134 static void XBZRLE_cache_lock(void) 135 { 136 if (migrate_xbzrle()) { 137 qemu_mutex_lock(&XBZRLE.lock); 138 } 139 } 140 141 static void XBZRLE_cache_unlock(void) 142 { 143 if (migrate_xbzrle()) { 144 qemu_mutex_unlock(&XBZRLE.lock); 145 } 146 } 147 148 /** 149 * xbzrle_cache_resize: resize the xbzrle cache 150 * 151 * This function is called from migrate_params_apply in main 152 * thread, possibly while a migration is in progress. A running 153 * migration may be using the cache and might finish during this call, 154 * hence changes to the cache are protected by XBZRLE.lock(). 
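 *
 * For example (illustrative): on a 32-bit build a request for an 8 GiB
 * cache (0x200000000 bytes) does not fit in size_t, so the truncation
 * check below rejects it with -1 instead of silently shrinking the cache.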
155 * 156 * Returns 0 for success or -1 for error 157 * 158 * @new_size: new cache size 159 * @errp: set *errp if the check failed, with reason 160 */ 161 int xbzrle_cache_resize(uint64_t new_size, Error **errp) 162 { 163 PageCache *new_cache; 164 int64_t ret = 0; 165 166 /* Check for truncation */ 167 if (new_size != (size_t)new_size) { 168 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size", 169 "exceeding address space"); 170 return -1; 171 } 172 173 if (new_size == migrate_xbzrle_cache_size()) { 174 /* nothing to do */ 175 return 0; 176 } 177 178 XBZRLE_cache_lock(); 179 180 if (XBZRLE.cache != NULL) { 181 new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp); 182 if (!new_cache) { 183 ret = -1; 184 goto out; 185 } 186 187 cache_fini(XBZRLE.cache); 188 XBZRLE.cache = new_cache; 189 } 190 out: 191 XBZRLE_cache_unlock(); 192 return ret; 193 } 194 195 static bool postcopy_preempt_active(void) 196 { 197 return migrate_postcopy_preempt() && migration_in_postcopy(); 198 } 199 200 bool migrate_ram_is_ignored(RAMBlock *block) 201 { 202 return !qemu_ram_is_migratable(block) || 203 (migrate_ignore_shared() && qemu_ram_is_shared(block) 204 && qemu_ram_is_named_file(block)); 205 } 206 207 #undef RAMBLOCK_FOREACH 208 209 int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque) 210 { 211 RAMBlock *block; 212 int ret = 0; 213 214 RCU_READ_LOCK_GUARD(); 215 216 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 217 ret = func(block, opaque); 218 if (ret) { 219 break; 220 } 221 } 222 return ret; 223 } 224 225 static void ramblock_recv_map_init(void) 226 { 227 RAMBlock *rb; 228 229 RAMBLOCK_FOREACH_NOT_IGNORED(rb) { 230 assert(!rb->receivedmap); 231 rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits()); 232 } 233 } 234 235 int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr) 236 { 237 return test_bit(ramblock_recv_bitmap_offset(host_addr, rb), 238 rb->receivedmap); 239 } 240 241 bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset) 242 { 243 return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap); 244 } 245 246 void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr) 247 { 248 set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap); 249 } 250 251 void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr, 252 size_t nr) 253 { 254 bitmap_set_atomic(rb->receivedmap, 255 ramblock_recv_bitmap_offset(host_addr, rb), 256 nr); 257 } 258 259 #define RAMBLOCK_RECV_BITMAP_ENDING (0x0123456789abcdefULL) 260 261 /* 262 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes). 263 * 264 * Returns >0 if success with sent bytes, or <0 if error. 265 */ 266 int64_t ramblock_recv_bitmap_send(QEMUFile *file, 267 const char *block_name) 268 { 269 RAMBlock *block = qemu_ram_block_by_name(block_name); 270 unsigned long *le_bitmap, nbits; 271 uint64_t size; 272 273 if (!block) { 274 error_report("%s: invalid block name: %s", __func__, block_name); 275 return -1; 276 } 277 278 nbits = block->postcopy_length >> TARGET_PAGE_BITS; 279 280 /* 281 * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit 282 * machines we may need 4 more bytes for padding (see below 283 * comment). So extend it a bit before hand. 284 */ 285 le_bitmap = bitmap_new(nbits + BITS_PER_LONG); 286 287 /* 288 * Always use little endian when sending the bitmap. This is 289 * required that when source and destination VMs are not using the 290 * same endianness. (Note: big endian won't work.) 
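 *
 * Worked example (illustrative): with 4 KiB target pages, a 1 GiB block
 * has nbits = 262144, so the little-endian bitmap payload below is 32768
 * bytes (already a multiple of 8); the stream then carries the be64 size,
 * the bitmap bytes, and the RAMBLOCK_RECV_BITMAP_ENDING marker.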
291 */ 292 bitmap_to_le(le_bitmap, block->receivedmap, nbits); 293 294 /* Size of the bitmap, in bytes */ 295 size = DIV_ROUND_UP(nbits, 8); 296 297 /* 298 * size is always aligned to 8 bytes for 64bit machines, but it 299 * may not be true for 32bit machines. We need this padding to 300 * make sure the migration can survive even between 32bit and 301 * 64bit machines. 302 */ 303 size = ROUND_UP(size, 8); 304 305 qemu_put_be64(file, size); 306 qemu_put_buffer(file, (const uint8_t *)le_bitmap, size); 307 /* 308 * Mark as an end, in case the middle part is screwed up due to 309 * some "mysterious" reason. 310 */ 311 qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING); 312 qemu_fflush(file); 313 314 g_free(le_bitmap); 315 316 if (qemu_file_get_error(file)) { 317 return qemu_file_get_error(file); 318 } 319 320 return size + sizeof(size); 321 } 322 323 /* 324 * An outstanding page request, on the source, having been received 325 * and queued 326 */ 327 struct RAMSrcPageRequest { 328 RAMBlock *rb; 329 hwaddr offset; 330 hwaddr len; 331 332 QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req; 333 }; 334 335 /* State of RAM for migration */ 336 struct RAMState { 337 /* 338 * PageSearchStatus structures for the channels when send pages. 339 * Protected by the bitmap_mutex. 340 */ 341 PageSearchStatus pss[RAM_CHANNEL_MAX]; 342 /* UFFD file descriptor, used in 'write-tracking' migration */ 343 int uffdio_fd; 344 /* total ram size in bytes */ 345 uint64_t ram_bytes_total; 346 /* Last block that we have visited searching for dirty pages */ 347 RAMBlock *last_seen_block; 348 /* Last dirty target page we have sent */ 349 ram_addr_t last_page; 350 /* last ram version we have seen */ 351 uint32_t last_version; 352 /* How many times we have dirty too many pages */ 353 int dirty_rate_high_cnt; 354 /* these variables are used for bitmap sync */ 355 /* last time we did a full bitmap_sync */ 356 int64_t time_last_bitmap_sync; 357 /* bytes transferred at start_time */ 358 uint64_t bytes_xfer_prev; 359 /* number of dirty pages since start_time */ 360 uint64_t num_dirty_pages_period; 361 /* xbzrle misses since the beginning of the period */ 362 uint64_t xbzrle_cache_miss_prev; 363 /* Amount of xbzrle pages since the beginning of the period */ 364 uint64_t xbzrle_pages_prev; 365 /* Amount of xbzrle encoded bytes since the beginning of the period */ 366 uint64_t xbzrle_bytes_prev; 367 /* Are we really using XBZRLE (e.g., after the first round). 
*/ 368 bool xbzrle_started; 369 /* Are we on the last stage of migration */ 370 bool last_stage; 371 /* compression statistics since the beginning of the period */ 372 /* amount of count that no free thread to compress data */ 373 uint64_t compress_thread_busy_prev; 374 /* amount bytes after compression */ 375 uint64_t compressed_size_prev; 376 /* amount of compressed pages */ 377 uint64_t compress_pages_prev; 378 379 /* total handled target pages at the beginning of period */ 380 uint64_t target_page_count_prev; 381 /* total handled target pages since start */ 382 uint64_t target_page_count; 383 /* number of dirty bits in the bitmap */ 384 uint64_t migration_dirty_pages; 385 /* 386 * Protects: 387 * - dirty/clear bitmap 388 * - migration_dirty_pages 389 * - pss structures 390 */ 391 QemuMutex bitmap_mutex; 392 /* The RAMBlock used in the last src_page_requests */ 393 RAMBlock *last_req_rb; 394 /* Queue of outstanding page requests from the destination */ 395 QemuMutex src_page_req_mutex; 396 QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests; 397 }; 398 typedef struct RAMState RAMState; 399 400 static RAMState *ram_state; 401 402 static NotifierWithReturnList precopy_notifier_list; 403 404 /* Whether postcopy has queued requests? */ 405 static bool postcopy_has_request(RAMState *rs) 406 { 407 return !QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests); 408 } 409 410 void precopy_infrastructure_init(void) 411 { 412 notifier_with_return_list_init(&precopy_notifier_list); 413 } 414 415 void precopy_add_notifier(NotifierWithReturn *n) 416 { 417 notifier_with_return_list_add(&precopy_notifier_list, n); 418 } 419 420 void precopy_remove_notifier(NotifierWithReturn *n) 421 { 422 notifier_with_return_remove(n); 423 } 424 425 int precopy_notify(PrecopyNotifyReason reason, Error **errp) 426 { 427 PrecopyNotifyData pnd; 428 pnd.reason = reason; 429 pnd.errp = errp; 430 431 return notifier_with_return_list_notify(&precopy_notifier_list, &pnd); 432 } 433 434 uint64_t ram_bytes_remaining(void) 435 { 436 return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) : 437 0; 438 } 439 440 void ram_transferred_add(uint64_t bytes) 441 { 442 if (runstate_is_running()) { 443 stat64_add(&mig_stats.precopy_bytes, bytes); 444 } else if (migration_in_postcopy()) { 445 stat64_add(&mig_stats.postcopy_bytes, bytes); 446 } else { 447 stat64_add(&mig_stats.downtime_bytes, bytes); 448 } 449 stat64_add(&mig_stats.transferred, bytes); 450 } 451 452 struct MigrationOps { 453 int (*ram_save_target_page)(RAMState *rs, PageSearchStatus *pss); 454 }; 455 typedef struct MigrationOps MigrationOps; 456 457 MigrationOps *migration_ops; 458 459 static int ram_save_host_page_urgent(PageSearchStatus *pss); 460 461 /* NOTE: page is the PFN not real ram_addr_t. */ 462 static void pss_init(PageSearchStatus *pss, RAMBlock *rb, ram_addr_t page) 463 { 464 pss->block = rb; 465 pss->page = page; 466 pss->complete_round = false; 467 } 468 469 /* 470 * Check whether two PSSs are actively sending the same page. Return true 471 * if it is, false otherwise. 
472 */ 473 static bool pss_overlap(PageSearchStatus *pss1, PageSearchStatus *pss2) 474 { 475 return pss1->host_page_sending && pss2->host_page_sending && 476 (pss1->host_page_start == pss2->host_page_start); 477 } 478 479 /** 480 * save_page_header: write page header to wire 481 * 482 * If this is the 1st block, it also writes the block identification 483 * 484 * Returns the number of bytes written 485 * 486 * @pss: current PSS channel status 487 * @block: block that contains the page we want to send 488 * @offset: offset inside the block for the page 489 * in the lower bits, it contains flags 490 */ 491 static size_t save_page_header(PageSearchStatus *pss, QEMUFile *f, 492 RAMBlock *block, ram_addr_t offset) 493 { 494 size_t size, len; 495 bool same_block = (block == pss->last_sent_block); 496 497 if (same_block) { 498 offset |= RAM_SAVE_FLAG_CONTINUE; 499 } 500 qemu_put_be64(f, offset); 501 size = 8; 502 503 if (!same_block) { 504 len = strlen(block->idstr); 505 qemu_put_byte(f, len); 506 qemu_put_buffer(f, (uint8_t *)block->idstr, len); 507 size += 1 + len; 508 pss->last_sent_block = block; 509 } 510 return size; 511 } 512 513 /** 514 * mig_throttle_guest_down: throttle down the guest 515 * 516 * Reduce amount of guest cpu execution to hopefully slow down memory 517 * writes. If guest dirty memory rate is reduced below the rate at 518 * which we can transfer pages to the destination then we should be 519 * able to complete migration. Some workloads dirty memory way too 520 * fast and will not effectively converge, even with auto-converge. 521 */ 522 static void mig_throttle_guest_down(uint64_t bytes_dirty_period, 523 uint64_t bytes_dirty_threshold) 524 { 525 uint64_t pct_initial = migrate_cpu_throttle_initial(); 526 uint64_t pct_increment = migrate_cpu_throttle_increment(); 527 bool pct_tailslow = migrate_cpu_throttle_tailslow(); 528 int pct_max = migrate_max_cpu_throttle(); 529 530 uint64_t throttle_now = cpu_throttle_get_percentage(); 531 uint64_t cpu_now, cpu_ideal, throttle_inc; 532 533 /* We have not started throttling yet. Let's start it. */ 534 if (!cpu_throttle_active()) { 535 cpu_throttle_set(pct_initial); 536 } else { 537 /* Throttling already on, just increase the rate */ 538 if (!pct_tailslow) { 539 throttle_inc = pct_increment; 540 } else { 541 /* Compute the ideal CPU percentage used by Guest, which may 542 * make the dirty rate match the dirty rate threshold. */ 543 cpu_now = 100 - throttle_now; 544 cpu_ideal = cpu_now * (bytes_dirty_threshold * 1.0 / 545 bytes_dirty_period); 546 throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment); 547 } 548 cpu_throttle_set(MIN(throttle_now + throttle_inc, pct_max)); 549 } 550 } 551 552 void mig_throttle_counter_reset(void) 553 { 554 RAMState *rs = ram_state; 555 556 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 557 rs->num_dirty_pages_period = 0; 558 rs->bytes_xfer_prev = stat64_get(&mig_stats.transferred); 559 } 560 561 /** 562 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache 563 * 564 * @rs: current RAM state 565 * @current_addr: address for the zero page 566 * 567 * Update the xbzrle cache to reflect a page that's been sent as all 0. 568 * The important thing is that a stale (not-yet-0'd) page be replaced 569 * by the new data. 570 * As a bonus, if the page wasn't in the cache it gets added so that 571 * when a small write is made into the 0'd page it gets XBZRLE sent. 
572 */ 573 static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr) 574 { 575 /* We don't care if this fails to allocate a new cache page 576 * as long as it updated an old one */ 577 cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page, 578 stat64_get(&mig_stats.dirty_sync_count)); 579 } 580 581 #define ENCODING_FLAG_XBZRLE 0x1 582 583 /** 584 * save_xbzrle_page: compress and send current page 585 * 586 * Returns: 1 means that we wrote the page 587 * 0 means that page is identical to the one already sent 588 * -1 means that xbzrle would be longer than normal 589 * 590 * @rs: current RAM state 591 * @pss: current PSS channel 592 * @current_data: pointer to the address of the page contents 593 * @current_addr: addr of the page 594 * @block: block that contains the page we want to send 595 * @offset: offset inside the block for the page 596 */ 597 static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss, 598 uint8_t **current_data, ram_addr_t current_addr, 599 RAMBlock *block, ram_addr_t offset) 600 { 601 int encoded_len = 0, bytes_xbzrle; 602 uint8_t *prev_cached_page; 603 QEMUFile *file = pss->pss_channel; 604 uint64_t generation = stat64_get(&mig_stats.dirty_sync_count); 605 606 if (!cache_is_cached(XBZRLE.cache, current_addr, generation)) { 607 xbzrle_counters.cache_miss++; 608 if (!rs->last_stage) { 609 if (cache_insert(XBZRLE.cache, current_addr, *current_data, 610 generation) == -1) { 611 return -1; 612 } else { 613 /* update *current_data when the page has been 614 inserted into cache */ 615 *current_data = get_cached_data(XBZRLE.cache, current_addr); 616 } 617 } 618 return -1; 619 } 620 621 /* 622 * Reaching here means the page has hit the xbzrle cache, no matter what 623 * encoding result it is (normal encoding, overflow or skipping the page), 624 * count the page as encoded. This is used to calculate the encoding rate. 625 * 626 * Example: 2 pages (8KB) being encoded, first page encoding generates 2KB, 627 * 2nd page turns out to be skipped (i.e. no new bytes written to the 628 * page), the overall encoding rate will be 8KB / 2KB = 4, which has the 629 * skipped page included. In this way, the encoding rate can tell if the 630 * guest page is good for xbzrle encoding. 631 */ 632 xbzrle_counters.pages++; 633 prev_cached_page = get_cached_data(XBZRLE.cache, current_addr); 634 635 /* save current buffer into memory */ 636 memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE); 637 638 /* XBZRLE encoding (if there is no overflow) */ 639 encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf, 640 TARGET_PAGE_SIZE, XBZRLE.encoded_buf, 641 TARGET_PAGE_SIZE); 642 643 /* 644 * Update the cache contents, so that it corresponds to the data 645 * sent, in all cases except where we skip the page. 646 */ 647 if (!rs->last_stage && encoded_len != 0) { 648 memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE); 649 /* 650 * In the case where we couldn't compress, ensure that the caller 651 * sends the data from the cache, since the guest might have 652 * changed the RAM since we copied it. 
653 */ 654 *current_data = prev_cached_page; 655 } 656 657 if (encoded_len == 0) { 658 trace_save_xbzrle_page_skipping(); 659 return 0; 660 } else if (encoded_len == -1) { 661 trace_save_xbzrle_page_overflow(); 662 xbzrle_counters.overflow++; 663 xbzrle_counters.bytes += TARGET_PAGE_SIZE; 664 return -1; 665 } 666 667 /* Send XBZRLE based compressed page */ 668 bytes_xbzrle = save_page_header(pss, pss->pss_channel, block, 669 offset | RAM_SAVE_FLAG_XBZRLE); 670 qemu_put_byte(file, ENCODING_FLAG_XBZRLE); 671 qemu_put_be16(file, encoded_len); 672 qemu_put_buffer(file, XBZRLE.encoded_buf, encoded_len); 673 bytes_xbzrle += encoded_len + 1 + 2; 674 /* 675 * Like compressed_size (please see update_compress_thread_counts), 676 * the xbzrle encoded bytes don't count the 8 byte header with 677 * RAM_SAVE_FLAG_CONTINUE. 678 */ 679 xbzrle_counters.bytes += bytes_xbzrle - 8; 680 ram_transferred_add(bytes_xbzrle); 681 682 return 1; 683 } 684 685 /** 686 * pss_find_next_dirty: find the next dirty page of current ramblock 687 * 688 * This function updates pss->page to point to the next dirty page index 689 * within the ramblock to migrate, or the end of ramblock when nothing 690 * found. Note that when pss->host_page_sending==true it means we're 691 * during sending a host page, so we won't look for dirty page that is 692 * outside the host page boundary. 693 * 694 * @pss: the current page search status 695 */ 696 static void pss_find_next_dirty(PageSearchStatus *pss) 697 { 698 RAMBlock *rb = pss->block; 699 unsigned long size = rb->used_length >> TARGET_PAGE_BITS; 700 unsigned long *bitmap = rb->bmap; 701 702 if (migrate_ram_is_ignored(rb)) { 703 /* Points directly to the end, so we know no dirty page */ 704 pss->page = size; 705 return; 706 } 707 708 /* 709 * If during sending a host page, only look for dirty pages within the 710 * current host page being send. 711 */ 712 if (pss->host_page_sending) { 713 assert(pss->host_page_end); 714 size = MIN(size, pss->host_page_end); 715 } 716 717 pss->page = find_next_bit(bitmap, size, pss->page); 718 } 719 720 static void migration_clear_memory_region_dirty_bitmap(RAMBlock *rb, 721 unsigned long page) 722 { 723 uint8_t shift; 724 hwaddr size, start; 725 726 if (!rb->clear_bmap || !clear_bmap_test_and_clear(rb, page)) { 727 return; 728 } 729 730 shift = rb->clear_bmap_shift; 731 /* 732 * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this 733 * can make things easier sometimes since then start address 734 * of the small chunk will always be 64 pages aligned so the 735 * bitmap will always be aligned to unsigned long. We should 736 * even be able to remove this restriction but I'm simply 737 * keeping it. 738 */ 739 assert(shift >= 6); 740 741 size = 1ULL << (TARGET_PAGE_BITS + shift); 742 start = QEMU_ALIGN_DOWN((ram_addr_t)page << TARGET_PAGE_BITS, size); 743 trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page); 744 memory_region_clear_dirty_bitmap(rb->mr, start, size); 745 } 746 747 static void 748 migration_clear_memory_region_dirty_bitmap_range(RAMBlock *rb, 749 unsigned long start, 750 unsigned long npages) 751 { 752 unsigned long i, chunk_pages = 1UL << rb->clear_bmap_shift; 753 unsigned long chunk_start = QEMU_ALIGN_DOWN(start, chunk_pages); 754 unsigned long chunk_end = QEMU_ALIGN_UP(start + npages, chunk_pages); 755 756 /* 757 * Clear pages from start to start + npages - 1, so the end boundary is 758 * exclusive. 
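 *
 * Worked example (illustrative, assuming clear_bmap_shift == 18): each
 * chunk then covers 262144 pages, so clearing pages [300000, 300010)
 * aligns down/up to [262144, 524288) and the loop below makes a single
 * call for the chunk starting at page 262144.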
 */
    for (i = chunk_start; i < chunk_end; i += chunk_pages) {
        migration_clear_memory_region_dirty_bitmap(rb, i);
    }
}

/*
 * colo_bitmap_find_dirty: find contiguous dirty pages from start
 *
 * Returns the page offset within memory region of the start of the contiguous
 * dirty page
 *
 * @rs: current RAM state
 * @rb: RAMBlock where to search for dirty pages
 * @start: page where we start the search
 * @num: the number of contiguous dirty pages
 */
static inline
unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                     unsigned long start, unsigned long *num)
{
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;
    unsigned long first, next;

    *num = 0;

    if (migrate_ram_is_ignored(rb)) {
        return size;
    }

    first = find_next_bit(bitmap, size, start);
    if (first >= size) {
        return first;
    }
    next = find_next_zero_bit(bitmap, size, first + 1);
    assert(next >= first);
    *num = next - first;
    return first;
}
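/*
 * Illustrative sketch (not part of the original code): a caller that wants
 * to walk whole runs of dirty pages consumes the contract above roughly as
 * follows, with 'size' being rb->used_length >> TARGET_PAGE_BITS:
 *
 *     unsigned long num, first = 0;
 *
 *     while ((first = colo_bitmap_find_dirty(rs, rb, first, &num)) < size) {
 *         handle pages [first, first + num) ...;
 *         first += num;
 *     }
 */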
static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                RAMBlock *rb,
                                                unsigned long page)
{
    bool ret;

    /*
     * Clear dirty bitmap if needed. This _must_ be called before we
     * send any of the page in the chunk because we need to make sure
     * we can capture further page content changes when we sync dirty
     * log the next time. So as long as we are going to send any of
     * the page in the chunk we clear the remote dirty bitmap for all.
     * Clearing it earlier won't be a problem, but too late will.
     */
    migration_clear_memory_region_dirty_bitmap(rb, page);

    ret = test_and_clear_bit(page, rb->bmap);
    if (ret) {
        rs->migration_dirty_pages--;
    }

    return ret;
}

static void dirty_bitmap_clear_section(MemoryRegionSection *section,
                                       void *opaque)
{
    const hwaddr offset = section->offset_within_region;
    const hwaddr size = int128_get64(section->size);
    const unsigned long start = offset >> TARGET_PAGE_BITS;
    const unsigned long npages = size >> TARGET_PAGE_BITS;
    RAMBlock *rb = section->mr->ram_block;
    uint64_t *cleared_bits = opaque;

    /*
     * We don't grab ram_state->bitmap_mutex because we expect to run
     * only when starting migration or during postcopy recovery where
     * we don't have concurrent access.
     */
    if (!migration_in_postcopy() && !migrate_background_snapshot()) {
        migration_clear_memory_region_dirty_bitmap_range(rb, start, npages);
    }
    *cleared_bits += bitmap_count_one_with_offset(rb->bmap, start, npages);
    bitmap_clear(rb->bmap, start, npages);
}

/*
 * Exclude all dirty pages from migration that fall into a discarded range as
 * managed by a RamDiscardManager responsible for the mapped memory region of
 * the RAMBlock. Clear the corresponding bits in the dirty bitmaps.
 *
 * Discarded pages ("logically unplugged") have undefined content and must
 * not get migrated, because even reading these pages for migration might
 * result in undesired behavior.
 *
 * Returns the number of cleared bits in the RAMBlock dirty bitmap.
 *
 * Note: The result is only stable while migrating (precopy/postcopy).
 */
static uint64_t ramblock_dirty_bitmap_clear_discarded_pages(RAMBlock *rb)
{
    uint64_t cleared_bits = 0;

    if (rb->mr && rb->bmap && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = 0,
            .size = int128_make64(qemu_ram_get_used_length(rb)),
        };

        ram_discard_manager_replay_discarded(rdm, &section,
                                             dirty_bitmap_clear_section,
                                             &cleared_bits);
    }
    return cleared_bits;
}

/*
 * Check if a host-page aligned page falls into a discarded range as managed by
 * a RamDiscardManager responsible for the mapped memory region of the RAMBlock.
 *
 * Note: The result is only stable while migrating (precopy/postcopy).
 */
bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start)
{
    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = start,
            .size = int128_make64(qemu_ram_pagesize(rb)),
        };

        return !ram_discard_manager_is_populated(rdm, &section);
    }
    return false;
}

/* Called with RCU critical section */
static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
{
    uint64_t new_dirty_pages =
        cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length);

    rs->migration_dirty_pages += new_dirty_pages;
    rs->num_dirty_pages_period += new_dirty_pages;
}

/**
 * ram_pagesize_summary: calculate all the pagesizes of a VM
 *
 * Returns a summary bitmap of the page sizes of all RAMBlocks
 *
 * For VMs with just normal pages this is equivalent to the host page
 * size. If it's got some huge pages then it's the OR of all the
 * different page sizes.
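 *
 * For example (illustrative): a VM whose RAMBlocks use 4 KiB and 2 MiB
 * pages yields 0x1000 | 0x200000 = 0x201000.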
917 */ 918 uint64_t ram_pagesize_summary(void) 919 { 920 RAMBlock *block; 921 uint64_t summary = 0; 922 923 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 924 summary |= block->page_size; 925 } 926 927 return summary; 928 } 929 930 uint64_t ram_get_total_transferred_pages(void) 931 { 932 return stat64_get(&mig_stats.normal_pages) + 933 stat64_get(&mig_stats.zero_pages) + 934 compression_counters.pages + xbzrle_counters.pages; 935 } 936 937 static void migration_update_rates(RAMState *rs, int64_t end_time) 938 { 939 uint64_t page_count = rs->target_page_count - rs->target_page_count_prev; 940 double compressed_size; 941 942 /* calculate period counters */ 943 stat64_set(&mig_stats.dirty_pages_rate, 944 rs->num_dirty_pages_period * 1000 / 945 (end_time - rs->time_last_bitmap_sync)); 946 947 if (!page_count) { 948 return; 949 } 950 951 if (migrate_xbzrle()) { 952 double encoded_size, unencoded_size; 953 954 xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss - 955 rs->xbzrle_cache_miss_prev) / page_count; 956 rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss; 957 unencoded_size = (xbzrle_counters.pages - rs->xbzrle_pages_prev) * 958 TARGET_PAGE_SIZE; 959 encoded_size = xbzrle_counters.bytes - rs->xbzrle_bytes_prev; 960 if (xbzrle_counters.pages == rs->xbzrle_pages_prev || !encoded_size) { 961 xbzrle_counters.encoding_rate = 0; 962 } else { 963 xbzrle_counters.encoding_rate = unencoded_size / encoded_size; 964 } 965 rs->xbzrle_pages_prev = xbzrle_counters.pages; 966 rs->xbzrle_bytes_prev = xbzrle_counters.bytes; 967 } 968 969 if (migrate_compress()) { 970 compression_counters.busy_rate = (double)(compression_counters.busy - 971 rs->compress_thread_busy_prev) / page_count; 972 rs->compress_thread_busy_prev = compression_counters.busy; 973 974 compressed_size = compression_counters.compressed_size - 975 rs->compressed_size_prev; 976 if (compressed_size) { 977 double uncompressed_size = (compression_counters.pages - 978 rs->compress_pages_prev) * TARGET_PAGE_SIZE; 979 980 /* Compression-Ratio = Uncompressed-size / Compressed-size */ 981 compression_counters.compression_rate = 982 uncompressed_size / compressed_size; 983 984 rs->compress_pages_prev = compression_counters.pages; 985 rs->compressed_size_prev = compression_counters.compressed_size; 986 } 987 } 988 } 989 990 /* 991 * Enable dirty-limit to throttle down the guest 992 */ 993 static void migration_dirty_limit_guest(void) 994 { 995 /* 996 * dirty page rate quota for all vCPUs fetched from 997 * migration parameter 'vcpu_dirty_limit' 998 */ 999 static int64_t quota_dirtyrate; 1000 MigrationState *s = migrate_get_current(); 1001 1002 /* 1003 * If dirty limit already enabled and migration parameter 1004 * vcpu-dirty-limit untouched. 
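 *
 * For example (illustrative): a later bitmap sync that finds
 * dirtylimit_in_service() true and vcpu-dirty-limit still at its previous
 * value returns right below; only a changed limit triggers a new
 * qmp_set_vcpu_dirty_limit() call.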
1005 */ 1006 if (dirtylimit_in_service() && 1007 quota_dirtyrate == s->parameters.vcpu_dirty_limit) { 1008 return; 1009 } 1010 1011 quota_dirtyrate = s->parameters.vcpu_dirty_limit; 1012 1013 /* 1014 * Set all vCPU a quota dirtyrate, note that the second 1015 * parameter will be ignored if setting all vCPU for the vm 1016 */ 1017 qmp_set_vcpu_dirty_limit(false, -1, quota_dirtyrate, NULL); 1018 trace_migration_dirty_limit_guest(quota_dirtyrate); 1019 } 1020 1021 static void migration_trigger_throttle(RAMState *rs) 1022 { 1023 uint64_t threshold = migrate_throttle_trigger_threshold(); 1024 uint64_t bytes_xfer_period = 1025 stat64_get(&mig_stats.transferred) - rs->bytes_xfer_prev; 1026 uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE; 1027 uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100; 1028 1029 /* During block migration the auto-converge logic incorrectly detects 1030 * that ram migration makes no progress. Avoid this by disabling the 1031 * throttling logic during the bulk phase of block migration. */ 1032 if (blk_mig_bulk_active()) { 1033 return; 1034 } 1035 1036 /* 1037 * The following detection logic can be refined later. For now: 1038 * Check to see if the ratio between dirtied bytes and the approx. 1039 * amount of bytes that just got transferred since the last time 1040 * we were in this routine reaches the threshold. If that happens 1041 * twice, start or increase throttling. 1042 */ 1043 if ((bytes_dirty_period > bytes_dirty_threshold) && 1044 (++rs->dirty_rate_high_cnt >= 2)) { 1045 rs->dirty_rate_high_cnt = 0; 1046 if (migrate_auto_converge()) { 1047 trace_migration_throttle(); 1048 mig_throttle_guest_down(bytes_dirty_period, 1049 bytes_dirty_threshold); 1050 } else if (migrate_dirty_limit()) { 1051 migration_dirty_limit_guest(); 1052 } 1053 } 1054 } 1055 1056 static void migration_bitmap_sync(RAMState *rs, bool last_stage) 1057 { 1058 RAMBlock *block; 1059 int64_t end_time; 1060 1061 stat64_add(&mig_stats.dirty_sync_count, 1); 1062 1063 if (!rs->time_last_bitmap_sync) { 1064 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 1065 } 1066 1067 trace_migration_bitmap_sync_start(); 1068 memory_global_dirty_log_sync(last_stage); 1069 1070 qemu_mutex_lock(&rs->bitmap_mutex); 1071 WITH_RCU_READ_LOCK_GUARD() { 1072 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 1073 ramblock_sync_dirty_bitmap(rs, block); 1074 } 1075 stat64_set(&mig_stats.dirty_bytes_last_sync, ram_bytes_remaining()); 1076 } 1077 qemu_mutex_unlock(&rs->bitmap_mutex); 1078 1079 memory_global_after_dirty_log_sync(); 1080 trace_migration_bitmap_sync_end(rs->num_dirty_pages_period); 1081 1082 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 1083 1084 /* more than 1 second = 1000 millisecons */ 1085 if (end_time > rs->time_last_bitmap_sync + 1000) { 1086 migration_trigger_throttle(rs); 1087 1088 migration_update_rates(rs, end_time); 1089 1090 rs->target_page_count_prev = rs->target_page_count; 1091 1092 /* reset period counters */ 1093 rs->time_last_bitmap_sync = end_time; 1094 rs->num_dirty_pages_period = 0; 1095 rs->bytes_xfer_prev = stat64_get(&mig_stats.transferred); 1096 } 1097 if (migrate_events()) { 1098 uint64_t generation = stat64_get(&mig_stats.dirty_sync_count); 1099 qapi_event_send_migration_pass(generation); 1100 } 1101 } 1102 1103 static void migration_bitmap_sync_precopy(RAMState *rs, bool last_stage) 1104 { 1105 Error *local_err = NULL; 1106 1107 /* 1108 * The current notifier usage is just an optimization to migration, so we 1109 * don't stop the 
normal migration process in the error case. 1110 */ 1111 if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) { 1112 error_report_err(local_err); 1113 local_err = NULL; 1114 } 1115 1116 migration_bitmap_sync(rs, last_stage); 1117 1118 if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) { 1119 error_report_err(local_err); 1120 } 1121 } 1122 1123 void ram_release_page(const char *rbname, uint64_t offset) 1124 { 1125 if (!migrate_release_ram() || !migration_in_postcopy()) { 1126 return; 1127 } 1128 1129 ram_discard_range(rbname, offset, TARGET_PAGE_SIZE); 1130 } 1131 1132 /** 1133 * save_zero_page_to_file: send the zero page to the file 1134 * 1135 * Returns the size of data written to the file, 0 means the page is not 1136 * a zero page 1137 * 1138 * @pss: current PSS channel 1139 * @block: block that contains the page we want to send 1140 * @offset: offset inside the block for the page 1141 */ 1142 static int save_zero_page_to_file(PageSearchStatus *pss, QEMUFile *file, 1143 RAMBlock *block, ram_addr_t offset) 1144 { 1145 uint8_t *p = block->host + offset; 1146 int len = 0; 1147 1148 if (buffer_is_zero(p, TARGET_PAGE_SIZE)) { 1149 len += save_page_header(pss, file, block, offset | RAM_SAVE_FLAG_ZERO); 1150 qemu_put_byte(file, 0); 1151 len += 1; 1152 ram_release_page(block->idstr, offset); 1153 } 1154 return len; 1155 } 1156 1157 /** 1158 * save_zero_page: send the zero page to the stream 1159 * 1160 * Returns the number of pages written. 1161 * 1162 * @pss: current PSS channel 1163 * @block: block that contains the page we want to send 1164 * @offset: offset inside the block for the page 1165 */ 1166 static int save_zero_page(PageSearchStatus *pss, QEMUFile *f, RAMBlock *block, 1167 ram_addr_t offset) 1168 { 1169 int len = save_zero_page_to_file(pss, f, block, offset); 1170 1171 if (len) { 1172 stat64_add(&mig_stats.zero_pages, 1); 1173 ram_transferred_add(len); 1174 return 1; 1175 } 1176 return -1; 1177 } 1178 1179 /* 1180 * @pages: the number of pages written by the control path, 1181 * < 0 - error 1182 * > 0 - number of pages written 1183 * 1184 * Return true if the pages has been saved, otherwise false is returned. 1185 */ 1186 static bool control_save_page(PageSearchStatus *pss, RAMBlock *block, 1187 ram_addr_t offset, int *pages) 1188 { 1189 uint64_t bytes_xmit = 0; 1190 int ret; 1191 1192 *pages = -1; 1193 ret = ram_control_save_page(pss->pss_channel, block->offset, offset, 1194 TARGET_PAGE_SIZE, &bytes_xmit); 1195 if (ret == RAM_SAVE_CONTROL_NOT_SUPP) { 1196 return false; 1197 } 1198 1199 if (bytes_xmit) { 1200 ram_transferred_add(bytes_xmit); 1201 *pages = 1; 1202 } 1203 1204 if (ret == RAM_SAVE_CONTROL_DELAYED) { 1205 return true; 1206 } 1207 1208 if (bytes_xmit > 0) { 1209 stat64_add(&mig_stats.normal_pages, 1); 1210 } else if (bytes_xmit == 0) { 1211 stat64_add(&mig_stats.zero_pages, 1); 1212 } 1213 1214 return true; 1215 } 1216 1217 /* 1218 * directly send the page to the stream 1219 * 1220 * Returns the number of pages written. 
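 *
 * Wire cost example (illustrative): a page that starts a new block costs
 * the 8-byte offset/flags header plus 1 + strlen(idstr) bytes of block
 * name, followed by TARGET_PAGE_SIZE bytes of data; a page sent with
 * RAM_SAVE_FLAG_CONTINUE skips the block name.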
 *
 * @pss: current PSS channel
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @buf: the page to be sent
 * @async: send the page asynchronously
 */
static int save_normal_page(PageSearchStatus *pss, RAMBlock *block,
                            ram_addr_t offset, uint8_t *buf, bool async)
{
    QEMUFile *file = pss->pss_channel;

    ram_transferred_add(save_page_header(pss, pss->pss_channel, block,
                                         offset | RAM_SAVE_FLAG_PAGE));
    if (async) {
        qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE,
                              migrate_release_ram() &&
                              migration_in_postcopy());
    } else {
        qemu_put_buffer(file, buf, TARGET_PAGE_SIZE);
    }
    ram_transferred_add(TARGET_PAGE_SIZE);
    stat64_add(&mig_stats.normal_pages, 1);
    return 1;
}

/**
 * ram_save_page: send the given page to the stream
 *
 * Returns the number of pages written.
 *          < 0 - error
 *          >=0 - Number of pages written - this might legally be 0
 *                if xbzrle noticed the page was the same.
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
{
    int pages = -1;
    uint8_t *p;
    bool send_async = true;
    RAMBlock *block = pss->block;
    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
    ram_addr_t current_addr = block->offset + offset;

    p = block->host + offset;
    trace_ram_save_page(block->idstr, (uint64_t)offset, p);

    XBZRLE_cache_lock();
    if (rs->xbzrle_started && !migration_in_postcopy()) {
        pages = save_xbzrle_page(rs, pss, &p, current_addr,
                                 block, offset);
        if (!rs->last_stage) {
            /* Can't send this cached data async, since the cache page
             * might get updated before it gets to the wire
             */
            send_async = false;
        }
    }

    /* XBZRLE overflow or normal page */
    if (pages == -1) {
        pages = save_normal_page(pss, block, offset, p, send_async);
    }

    XBZRLE_cache_unlock();

    return pages;
}

static int ram_save_multifd_page(QEMUFile *file, RAMBlock *block,
                                 ram_addr_t offset)
{
    if (multifd_queue_page(file, block, offset) < 0) {
        return -1;
    }
    stat64_add(&mig_stats.normal_pages, 1);

    return 1;
}

static void
update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
{
    ram_transferred_add(bytes_xmit);

    if (param->result == RES_ZEROPAGE) {
        stat64_add(&mig_stats.zero_pages, 1);
        return;
    }

    /* 8 means a header with RAM_SAVE_FLAG_CONTINUE.
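 * (Illustrative: if a compressed page consumed 256 bytes on the wire,
 * including that 8-byte header, compressed_size below grows by 248.)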
*/ 1315 compression_counters.compressed_size += bytes_xmit - 8; 1316 compression_counters.pages++; 1317 } 1318 1319 static bool save_page_use_compression(RAMState *rs); 1320 1321 static int send_queued_data(CompressParam *param) 1322 { 1323 PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_PRECOPY]; 1324 MigrationState *ms = migrate_get_current(); 1325 QEMUFile *file = ms->to_dst_file; 1326 int len = 0; 1327 1328 RAMBlock *block = param->block; 1329 ram_addr_t offset = param->offset; 1330 1331 if (param->result == RES_NONE) { 1332 return 0; 1333 } 1334 1335 assert(block == pss->last_sent_block); 1336 1337 if (param->result == RES_ZEROPAGE) { 1338 assert(qemu_file_buffer_empty(param->file)); 1339 len += save_page_header(pss, file, block, offset | RAM_SAVE_FLAG_ZERO); 1340 qemu_put_byte(file, 0); 1341 len += 1; 1342 ram_release_page(block->idstr, offset); 1343 } else if (param->result == RES_COMPRESS) { 1344 assert(!qemu_file_buffer_empty(param->file)); 1345 len += save_page_header(pss, file, block, 1346 offset | RAM_SAVE_FLAG_COMPRESS_PAGE); 1347 len += qemu_put_qemu_file(file, param->file); 1348 } else { 1349 abort(); 1350 } 1351 1352 update_compress_thread_counts(param, len); 1353 1354 return len; 1355 } 1356 1357 static void ram_flush_compressed_data(RAMState *rs) 1358 { 1359 if (!save_page_use_compression(rs)) { 1360 return; 1361 } 1362 1363 flush_compressed_data(send_queued_data); 1364 } 1365 1366 #define PAGE_ALL_CLEAN 0 1367 #define PAGE_TRY_AGAIN 1 1368 #define PAGE_DIRTY_FOUND 2 1369 /** 1370 * find_dirty_block: find the next dirty page and update any state 1371 * associated with the search process. 1372 * 1373 * Returns: 1374 * <0: An error happened 1375 * PAGE_ALL_CLEAN: no dirty page found, give up 1376 * PAGE_TRY_AGAIN: no dirty page found, retry for next block 1377 * PAGE_DIRTY_FOUND: dirty page found 1378 * 1379 * @rs: current RAM state 1380 * @pss: data about the state of the current dirty page scan 1381 * @again: set to false if the search has scanned the whole of RAM 1382 */ 1383 static int find_dirty_block(RAMState *rs, PageSearchStatus *pss) 1384 { 1385 /* Update pss->page for the next dirty bit in ramblock */ 1386 pss_find_next_dirty(pss); 1387 1388 if (pss->complete_round && pss->block == rs->last_seen_block && 1389 pss->page >= rs->last_page) { 1390 /* 1391 * We've been once around the RAM and haven't found anything. 1392 * Give up. 1393 */ 1394 return PAGE_ALL_CLEAN; 1395 } 1396 if (!offset_in_ramblock(pss->block, 1397 ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)) { 1398 /* Didn't find anything in this RAM Block */ 1399 pss->page = 0; 1400 pss->block = QLIST_NEXT_RCU(pss->block, next); 1401 if (!pss->block) { 1402 if (!migrate_multifd_flush_after_each_section()) { 1403 QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel; 1404 int ret = multifd_send_sync_main(f); 1405 if (ret < 0) { 1406 return ret; 1407 } 1408 qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); 1409 qemu_fflush(f); 1410 } 1411 /* 1412 * If memory migration starts over, we will meet a dirtied page 1413 * which may still exists in compression threads's ring, so we 1414 * should flush the compressed data to make sure the new page 1415 * is not overwritten by the old one in the destination. 1416 * 1417 * Also If xbzrle is on, stop using the data compression at this 1418 * point. In theory, xbzrle can do better than compression. 
1419 */ 1420 ram_flush_compressed_data(rs); 1421 1422 /* Hit the end of the list */ 1423 pss->block = QLIST_FIRST_RCU(&ram_list.blocks); 1424 /* Flag that we've looped */ 1425 pss->complete_round = true; 1426 /* After the first round, enable XBZRLE. */ 1427 if (migrate_xbzrle()) { 1428 rs->xbzrle_started = true; 1429 } 1430 } 1431 /* Didn't find anything this time, but try again on the new block */ 1432 return PAGE_TRY_AGAIN; 1433 } else { 1434 /* We've found something */ 1435 return PAGE_DIRTY_FOUND; 1436 } 1437 } 1438 1439 /** 1440 * unqueue_page: gets a page of the queue 1441 * 1442 * Helper for 'get_queued_page' - gets a page off the queue 1443 * 1444 * Returns the block of the page (or NULL if none available) 1445 * 1446 * @rs: current RAM state 1447 * @offset: used to return the offset within the RAMBlock 1448 */ 1449 static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset) 1450 { 1451 struct RAMSrcPageRequest *entry; 1452 RAMBlock *block = NULL; 1453 1454 if (!postcopy_has_request(rs)) { 1455 return NULL; 1456 } 1457 1458 QEMU_LOCK_GUARD(&rs->src_page_req_mutex); 1459 1460 /* 1461 * This should _never_ change even after we take the lock, because no one 1462 * should be taking anything off the request list other than us. 1463 */ 1464 assert(postcopy_has_request(rs)); 1465 1466 entry = QSIMPLEQ_FIRST(&rs->src_page_requests); 1467 block = entry->rb; 1468 *offset = entry->offset; 1469 1470 if (entry->len > TARGET_PAGE_SIZE) { 1471 entry->len -= TARGET_PAGE_SIZE; 1472 entry->offset += TARGET_PAGE_SIZE; 1473 } else { 1474 memory_region_unref(block->mr); 1475 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req); 1476 g_free(entry); 1477 migration_consume_urgent_request(); 1478 } 1479 1480 return block; 1481 } 1482 1483 #if defined(__linux__) 1484 /** 1485 * poll_fault_page: try to get next UFFD write fault page and, if pending fault 1486 * is found, return RAM block pointer and page offset 1487 * 1488 * Returns pointer to the RAMBlock containing faulting page, 1489 * NULL if no write faults are pending 1490 * 1491 * @rs: current RAM state 1492 * @offset: page offset from the beginning of the block 1493 */ 1494 static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset) 1495 { 1496 struct uffd_msg uffd_msg; 1497 void *page_address; 1498 RAMBlock *block; 1499 int res; 1500 1501 if (!migrate_background_snapshot()) { 1502 return NULL; 1503 } 1504 1505 res = uffd_read_events(rs->uffdio_fd, &uffd_msg, 1); 1506 if (res <= 0) { 1507 return NULL; 1508 } 1509 1510 page_address = (void *)(uintptr_t) uffd_msg.arg.pagefault.address; 1511 block = qemu_ram_block_from_host(page_address, false, offset); 1512 assert(block && (block->flags & RAM_UF_WRITEPROTECT) != 0); 1513 return block; 1514 } 1515 1516 /** 1517 * ram_save_release_protection: release UFFD write protection after 1518 * a range of pages has been saved 1519 * 1520 * @rs: current RAM state 1521 * @pss: page-search-status structure 1522 * @start_page: index of the first page in the range relative to pss->block 1523 * 1524 * Returns 0 on success, negative value in case of an error 1525 */ 1526 static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss, 1527 unsigned long start_page) 1528 { 1529 int res = 0; 1530 1531 /* Check if page is from UFFD-managed region. 
*/ 1532 if (pss->block->flags & RAM_UF_WRITEPROTECT) { 1533 void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS); 1534 uint64_t run_length = (pss->page - start_page) << TARGET_PAGE_BITS; 1535 1536 /* Flush async buffers before un-protect. */ 1537 qemu_fflush(pss->pss_channel); 1538 /* Un-protect memory range. */ 1539 res = uffd_change_protection(rs->uffdio_fd, page_address, run_length, 1540 false, false); 1541 } 1542 1543 return res; 1544 } 1545 1546 /* ram_write_tracking_available: check if kernel supports required UFFD features 1547 * 1548 * Returns true if supports, false otherwise 1549 */ 1550 bool ram_write_tracking_available(void) 1551 { 1552 uint64_t uffd_features; 1553 int res; 1554 1555 res = uffd_query_features(&uffd_features); 1556 return (res == 0 && 1557 (uffd_features & UFFD_FEATURE_PAGEFAULT_FLAG_WP) != 0); 1558 } 1559 1560 /* ram_write_tracking_compatible: check if guest configuration is 1561 * compatible with 'write-tracking' 1562 * 1563 * Returns true if compatible, false otherwise 1564 */ 1565 bool ram_write_tracking_compatible(void) 1566 { 1567 const uint64_t uffd_ioctls_mask = BIT(_UFFDIO_WRITEPROTECT); 1568 int uffd_fd; 1569 RAMBlock *block; 1570 bool ret = false; 1571 1572 /* Open UFFD file descriptor */ 1573 uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, false); 1574 if (uffd_fd < 0) { 1575 return false; 1576 } 1577 1578 RCU_READ_LOCK_GUARD(); 1579 1580 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 1581 uint64_t uffd_ioctls; 1582 1583 /* Nothing to do with read-only and MMIO-writable regions */ 1584 if (block->mr->readonly || block->mr->rom_device) { 1585 continue; 1586 } 1587 /* Try to register block memory via UFFD-IO to track writes */ 1588 if (uffd_register_memory(uffd_fd, block->host, block->max_length, 1589 UFFDIO_REGISTER_MODE_WP, &uffd_ioctls)) { 1590 goto out; 1591 } 1592 if ((uffd_ioctls & uffd_ioctls_mask) != uffd_ioctls_mask) { 1593 goto out; 1594 } 1595 } 1596 ret = true; 1597 1598 out: 1599 uffd_close_fd(uffd_fd); 1600 return ret; 1601 } 1602 1603 static inline void populate_read_range(RAMBlock *block, ram_addr_t offset, 1604 ram_addr_t size) 1605 { 1606 const ram_addr_t end = offset + size; 1607 1608 /* 1609 * We read one byte of each page; this will preallocate page tables if 1610 * required and populate the shared zeropage on MAP_PRIVATE anonymous memory 1611 * where no page was populated yet. This might require adaption when 1612 * supporting other mappings, like shmem. 1613 */ 1614 for (; offset < end; offset += block->page_size) { 1615 char tmp = *((char *)block->host + offset); 1616 1617 /* Don't optimize the read out */ 1618 asm volatile("" : "+r" (tmp)); 1619 } 1620 } 1621 1622 static inline int populate_read_section(MemoryRegionSection *section, 1623 void *opaque) 1624 { 1625 const hwaddr size = int128_get64(section->size); 1626 hwaddr offset = section->offset_within_region; 1627 RAMBlock *block = section->mr->ram_block; 1628 1629 populate_read_range(block, offset, size); 1630 return 0; 1631 } 1632 1633 /* 1634 * ram_block_populate_read: preallocate page tables and populate pages in the 1635 * RAM block by reading a byte of each page. 1636 * 1637 * Since it's solely used for userfault_fd WP feature, here we just 1638 * hardcode page size to qemu_real_host_page_size. 
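 *
 * Illustrative example: with 4 KiB host pages, populating a 2 MiB range
 * issues 512 one-byte reads, one per page, which is enough to allocate the
 * page tables and map the shared zeropage for anonymous memory.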
1639 * 1640 * @block: RAM block to populate 1641 */ 1642 static void ram_block_populate_read(RAMBlock *rb) 1643 { 1644 /* 1645 * Skip populating all pages that fall into a discarded range as managed by 1646 * a RamDiscardManager responsible for the mapped memory region of the 1647 * RAMBlock. Such discarded ("logically unplugged") parts of a RAMBlock 1648 * must not get populated automatically. We don't have to track 1649 * modifications via userfaultfd WP reliably, because these pages will 1650 * not be part of the migration stream either way -- see 1651 * ramblock_dirty_bitmap_exclude_discarded_pages(). 1652 * 1653 * Note: The result is only stable while migrating (precopy/postcopy). 1654 */ 1655 if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) { 1656 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); 1657 MemoryRegionSection section = { 1658 .mr = rb->mr, 1659 .offset_within_region = 0, 1660 .size = rb->mr->size, 1661 }; 1662 1663 ram_discard_manager_replay_populated(rdm, §ion, 1664 populate_read_section, NULL); 1665 } else { 1666 populate_read_range(rb, 0, rb->used_length); 1667 } 1668 } 1669 1670 /* 1671 * ram_write_tracking_prepare: prepare for UFFD-WP memory tracking 1672 */ 1673 void ram_write_tracking_prepare(void) 1674 { 1675 RAMBlock *block; 1676 1677 RCU_READ_LOCK_GUARD(); 1678 1679 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 1680 /* Nothing to do with read-only and MMIO-writable regions */ 1681 if (block->mr->readonly || block->mr->rom_device) { 1682 continue; 1683 } 1684 1685 /* 1686 * Populate pages of the RAM block before enabling userfault_fd 1687 * write protection. 1688 * 1689 * This stage is required since ioctl(UFFDIO_WRITEPROTECT) with 1690 * UFFDIO_WRITEPROTECT_MODE_WP mode setting would silently skip 1691 * pages with pte_none() entries in page table. 
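 *
 * (Illustrative consequence: a page the guest has never touched has no
 * PTE yet, so without the populate step below its write protection would
 * never be armed and its first write after the snapshot starts would go
 * untracked.)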
1692 */ 1693 ram_block_populate_read(block); 1694 } 1695 } 1696 1697 static inline int uffd_protect_section(MemoryRegionSection *section, 1698 void *opaque) 1699 { 1700 const hwaddr size = int128_get64(section->size); 1701 const hwaddr offset = section->offset_within_region; 1702 RAMBlock *rb = section->mr->ram_block; 1703 int uffd_fd = (uintptr_t)opaque; 1704 1705 return uffd_change_protection(uffd_fd, rb->host + offset, size, true, 1706 false); 1707 } 1708 1709 static int ram_block_uffd_protect(RAMBlock *rb, int uffd_fd) 1710 { 1711 assert(rb->flags & RAM_UF_WRITEPROTECT); 1712 1713 /* See ram_block_populate_read() */ 1714 if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) { 1715 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); 1716 MemoryRegionSection section = { 1717 .mr = rb->mr, 1718 .offset_within_region = 0, 1719 .size = rb->mr->size, 1720 }; 1721 1722 return ram_discard_manager_replay_populated(rdm, §ion, 1723 uffd_protect_section, 1724 (void *)(uintptr_t)uffd_fd); 1725 } 1726 return uffd_change_protection(uffd_fd, rb->host, 1727 rb->used_length, true, false); 1728 } 1729 1730 /* 1731 * ram_write_tracking_start: start UFFD-WP memory tracking 1732 * 1733 * Returns 0 for success or negative value in case of error 1734 */ 1735 int ram_write_tracking_start(void) 1736 { 1737 int uffd_fd; 1738 RAMState *rs = ram_state; 1739 RAMBlock *block; 1740 1741 /* Open UFFD file descriptor */ 1742 uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, true); 1743 if (uffd_fd < 0) { 1744 return uffd_fd; 1745 } 1746 rs->uffdio_fd = uffd_fd; 1747 1748 RCU_READ_LOCK_GUARD(); 1749 1750 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 1751 /* Nothing to do with read-only and MMIO-writable regions */ 1752 if (block->mr->readonly || block->mr->rom_device) { 1753 continue; 1754 } 1755 1756 /* Register block memory with UFFD to track writes */ 1757 if (uffd_register_memory(rs->uffdio_fd, block->host, 1758 block->max_length, UFFDIO_REGISTER_MODE_WP, NULL)) { 1759 goto fail; 1760 } 1761 block->flags |= RAM_UF_WRITEPROTECT; 1762 memory_region_ref(block->mr); 1763 1764 /* Apply UFFD write protection to the block memory range */ 1765 if (ram_block_uffd_protect(block, uffd_fd)) { 1766 goto fail; 1767 } 1768 1769 trace_ram_write_tracking_ramblock_start(block->idstr, block->page_size, 1770 block->host, block->max_length); 1771 } 1772 1773 return 0; 1774 1775 fail: 1776 error_report("ram_write_tracking_start() failed: restoring initial memory state"); 1777 1778 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 1779 if ((block->flags & RAM_UF_WRITEPROTECT) == 0) { 1780 continue; 1781 } 1782 uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length); 1783 /* Cleanup flags and remove reference */ 1784 block->flags &= ~RAM_UF_WRITEPROTECT; 1785 memory_region_unref(block->mr); 1786 } 1787 1788 uffd_close_fd(uffd_fd); 1789 rs->uffdio_fd = -1; 1790 return -1; 1791 } 1792 1793 /** 1794 * ram_write_tracking_stop: stop UFFD-WP memory tracking and remove protection 1795 */ 1796 void ram_write_tracking_stop(void) 1797 { 1798 RAMState *rs = ram_state; 1799 RAMBlock *block; 1800 1801 RCU_READ_LOCK_GUARD(); 1802 1803 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 1804 if ((block->flags & RAM_UF_WRITEPROTECT) == 0) { 1805 continue; 1806 } 1807 uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length); 1808 1809 trace_ram_write_tracking_ramblock_stop(block->idstr, block->page_size, 1810 block->host, block->max_length); 1811 1812 /* Cleanup flags and remove reference */ 1813 block->flags &= 
            ~RAM_UF_WRITEPROTECT;
        memory_region_unref(block->mr);
    }

    /* Finally close UFFD file descriptor */
    uffd_close_fd(rs->uffdio_fd);
    rs->uffdio_fd = -1;
}

#else
/* No target OS support, stubs just fail or ignore */

static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
{
    (void) rs;
    (void) offset;

    return NULL;
}

static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
                                       unsigned long start_page)
{
    (void) rs;
    (void) pss;
    (void) start_page;

    return 0;
}

bool ram_write_tracking_available(void)
{
    return false;
}

bool ram_write_tracking_compatible(void)
{
    assert(0);
    return false;
}

int ram_write_tracking_start(void)
{
    assert(0);
    return -1;
}

void ram_write_tracking_stop(void)
{
    assert(0);
}
#endif /* defined(__linux__) */

/**
 * get_queued_page: unqueue a page from the postcopy requests
 *
 * Skips pages that are already sent (!dirty)
 *
 * Returns true if a queued page is found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 */
static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
{
    RAMBlock *block;
    ram_addr_t offset;
    bool dirty;

    do {
        block = unqueue_page(rs, &offset);
        /*
         * We're sending this page, and since it's postcopy nothing else
         * will dirty it, and we must make sure it doesn't get sent again
         * even if this queue request was received after the background
         * search already sent it.
         */
        if (block) {
            unsigned long page;

            page = offset >> TARGET_PAGE_BITS;
            dirty = test_bit(page, block->bmap);
            if (!dirty) {
                trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
                                                page);
            } else {
                trace_get_queued_page(block->idstr, (uint64_t)offset, page);
            }
        }

    } while (block && !dirty);

    if (!block) {
        /*
         * Poll write faults too if background snapshot is enabled; that's
         * when vcpus get blocked by write-protected pages.
         */
        block = poll_fault_page(rs, &offset);
    }

    if (block) {
        /*
         * We want the background search to continue from the queued page
         * since the guest is likely to want other pages near to the page
         * it just requested.
         */
        pss->block = block;
        pss->page = offset >> TARGET_PAGE_BITS;

        /*
         * This unqueued page would break the "one round" check, even if
         * it is really rare.
         */
        pss->complete_round = false;
    }

    return !!block;
}

/**
 * migration_page_queue_free: drop any remaining pages in the ram
 * request queue
 *
 * It should be empty at the end anyway, but in error cases there may
 * be some left; if any pages remain, we drop them here.
 *
 */
static void migration_page_queue_free(RAMState *rs)
{
    struct RAMSrcPageRequest *mspr, *next_mspr;
    /* This queue generally should be empty - but in the case of a failed
     * migration it might have some droppings in.
1945 */ 1946 RCU_READ_LOCK_GUARD(); 1947 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) { 1948 memory_region_unref(mspr->rb->mr); 1949 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req); 1950 g_free(mspr); 1951 } 1952 } 1953 1954 /** 1955 * ram_save_queue_pages: queue the page for transmission 1956 * 1957 * A request from postcopy destination for example. 1958 * 1959 * Returns zero on success or negative on error 1960 * 1961 * @rbname: Name of the RAMBLock of the request. NULL means the 1962 * same that last one. 1963 * @start: starting address from the start of the RAMBlock 1964 * @len: length (in bytes) to send 1965 */ 1966 int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len) 1967 { 1968 RAMBlock *ramblock; 1969 RAMState *rs = ram_state; 1970 1971 stat64_add(&mig_stats.postcopy_requests, 1); 1972 RCU_READ_LOCK_GUARD(); 1973 1974 if (!rbname) { 1975 /* Reuse last RAMBlock */ 1976 ramblock = rs->last_req_rb; 1977 1978 if (!ramblock) { 1979 /* 1980 * Shouldn't happen, we can't reuse the last RAMBlock if 1981 * it's the 1st request. 1982 */ 1983 error_report("ram_save_queue_pages no previous block"); 1984 return -1; 1985 } 1986 } else { 1987 ramblock = qemu_ram_block_by_name(rbname); 1988 1989 if (!ramblock) { 1990 /* We shouldn't be asked for a non-existent RAMBlock */ 1991 error_report("ram_save_queue_pages no block '%s'", rbname); 1992 return -1; 1993 } 1994 rs->last_req_rb = ramblock; 1995 } 1996 trace_ram_save_queue_pages(ramblock->idstr, start, len); 1997 if (!offset_in_ramblock(ramblock, start + len - 1)) { 1998 error_report("%s request overrun start=" RAM_ADDR_FMT " len=" 1999 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT, 2000 __func__, start, len, ramblock->used_length); 2001 return -1; 2002 } 2003 2004 /* 2005 * When with postcopy preempt, we send back the page directly in the 2006 * rp-return thread. 2007 */ 2008 if (postcopy_preempt_active()) { 2009 ram_addr_t page_start = start >> TARGET_PAGE_BITS; 2010 size_t page_size = qemu_ram_pagesize(ramblock); 2011 PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_POSTCOPY]; 2012 int ret = 0; 2013 2014 qemu_mutex_lock(&rs->bitmap_mutex); 2015 2016 pss_init(pss, ramblock, page_start); 2017 /* 2018 * Always use the preempt channel, and make sure it's there. It's 2019 * safe to access without lock, because when rp-thread is running 2020 * we should be the only one who operates on the qemufile 2021 */ 2022 pss->pss_channel = migrate_get_current()->postcopy_qemufile_src; 2023 assert(pss->pss_channel); 2024 2025 /* 2026 * It must be either one or multiple of host page size. Just 2027 * assert; if something wrong we're mostly split brain anyway. 2028 */ 2029 assert(len % page_size == 0); 2030 while (len) { 2031 if (ram_save_host_page_urgent(pss)) { 2032 error_report("%s: ram_save_host_page_urgent() failed: " 2033 "ramblock=%s, start_addr=0x"RAM_ADDR_FMT, 2034 __func__, ramblock->idstr, start); 2035 ret = -1; 2036 break; 2037 } 2038 /* 2039 * NOTE: after ram_save_host_page_urgent() succeeded, pss->page 2040 * will automatically be moved and point to the next host page 2041 * we're going to send, so no need to update here. 2042 * 2043 * Normally QEMU never sends >1 host page in requests, so 2044 * logically we don't even need that as the loop should only 2045 * run once, but just to be consistent. 
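             *
             * As a rough sense of scale: len must be a multiple of the
             * RAMBlock's page size (asserted above), so for a plain 4 KiB
             * block a 16 KiB request runs this loop four times, while for
             * a 2 MiB hugetlb-backed block a whole-host-page request is a
             * single iteration.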
2046 */ 2047 len -= page_size; 2048 }; 2049 qemu_mutex_unlock(&rs->bitmap_mutex); 2050 2051 return ret; 2052 } 2053 2054 struct RAMSrcPageRequest *new_entry = 2055 g_new0(struct RAMSrcPageRequest, 1); 2056 new_entry->rb = ramblock; 2057 new_entry->offset = start; 2058 new_entry->len = len; 2059 2060 memory_region_ref(ramblock->mr); 2061 qemu_mutex_lock(&rs->src_page_req_mutex); 2062 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req); 2063 migration_make_urgent_request(); 2064 qemu_mutex_unlock(&rs->src_page_req_mutex); 2065 2066 return 0; 2067 } 2068 2069 static bool save_page_use_compression(RAMState *rs) 2070 { 2071 if (!migrate_compress()) { 2072 return false; 2073 } 2074 2075 /* 2076 * If xbzrle is enabled (e.g., after first round of migration), stop 2077 * using the data compression. In theory, xbzrle can do better than 2078 * compression. 2079 */ 2080 if (rs->xbzrle_started) { 2081 return false; 2082 } 2083 2084 return true; 2085 } 2086 2087 /* 2088 * try to compress the page before posting it out, return true if the page 2089 * has been properly handled by compression, otherwise needs other 2090 * paths to handle it 2091 */ 2092 static bool save_compress_page(RAMState *rs, PageSearchStatus *pss, 2093 RAMBlock *block, ram_addr_t offset) 2094 { 2095 if (!save_page_use_compression(rs)) { 2096 return false; 2097 } 2098 2099 /* 2100 * When starting the process of a new block, the first page of 2101 * the block should be sent out before other pages in the same 2102 * block, and all the pages in last block should have been sent 2103 * out, keeping this order is important, because the 'cont' flag 2104 * is used to avoid resending the block name. 2105 * 2106 * We post the fist page as normal page as compression will take 2107 * much CPU resource. 2108 */ 2109 if (block != pss->last_sent_block) { 2110 ram_flush_compressed_data(rs); 2111 return false; 2112 } 2113 2114 if (compress_page_with_multi_thread(block, offset, send_queued_data) > 0) { 2115 return true; 2116 } 2117 2118 compression_counters.busy++; 2119 return false; 2120 } 2121 2122 /** 2123 * ram_save_target_page_legacy: save one target page 2124 * 2125 * Returns the number of pages written 2126 * 2127 * @rs: current RAM state 2128 * @pss: data about the page we want to send 2129 */ 2130 static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss) 2131 { 2132 RAMBlock *block = pss->block; 2133 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; 2134 int res; 2135 2136 if (control_save_page(pss, block, offset, &res)) { 2137 return res; 2138 } 2139 2140 if (save_compress_page(rs, pss, block, offset)) { 2141 return 1; 2142 } 2143 2144 res = save_zero_page(pss, pss->pss_channel, block, offset); 2145 if (res > 0) { 2146 /* Must let xbzrle know, otherwise a previous (now 0'd) cached 2147 * page would be stale 2148 */ 2149 if (rs->xbzrle_started) { 2150 XBZRLE_cache_lock(); 2151 xbzrle_cache_zero_page(rs, block->offset + offset); 2152 XBZRLE_cache_unlock(); 2153 } 2154 return res; 2155 } 2156 2157 /* 2158 * Do not use multifd in postcopy as one whole host page should be 2159 * placed. Meanwhile postcopy requires atomic update of pages, so even 2160 * if host page size == guest page size the dest guest during run may 2161 * still see partially copied pages which is data corruption. 
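     *
     * Taken together, the dispatch order in this function is: the
     * control/RDMA hook first, then multi-threaded compression, then the
     * zero-page check, then multifd (precopy only), and finally the plain
     * ram_save_page() fallback below.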
2162 */ 2163 if (migrate_multifd() && !migration_in_postcopy()) { 2164 return ram_save_multifd_page(pss->pss_channel, block, offset); 2165 } 2166 2167 return ram_save_page(rs, pss); 2168 } 2169 2170 /* Should be called before sending a host page */ 2171 static void pss_host_page_prepare(PageSearchStatus *pss) 2172 { 2173 /* How many guest pages are there in one host page? */ 2174 size_t guest_pfns = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS; 2175 2176 pss->host_page_sending = true; 2177 if (guest_pfns <= 1) { 2178 /* 2179 * This covers both when guest psize == host psize, or when guest 2180 * has larger psize than the host (guest_pfns==0). 2181 * 2182 * For the latter, we always send one whole guest page per 2183 * iteration of the host page (example: an Alpha VM on x86 host 2184 * will have guest psize 8K while host psize 4K). 2185 */ 2186 pss->host_page_start = pss->page; 2187 pss->host_page_end = pss->page + 1; 2188 } else { 2189 /* 2190 * The host page spans over multiple guest pages, we send them 2191 * within the same host page iteration. 2192 */ 2193 pss->host_page_start = ROUND_DOWN(pss->page, guest_pfns); 2194 pss->host_page_end = ROUND_UP(pss->page + 1, guest_pfns); 2195 } 2196 } 2197 2198 /* 2199 * Whether the page pointed by PSS is within the host page being sent. 2200 * Must be called after a previous pss_host_page_prepare(). 2201 */ 2202 static bool pss_within_range(PageSearchStatus *pss) 2203 { 2204 ram_addr_t ram_addr; 2205 2206 assert(pss->host_page_sending); 2207 2208 /* Over host-page boundary? */ 2209 if (pss->page >= pss->host_page_end) { 2210 return false; 2211 } 2212 2213 ram_addr = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; 2214 2215 return offset_in_ramblock(pss->block, ram_addr); 2216 } 2217 2218 static void pss_host_page_finish(PageSearchStatus *pss) 2219 { 2220 pss->host_page_sending = false; 2221 /* This is not needed, but just to reset it */ 2222 pss->host_page_start = pss->host_page_end = 0; 2223 } 2224 2225 /* 2226 * Send an urgent host page specified by `pss'. Need to be called with 2227 * bitmap_mutex held. 2228 * 2229 * Returns 0 if save host page succeeded, false otherwise. 2230 */ 2231 static int ram_save_host_page_urgent(PageSearchStatus *pss) 2232 { 2233 bool page_dirty, sent = false; 2234 RAMState *rs = ram_state; 2235 int ret = 0; 2236 2237 trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page); 2238 pss_host_page_prepare(pss); 2239 2240 /* 2241 * If precopy is sending the same page, let it be done in precopy, or 2242 * we could send the same page in two channels and none of them will 2243 * receive the whole page. 2244 */ 2245 if (pss_overlap(pss, &ram_state->pss[RAM_CHANNEL_PRECOPY])) { 2246 trace_postcopy_preempt_hit(pss->block->idstr, 2247 pss->page << TARGET_PAGE_BITS); 2248 return 0; 2249 } 2250 2251 do { 2252 page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page); 2253 2254 if (page_dirty) { 2255 /* Be strict to return code; it must be 1, or what else? 
*/ 2256 if (migration_ops->ram_save_target_page(rs, pss) != 1) { 2257 error_report_once("%s: ram_save_target_page failed", __func__); 2258 ret = -1; 2259 goto out; 2260 } 2261 sent = true; 2262 } 2263 pss_find_next_dirty(pss); 2264 } while (pss_within_range(pss)); 2265 out: 2266 pss_host_page_finish(pss); 2267 /* For urgent requests, flush immediately if sent */ 2268 if (sent) { 2269 qemu_fflush(pss->pss_channel); 2270 } 2271 return ret; 2272 } 2273 2274 /** 2275 * ram_save_host_page: save a whole host page 2276 * 2277 * Starting at *offset send pages up to the end of the current host 2278 * page. It's valid for the initial offset to point into the middle of 2279 * a host page in which case the remainder of the hostpage is sent. 2280 * Only dirty target pages are sent. Note that the host page size may 2281 * be a huge page for this block. 2282 * 2283 * The saving stops at the boundary of the used_length of the block 2284 * if the RAMBlock isn't a multiple of the host page size. 2285 * 2286 * The caller must be with ram_state.bitmap_mutex held to call this 2287 * function. Note that this function can temporarily release the lock, but 2288 * when the function is returned it'll make sure the lock is still held. 2289 * 2290 * Returns the number of pages written or negative on error 2291 * 2292 * @rs: current RAM state 2293 * @pss: data about the page we want to send 2294 */ 2295 static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss) 2296 { 2297 bool page_dirty, preempt_active = postcopy_preempt_active(); 2298 int tmppages, pages = 0; 2299 size_t pagesize_bits = 2300 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS; 2301 unsigned long start_page = pss->page; 2302 int res; 2303 2304 if (migrate_ram_is_ignored(pss->block)) { 2305 error_report("block %s should not be migrated !", pss->block->idstr); 2306 return 0; 2307 } 2308 2309 /* Update host page boundary information */ 2310 pss_host_page_prepare(pss); 2311 2312 do { 2313 page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page); 2314 2315 /* Check the pages is dirty and if it is send it */ 2316 if (page_dirty) { 2317 /* 2318 * Properly yield the lock only in postcopy preempt mode 2319 * because both migration thread and rp-return thread can 2320 * operate on the bitmaps. 2321 */ 2322 if (preempt_active) { 2323 qemu_mutex_unlock(&rs->bitmap_mutex); 2324 } 2325 tmppages = migration_ops->ram_save_target_page(rs, pss); 2326 if (tmppages >= 0) { 2327 pages += tmppages; 2328 /* 2329 * Allow rate limiting to happen in the middle of huge pages if 2330 * something is sent in the current iteration. 2331 */ 2332 if (pagesize_bits > 1 && tmppages > 0) { 2333 migration_rate_limit(); 2334 } 2335 } 2336 if (preempt_active) { 2337 qemu_mutex_lock(&rs->bitmap_mutex); 2338 } 2339 } else { 2340 tmppages = 0; 2341 } 2342 2343 if (tmppages < 0) { 2344 pss_host_page_finish(pss); 2345 return tmppages; 2346 } 2347 2348 pss_find_next_dirty(pss); 2349 } while (pss_within_range(pss)); 2350 2351 pss_host_page_finish(pss); 2352 2353 res = ram_save_release_protection(rs, pss, start_page); 2354 return (res < 0 ? res : pages); 2355 } 2356 2357 /** 2358 * ram_find_and_save_block: finds a dirty page and sends it to f 2359 * 2360 * Called within an RCU critical section. 2361 * 2362 * Returns the number of pages written where zero means no dirty pages, 2363 * or negative on error 2364 * 2365 * @rs: current RAM state 2366 * 2367 * On systems where host-page-size > target-page-size it will send all the 2368 * pages in a host page that are dirty. 
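 *
 * As a rough illustration: with the block backed by 2 MiB host pages and
 * 4 KiB target pages, one successful call may send up to 512 dirty target
 * pages before returning.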
2369 */ 2370 static int ram_find_and_save_block(RAMState *rs) 2371 { 2372 PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY]; 2373 int pages = 0; 2374 2375 /* No dirty page as there is zero RAM */ 2376 if (!rs->ram_bytes_total) { 2377 return pages; 2378 } 2379 2380 /* 2381 * Always keep last_seen_block/last_page valid during this procedure, 2382 * because find_dirty_block() relies on these values (e.g., we compare 2383 * last_seen_block with pss.block to see whether we searched all the 2384 * ramblocks) to detect the completion of migration. Having NULL value 2385 * of last_seen_block can conditionally cause below loop to run forever. 2386 */ 2387 if (!rs->last_seen_block) { 2388 rs->last_seen_block = QLIST_FIRST_RCU(&ram_list.blocks); 2389 rs->last_page = 0; 2390 } 2391 2392 pss_init(pss, rs->last_seen_block, rs->last_page); 2393 2394 while (true){ 2395 if (!get_queued_page(rs, pss)) { 2396 /* priority queue empty, so just search for something dirty */ 2397 int res = find_dirty_block(rs, pss); 2398 if (res != PAGE_DIRTY_FOUND) { 2399 if (res == PAGE_ALL_CLEAN) { 2400 break; 2401 } else if (res == PAGE_TRY_AGAIN) { 2402 continue; 2403 } else if (res < 0) { 2404 pages = res; 2405 break; 2406 } 2407 } 2408 } 2409 pages = ram_save_host_page(rs, pss); 2410 if (pages) { 2411 break; 2412 } 2413 } 2414 2415 rs->last_seen_block = pss->block; 2416 rs->last_page = pss->page; 2417 2418 return pages; 2419 } 2420 2421 static uint64_t ram_bytes_total_with_ignored(void) 2422 { 2423 RAMBlock *block; 2424 uint64_t total = 0; 2425 2426 RCU_READ_LOCK_GUARD(); 2427 2428 RAMBLOCK_FOREACH_MIGRATABLE(block) { 2429 total += block->used_length; 2430 } 2431 return total; 2432 } 2433 2434 uint64_t ram_bytes_total(void) 2435 { 2436 RAMBlock *block; 2437 uint64_t total = 0; 2438 2439 RCU_READ_LOCK_GUARD(); 2440 2441 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2442 total += block->used_length; 2443 } 2444 return total; 2445 } 2446 2447 static void xbzrle_load_setup(void) 2448 { 2449 XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE); 2450 } 2451 2452 static void xbzrle_load_cleanup(void) 2453 { 2454 g_free(XBZRLE.decoded_buf); 2455 XBZRLE.decoded_buf = NULL; 2456 } 2457 2458 static void ram_state_cleanup(RAMState **rsp) 2459 { 2460 if (*rsp) { 2461 migration_page_queue_free(*rsp); 2462 qemu_mutex_destroy(&(*rsp)->bitmap_mutex); 2463 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex); 2464 g_free(*rsp); 2465 *rsp = NULL; 2466 } 2467 } 2468 2469 static void xbzrle_cleanup(void) 2470 { 2471 XBZRLE_cache_lock(); 2472 if (XBZRLE.cache) { 2473 cache_fini(XBZRLE.cache); 2474 g_free(XBZRLE.encoded_buf); 2475 g_free(XBZRLE.current_buf); 2476 g_free(XBZRLE.zero_target_page); 2477 XBZRLE.cache = NULL; 2478 XBZRLE.encoded_buf = NULL; 2479 XBZRLE.current_buf = NULL; 2480 XBZRLE.zero_target_page = NULL; 2481 } 2482 XBZRLE_cache_unlock(); 2483 } 2484 2485 static void ram_save_cleanup(void *opaque) 2486 { 2487 RAMState **rsp = opaque; 2488 RAMBlock *block; 2489 2490 /* We don't use dirty log with background snapshots */ 2491 if (!migrate_background_snapshot()) { 2492 /* caller have hold iothread lock or is in a bh, so there is 2493 * no writing race against the migration bitmap 2494 */ 2495 if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) { 2496 /* 2497 * do not stop dirty log without starting it, since 2498 * memory_global_dirty_log_stop will assert that 2499 * memory_global_dirty_log_start/stop used in pairs 2500 */ 2501 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION); 2502 } 2503 } 2504 2505 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 
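        /*
         * Free the per-block migration bitmaps; they are (re)allocated by
         * ram_list_init_bitmaps() on the next migration attempt.
         */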
2506 g_free(block->clear_bmap); 2507 block->clear_bmap = NULL; 2508 g_free(block->bmap); 2509 block->bmap = NULL; 2510 } 2511 2512 xbzrle_cleanup(); 2513 compress_threads_save_cleanup(); 2514 ram_state_cleanup(rsp); 2515 g_free(migration_ops); 2516 migration_ops = NULL; 2517 } 2518 2519 static void ram_state_reset(RAMState *rs) 2520 { 2521 int i; 2522 2523 for (i = 0; i < RAM_CHANNEL_MAX; i++) { 2524 rs->pss[i].last_sent_block = NULL; 2525 } 2526 2527 rs->last_seen_block = NULL; 2528 rs->last_page = 0; 2529 rs->last_version = ram_list.version; 2530 rs->xbzrle_started = false; 2531 } 2532 2533 #define MAX_WAIT 50 /* ms, half buffered_file limit */ 2534 2535 /* **** functions for postcopy ***** */ 2536 2537 void ram_postcopy_migrated_memory_release(MigrationState *ms) 2538 { 2539 struct RAMBlock *block; 2540 2541 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2542 unsigned long *bitmap = block->bmap; 2543 unsigned long range = block->used_length >> TARGET_PAGE_BITS; 2544 unsigned long run_start = find_next_zero_bit(bitmap, range, 0); 2545 2546 while (run_start < range) { 2547 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1); 2548 ram_discard_range(block->idstr, 2549 ((ram_addr_t)run_start) << TARGET_PAGE_BITS, 2550 ((ram_addr_t)(run_end - run_start)) 2551 << TARGET_PAGE_BITS); 2552 run_start = find_next_zero_bit(bitmap, range, run_end + 1); 2553 } 2554 } 2555 } 2556 2557 /** 2558 * postcopy_send_discard_bm_ram: discard a RAMBlock 2559 * 2560 * Callback from postcopy_each_ram_send_discard for each RAMBlock 2561 * 2562 * @ms: current migration state 2563 * @block: RAMBlock to discard 2564 */ 2565 static void postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block) 2566 { 2567 unsigned long end = block->used_length >> TARGET_PAGE_BITS; 2568 unsigned long current; 2569 unsigned long *bitmap = block->bmap; 2570 2571 for (current = 0; current < end; ) { 2572 unsigned long one = find_next_bit(bitmap, end, current); 2573 unsigned long zero, discard_length; 2574 2575 if (one >= end) { 2576 break; 2577 } 2578 2579 zero = find_next_zero_bit(bitmap, end, one + 1); 2580 2581 if (zero >= end) { 2582 discard_length = end - one; 2583 } else { 2584 discard_length = zero - one; 2585 } 2586 postcopy_discard_send_range(ms, one, discard_length); 2587 current = one + discard_length; 2588 } 2589 } 2590 2591 static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block); 2592 2593 /** 2594 * postcopy_each_ram_send_discard: discard all RAMBlocks 2595 * 2596 * Utility for the outgoing postcopy code. 2597 * Calls postcopy_send_discard_bm_ram for each RAMBlock 2598 * passing it bitmap indexes and name. 2599 * (qemu_ram_foreach_block ends up passing unscaled lengths 2600 * which would mean postcopy code would have to deal with target page) 2601 * 2602 * @ms: current migration state 2603 */ 2604 static void postcopy_each_ram_send_discard(MigrationState *ms) 2605 { 2606 struct RAMBlock *block; 2607 2608 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2609 postcopy_discard_send_init(ms, block->idstr); 2610 2611 /* 2612 * Deal with TPS != HPS and huge pages. It discard any partially sent 2613 * host-page size chunks, mark any partially dirty host-page size 2614 * chunks as all dirty. In this case the host-page is the host-page 2615 * for the particular RAMBlock, i.e. it might be a huge page. 
2616 */ 2617 postcopy_chunk_hostpages_pass(ms, block); 2618 2619 /* 2620 * Postcopy sends chunks of bitmap over the wire, but it 2621 * just needs indexes at this point, avoids it having 2622 * target page specific code. 2623 */ 2624 postcopy_send_discard_bm_ram(ms, block); 2625 postcopy_discard_send_finish(ms); 2626 } 2627 } 2628 2629 /** 2630 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages 2631 * 2632 * Helper for postcopy_chunk_hostpages; it's called twice to 2633 * canonicalize the two bitmaps, that are similar, but one is 2634 * inverted. 2635 * 2636 * Postcopy requires that all target pages in a hostpage are dirty or 2637 * clean, not a mix. This function canonicalizes the bitmaps. 2638 * 2639 * @ms: current migration state 2640 * @block: block that contains the page we want to canonicalize 2641 */ 2642 static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block) 2643 { 2644 RAMState *rs = ram_state; 2645 unsigned long *bitmap = block->bmap; 2646 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE; 2647 unsigned long pages = block->used_length >> TARGET_PAGE_BITS; 2648 unsigned long run_start; 2649 2650 if (block->page_size == TARGET_PAGE_SIZE) { 2651 /* Easy case - TPS==HPS for a non-huge page RAMBlock */ 2652 return; 2653 } 2654 2655 /* Find a dirty page */ 2656 run_start = find_next_bit(bitmap, pages, 0); 2657 2658 while (run_start < pages) { 2659 2660 /* 2661 * If the start of this run of pages is in the middle of a host 2662 * page, then we need to fixup this host page. 2663 */ 2664 if (QEMU_IS_ALIGNED(run_start, host_ratio)) { 2665 /* Find the end of this run */ 2666 run_start = find_next_zero_bit(bitmap, pages, run_start + 1); 2667 /* 2668 * If the end isn't at the start of a host page, then the 2669 * run doesn't finish at the end of a host page 2670 * and we need to discard. 2671 */ 2672 } 2673 2674 if (!QEMU_IS_ALIGNED(run_start, host_ratio)) { 2675 unsigned long page; 2676 unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start, 2677 host_ratio); 2678 run_start = QEMU_ALIGN_UP(run_start, host_ratio); 2679 2680 /* Clean up the bitmap */ 2681 for (page = fixup_start_addr; 2682 page < fixup_start_addr + host_ratio; page++) { 2683 /* 2684 * Remark them as dirty, updating the count for any pages 2685 * that weren't previously dirty. 
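                 *
                 * Rough example: with host_ratio == 512 (2 MiB host pages,
                 * 4 KiB target pages) and a dirty run starting at target
                 * page 1000, fixup_start_addr is 512, pages 512..1023 all
                 * end up dirty, and run_start advances to 1024.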
2686 */ 2687 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap); 2688 } 2689 } 2690 2691 /* Find the next dirty page for the next iteration */ 2692 run_start = find_next_bit(bitmap, pages, run_start); 2693 } 2694 } 2695 2696 /** 2697 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap 2698 * 2699 * Transmit the set of pages to be discarded after precopy to the target 2700 * these are pages that: 2701 * a) Have been previously transmitted but are now dirty again 2702 * b) Pages that have never been transmitted, this ensures that 2703 * any pages on the destination that have been mapped by background 2704 * tasks get discarded (transparent huge pages is the specific concern) 2705 * Hopefully this is pretty sparse 2706 * 2707 * @ms: current migration state 2708 */ 2709 void ram_postcopy_send_discard_bitmap(MigrationState *ms) 2710 { 2711 RAMState *rs = ram_state; 2712 2713 RCU_READ_LOCK_GUARD(); 2714 2715 /* This should be our last sync, the src is now paused */ 2716 migration_bitmap_sync(rs, false); 2717 2718 /* Easiest way to make sure we don't resume in the middle of a host-page */ 2719 rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL; 2720 rs->last_seen_block = NULL; 2721 rs->last_page = 0; 2722 2723 postcopy_each_ram_send_discard(ms); 2724 2725 trace_ram_postcopy_send_discard_bitmap(); 2726 } 2727 2728 /** 2729 * ram_discard_range: discard dirtied pages at the beginning of postcopy 2730 * 2731 * Returns zero on success 2732 * 2733 * @rbname: name of the RAMBlock of the request. NULL means the 2734 * same that last one. 2735 * @start: RAMBlock starting page 2736 * @length: RAMBlock size 2737 */ 2738 int ram_discard_range(const char *rbname, uint64_t start, size_t length) 2739 { 2740 trace_ram_discard_range(rbname, start, length); 2741 2742 RCU_READ_LOCK_GUARD(); 2743 RAMBlock *rb = qemu_ram_block_by_name(rbname); 2744 2745 if (!rb) { 2746 error_report("ram_discard_range: Failed to find block '%s'", rbname); 2747 return -1; 2748 } 2749 2750 /* 2751 * On source VM, we don't need to update the received bitmap since 2752 * we don't even have one. 2753 */ 2754 if (rb->receivedmap) { 2755 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(), 2756 length >> qemu_target_page_bits()); 2757 } 2758 2759 return ram_block_discard_range(rb, start, length); 2760 } 2761 2762 /* 2763 * For every allocation, we will try not to crash the VM if the 2764 * allocation failed. 
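 * That is why the g_try_*() allocators are used below: on failure we back
 * out any partial allocations and return -ENOMEM instead of aborting.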
2765 */ 2766 static int xbzrle_init(void) 2767 { 2768 Error *local_err = NULL; 2769 2770 if (!migrate_xbzrle()) { 2771 return 0; 2772 } 2773 2774 XBZRLE_cache_lock(); 2775 2776 XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE); 2777 if (!XBZRLE.zero_target_page) { 2778 error_report("%s: Error allocating zero page", __func__); 2779 goto err_out; 2780 } 2781 2782 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(), 2783 TARGET_PAGE_SIZE, &local_err); 2784 if (!XBZRLE.cache) { 2785 error_report_err(local_err); 2786 goto free_zero_page; 2787 } 2788 2789 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE); 2790 if (!XBZRLE.encoded_buf) { 2791 error_report("%s: Error allocating encoded_buf", __func__); 2792 goto free_cache; 2793 } 2794 2795 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE); 2796 if (!XBZRLE.current_buf) { 2797 error_report("%s: Error allocating current_buf", __func__); 2798 goto free_encoded_buf; 2799 } 2800 2801 /* We are all good */ 2802 XBZRLE_cache_unlock(); 2803 return 0; 2804 2805 free_encoded_buf: 2806 g_free(XBZRLE.encoded_buf); 2807 XBZRLE.encoded_buf = NULL; 2808 free_cache: 2809 cache_fini(XBZRLE.cache); 2810 XBZRLE.cache = NULL; 2811 free_zero_page: 2812 g_free(XBZRLE.zero_target_page); 2813 XBZRLE.zero_target_page = NULL; 2814 err_out: 2815 XBZRLE_cache_unlock(); 2816 return -ENOMEM; 2817 } 2818 2819 static int ram_state_init(RAMState **rsp) 2820 { 2821 *rsp = g_try_new0(RAMState, 1); 2822 2823 if (!*rsp) { 2824 error_report("%s: Init ramstate fail", __func__); 2825 return -1; 2826 } 2827 2828 qemu_mutex_init(&(*rsp)->bitmap_mutex); 2829 qemu_mutex_init(&(*rsp)->src_page_req_mutex); 2830 QSIMPLEQ_INIT(&(*rsp)->src_page_requests); 2831 (*rsp)->ram_bytes_total = ram_bytes_total(); 2832 2833 /* 2834 * Count the total number of pages used by ram blocks not including any 2835 * gaps due to alignment or unplugs. 2836 * This must match with the initial values of dirty bitmap. 2837 */ 2838 (*rsp)->migration_dirty_pages = (*rsp)->ram_bytes_total >> TARGET_PAGE_BITS; 2839 ram_state_reset(*rsp); 2840 2841 return 0; 2842 } 2843 2844 static void ram_list_init_bitmaps(void) 2845 { 2846 MigrationState *ms = migrate_get_current(); 2847 RAMBlock *block; 2848 unsigned long pages; 2849 uint8_t shift; 2850 2851 /* Skip setting bitmap if there is no RAM */ 2852 if (ram_bytes_total()) { 2853 shift = ms->clear_bitmap_shift; 2854 if (shift > CLEAR_BITMAP_SHIFT_MAX) { 2855 error_report("clear_bitmap_shift (%u) too big, using " 2856 "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX); 2857 shift = CLEAR_BITMAP_SHIFT_MAX; 2858 } else if (shift < CLEAR_BITMAP_SHIFT_MIN) { 2859 error_report("clear_bitmap_shift (%u) too small, using " 2860 "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN); 2861 shift = CLEAR_BITMAP_SHIFT_MIN; 2862 } 2863 2864 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2865 pages = block->max_length >> TARGET_PAGE_BITS; 2866 /* 2867 * The initial dirty bitmap for migration must be set with all 2868 * ones to make sure we'll migrate every guest RAM page to 2869 * destination. 2870 * Here we set RAMBlock.bmap all to 1 because when rebegin a 2871 * new migration after a failed migration, ram_list. 2872 * dirty_memory[DIRTY_MEMORY_MIGRATION] don't include the whole 2873 * guest memory. 
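             *
             * For scale (assuming the default clear_bitmap_shift of 18 and
             * 4 KiB target pages), each bit of the clear_bmap allocated
             * just below covers 2^18 target pages, i.e. 1 GiB of RAM.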
2874 */ 2875 block->bmap = bitmap_new(pages); 2876 bitmap_set(block->bmap, 0, pages); 2877 block->clear_bmap_shift = shift; 2878 block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift)); 2879 } 2880 } 2881 } 2882 2883 static void migration_bitmap_clear_discarded_pages(RAMState *rs) 2884 { 2885 unsigned long pages; 2886 RAMBlock *rb; 2887 2888 RCU_READ_LOCK_GUARD(); 2889 2890 RAMBLOCK_FOREACH_NOT_IGNORED(rb) { 2891 pages = ramblock_dirty_bitmap_clear_discarded_pages(rb); 2892 rs->migration_dirty_pages -= pages; 2893 } 2894 } 2895 2896 static void ram_init_bitmaps(RAMState *rs) 2897 { 2898 /* For memory_global_dirty_log_start below. */ 2899 qemu_mutex_lock_iothread(); 2900 qemu_mutex_lock_ramlist(); 2901 2902 WITH_RCU_READ_LOCK_GUARD() { 2903 ram_list_init_bitmaps(); 2904 /* We don't use dirty log with background snapshots */ 2905 if (!migrate_background_snapshot()) { 2906 memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION); 2907 migration_bitmap_sync_precopy(rs, false); 2908 } 2909 } 2910 qemu_mutex_unlock_ramlist(); 2911 qemu_mutex_unlock_iothread(); 2912 2913 /* 2914 * After an eventual first bitmap sync, fixup the initial bitmap 2915 * containing all 1s to exclude any discarded pages from migration. 2916 */ 2917 migration_bitmap_clear_discarded_pages(rs); 2918 } 2919 2920 static int ram_init_all(RAMState **rsp) 2921 { 2922 if (ram_state_init(rsp)) { 2923 return -1; 2924 } 2925 2926 if (xbzrle_init()) { 2927 ram_state_cleanup(rsp); 2928 return -1; 2929 } 2930 2931 ram_init_bitmaps(*rsp); 2932 2933 return 0; 2934 } 2935 2936 static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out) 2937 { 2938 RAMBlock *block; 2939 uint64_t pages = 0; 2940 2941 /* 2942 * Postcopy is not using xbzrle/compression, so no need for that. 2943 * Also, since source are already halted, we don't need to care 2944 * about dirty page logging as well. 2945 */ 2946 2947 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 2948 pages += bitmap_count_one(block->bmap, 2949 block->used_length >> TARGET_PAGE_BITS); 2950 } 2951 2952 /* This may not be aligned with current bitmaps. Recalculate. */ 2953 rs->migration_dirty_pages = pages; 2954 2955 ram_state_reset(rs); 2956 2957 /* Update RAMState cache of output QEMUFile */ 2958 rs->pss[RAM_CHANNEL_PRECOPY].pss_channel = out; 2959 2960 trace_ram_state_resume_prepare(pages); 2961 } 2962 2963 /* 2964 * This function clears bits of the free pages reported by the caller from the 2965 * migration dirty bitmap. @addr is the host address corresponding to the 2966 * start of the continuous guest free pages, and @len is the total bytes of 2967 * those pages. 2968 */ 2969 void qemu_guest_free_page_hint(void *addr, size_t len) 2970 { 2971 RAMBlock *block; 2972 ram_addr_t offset; 2973 size_t used_len, start, npages; 2974 MigrationState *s = migrate_get_current(); 2975 2976 /* This function is currently expected to be used during live migration */ 2977 if (!migration_is_setup_or_active(s->state)) { 2978 return; 2979 } 2980 2981 for (; len > 0; len -= used_len, addr += used_len) { 2982 block = qemu_ram_block_from_host(addr, false, &offset); 2983 if (unlikely(!block || offset >= block->used_length)) { 2984 /* 2985 * The implementation might not support RAMBlock resize during 2986 * live migration, but it could happen in theory with future 2987 * updates. So we add a check here to capture that case. 
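             *
             * Hints that span more than one RAMBlock are not special-cased
             * here: the loop header trims len to what fits in this block
             * (used_len) and retries the remainder at the next host address
             * on the following iteration.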
2988 */ 2989 error_report_once("%s unexpected error", __func__); 2990 return; 2991 } 2992 2993 if (len <= block->used_length - offset) { 2994 used_len = len; 2995 } else { 2996 used_len = block->used_length - offset; 2997 } 2998 2999 start = offset >> TARGET_PAGE_BITS; 3000 npages = used_len >> TARGET_PAGE_BITS; 3001 3002 qemu_mutex_lock(&ram_state->bitmap_mutex); 3003 /* 3004 * The skipped free pages are equavalent to be sent from clear_bmap's 3005 * perspective, so clear the bits from the memory region bitmap which 3006 * are initially set. Otherwise those skipped pages will be sent in 3007 * the next round after syncing from the memory region bitmap. 3008 */ 3009 migration_clear_memory_region_dirty_bitmap_range(block, start, npages); 3010 ram_state->migration_dirty_pages -= 3011 bitmap_count_one_with_offset(block->bmap, start, npages); 3012 bitmap_clear(block->bmap, start, npages); 3013 qemu_mutex_unlock(&ram_state->bitmap_mutex); 3014 } 3015 } 3016 3017 /* 3018 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has 3019 * long-running RCU critical section. When rcu-reclaims in the code 3020 * start to become numerous it will be necessary to reduce the 3021 * granularity of these critical sections. 3022 */ 3023 3024 /** 3025 * ram_save_setup: Setup RAM for migration 3026 * 3027 * Returns zero to indicate success and negative for error 3028 * 3029 * @f: QEMUFile where to send the data 3030 * @opaque: RAMState pointer 3031 */ 3032 static int ram_save_setup(QEMUFile *f, void *opaque) 3033 { 3034 RAMState **rsp = opaque; 3035 RAMBlock *block; 3036 int ret; 3037 3038 if (compress_threads_save_setup()) { 3039 return -1; 3040 } 3041 3042 /* migration has already setup the bitmap, reuse it. */ 3043 if (!migration_in_colo_state()) { 3044 if (ram_init_all(rsp) != 0) { 3045 compress_threads_save_cleanup(); 3046 return -1; 3047 } 3048 } 3049 (*rsp)->pss[RAM_CHANNEL_PRECOPY].pss_channel = f; 3050 3051 WITH_RCU_READ_LOCK_GUARD() { 3052 qemu_put_be64(f, ram_bytes_total_with_ignored() 3053 | RAM_SAVE_FLAG_MEM_SIZE); 3054 3055 RAMBLOCK_FOREACH_MIGRATABLE(block) { 3056 qemu_put_byte(f, strlen(block->idstr)); 3057 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr)); 3058 qemu_put_be64(f, block->used_length); 3059 if (migrate_postcopy_ram() && block->page_size != 3060 qemu_host_page_size) { 3061 qemu_put_be64(f, block->page_size); 3062 } 3063 if (migrate_ignore_shared()) { 3064 qemu_put_be64(f, block->mr->addr); 3065 } 3066 } 3067 } 3068 3069 ram_control_before_iterate(f, RAM_CONTROL_SETUP); 3070 ram_control_after_iterate(f, RAM_CONTROL_SETUP); 3071 3072 migration_ops = g_malloc0(sizeof(MigrationOps)); 3073 migration_ops->ram_save_target_page = ram_save_target_page_legacy; 3074 ret = multifd_send_sync_main(f); 3075 if (ret < 0) { 3076 return ret; 3077 } 3078 3079 if (!migrate_multifd_flush_after_each_section()) { 3080 qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); 3081 } 3082 3083 qemu_put_be64(f, RAM_SAVE_FLAG_EOS); 3084 qemu_fflush(f); 3085 3086 return 0; 3087 } 3088 3089 /** 3090 * ram_save_iterate: iterative stage for migration 3091 * 3092 * Returns zero to indicate success and negative for error 3093 * 3094 * @f: QEMUFile where to send the data 3095 * @opaque: RAMState pointer 3096 */ 3097 static int ram_save_iterate(QEMUFile *f, void *opaque) 3098 { 3099 RAMState **temp = opaque; 3100 RAMState *rs = *temp; 3101 int ret = 0; 3102 int i; 3103 int64_t t0; 3104 int done = 0; 3105 3106 if (blk_mig_bulk_active()) { 3107 /* Avoid transferring ram during bulk phase of block 
migration as 3108 * the bulk phase will usually take a long time and transferring 3109 * ram updates during that time is pointless. */ 3110 goto out; 3111 } 3112 3113 /* 3114 * We'll take this lock a little bit long, but it's okay for two reasons. 3115 * Firstly, the only possible other thread to take it is who calls 3116 * qemu_guest_free_page_hint(), which should be rare; secondly, see 3117 * MAX_WAIT (if curious, further see commit 4508bd9ed8053ce) below, which 3118 * guarantees that we'll at least released it in a regular basis. 3119 */ 3120 qemu_mutex_lock(&rs->bitmap_mutex); 3121 WITH_RCU_READ_LOCK_GUARD() { 3122 if (ram_list.version != rs->last_version) { 3123 ram_state_reset(rs); 3124 } 3125 3126 /* Read version before ram_list.blocks */ 3127 smp_rmb(); 3128 3129 ram_control_before_iterate(f, RAM_CONTROL_ROUND); 3130 3131 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 3132 i = 0; 3133 while ((ret = migration_rate_exceeded(f)) == 0 || 3134 postcopy_has_request(rs)) { 3135 int pages; 3136 3137 if (qemu_file_get_error(f)) { 3138 break; 3139 } 3140 3141 pages = ram_find_and_save_block(rs); 3142 /* no more pages to sent */ 3143 if (pages == 0) { 3144 done = 1; 3145 break; 3146 } 3147 3148 if (pages < 0) { 3149 qemu_file_set_error(f, pages); 3150 break; 3151 } 3152 3153 rs->target_page_count += pages; 3154 3155 /* 3156 * During postcopy, it is necessary to make sure one whole host 3157 * page is sent in one chunk. 3158 */ 3159 if (migrate_postcopy_ram()) { 3160 ram_flush_compressed_data(rs); 3161 } 3162 3163 /* 3164 * we want to check in the 1st loop, just in case it was the 1st 3165 * time and we had to sync the dirty bitmap. 3166 * qemu_clock_get_ns() is a bit expensive, so we only check each 3167 * some iterations 3168 */ 3169 if ((i & 63) == 0) { 3170 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 3171 1000000; 3172 if (t1 > MAX_WAIT) { 3173 trace_ram_save_iterate_big_wait(t1, i); 3174 break; 3175 } 3176 } 3177 i++; 3178 } 3179 } 3180 qemu_mutex_unlock(&rs->bitmap_mutex); 3181 3182 /* 3183 * Must occur before EOS (or any QEMUFile operation) 3184 * because of RDMA protocol. 
3185 */ 3186 ram_control_after_iterate(f, RAM_CONTROL_ROUND); 3187 3188 out: 3189 if (ret >= 0 3190 && migration_is_setup_or_active(migrate_get_current()->state)) { 3191 if (migrate_multifd_flush_after_each_section()) { 3192 ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel); 3193 if (ret < 0) { 3194 return ret; 3195 } 3196 } 3197 3198 qemu_put_be64(f, RAM_SAVE_FLAG_EOS); 3199 qemu_fflush(f); 3200 ram_transferred_add(8); 3201 3202 ret = qemu_file_get_error(f); 3203 } 3204 if (ret < 0) { 3205 return ret; 3206 } 3207 3208 return done; 3209 } 3210 3211 /** 3212 * ram_save_complete: function called to send the remaining amount of ram 3213 * 3214 * Returns zero to indicate success or negative on error 3215 * 3216 * Called with iothread lock 3217 * 3218 * @f: QEMUFile where to send the data 3219 * @opaque: RAMState pointer 3220 */ 3221 static int ram_save_complete(QEMUFile *f, void *opaque) 3222 { 3223 RAMState **temp = opaque; 3224 RAMState *rs = *temp; 3225 int ret = 0; 3226 3227 rs->last_stage = !migration_in_colo_state(); 3228 3229 WITH_RCU_READ_LOCK_GUARD() { 3230 if (!migration_in_postcopy()) { 3231 migration_bitmap_sync_precopy(rs, true); 3232 } 3233 3234 ram_control_before_iterate(f, RAM_CONTROL_FINISH); 3235 3236 /* try transferring iterative blocks of memory */ 3237 3238 /* flush all remaining blocks regardless of rate limiting */ 3239 qemu_mutex_lock(&rs->bitmap_mutex); 3240 while (true) { 3241 int pages; 3242 3243 pages = ram_find_and_save_block(rs); 3244 /* no more blocks to sent */ 3245 if (pages == 0) { 3246 break; 3247 } 3248 if (pages < 0) { 3249 ret = pages; 3250 break; 3251 } 3252 } 3253 qemu_mutex_unlock(&rs->bitmap_mutex); 3254 3255 ram_flush_compressed_data(rs); 3256 ram_control_after_iterate(f, RAM_CONTROL_FINISH); 3257 } 3258 3259 if (ret < 0) { 3260 return ret; 3261 } 3262 3263 ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel); 3264 if (ret < 0) { 3265 return ret; 3266 } 3267 3268 if (!migrate_multifd_flush_after_each_section()) { 3269 qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); 3270 } 3271 qemu_put_be64(f, RAM_SAVE_FLAG_EOS); 3272 qemu_fflush(f); 3273 3274 return 0; 3275 } 3276 3277 static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy, 3278 uint64_t *can_postcopy) 3279 { 3280 RAMState **temp = opaque; 3281 RAMState *rs = *temp; 3282 3283 uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; 3284 3285 if (migrate_postcopy_ram()) { 3286 /* We can do postcopy, and all the data is postcopiable */ 3287 *can_postcopy += remaining_size; 3288 } else { 3289 *must_precopy += remaining_size; 3290 } 3291 } 3292 3293 static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy, 3294 uint64_t *can_postcopy) 3295 { 3296 MigrationState *s = migrate_get_current(); 3297 RAMState **temp = opaque; 3298 RAMState *rs = *temp; 3299 3300 uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; 3301 3302 if (!migration_in_postcopy() && remaining_size < s->threshold_size) { 3303 qemu_mutex_lock_iothread(); 3304 WITH_RCU_READ_LOCK_GUARD() { 3305 migration_bitmap_sync_precopy(rs, false); 3306 } 3307 qemu_mutex_unlock_iothread(); 3308 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; 3309 } 3310 3311 if (migrate_postcopy_ram()) { 3312 /* We can do postcopy, and all the data is postcopiable */ 3313 *can_postcopy += remaining_size; 3314 } else { 3315 *must_precopy += remaining_size; 3316 } 3317 } 3318 3319 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host) 3320 { 
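    /*
     * Wire format consumed below: one byte of flags (which must be
     * ENCODING_FLAG_XBZRLE), a big-endian 16-bit encoded length capped at
     * TARGET_PAGE_SIZE, then the XBZRLE-encoded delta itself.
     */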
3321 unsigned int xh_len; 3322 int xh_flags; 3323 uint8_t *loaded_data; 3324 3325 /* extract RLE header */ 3326 xh_flags = qemu_get_byte(f); 3327 xh_len = qemu_get_be16(f); 3328 3329 if (xh_flags != ENCODING_FLAG_XBZRLE) { 3330 error_report("Failed to load XBZRLE page - wrong compression!"); 3331 return -1; 3332 } 3333 3334 if (xh_len > TARGET_PAGE_SIZE) { 3335 error_report("Failed to load XBZRLE page - len overflow!"); 3336 return -1; 3337 } 3338 loaded_data = XBZRLE.decoded_buf; 3339 /* load data and decode */ 3340 /* it can change loaded_data to point to an internal buffer */ 3341 qemu_get_buffer_in_place(f, &loaded_data, xh_len); 3342 3343 /* decode RLE */ 3344 if (xbzrle_decode_buffer(loaded_data, xh_len, host, 3345 TARGET_PAGE_SIZE) == -1) { 3346 error_report("Failed to load XBZRLE page - decode error!"); 3347 return -1; 3348 } 3349 3350 return 0; 3351 } 3352 3353 /** 3354 * ram_block_from_stream: read a RAMBlock id from the migration stream 3355 * 3356 * Must be called from within a rcu critical section. 3357 * 3358 * Returns a pointer from within the RCU-protected ram_list. 3359 * 3360 * @mis: the migration incoming state pointer 3361 * @f: QEMUFile where to read the data from 3362 * @flags: Page flags (mostly to see if it's a continuation of previous block) 3363 * @channel: the channel we're using 3364 */ 3365 static inline RAMBlock *ram_block_from_stream(MigrationIncomingState *mis, 3366 QEMUFile *f, int flags, 3367 int channel) 3368 { 3369 RAMBlock *block = mis->last_recv_block[channel]; 3370 char id[256]; 3371 uint8_t len; 3372 3373 if (flags & RAM_SAVE_FLAG_CONTINUE) { 3374 if (!block) { 3375 error_report("Ack, bad migration stream!"); 3376 return NULL; 3377 } 3378 return block; 3379 } 3380 3381 len = qemu_get_byte(f); 3382 qemu_get_buffer(f, (uint8_t *)id, len); 3383 id[len] = 0; 3384 3385 block = qemu_ram_block_by_name(id); 3386 if (!block) { 3387 error_report("Can't find block %s", id); 3388 return NULL; 3389 } 3390 3391 if (migrate_ram_is_ignored(block)) { 3392 error_report("block %s should not be migrated !", id); 3393 return NULL; 3394 } 3395 3396 mis->last_recv_block[channel] = block; 3397 3398 return block; 3399 } 3400 3401 static inline void *host_from_ram_block_offset(RAMBlock *block, 3402 ram_addr_t offset) 3403 { 3404 if (!offset_in_ramblock(block, offset)) { 3405 return NULL; 3406 } 3407 3408 return block->host + offset; 3409 } 3410 3411 static void *host_page_from_ram_block_offset(RAMBlock *block, 3412 ram_addr_t offset) 3413 { 3414 /* Note: Explicitly no check against offset_in_ramblock(). 
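     *
     * The postcopy caller is expected to have validated addr already (see
     * the block->postcopy_length check in ram_load_postcopy()); this helper
     * just aligns the host address down to the start of its host page.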
*/ 3415 return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block->host + offset), 3416 block->page_size); 3417 } 3418 3419 static ram_addr_t host_page_offset_from_ram_block_offset(RAMBlock *block, 3420 ram_addr_t offset) 3421 { 3422 return ((uintptr_t)block->host + offset) & (block->page_size - 1); 3423 } 3424 3425 void colo_record_bitmap(RAMBlock *block, ram_addr_t *normal, uint32_t pages) 3426 { 3427 qemu_mutex_lock(&ram_state->bitmap_mutex); 3428 for (int i = 0; i < pages; i++) { 3429 ram_addr_t offset = normal[i]; 3430 ram_state->migration_dirty_pages += !test_and_set_bit( 3431 offset >> TARGET_PAGE_BITS, 3432 block->bmap); 3433 } 3434 qemu_mutex_unlock(&ram_state->bitmap_mutex); 3435 } 3436 3437 static inline void *colo_cache_from_block_offset(RAMBlock *block, 3438 ram_addr_t offset, bool record_bitmap) 3439 { 3440 if (!offset_in_ramblock(block, offset)) { 3441 return NULL; 3442 } 3443 if (!block->colo_cache) { 3444 error_report("%s: colo_cache is NULL in block :%s", 3445 __func__, block->idstr); 3446 return NULL; 3447 } 3448 3449 /* 3450 * During colo checkpoint, we need bitmap of these migrated pages. 3451 * It help us to decide which pages in ram cache should be flushed 3452 * into VM's RAM later. 3453 */ 3454 if (record_bitmap) { 3455 colo_record_bitmap(block, &offset, 1); 3456 } 3457 return block->colo_cache + offset; 3458 } 3459 3460 /** 3461 * ram_handle_compressed: handle the zero page case 3462 * 3463 * If a page (or a whole RDMA chunk) has been 3464 * determined to be zero, then zap it. 3465 * 3466 * @host: host address for the zero page 3467 * @ch: what the page is filled from. We only support zero 3468 * @size: size of the zero page 3469 */ 3470 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size) 3471 { 3472 if (ch != 0 || !buffer_is_zero(host, size)) { 3473 memset(host, ch, size); 3474 } 3475 } 3476 3477 static void colo_init_ram_state(void) 3478 { 3479 ram_state_init(&ram_state); 3480 } 3481 3482 /* 3483 * colo cache: this is for secondary VM, we cache the whole 3484 * memory of the secondary VM, it is need to hold the global lock 3485 * to call this helper. 3486 */ 3487 int colo_init_ram_cache(void) 3488 { 3489 RAMBlock *block; 3490 3491 WITH_RCU_READ_LOCK_GUARD() { 3492 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3493 block->colo_cache = qemu_anon_ram_alloc(block->used_length, 3494 NULL, false, false); 3495 if (!block->colo_cache) { 3496 error_report("%s: Can't alloc memory for COLO cache of block %s," 3497 "size 0x" RAM_ADDR_FMT, __func__, block->idstr, 3498 block->used_length); 3499 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3500 if (block->colo_cache) { 3501 qemu_anon_ram_free(block->colo_cache, block->used_length); 3502 block->colo_cache = NULL; 3503 } 3504 } 3505 return -errno; 3506 } 3507 if (!machine_dump_guest_core(current_machine)) { 3508 qemu_madvise(block->colo_cache, block->used_length, 3509 QEMU_MADV_DONTDUMP); 3510 } 3511 } 3512 } 3513 3514 /* 3515 * Record the dirty pages that sent by PVM, we use this dirty bitmap together 3516 * with to decide which page in cache should be flushed into SVM's RAM. Here 3517 * we use the same name 'ram_bitmap' as for migration. 
3518 */ 3519 if (ram_bytes_total()) { 3520 RAMBlock *block; 3521 3522 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3523 unsigned long pages = block->max_length >> TARGET_PAGE_BITS; 3524 block->bmap = bitmap_new(pages); 3525 } 3526 } 3527 3528 colo_init_ram_state(); 3529 return 0; 3530 } 3531 3532 /* TODO: duplicated with ram_init_bitmaps */ 3533 void colo_incoming_start_dirty_log(void) 3534 { 3535 RAMBlock *block = NULL; 3536 /* For memory_global_dirty_log_start below. */ 3537 qemu_mutex_lock_iothread(); 3538 qemu_mutex_lock_ramlist(); 3539 3540 memory_global_dirty_log_sync(false); 3541 WITH_RCU_READ_LOCK_GUARD() { 3542 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3543 ramblock_sync_dirty_bitmap(ram_state, block); 3544 /* Discard this dirty bitmap record */ 3545 bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS); 3546 } 3547 memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION); 3548 } 3549 ram_state->migration_dirty_pages = 0; 3550 qemu_mutex_unlock_ramlist(); 3551 qemu_mutex_unlock_iothread(); 3552 } 3553 3554 /* It is need to hold the global lock to call this helper */ 3555 void colo_release_ram_cache(void) 3556 { 3557 RAMBlock *block; 3558 3559 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION); 3560 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3561 g_free(block->bmap); 3562 block->bmap = NULL; 3563 } 3564 3565 WITH_RCU_READ_LOCK_GUARD() { 3566 RAMBLOCK_FOREACH_NOT_IGNORED(block) { 3567 if (block->colo_cache) { 3568 qemu_anon_ram_free(block->colo_cache, block->used_length); 3569 block->colo_cache = NULL; 3570 } 3571 } 3572 } 3573 ram_state_cleanup(&ram_state); 3574 } 3575 3576 /** 3577 * ram_load_setup: Setup RAM for migration incoming side 3578 * 3579 * Returns zero to indicate success and negative for error 3580 * 3581 * @f: QEMUFile where to receive the data 3582 * @opaque: RAMState pointer 3583 */ 3584 static int ram_load_setup(QEMUFile *f, void *opaque) 3585 { 3586 xbzrle_load_setup(); 3587 ramblock_recv_map_init(); 3588 3589 return 0; 3590 } 3591 3592 static int ram_load_cleanup(void *opaque) 3593 { 3594 RAMBlock *rb; 3595 3596 RAMBLOCK_FOREACH_NOT_IGNORED(rb) { 3597 qemu_ram_block_writeback(rb); 3598 } 3599 3600 xbzrle_load_cleanup(); 3601 3602 RAMBLOCK_FOREACH_NOT_IGNORED(rb) { 3603 g_free(rb->receivedmap); 3604 rb->receivedmap = NULL; 3605 } 3606 3607 return 0; 3608 } 3609 3610 /** 3611 * ram_postcopy_incoming_init: allocate postcopy data structures 3612 * 3613 * Returns 0 for success and negative if there was one error 3614 * 3615 * @mis: current migration incoming state 3616 * 3617 * Allocate data structures etc needed by incoming migration with 3618 * postcopy-ram. postcopy-ram's similarly names 3619 * postcopy_ram_incoming_init does the work. 3620 */ 3621 int ram_postcopy_incoming_init(MigrationIncomingState *mis) 3622 { 3623 return postcopy_ram_incoming_init(mis); 3624 } 3625 3626 /** 3627 * ram_load_postcopy: load a page in postcopy case 3628 * 3629 * Returns 0 for success or -errno in case of error 3630 * 3631 * Called in postcopy mode by ram_load(). 3632 * rcu_read_lock is taken prior to this being called. 
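 *
 * Placement happens one host page at a time: target pages are accumulated
 * in the per-channel PostcopyTmpPage buffer and only handed to
 * postcopy_place_page()/postcopy_place_page_zero() once the whole host
 * page (e.g. 512 target pages for a 2 MiB hugetlbfs block) has arrived.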
3633 * 3634 * @f: QEMUFile where to send the data 3635 * @channel: the channel to use for loading 3636 */ 3637 int ram_load_postcopy(QEMUFile *f, int channel) 3638 { 3639 int flags = 0, ret = 0; 3640 bool place_needed = false; 3641 bool matches_target_page_size = false; 3642 MigrationIncomingState *mis = migration_incoming_get_current(); 3643 PostcopyTmpPage *tmp_page = &mis->postcopy_tmp_pages[channel]; 3644 3645 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) { 3646 ram_addr_t addr; 3647 void *page_buffer = NULL; 3648 void *place_source = NULL; 3649 RAMBlock *block = NULL; 3650 uint8_t ch; 3651 int len; 3652 3653 addr = qemu_get_be64(f); 3654 3655 /* 3656 * If qemu file error, we should stop here, and then "addr" 3657 * may be invalid 3658 */ 3659 ret = qemu_file_get_error(f); 3660 if (ret) { 3661 break; 3662 } 3663 3664 flags = addr & ~TARGET_PAGE_MASK; 3665 addr &= TARGET_PAGE_MASK; 3666 3667 trace_ram_load_postcopy_loop(channel, (uint64_t)addr, flags); 3668 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE | 3669 RAM_SAVE_FLAG_COMPRESS_PAGE)) { 3670 block = ram_block_from_stream(mis, f, flags, channel); 3671 if (!block) { 3672 ret = -EINVAL; 3673 break; 3674 } 3675 3676 /* 3677 * Relying on used_length is racy and can result in false positives. 3678 * We might place pages beyond used_length in case RAM was shrunk 3679 * while in postcopy, which is fine - trying to place via 3680 * UFFDIO_COPY/UFFDIO_ZEROPAGE will never segfault. 3681 */ 3682 if (!block->host || addr >= block->postcopy_length) { 3683 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr); 3684 ret = -EINVAL; 3685 break; 3686 } 3687 tmp_page->target_pages++; 3688 matches_target_page_size = block->page_size == TARGET_PAGE_SIZE; 3689 /* 3690 * Postcopy requires that we place whole host pages atomically; 3691 * these may be huge pages for RAMBlocks that are backed by 3692 * hugetlbfs. 3693 * To make it atomic, the data is read into a temporary page 3694 * that's moved into place later. 3695 * The migration protocol uses, possibly smaller, target-pages 3696 * however the source ensures it always sends all the components 3697 * of a host page in one chunk. 3698 */ 3699 page_buffer = tmp_page->tmp_huge_page + 3700 host_page_offset_from_ram_block_offset(block, addr); 3701 /* If all TP are zero then we can optimise the place */ 3702 if (tmp_page->target_pages == 1) { 3703 tmp_page->host_addr = 3704 host_page_from_ram_block_offset(block, addr); 3705 } else if (tmp_page->host_addr != 3706 host_page_from_ram_block_offset(block, addr)) { 3707 /* not the 1st TP within the HP */ 3708 error_report("Non-same host page detected on channel %d: " 3709 "Target host page %p, received host page %p " 3710 "(rb %s offset 0x"RAM_ADDR_FMT" target_pages %d)", 3711 channel, tmp_page->host_addr, 3712 host_page_from_ram_block_offset(block, addr), 3713 block->idstr, addr, tmp_page->target_pages); 3714 ret = -EINVAL; 3715 break; 3716 } 3717 3718 /* 3719 * If it's the last part of a host page then we place the host 3720 * page 3721 */ 3722 if (tmp_page->target_pages == 3723 (block->page_size / TARGET_PAGE_SIZE)) { 3724 place_needed = true; 3725 } 3726 place_source = tmp_page->tmp_huge_page; 3727 } 3728 3729 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) { 3730 case RAM_SAVE_FLAG_ZERO: 3731 ch = qemu_get_byte(f); 3732 /* 3733 * Can skip to set page_buffer when 3734 * this is a zero page and (block->page_size == TARGET_PAGE_SIZE). 
3735 */ 3736 if (ch || !matches_target_page_size) { 3737 memset(page_buffer, ch, TARGET_PAGE_SIZE); 3738 } 3739 if (ch) { 3740 tmp_page->all_zero = false; 3741 } 3742 break; 3743 3744 case RAM_SAVE_FLAG_PAGE: 3745 tmp_page->all_zero = false; 3746 if (!matches_target_page_size) { 3747 /* For huge pages, we always use temporary buffer */ 3748 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE); 3749 } else { 3750 /* 3751 * For small pages that matches target page size, we 3752 * avoid the qemu_file copy. Instead we directly use 3753 * the buffer of QEMUFile to place the page. Note: we 3754 * cannot do any QEMUFile operation before using that 3755 * buffer to make sure the buffer is valid when 3756 * placing the page. 3757 */ 3758 qemu_get_buffer_in_place(f, (uint8_t **)&place_source, 3759 TARGET_PAGE_SIZE); 3760 } 3761 break; 3762 case RAM_SAVE_FLAG_COMPRESS_PAGE: 3763 tmp_page->all_zero = false; 3764 len = qemu_get_be32(f); 3765 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) { 3766 error_report("Invalid compressed data length: %d", len); 3767 ret = -EINVAL; 3768 break; 3769 } 3770 decompress_data_with_multi_threads(f, page_buffer, len); 3771 break; 3772 case RAM_SAVE_FLAG_MULTIFD_FLUSH: 3773 multifd_recv_sync_main(); 3774 break; 3775 case RAM_SAVE_FLAG_EOS: 3776 /* normal exit */ 3777 if (migrate_multifd_flush_after_each_section()) { 3778 multifd_recv_sync_main(); 3779 } 3780 break; 3781 default: 3782 error_report("Unknown combination of migration flags: 0x%x" 3783 " (postcopy mode)", flags); 3784 ret = -EINVAL; 3785 break; 3786 } 3787 3788 /* Got the whole host page, wait for decompress before placing. */ 3789 if (place_needed) { 3790 ret |= wait_for_decompress_done(); 3791 } 3792 3793 /* Detect for any possible file errors */ 3794 if (!ret && qemu_file_get_error(f)) { 3795 ret = qemu_file_get_error(f); 3796 } 3797 3798 if (!ret && place_needed) { 3799 if (tmp_page->all_zero) { 3800 ret = postcopy_place_page_zero(mis, tmp_page->host_addr, block); 3801 } else { 3802 ret = postcopy_place_page(mis, tmp_page->host_addr, 3803 place_source, block); 3804 } 3805 place_needed = false; 3806 postcopy_temp_page_reset(tmp_page); 3807 } 3808 } 3809 3810 return ret; 3811 } 3812 3813 static bool postcopy_is_running(void) 3814 { 3815 PostcopyState ps = postcopy_state_get(); 3816 return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END; 3817 } 3818 3819 /* 3820 * Flush content of RAM cache into SVM's memory. 3821 * Only flush the pages that be dirtied by PVM or SVM or both. 
static bool postcopy_is_running(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
}

/*
 * Flush the content of the RAM cache into the SVM's memory.
 * Only flush the pages that have been dirtied by the PVM or the SVM or both.
 */
void colo_flush_ram_cache(void)
{
    RAMBlock *block = NULL;
    void *dst_host;
    void *src_host;
    unsigned long offset = 0;

    memory_global_dirty_log_sync(false);
    qemu_mutex_lock(&ram_state->bitmap_mutex);
    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            ramblock_sync_dirty_bitmap(ram_state, block);
        }
    }

    trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
    WITH_RCU_READ_LOCK_GUARD() {
        block = QLIST_FIRST_RCU(&ram_list.blocks);

        while (block) {
            unsigned long num = 0;

            offset = colo_bitmap_find_dirty(ram_state, block, offset, &num);
            if (!offset_in_ramblock(block,
                                    ((ram_addr_t)offset) << TARGET_PAGE_BITS)) {
                offset = 0;
                num = 0;
                block = QLIST_NEXT_RCU(block, next);
            } else {
                unsigned long i = 0;

                for (i = 0; i < num; i++) {
                    migration_bitmap_clear_dirty(ram_state, block, offset + i);
                }
                dst_host = block->host
                           + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
                src_host = block->colo_cache
                           + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
                memcpy(dst_host, src_host, TARGET_PAGE_SIZE * num);
                offset += num;
            }
        }
    }
    qemu_mutex_unlock(&ram_state->bitmap_mutex);
    trace_colo_flush_ram_cache_end();
}
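
/*
 * On the wire, each RAM record starts with a be64 word that packs the page
 * offset and the RAM_SAVE_FLAG_* bits together: the offset is aligned to
 * TARGET_PAGE_MASK and the flags live in the low bits.  For example,
 * assuming a 4 KiB target page size, a value of 0x7ff42008 decodes to
 * offset 0x7ff42000 with RAM_SAVE_FLAG_PAGE set.
 */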

/**
 * ram_load_precopy: load pages in precopy case
 *
 * Returns 0 for success or -errno in case of error
 *
 * Called in precopy mode by ram_load().
 * rcu_read_lock is taken prior to this being called.
 *
 * @f: QEMUFile to load the data from
 */
static int ram_load_precopy(QEMUFile *f)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    int flags = 0, ret = 0, invalid_flags = 0, len = 0, i = 0;
    /* ADVISE is earlier; it shows that the source has the postcopy capability on */
    bool postcopy_advised = migration_incoming_postcopy_advised();
    if (!migrate_compress()) {
        invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
    }

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;
        void *host = NULL, *host_bak = NULL;
        uint8_t ch;

        /*
         * Yield periodically to let the main loop run, but an iteration of
         * the main loop is expensive, so only do it every so many iterations.
         */
        if ((i & 32767) == 0 && qemu_in_coroutine()) {
            aio_co_schedule(qemu_get_current_aio_context(),
                            qemu_coroutine_self());
            qemu_coroutine_yield();
        }
        i++;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & invalid_flags) {
            if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
                error_report("Received an unexpected compressed page");
            }

            ret = -EINVAL;
            break;
        }

        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
            RAMBlock *block = ram_block_from_stream(mis, f, flags,
                                                    RAM_CHANNEL_PRECOPY);

            host = host_from_ram_block_offset(block, addr);
            /*
             * After going into COLO stage, we should not load the page
             * into the SVM's memory directly; we put it into colo_cache
             * first.
             * NOTE: We need to keep a copy of the SVM's ram in colo_cache.
             * Previously, we copied all this memory in the COLO preparing
             * stage, during which the VM had to be stopped, which is a
             * time-consuming process.  Here we optimize it by backing up
             * every page during the migration process while COLO is
             * enabled; although this slows the migration down, it clearly
             * reduces the downtime compared with backing up all of the
             * SVM's memory in the COLO preparing stage.
             */
            if (migration_incoming_colo_enabled()) {
                if (migration_incoming_in_colo_state()) {
                    /* In COLO stage, put all pages into cache temporarily */
                    host = colo_cache_from_block_offset(block, addr, true);
                } else {
                    /*
                     * In migration stage but before COLO stage,
                     * put all pages into both the cache and the SVM's memory.
                     */
                    host_bak = colo_cache_from_block_offset(block, addr, false);
                }
            }
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            if (!migration_incoming_in_colo_state()) {
                ramblock_recv_bitmap_set(block, host);
            }

            trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
        }
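
        /*
         * Payload layouts handled by the switch below, following the
         * common be64 offset/flags word:
         *   MEM_SIZE:       for each block, a u8 idstr length, the idstr
         *                   bytes, a be64 length, and (depending on the
         *                   negotiated capabilities) a be64 page size
         *                   and/or a be64 GPA
         *   ZERO:           a single fill byte
         *   PAGE:           TARGET_PAGE_SIZE bytes of raw page data
         *   COMPRESS_PAGE:  a be32 length followed by compressed data
         *   XBZRLE:         an encoded delta, decoded by load_xbzrle()
         */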
        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            /* Synchronize RAM block list */
            total_ram_bytes = addr;
            while (!ret && total_ram_bytes) {
                RAMBlock *block;
                char id[256];
                ram_addr_t length;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                block = qemu_ram_block_by_name(id);
                if (block && !qemu_ram_is_migratable(block)) {
                    error_report("block %s should not be migrated !", id);
                    ret = -EINVAL;
                } else if (block) {
                    if (length != block->used_length) {
                        Error *local_err = NULL;

                        ret = qemu_ram_resize(block, length,
                                              &local_err);
                        if (local_err) {
                            error_report_err(local_err);
                        }
                    }
                    /* For postcopy we need to check hugepage sizes match */
                    if (postcopy_advised && migrate_postcopy_ram() &&
                        block->page_size != qemu_host_page_size) {
                        uint64_t remote_page_size = qemu_get_be64(f);
                        if (remote_page_size != block->page_size) {
                            error_report("Mismatched RAM page size %s "
                                         "(local) %zd != %" PRId64,
                                         id, block->page_size,
                                         remote_page_size);
                            ret = -EINVAL;
                        }
                    }
                    if (migrate_ignore_shared()) {
                        hwaddr addr = qemu_get_be64(f);
                        if (migrate_ram_is_ignored(block) &&
                            block->mr->addr != addr) {
                            error_report("Mismatched GPAs for block %s "
                                         "%" PRId64 " != %" PRId64,
                                         id, (uint64_t)addr,
                                         (uint64_t)block->mr->addr);
                            ret = -EINVAL;
                        }
                    }
                    ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
                                          block->idstr);
                } else {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }

                total_ram_bytes -= length;
            }
            break;

        case RAM_SAVE_FLAG_ZERO:
            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_PAGE:
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
            decompress_data_with_multi_threads(f, host, len);
            break;

        case RAM_SAVE_FLAG_XBZRLE:
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            break;
        case RAM_SAVE_FLAG_MULTIFD_FLUSH:
            multifd_recv_sync_main();
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            if (migrate_multifd_flush_after_each_section()) {
                multifd_recv_sync_main();
            }
            break;
        case RAM_SAVE_FLAG_HOOK:
            ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
            break;
        default:
            error_report("Unknown combination of migration flags: 0x%x", flags);
            ret = -EINVAL;
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
        if (!ret && host_bak) {
            memcpy(host_bak, host, TARGET_PAGE_SIZE);
        }
    }

    ret |= wait_for_decompress_done();
    return ret;
}

static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int ret = 0;
    static uint64_t seq_iter;
    /*
     * If the system is running in postcopy mode, page inserts into host
     * memory must be atomic.
     */
    bool postcopy_running = postcopy_is_running();

    seq_iter++;

    if (version_id != 4) {
        return -EINVAL;
    }

    /*
     * This RCU critical section can be very long running.
     * If RCU reclaims in this code start to become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
    WITH_RCU_READ_LOCK_GUARD() {
        if (postcopy_running) {
            /*
             * Note: here RAM_CHANNEL_PRECOPY is the precopy channel of a
             * postcopy migration; there is a separate RAM_CHANNEL_POSTCOPY
             * to service fast page faults.
             */
            ret = ram_load_postcopy(f, RAM_CHANNEL_PRECOPY);
        } else {
            ret = ram_load_precopy(f);
        }
    }
    trace_ram_load_complete(ret, seq_iter);

    return ret;
}

static bool ram_has_postcopy(void *opaque)
{
    RAMBlock *rb;
    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        if (ramblock_is_pmem(rb)) {
            info_report("Block: %s, host: %p is an nvdimm memory, postcopy "
                        "is not supported now!", rb->idstr, rb->host);
            return false;
        }
    }

    return migrate_postcopy_ram();
}

/* Sync all the dirty bitmaps with the destination VM. */
static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
{
    RAMBlock *block;
    QEMUFile *file = s->to_dst_file;
    int ramblock_count = 0;

    trace_ram_dirty_bitmap_sync_start();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        qemu_savevm_send_recv_bitmap(file, block->idstr);
        trace_ram_dirty_bitmap_request(block->idstr);
        ramblock_count++;
    }

    trace_ram_dirty_bitmap_sync_wait();

    /* Wait until all the ramblocks' dirty bitmaps are synced */
    while (ramblock_count--) {
        qemu_sem_wait(&s->rp_state.rp_sem);
    }

    trace_ram_dirty_bitmap_sync_complete();

    return 0;
}

static void ram_dirty_bitmap_reload_notify(MigrationState *s)
{
    qemu_sem_post(&s->rp_state.rp_sem);
}
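
/*
 * Stream layout consumed by ram_dirty_bitmap_reload() below, as produced
 * by ramblock_recv_bitmap_send() on the destination side:
 *   be64   size       bitmap size in bytes, rounded up to a multiple of 8
 *   bytes  bitmap     the receive bitmap, stored as little-endian longs
 *   be64   end mark   must be RAMBLOCK_RECV_BITMAP_ENDING
 */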

/*
 * Read the received bitmap and invert it to form the initial dirty bitmap.
 * This is only used when the postcopy migration is paused and wants to
 * resume from an intermediate point.
 */
int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
{
    int ret = -EINVAL;
    /* from_dst_file is always valid because we're within rp_thread */
    QEMUFile *file = s->rp_state.from_dst_file;
    unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
    uint64_t local_size = DIV_ROUND_UP(nbits, 8);
    uint64_t size, end_mark;

    trace_ram_dirty_bitmap_reload_begin(block->idstr);

    if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: incorrect state %s", __func__,
                     MigrationStatus_str(s->state));
        return -EINVAL;
    }

    /*
     * Note: see comments in ramblock_recv_bitmap_send() on why we
     * need the endianness conversion, and the paddings.
     */
    local_size = ROUND_UP(local_size, 8);

    /* Add paddings */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    size = qemu_get_be64(file);

    /* The size of the bitmap should match our ramblock */
    if (size != local_size) {
        error_report("%s: ramblock '%s' bitmap size mismatch "
                     "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
                     block->idstr, size, local_size);
        ret = -EINVAL;
        goto out;
    }

    size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
    end_mark = qemu_get_be64(file);

    ret = qemu_file_get_error(file);
    if (ret || size != local_size) {
        error_report("%s: read bitmap failed for ramblock '%s': %d"
                     " (size 0x%"PRIx64", got: 0x%"PRIx64")",
                     __func__, block->idstr, ret, local_size, size);
        ret = -EIO;
        goto out;
    }

    if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
        error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIx64,
                     __func__, block->idstr, end_mark);
        ret = -EINVAL;
        goto out;
    }

    /*
     * Endianness conversion. We are in postcopy (though paused).
     * The dirty bitmap won't change. We can directly modify it.
     */
    bitmap_from_le(block->bmap, le_bitmap, nbits);

    /*
     * What we received is the "received bitmap". Invert it to form the
     * initial dirty bitmap for this ramblock.
     */
    bitmap_complement(block->bmap, block->bmap, nbits);
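
    /*
     * A set bit in the received bitmap means the page already made it to
     * the destination and does not need to be sent again; after the
     * complement above, exactly the pages that were never received are
     * marked dirty and will be resent once the migration resumes.
     */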

    /* Clear dirty bits of discarded ranges that we don't want to migrate. */
    ramblock_dirty_bitmap_clear_discarded_pages(block);

    /* We'll recalculate migration_dirty_pages in ram_state_resume_prepare(). */
    trace_ram_dirty_bitmap_reload_complete(block->idstr);

    /*
     * We succeeded in syncing the bitmap for the current ramblock. If this
     * is the last one to sync, we need to notify the main send thread.
     */
    ram_dirty_bitmap_reload_notify(s);

    ret = 0;
out:
    g_free(le_bitmap);
    return ret;
}

static int ram_resume_prepare(MigrationState *s, void *opaque)
{
    RAMState *rs = *(RAMState **)opaque;
    int ret;

    ret = ram_dirty_bitmap_sync_all(s, rs);
    if (ret) {
        return ret;
    }

    ram_state_resume_prepare(rs, s->to_dst_file);

    return 0;
}

void postcopy_preempt_shutdown_file(MigrationState *s)
{
    qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS);
    qemu_fflush(s->postcopy_qemufile_src);
}

static SaveVMHandlers savevm_ram_handlers = {
    .save_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .has_postcopy = ram_has_postcopy,
    .state_pending_exact = ram_state_pending_exact,
    .state_pending_estimate = ram_state_pending_estimate,
    .load_state = ram_load,
    .save_cleanup = ram_save_cleanup,
    .load_setup = ram_load_setup,
    .load_cleanup = ram_load_cleanup,
    .resume_prepare = ram_resume_prepare,
};

static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
                                      size_t old_size, size_t new_size)
{
    PostcopyState ps = postcopy_state_get();
    ram_addr_t offset;
    RAMBlock *rb = qemu_ram_block_from_host(host, false, &offset);
    Error *err = NULL;

    if (migrate_ram_is_ignored(rb)) {
        return;
    }

    if (!migration_is_idle()) {
        /*
         * Precopy code on the source cannot deal with the size of RAM blocks
         * changing at random points in time - especially after sending the
         * RAM block sizes in the migration stream, they must no longer change.
         * Abort and indicate a proper reason.
         */
        error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr);
        migration_cancel(err);
        error_free(err);
    }

    switch (ps) {
    case POSTCOPY_INCOMING_ADVISE:
        /*
         * Update what ram_postcopy_incoming_init()->init_range() does at the
         * time postcopy was advised. Syncing RAM blocks with the source will
         * result in RAM resizes.
         */
        if (old_size < new_size) {
            if (ram_discard_range(rb->idstr, old_size, new_size - old_size)) {
                error_report("RAM block '%s' discard of resized RAM failed",
                             rb->idstr);
            }
        }
        rb->postcopy_length = new_size;
        break;
    case POSTCOPY_INCOMING_NONE:
    case POSTCOPY_INCOMING_RUNNING:
    case POSTCOPY_INCOMING_END:
        /*
         * Once our guest is running, postcopy no longer cares about
         * resizes. When growing, the new memory was not available on the
         * source, so no handling is needed.
         */
        break;
    default:
        error_report("RAM block '%s' resized during postcopy state: %d",
                     rb->idstr, ps);
        exit(-1);
    }
}

static RAMBlockNotifier ram_mig_ram_notifier = {
    .ram_block_resized = ram_mig_ram_block_resized,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
    ram_block_notifier_add(&ram_mig_ram_notifier);
}