Lines Matching +full:load +full:- +full:reduced
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 * Copyright (c) 2011-2015 Red Hat Inc
34 #include "qemu/main-loop.h"
38 #include "migration-stats.h"
41 #include "qemu-file.h"
42 #include "postcopy-ram.h"
44 #include "qemu/error-report.h"
46 #include "qapi/qapi-types-migration.h"
47 #include "qapi/qapi-events-migration.h"
48 #include "qapi/qapi-commands-migration.h"
55 #include "system/cpu-throttle.h"
75 * mapped-ram migration supports O_DIRECT, so we need to make sure the
87 * When doing mapped-ram migration, this is the amount we read from
182 * Returns 0 for success or -1 for error
196 return -1; in xbzrle_cache_resize()
209 ret = -1; in xbzrle_cache_resize()
258 assert(!rb->receivedmap); in ramblock_recv_map_init()
259 rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits()); in ramblock_recv_map_init()
266 rb->receivedmap); in ramblock_recv_bitmap_test()
271 return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap); in ramblock_recv_bitmap_test_byte_offset()
276 set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap); in ramblock_recv_bitmap_set()
282 bitmap_set_atomic(rb->receivedmap, in ramblock_recv_bitmap_set_range()
289 set_bit_atomic(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap); in ramblock_recv_bitmap_set_offset()
307 return -1; in ramblock_recv_bitmap_send()
310 nbits = block->postcopy_length >> TARGET_PAGE_BITS; in ramblock_recv_bitmap_send()
324 bitmap_to_le(le_bitmap, block->receivedmap, nbits); in ramblock_recv_bitmap_send()
372 /* UFFD file descriptor, used in 'write-tracking' migration */
410 * - dirty/clear bitmap
411 * - migration_dirty_pages
412 * - pss structures
445 return !QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests); in postcopy_has_request()
473 return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) : in ram_bytes_remaining()
493 pss->block = rb; in pss_init()
494 pss->page = page; in pss_init()
495 pss->complete_round = false; in pss_init()
504 return pss1->host_page_sending && pss2->host_page_sending && in pss_overlap()
505 (pss1->host_page_start == pss2->host_page_start); in pss_overlap()
524 bool same_block = (block == pss->last_sent_block); in save_page_header()
533 len = strlen(block->idstr); in save_page_header()
535 qemu_put_buffer(f, (uint8_t *)block->idstr, len); in save_page_header()
537 pss->last_sent_block = block; in save_page_header()
546 * writes. If guest dirty memory rate is reduced below the rate at
549 * fast and will not effectively converge, even with auto-converge.
572 cpu_now = 100 - throttle_now; in mig_throttle_guest_down()
575 throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment); in mig_throttle_guest_down()
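The mig_throttle_guest_down() fragments above show the core auto-converge arithmetic: the throttle rises by at most pct_increment per sync round and never past the point where the guest would get less CPU than the ideal share. A minimal standalone sketch of that calculation; the function name and the 99% cap are assumptions for the example, not QEMU's actual code:

```c
#include <inttypes.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/*
 * Compute the next CPU-throttle percentage for auto-converge.
 * throttle_now:  current throttle %
 * cpu_ideal:     CPU % the guest should be limited to
 * pct_increment: largest step allowed per sync round
 * pct_max:       hard upper bound on throttling (assumed here)
 */
static uint64_t next_throttle_pct(uint64_t throttle_now, uint64_t cpu_ideal,
                                  uint64_t pct_increment, uint64_t pct_max)
{
    uint64_t cpu_now = 100 - throttle_now;   /* CPU % the guest gets today */

    if (cpu_now <= cpu_ideal) {
        return throttle_now;                 /* already at or past the target */
    }
    return MIN(throttle_now + MIN(cpu_now - cpu_ideal, pct_increment), pct_max);
}

int main(void)
{
    /* 20% throttled now, cap the guest at 40% CPU, step 10%, never above 99% */
    printf("%" PRIu64 "\n", next_throttle_pct(20, 40, 10, 99));   /* -> 30 */
    return 0;
}
```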
585 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); in mig_throttle_counter_reset()
586 rs->num_dirty_pages_period = 0; in mig_throttle_counter_reset()
587 rs->bytes_xfer_prev = migration_transferred_bytes(); in mig_throttle_counter_reset()
596 * The important thing is that a stale (not-yet-0'd) page be replaced
616 * -1 means that xbzrle would be longer than normal
631 QEMUFile *file = pss->pss_channel; in save_xbzrle_page()
636 if (!rs->last_stage) { in save_xbzrle_page()
638 generation) == -1) { in save_xbzrle_page()
639 return -1; in save_xbzrle_page()
646 return -1; in save_xbzrle_page()
675 if (!rs->last_stage && encoded_len != 0) { in save_xbzrle_page()
688 } else if (encoded_len == -1) { in save_xbzrle_page()
692 return -1; in save_xbzrle_page()
696 bytes_xbzrle = save_page_header(pss, pss->pss_channel, block, in save_xbzrle_page()
706 xbzrle_counters.bytes += bytes_xbzrle - 8; in save_xbzrle_page()
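save_xbzrle_page() above caches each page and only transmits an encoded delta when it is smaller than the raw page, falling back (return -1) otherwise. A toy delta encoder in the same spirit, purely illustrative and far simpler than QEMU's xbzrle_encode_buffer(); run lengths are capped at 255 so each (zero-run, literal-run) pair fits in two header bytes:

```c
#include <stdint.h>
#include <string.h>

/*
 * Encode new_page against old_page as (zero-run, literal-run, literal bytes)
 * triples.  Returns the encoded length, or -1 if the result would not be
 * smaller than the page (the caller then sends the raw page instead).
 */
static int delta_encode(const uint8_t *old_page, const uint8_t *new_page,
                        size_t page_size, uint8_t *out, size_t out_max)
{
    size_t i = 0, pos = 0;

    while (i < page_size) {
        size_t zrun = 0, nzrun = 0;

        while (zrun < 255 && i + zrun < page_size &&
               old_page[i + zrun] == new_page[i + zrun]) {
            zrun++;                          /* bytes unchanged since the cached copy */
        }
        i += zrun;
        while (nzrun < 255 && i + nzrun < page_size &&
               old_page[i + nzrun] != new_page[i + nzrun]) {
            nzrun++;                         /* bytes that must be sent literally */
        }
        if (pos + 2 + nzrun >= page_size || pos + 2 + nzrun > out_max) {
            return -1;                       /* not worth it: send the raw page */
        }
        out[pos++] = (uint8_t)zrun;
        out[pos++] = (uint8_t)nzrun;
        memcpy(&out[pos], &new_page[i], nzrun);
        pos += nzrun;
        i += nzrun;
    }
    return (int)pos;
}
```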
715 * This function updates pss->page to point to the next dirty page index
717 * found. Note that when pss->host_page_sending==true it means we're
725 RAMBlock *rb = pss->block; in pss_find_next_dirty()
726 unsigned long size = rb->used_length >> TARGET_PAGE_BITS; in pss_find_next_dirty()
727 unsigned long *bitmap = rb->bmap; in pss_find_next_dirty()
731 pss->page = size; in pss_find_next_dirty()
739 if (pss->host_page_sending) { in pss_find_next_dirty()
740 assert(pss->host_page_end); in pss_find_next_dirty()
741 size = MIN(size, pss->host_page_end); in pss_find_next_dirty()
744 pss->page = find_next_bit(bitmap, size, pss->page); in pss_find_next_dirty()
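pss_find_next_dirty() walks the RAMBlock's dirty bitmap with find_next_bit() to locate the next page that still needs sending. A minimal, self-contained version of that scan, bit by bit rather than word at a time for clarity; this is not QEMU's bitmap code:

```c
#include <stddef.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * 8)

/* Return the index of the next set bit in [start, size), or size if none. */
static size_t find_next_dirty(const unsigned long *bitmap, size_t size,
                              size_t start)
{
    for (size_t i = start; i < size; i++) {
        if (bitmap[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))) {
            return i;               /* index of the next dirty page */
        }
    }
    return size;                    /* no dirty page left in the range */
}

int main(void)
{
    unsigned long bmap[2] = { 0 };

    bmap[0] |= 1UL << 5;            /* mark page 5 dirty */
    printf("%zu\n", find_next_dirty(bmap, 2 * BITS_PER_LONG, 0));  /* -> 5 */
    return 0;
}
```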
753 if (!rb->clear_bmap || !clear_bmap_test_and_clear(rb, page)) { in migration_clear_memory_region_dirty_bitmap()
757 shift = rb->clear_bmap_shift; in migration_clear_memory_region_dirty_bitmap()
770 trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page); in migration_clear_memory_region_dirty_bitmap()
771 memory_region_clear_dirty_bitmap(rb->mr, start, size); in migration_clear_memory_region_dirty_bitmap()
779 unsigned long i, chunk_pages = 1UL << rb->clear_bmap_shift; in migration_clear_memory_region_dirty_bitmap_range()
784 * Clear pages from start to start + npages - 1, so the end boundary is in migration_clear_memory_region_dirty_bitmap_range()
807 unsigned long size = rb->used_length >> TARGET_PAGE_BITS; in colo_bitmap_find_dirty()
808 unsigned long *bitmap = rb->bmap; in colo_bitmap_find_dirty()
823 *num = next - first; in colo_bitmap_find_dirty()
841 if (!rs->last_stage && !migration_in_postcopy()) { in migration_bitmap_clear_dirty()
853 ret = test_and_clear_bit(page, rb->bmap); in migration_bitmap_clear_dirty()
855 rs->migration_dirty_pages--; in migration_bitmap_clear_dirty()
864 const hwaddr offset = section->offset_within_region; in dirty_bitmap_clear_section()
865 const hwaddr size = int128_get64(section->size); in dirty_bitmap_clear_section()
868 RAMBlock *rb = section->mr->ram_block; in dirty_bitmap_clear_section()
872 * We don't grab ram_state->bitmap_mutex because we expect to run in dirty_bitmap_clear_section()
879 *cleared_bits += bitmap_count_one_with_offset(rb->bmap, start, npages); in dirty_bitmap_clear_section()
880 bitmap_clear(rb->bmap, start, npages); in dirty_bitmap_clear_section()
901 if (rb->mr && rb->bmap && memory_region_has_ram_discard_manager(rb->mr)) { in ramblock_dirty_bitmap_clear_discarded_pages()
902 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); in ramblock_dirty_bitmap_clear_discarded_pages()
904 .mr = rb->mr, in ramblock_dirty_bitmap_clear_discarded_pages()
917 * Check if a host-page aligned page falls into a discarded range as managed by
924 if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) { in ramblock_page_is_discarded()
925 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); in ramblock_page_is_discarded()
927 .mr = rb->mr, in ramblock_page_is_discarded()
941 cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length); in ramblock_sync_dirty_bitmap()
943 rs->migration_dirty_pages += new_dirty_pages; in ramblock_sync_dirty_bitmap()
944 rs->num_dirty_pages_period += new_dirty_pages; in ramblock_sync_dirty_bitmap()
962 summary |= block->page_size; in ram_pagesize_summary()
977 uint64_t page_count = rs->target_page_count - rs->target_page_count_prev; in migration_update_rates()
981 rs->num_dirty_pages_period * 1000 / in migration_update_rates()
982 (end_time - rs->time_last_bitmap_sync)); in migration_update_rates()
991 xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss - in migration_update_rates()
992 rs->xbzrle_cache_miss_prev) / page_count; in migration_update_rates()
993 rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss; in migration_update_rates()
994 unencoded_size = (xbzrle_counters.pages - rs->xbzrle_pages_prev) * in migration_update_rates()
996 encoded_size = xbzrle_counters.bytes - rs->xbzrle_bytes_prev; in migration_update_rates()
997 if (xbzrle_counters.pages == rs->xbzrle_pages_prev || !encoded_size) { in migration_update_rates()
1002 rs->xbzrle_pages_prev = xbzrle_counters.pages; in migration_update_rates()
1003 rs->xbzrle_bytes_prev = xbzrle_counters.bytes; in migration_update_rates()
1008 * Enable dirty-limit to throttle down the guest
1021 * vcpu-dirty-limit untouched. in migration_dirty_limit_guest()
1024 quota_dirtyrate == s->parameters.vcpu_dirty_limit) { in migration_dirty_limit_guest()
1028 quota_dirtyrate = s->parameters.vcpu_dirty_limit; in migration_dirty_limit_guest()
1034 qmp_set_vcpu_dirty_limit(false, -1, quota_dirtyrate, NULL); in migration_dirty_limit_guest()
1042 migration_transferred_bytes() - rs->bytes_xfer_prev; in migration_trigger_throttle()
1043 uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE; in migration_trigger_throttle()
1054 (++rs->dirty_rate_high_cnt >= 2)) { in migration_trigger_throttle()
1055 rs->dirty_rate_high_cnt = 0; in migration_trigger_throttle()
1073 if (!rs->time_last_bitmap_sync) { in migration_bitmap_sync()
1074 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); in migration_bitmap_sync()
1080 WITH_QEMU_LOCK_GUARD(&rs->bitmap_mutex) { in migration_bitmap_sync()
1090 trace_migration_bitmap_sync_end(rs->num_dirty_pages_period); in migration_bitmap_sync()
1095 if (end_time > rs->time_last_bitmap_sync + 1000) { in migration_bitmap_sync()
1100 rs->target_page_count_prev = rs->target_page_count; in migration_bitmap_sync()
1103 rs->time_last_bitmap_sync = end_time; in migration_bitmap_sync()
1104 rs->num_dirty_pages_period = 0; in migration_bitmap_sync()
1105 rs->bytes_xfer_prev = migration_transferred_bytes(); in migration_bitmap_sync()
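migration_bitmap_sync() above re-reads the dirty log under bitmap_mutex and, once at least a second has elapsed, turns the per-period dirty-page count into a rate before resetting the period counters. A sketch of that bookkeeping with assumed struct and field names, not QEMU's RAMState:

```c
#include <inttypes.h>
#include <stdio.h>

struct sync_state {
    int64_t  time_last_sync_ms;     /* when the previous rate was computed */
    uint64_t dirty_pages_period;    /* pages dirtied since then */
    uint64_t dirty_pages_rate;      /* pages per second over the last period */
};

static void maybe_update_rate(struct sync_state *s, int64_t now_ms)
{
    if (now_ms > s->time_last_sync_ms + 1000) {
        s->dirty_pages_rate =
            s->dirty_pages_period * 1000 / (now_ms - s->time_last_sync_ms);
        s->time_last_sync_ms = now_ms;
        s->dirty_pages_period = 0;
    }
}

int main(void)
{
    struct sync_state s = { .time_last_sync_ms = 0, .dirty_pages_period = 2500 };

    maybe_update_rate(&s, 2000);    /* 2500 pages over 2 s -> 1250 pages/s */
    printf("%" PRIu64 "\n", s.dirty_pages_rate);
    return 0;
}
```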
1155 uint8_t *p = pss->block->host + offset; in save_zero_page()
1156 QEMUFile *file = pss->pss_channel; in save_zero_page()
1170 /* zero pages are not transferred with mapped-ram */ in save_zero_page()
1171 clear_bit_atomic(offset >> TARGET_PAGE_BITS, pss->block->file_bmap); in save_zero_page()
1175 len += save_page_header(pss, file, pss->block, offset | RAM_SAVE_FLAG_ZERO); in save_zero_page()
1178 ram_release_page(pss->block->idstr, offset); in save_zero_page()
1185 if (rs->xbzrle_started) { in save_zero_page()
1187 xbzrle_cache_zero_page(pss->block->offset + offset); in save_zero_page()
1208 QEMUFile *file = pss->pss_channel; in save_normal_page()
1212 block->pages_offset + offset); in save_normal_page()
1213 set_bit(offset >> TARGET_PAGE_BITS, block->file_bmap); in save_normal_page()
1215 ram_transferred_add(save_page_header(pss, pss->pss_channel, block, in save_normal_page()
1234 * < 0 - error
1235 * >=0 - Number of pages written - this might legally be 0
1244 int pages = -1; in ram_save_page()
1247 RAMBlock *block = pss->block; in ram_save_page()
1248 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; in ram_save_page()
1249 ram_addr_t current_addr = block->offset + offset; in ram_save_page()
1251 p = block->host + offset; in ram_save_page()
1252 trace_ram_save_page(block->idstr, (uint64_t)offset, p); in ram_save_page()
1255 if (rs->xbzrle_started && !migration_in_postcopy()) { in ram_save_page()
1258 if (!rs->last_stage) { in ram_save_page()
1267 if (pages == -1) { in ram_save_page()
1279 return -1; in ram_save_multifd_page()
1305 /* Update pss->page for the next dirty bit in ramblock */ in find_dirty_block()
1308 if (pss->complete_round && pss->block == rs->last_seen_block && in find_dirty_block()
1309 pss->page >= rs->last_page) { in find_dirty_block()
1316 if (!offset_in_ramblock(pss->block, in find_dirty_block()
1317 ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)) { in find_dirty_block()
1319 pss->page = 0; in find_dirty_block()
1320 pss->block = QLIST_NEXT_RCU(pss->block, next); in find_dirty_block()
1321 if (!pss->block) { in find_dirty_block()
1323 QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel; in find_dirty_block()
1331 pss->block = QLIST_FIRST_RCU(&ram_list.blocks); in find_dirty_block()
1333 pss->complete_round = true; in find_dirty_block()
1336 rs->xbzrle_started = true; in find_dirty_block()
1350 * Helper for 'get_queued_page' - gets a page off the queue
1366 QEMU_LOCK_GUARD(&rs->src_page_req_mutex); in unqueue_page()
1374 entry = QSIMPLEQ_FIRST(&rs->src_page_requests); in unqueue_page()
1375 block = entry->rb; in unqueue_page()
1376 *offset = entry->offset; in unqueue_page()
1378 if (entry->len > TARGET_PAGE_SIZE) { in unqueue_page()
1379 entry->len -= TARGET_PAGE_SIZE; in unqueue_page()
1380 entry->offset += TARGET_PAGE_SIZE; in unqueue_page()
1382 memory_region_unref(block->mr); in unqueue_page()
1383 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req); in unqueue_page()
1413 res = uffd_read_events(rs->uffdio_fd, &uffd_msg, 1); in poll_fault_page()
1420 assert(block && (block->flags & RAM_UF_WRITEPROTECT) != 0); in poll_fault_page()
1429 * @pss: page-search-status structure
1430 * @start_page: index of the first page in the range relative to pss->block
1439 /* Check if page is from UFFD-managed region. */ in ram_save_release_protection()
1440 if (pss->block->flags & RAM_UF_WRITEPROTECT) { in ram_save_release_protection()
1441 void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS); in ram_save_release_protection()
1442 uint64_t run_length = (pss->page - start_page) << TARGET_PAGE_BITS; in ram_save_release_protection()
1444 /* Flush async buffers before un-protect. */ in ram_save_release_protection()
1445 qemu_fflush(pss->pss_channel); in ram_save_release_protection()
1446 /* Un-protect memory range. */ in ram_save_release_protection()
1447 res = uffd_change_protection(rs->uffdio_fd, page_address, run_length, in ram_save_release_protection()
1469 * compatible with 'write-tracking'
1491 /* Nothing to do with read-only and MMIO-writable regions */ in ram_write_tracking_compatible()
1492 if (block->mr->readonly || block->mr->rom_device) { in ram_write_tracking_compatible()
1495 /* Try to register block memory via UFFD-IO to track writes */ in ram_write_tracking_compatible()
1496 if (uffd_register_memory(uffd_fd, block->host, block->max_length, in ram_write_tracking_compatible()
1522 for (; offset < end; offset += block->page_size) { in populate_read_range()
1523 char tmp = *((char *)block->host + offset); in populate_read_range()
1533 const hwaddr size = int128_get64(section->size); in populate_read_section()
1534 hwaddr offset = section->offset_within_region; in populate_read_section()
1535 RAMBlock *block = section->mr->ram_block; in populate_read_section()
1558 * not be part of the migration stream either way -- see in ram_block_populate_read()
1563 if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) { in ram_block_populate_read()
1564 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); in ram_block_populate_read()
1566 .mr = rb->mr, in ram_block_populate_read()
1568 .size = rb->mr->size, in ram_block_populate_read()
1574 populate_read_range(rb, 0, rb->used_length); in ram_block_populate_read()
1579 * ram_write_tracking_prepare: prepare for UFFD-WP memory tracking
1588 /* Nothing to do with read-only and MMIO-writable regions */ in ram_write_tracking_prepare()
1589 if (block->mr->readonly || block->mr->rom_device) { in ram_write_tracking_prepare()
1608 const hwaddr size = int128_get64(section->size); in uffd_protect_section()
1609 const hwaddr offset = section->offset_within_region; in uffd_protect_section()
1610 RAMBlock *rb = section->mr->ram_block; in uffd_protect_section()
1613 return uffd_change_protection(uffd_fd, rb->host + offset, size, true, in uffd_protect_section()
1619 assert(rb->flags & RAM_UF_WRITEPROTECT); in ram_block_uffd_protect()
1622 if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) { in ram_block_uffd_protect()
1623 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); in ram_block_uffd_protect()
1625 .mr = rb->mr, in ram_block_uffd_protect()
1627 .size = rb->mr->size, in ram_block_uffd_protect()
1634 return uffd_change_protection(uffd_fd, rb->host, in ram_block_uffd_protect()
1635 rb->used_length, true, false); in ram_block_uffd_protect()
1639 * ram_write_tracking_start: start UFFD-WP memory tracking
1654 rs->uffdio_fd = uffd_fd; in ram_write_tracking_start()
1659 /* Nothing to do with read-only and MMIO-writable regions */ in ram_write_tracking_start()
1660 if (block->mr->readonly || block->mr->rom_device) { in ram_write_tracking_start()
1665 if (uffd_register_memory(rs->uffdio_fd, block->host, in ram_write_tracking_start()
1666 block->max_length, UFFDIO_REGISTER_MODE_WP, NULL)) { in ram_write_tracking_start()
1669 block->flags |= RAM_UF_WRITEPROTECT; in ram_write_tracking_start()
1670 memory_region_ref(block->mr); in ram_write_tracking_start()
1677 trace_ram_write_tracking_ramblock_start(block->idstr, block->page_size, in ram_write_tracking_start()
1678 block->host, block->max_length); in ram_write_tracking_start()
1687 if ((block->flags & RAM_UF_WRITEPROTECT) == 0) { in ram_write_tracking_start()
1690 uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length); in ram_write_tracking_start()
1692 block->flags &= ~RAM_UF_WRITEPROTECT; in ram_write_tracking_start()
1693 memory_region_unref(block->mr); in ram_write_tracking_start()
1697 rs->uffdio_fd = -1; in ram_write_tracking_start()
1698 return -1; in ram_write_tracking_start()
1702 * ram_write_tracking_stop: stop UFFD-WP memory tracking and remove protection
1712 if ((block->flags & RAM_UF_WRITEPROTECT) == 0) { in ram_write_tracking_stop()
1715 uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length); in ram_write_tracking_stop()
1717 trace_ram_write_tracking_ramblock_stop(block->idstr, block->page_size, in ram_write_tracking_stop()
1718 block->host, block->max_length); in ram_write_tracking_stop()
1721 block->flags &= ~RAM_UF_WRITEPROTECT; in ram_write_tracking_stop()
1722 memory_region_unref(block->mr); in ram_write_tracking_stop()
1726 uffd_close_fd(rs->uffdio_fd); in ram_write_tracking_stop()
1727 rs->uffdio_fd = -1; in ram_write_tracking_stop()
1800 dirty = test_bit(page, block->bmap); in get_queued_page()
1802 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset, in get_queued_page()
1805 trace_get_queued_page(block->idstr, (uint64_t)offset, page); in get_queued_page()
1825 pss->block = block; in get_queued_page()
1826 pss->page = offset >> TARGET_PAGE_BITS; in get_queued_page()
1832 pss->complete_round = false; in get_queued_page()
1849 /* This queue generally should be empty - but in the case of a failed in migration_page_queue_free()
1853 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) { in migration_page_queue_free()
1854 memory_region_unref(mspr->rb->mr); in migration_page_queue_free()
1855 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req); in migration_page_queue_free()
1883 ramblock = rs->last_req_rb; in ram_save_queue_pages()
1891 return -1; in ram_save_queue_pages()
1897 /* We shouldn't be asked for a non-existent RAMBlock */ in ram_save_queue_pages()
1899 return -1; in ram_save_queue_pages()
1901 rs->last_req_rb = ramblock; in ram_save_queue_pages()
1903 trace_ram_save_queue_pages(ramblock->idstr, start, len); in ram_save_queue_pages()
1904 if (!offset_in_ramblock(ramblock, start + len - 1)) { in ram_save_queue_pages()
1908 start, len, ramblock->used_length); in ram_save_queue_pages()
1909 return -1; in ram_save_queue_pages()
1914 * rp-return thread. in ram_save_queue_pages()
1919 PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_POSTCOPY]; in ram_save_queue_pages()
1922 qemu_mutex_lock(&rs->bitmap_mutex); in ram_save_queue_pages()
1927 * safe to access without lock, because when rp-thread is running in ram_save_queue_pages()
1930 pss->pss_channel = migrate_get_current()->postcopy_qemufile_src; in ram_save_queue_pages()
1931 assert(pss->pss_channel); in ram_save_queue_pages()
1942 ramblock->idstr, start); in ram_save_queue_pages()
1943 ret = -1; in ram_save_queue_pages()
1947 * NOTE: after ram_save_host_page_urgent() succeeded, pss->page in ram_save_queue_pages()
1955 len -= page_size; in ram_save_queue_pages()
1957 qemu_mutex_unlock(&rs->bitmap_mutex); in ram_save_queue_pages()
1964 new_entry->rb = ramblock; in ram_save_queue_pages()
1965 new_entry->offset = start; in ram_save_queue_pages()
1966 new_entry->len = len; in ram_save_queue_pages()
1968 memory_region_ref(ramblock->mr); in ram_save_queue_pages()
1969 qemu_mutex_lock(&rs->src_page_req_mutex); in ram_save_queue_pages()
1970 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req); in ram_save_queue_pages()
1972 qemu_mutex_unlock(&rs->src_page_req_mutex); in ram_save_queue_pages()
1986 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; in ram_save_target_page()
1991 res = rdma_control_save_page(pss->pss_channel, pss->block->offset, in ram_save_target_page()
2008 return ram_save_multifd_page(pss->block, offset); in ram_save_target_page()
2018 size_t guest_pfns = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS; in pss_host_page_prepare()
2020 pss->host_page_sending = true; in pss_host_page_prepare()
2030 pss->host_page_start = pss->page; in pss_host_page_prepare()
2031 pss->host_page_end = pss->page + 1; in pss_host_page_prepare()
2037 pss->host_page_start = ROUND_DOWN(pss->page, guest_pfns); in pss_host_page_prepare()
2038 pss->host_page_end = ROUND_UP(pss->page + 1, guest_pfns); in pss_host_page_prepare()
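pss_host_page_prepare() brackets the current target page with host-page boundaries so that, on hugepage-backed RAM, every target page belonging to one host page is sent together. The bracketing is plain integer rounding; a small sketch follows, with ROUND_DOWN/ROUND_UP defined generically here rather than with QEMU's power-of-two macros:

```c
#include <stdio.h>

#define ROUND_DOWN(n, m) ((n) / (m) * (m))
#define ROUND_UP(n, m)   ROUND_DOWN((n) + (m) - 1, (m))

int main(void)
{
    unsigned long guest_pfns = 512;    /* 2 MiB host page / 4 KiB target page */
    unsigned long page = 1000;         /* dirty target page index */

    unsigned long host_page_start = ROUND_DOWN(page, guest_pfns);
    unsigned long host_page_end   = ROUND_UP(page + 1, guest_pfns);

    printf("[%lu, %lu)\n", host_page_start, host_page_end);   /* [512, 1024) */
    return 0;
}
```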
2050 assert(pss->host_page_sending); in pss_within_range()
2052 /* Over host-page boundary? */ in pss_within_range()
2053 if (pss->page >= pss->host_page_end) { in pss_within_range()
2057 ram_addr = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; in pss_within_range()
2059 return offset_in_ramblock(pss->block, ram_addr); in pss_within_range()
2064 pss->host_page_sending = false; in pss_host_page_finish()
2066 pss->host_page_start = pss->host_page_end = 0; in pss_host_page_finish()
2071 PageLocationHint *hint = &rs->page_hint; in ram_page_hint_update()
2074 if (hint->valid) { in ram_page_hint_update()
2079 hint->location.block = pss->block; in ram_page_hint_update()
2080 hint->location.offset = pss->page; in ram_page_hint_update()
2081 hint->valid = true; in ram_page_hint_update()
2096 trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page); in ram_save_host_page_urgent()
2104 if (pss_overlap(pss, &ram_state->pss[RAM_CHANNEL_PRECOPY])) { in ram_save_host_page_urgent()
2105 trace_postcopy_preempt_hit(pss->block->idstr, in ram_save_host_page_urgent()
2106 pss->page << TARGET_PAGE_BITS); in ram_save_host_page_urgent()
2111 page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page); in ram_save_host_page_urgent()
2117 ret = -1; in ram_save_host_page_urgent()
2128 qemu_fflush(pss->pss_channel); in ram_save_host_page_urgent()
2160 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS; in ram_save_host_page()
2161 unsigned long start_page = pss->page; in ram_save_host_page()
2164 if (migrate_ram_is_ignored(pss->block)) { in ram_save_host_page()
2165 error_report("block %s should not be migrated !", pss->block->idstr); in ram_save_host_page()
2173 page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page); in ram_save_host_page()
2179 * because both migration thread and rp-return thread can in ram_save_host_page()
2183 qemu_mutex_unlock(&rs->bitmap_mutex); in ram_save_host_page()
2197 qemu_mutex_lock(&rs->bitmap_mutex); in ram_save_host_page()
2224 return rs->page_hint.valid; in ram_page_hint_valid()
2230 PageLocationHint *hint = &rs->page_hint; in ram_page_hint_collect()
2232 assert(hint->valid); in ram_page_hint_collect()
2234 *block = hint->location.block; in ram_page_hint_collect()
2235 *page = hint->location.offset; in ram_page_hint_collect()
2238 hint->valid = false; in ram_page_hint_collect()
2251 * On systems where host-page-size > target-page-size it will send all the
2256 PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY]; in ram_find_and_save_block()
2262 if (!rs->ram_bytes_total) { in ram_find_and_save_block()
2273 if (!rs->last_seen_block) { in ram_find_and_save_block()
2274 rs->last_seen_block = QLIST_FIRST_RCU(&ram_list.blocks); in ram_find_and_save_block()
2275 rs->last_page = 0; in ram_find_and_save_block()
2281 next_block = rs->last_seen_block; in ram_find_and_save_block()
2282 next_page = rs->last_page; in ram_find_and_save_block()
2310 rs->last_seen_block = pss->block; in ram_find_and_save_block()
2311 rs->last_page = pss->page; in ram_find_and_save_block()
2324 total += block->used_length; in ram_bytes_total_with_ignored()
2337 total += block->used_length; in ram_bytes_total()
2357 qemu_mutex_destroy(&(*rsp)->bitmap_mutex); in ram_state_cleanup()
2358 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex); in ram_state_cleanup()
2385 g_free(block->clear_bmap); in ram_bitmaps_destroy()
2386 block->clear_bmap = NULL; in ram_bitmaps_destroy()
2387 g_free(block->bmap); in ram_bitmaps_destroy()
2388 block->bmap = NULL; in ram_bitmaps_destroy()
2389 g_free(block->file_bmap); in ram_bitmaps_destroy()
2390 block->file_bmap = NULL; in ram_bitmaps_destroy()
2422 hint->location.block = NULL; in ram_page_hint_reset()
2423 hint->location.offset = 0; in ram_page_hint_reset()
2424 hint->valid = false; in ram_page_hint_reset()
2432 rs->pss[i].last_sent_block = NULL; in ram_state_reset()
2435 rs->last_seen_block = NULL; in ram_state_reset()
2436 rs->last_page = 0; in ram_state_reset()
2437 rs->last_version = ram_list.version; in ram_state_reset()
2438 rs->xbzrle_started = false; in ram_state_reset()
2440 ram_page_hint_reset(&rs->page_hint); in ram_state_reset()
2452 unsigned long *bitmap = block->bmap; in ram_postcopy_migrated_memory_release()
2453 unsigned long range = block->used_length >> TARGET_PAGE_BITS; in ram_postcopy_migrated_memory_release()
2458 ram_discard_range(block->idstr, in ram_postcopy_migrated_memory_release()
2460 ((ram_addr_t)(run_end - run_start)) in ram_postcopy_migrated_memory_release()
2477 unsigned long end = block->used_length >> TARGET_PAGE_BITS; in postcopy_send_discard_bm_ram()
2479 unsigned long *bitmap = block->bmap; in postcopy_send_discard_bm_ram()
2492 discard_length = end - one; in postcopy_send_discard_bm_ram()
2494 discard_length = zero - one; in postcopy_send_discard_bm_ram()
2519 postcopy_discard_send_init(ms, block->idstr); in postcopy_each_ram_send_discard()
2523 * host-page size chunks, mark any partially dirty host-page size in postcopy_each_ram_send_discard()
2524 * chunks as all dirty. In this case the host-page is the host-page in postcopy_each_ram_send_discard()
2555 unsigned long *bitmap = block->bmap; in postcopy_chunk_hostpages_pass()
2556 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE; in postcopy_chunk_hostpages_pass()
2557 unsigned long pages = block->used_length >> TARGET_PAGE_BITS; in postcopy_chunk_hostpages_pass()
2560 if (block->page_size == TARGET_PAGE_SIZE) { in postcopy_chunk_hostpages_pass()
2561 /* Easy case - TPS==HPS for a non-huge page RAMBlock */ in postcopy_chunk_hostpages_pass()
2597 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap); in postcopy_chunk_hostpages_pass()
2628 /* Easiest way to make sure we don't resume in the middle of a host-page */ in ram_postcopy_send_discard_bitmap()
2629 rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL; in ram_postcopy_send_discard_bitmap()
2630 rs->last_seen_block = NULL; in ram_postcopy_send_discard_bitmap()
2631 rs->last_page = 0; in ram_postcopy_send_discard_bitmap()
2657 return -1; in ram_discard_range()
2664 if (rb->receivedmap) { in ram_discard_range()
2665 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(), in ram_discard_range()
2735 qemu_mutex_init(&(*rsp)->bitmap_mutex); in ram_state_init()
2736 qemu_mutex_init(&(*rsp)->src_page_req_mutex); in ram_state_init()
2737 QSIMPLEQ_INIT(&(*rsp)->src_page_requests); in ram_state_init()
2738 (*rsp)->ram_bytes_total = ram_bytes_total(); in ram_state_init()
2745 (*rsp)->migration_dirty_pages = (*rsp)->ram_bytes_total >> TARGET_PAGE_BITS; in ram_state_init()
2760 shift = ms->clear_bitmap_shift; in ram_list_init_bitmaps()
2772 pages = block->max_length >> TARGET_PAGE_BITS; in ram_list_init_bitmaps()
2782 block->bmap = bitmap_new(pages); in ram_list_init_bitmaps()
2783 bitmap_set(block->bmap, 0, pages); in ram_list_init_bitmaps()
2785 block->file_bmap = bitmap_new(pages); in ram_list_init_bitmaps()
2787 block->clear_bmap_shift = shift; in ram_list_init_bitmaps()
2788 block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift)); in ram_list_init_bitmaps()
2802 rs->migration_dirty_pages -= pages; in migration_bitmap_clear_discarded_pages()
2842 return -1; in ram_init_all()
2847 return -1; in ram_init_all()
2851 return -1; in ram_init_all()
2869 pages += bitmap_count_one(block->bmap, in ram_state_resume_prepare()
2870 block->used_length >> TARGET_PAGE_BITS); in ram_state_resume_prepare()
2874 rs->migration_dirty_pages = pages; in ram_state_resume_prepare()
2879 rs->pss[RAM_CHANNEL_PRECOPY].pss_channel = out; in ram_state_resume_prepare()
2901 for (; len > 0; len -= used_len, addr += used_len) { in qemu_guest_free_page_hint()
2903 if (unlikely(!block || offset >= block->used_length)) { in qemu_guest_free_page_hint()
2913 if (len <= block->used_length - offset) { in qemu_guest_free_page_hint()
2916 used_len = block->used_length - offset; in qemu_guest_free_page_hint()
2922 qemu_mutex_lock(&ram_state->bitmap_mutex); in qemu_guest_free_page_hint()
2930 ram_state->migration_dirty_pages -= in qemu_guest_free_page_hint()
2931 bitmap_count_one_with_offset(block->bmap, start, npages); in qemu_guest_free_page_hint()
2932 bitmap_clear(block->bmap, start, npages); in qemu_guest_free_page_hint()
2933 qemu_mutex_unlock(&ram_state->bitmap_mutex); in qemu_guest_free_page_hint()
2967 num_pages = block->used_length >> TARGET_PAGE_BITS; in mapped_ram_setup_ramblock()
2975 block->bitmap_offset = qemu_get_offset(file) + header_size; in mapped_ram_setup_ramblock()
2976 block->pages_offset = ROUND_UP(block->bitmap_offset + in mapped_ram_setup_ramblock()
2980 header->version = cpu_to_be32(MAPPED_RAM_HDR_VERSION); in mapped_ram_setup_ramblock()
2981 header->page_size = cpu_to_be64(TARGET_PAGE_SIZE); in mapped_ram_setup_ramblock()
2982 header->bitmap_offset = cpu_to_be64(block->bitmap_offset); in mapped_ram_setup_ramblock()
2983 header->pages_offset = cpu_to_be64(block->pages_offset); in mapped_ram_setup_ramblock()
2988 qemu_set_offset(file, block->pages_offset + block->used_length, SEEK_SET); in mapped_ram_setup_ramblock()
2998 error_setg(errp, "Could not read whole mapped-ram migration header " in mapped_ram_read_header()
3003 /* migration stream is big-endian */ in mapped_ram_read_header()
3004 header->version = be32_to_cpu(header->version); in mapped_ram_read_header()
3006 if (header->version > MAPPED_RAM_HDR_VERSION) { in mapped_ram_read_header()
3007 error_setg(errp, "Migration mapped-ram capability version not " in mapped_ram_read_header()
3009 header->version); in mapped_ram_read_header()
3013 header->page_size = be64_to_cpu(header->page_size); in mapped_ram_read_header()
3014 header->bitmap_offset = be64_to_cpu(header->bitmap_offset); in mapped_ram_read_header()
3015 header->pages_offset = be64_to_cpu(header->pages_offset); in mapped_ram_read_header()
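mapped_ram_read_header() byte-swaps each field because the migration stream is big-endian regardless of host endianness. An illustrative parser for such an on-file header; the struct layout, field order, and names below are assumptions for the example, not the exact MappedRamHeader definition:

```c
#include <stdint.h>

struct example_hdr {
    uint32_t version;
    uint64_t page_size;
    uint64_t bitmap_offset;
    uint64_t pages_offset;
};

static uint32_t read_be32(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static uint64_t read_be64(const uint8_t *p)
{
    return ((uint64_t)read_be32(p) << 32) | read_be32(p + 4);
}

/* buf holds 28 raw big-endian bytes read from the migration file,
 * e.g. parse_example_hdr(buf, &hdr) right after the read. */
static void parse_example_hdr(const uint8_t *buf, struct example_hdr *h)
{
    h->version       = read_be32(buf);
    h->page_size     = read_be64(buf + 4);
    h->bitmap_offset = read_be64(buf + 12);
    h->pages_offset  = read_be64(buf + 20);
}
```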
3022 * long-running RCU critical section. When rcu-reclaims in the code
3045 return -1; in ram_save_setup()
3048 (*rsp)->pss[RAM_CHANNEL_PRECOPY].pss_channel = f; in ram_save_setup()
3061 qemu_put_byte(f, strlen(block->idstr)); in ram_save_setup()
3062 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr)); in ram_save_setup()
3063 qemu_put_be64(f, block->used_length); in ram_save_setup()
3065 block->page_size != max_hg_page_size) { in ram_save_setup()
3066 qemu_put_be64(f, block->page_size); in ram_save_setup()
3069 qemu_put_be64(f, block->mr->addr); in ram_save_setup()
3099 * For legacy QEMUs using per-section sync in ram_save_setup()
3103 * per-channel to work. in ram_save_setup()
3105 * For modern QEMUs using per-round sync in ram_save_setup()
3132 error_setg_errno(errp, -ret, "%s failed", __func__); in ram_save_setup()
3142 long num_pages = block->used_length >> TARGET_PAGE_BITS; in ram_save_file_bmap()
3145 qemu_put_buffer_at(f, (uint8_t *)block->file_bmap, bitmap_size, in ram_save_file_bmap()
3146 block->bitmap_offset); in ram_save_file_bmap()
3154 g_free(block->file_bmap); in ram_save_file_bmap()
3155 block->file_bmap = NULL; in ram_save_file_bmap()
3162 set_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap); in ramblock_set_file_bmap_atomic()
3164 clear_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap); in ramblock_set_file_bmap_atomic()
3192 WITH_QEMU_LOCK_GUARD(&rs->bitmap_mutex) { in ram_save_iterate()
3194 if (ram_list.version != rs->last_version) { in ram_save_iterate()
3229 rs->target_page_count += pages; in ram_save_iterate()
3238 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / in ram_save_iterate()
3295 trace_ram_save_complete(rs->migration_dirty_pages, 0); in ram_save_complete()
3297 rs->last_stage = !migration_in_colo_state(); in ram_save_complete()
3313 qemu_mutex_lock(&rs->bitmap_mutex); in ram_save_complete()
3323 qemu_mutex_unlock(&rs->bitmap_mutex); in ram_save_complete()
3327 qemu_mutex_unlock(&rs->bitmap_mutex); in ram_save_complete()
3355 return -err; in ram_save_complete()
3361 trace_ram_save_complete(rs->migration_dirty_pages, 1); in ram_save_complete()
3372 uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; in ram_state_pending_estimate()
3397 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; in ram_state_pending_exact()
3418 error_report("Failed to load XBZRLE page - wrong compression!"); in load_xbzrle()
3419 return -1; in load_xbzrle()
3423 error_report("Failed to load XBZRLE page - len overflow!"); in load_xbzrle()
3424 return -1; in load_xbzrle()
3427 /* load data and decode */ in load_xbzrle()
3433 TARGET_PAGE_SIZE) == -1) { in load_xbzrle()
3434 error_report("Failed to load XBZRLE page - decode error!"); in load_xbzrle()
3435 return -1; in load_xbzrle()
3446 * Returns a pointer from within the RCU-protected ram_list.
3457 RAMBlock *block = mis->last_recv_block[channel]; in ram_block_from_stream()
3484 mis->last_recv_block[channel] = block; in ram_block_from_stream()
3496 return block->host + offset; in host_from_ram_block_offset()
3503 return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block->host + offset), in host_page_from_ram_block_offset()
3504 block->page_size); in host_page_from_ram_block_offset()
3510 return ((uintptr_t)block->host + offset) & (block->page_size - 1); in host_page_offset_from_ram_block_offset()
3515 qemu_mutex_lock(&ram_state->bitmap_mutex); in colo_record_bitmap()
3518 ram_state->migration_dirty_pages += !test_and_set_bit( in colo_record_bitmap()
3520 block->bmap); in colo_record_bitmap()
3522 qemu_mutex_unlock(&ram_state->bitmap_mutex); in colo_record_bitmap()
3531 if (!block->colo_cache) { in colo_cache_from_block_offset()
3533 __func__, block->idstr); in colo_cache_from_block_offset()
3545 return block->colo_cache + offset; in colo_cache_from_block_offset()
3585 block->colo_cache = qemu_anon_ram_alloc(block->used_length, in colo_init_ram_cache()
3587 if (!block->colo_cache) { in colo_init_ram_cache()
3589 "size 0x" RAM_ADDR_FMT, __func__, block->idstr, in colo_init_ram_cache()
3590 block->used_length); in colo_init_ram_cache()
3592 if (block->colo_cache) { in colo_init_ram_cache()
3593 qemu_anon_ram_free(block->colo_cache, block->used_length); in colo_init_ram_cache()
3594 block->colo_cache = NULL; in colo_init_ram_cache()
3597 return -errno; in colo_init_ram_cache()
3600 qemu_madvise(block->colo_cache, block->used_length, in colo_init_ram_cache()
3613 unsigned long pages = block->max_length >> TARGET_PAGE_BITS; in colo_init_ram_cache()
3614 block->bmap = bitmap_new(pages); in colo_init_ram_cache()
3637 bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS); in colo_incoming_start_dirty_log()
3644 ram_state->migration_dirty_pages = 0; in colo_incoming_start_dirty_log()
3656 g_free(block->bmap); in colo_release_ram_cache()
3657 block->bmap = NULL; in colo_release_ram_cache()
3662 if (block->colo_cache) { in colo_release_ram_cache()
3663 qemu_anon_ram_free(block->colo_cache, block->used_length); in colo_release_ram_cache()
3664 block->colo_cache = NULL; in colo_release_ram_cache()
3693 if (memory_region_is_nonvolatile(rb->mr)) { in ram_load_cleanup()
3701 g_free(rb->receivedmap); in ram_load_cleanup()
3702 rb->receivedmap = NULL; in ram_load_cleanup()
3716 * postcopy-ram. postcopy-ram's similarly named

3725 * ram_load_postcopy: load a page in postcopy case
3727 * Returns 0 for success or -errno in case of error
3741 PostcopyTmpPage *tmp_page = &mis->postcopy_tmp_pages[channel]; in ram_load_postcopy()
3768 ret = -EINVAL; in ram_load_postcopy()
3775 * while in postcopy, which is fine - trying to place via in ram_load_postcopy()
3778 if (!block->host || addr >= block->postcopy_length) { in ram_load_postcopy()
3780 ret = -EINVAL; in ram_load_postcopy()
3783 tmp_page->target_pages++; in ram_load_postcopy()
3784 matches_target_page_size = block->page_size == TARGET_PAGE_SIZE; in ram_load_postcopy()
3791 * The migration protocol uses, possibly smaller, target-pages in ram_load_postcopy()
3795 page_buffer = tmp_page->tmp_huge_page + in ram_load_postcopy()
3798 if (tmp_page->target_pages == 1) { in ram_load_postcopy()
3799 tmp_page->host_addr = in ram_load_postcopy()
3801 } else if (tmp_page->host_addr != in ram_load_postcopy()
3804 error_report("Non-same host page detected on channel %d: " in ram_load_postcopy()
3807 channel, tmp_page->host_addr, in ram_load_postcopy()
3809 block->idstr, addr, tmp_page->target_pages); in ram_load_postcopy()
3810 ret = -EINVAL; in ram_load_postcopy()
3818 if (tmp_page->target_pages == in ram_load_postcopy()
3819 (block->page_size / TARGET_PAGE_SIZE)) { in ram_load_postcopy()
3822 place_source = tmp_page->tmp_huge_page; in ram_load_postcopy()
3830 ret = -EINVAL; in ram_load_postcopy()
3835 * this is a zero page and (block->page_size == TARGET_PAGE_SIZE). in ram_load_postcopy()
3843 tmp_page->all_zero = false; in ram_load_postcopy()
3865 ret = -EINVAL; in ram_load_postcopy()
3875 if (tmp_page->all_zero) { in ram_load_postcopy()
3876 ret = postcopy_place_page_zero(mis, tmp_page->host_addr, block); in ram_load_postcopy()
3878 ret = postcopy_place_page(mis, tmp_page->host_addr, in ram_load_postcopy()
3907 qemu_mutex_lock(&ram_state->bitmap_mutex); in colo_flush_ram_cache()
3914 trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages); in colo_flush_ram_cache()
3933 dst_host = block->host in colo_flush_ram_cache()
3935 src_host = block->colo_cache in colo_flush_ram_cache()
3942 qemu_mutex_unlock(&ram_state->bitmap_mutex); in colo_flush_ram_cache()
3951 data->opaque = host_addr; in ram_load_multifd_pages()
3952 data->file_offset = offset; in ram_load_multifd_pages()
3953 data->size = size; in ram_load_multifd_pages()
3978 unread = TARGET_PAGE_SIZE * (clear_bit_idx - set_bit_idx); in read_ramblock_mapped_ram()
3985 block->idstr); in read_ramblock_mapped_ram()
3993 block->pages_offset + offset); in read_ramblock_mapped_ram()
3996 block->pages_offset + offset); in read_ramblock_mapped_ram()
4003 unread -= read; in read_ramblock_mapped_ram()
4012 "from file offset %" PRIx64 ": ", block->idstr, offset, in read_ramblock_mapped_ram()
4013 block->pages_offset + offset); in read_ramblock_mapped_ram()
4029 block->pages_offset = header.pages_offset; in parse_ramblock_mapped_ram()
4037 if (!QEMU_IS_ALIGNED(block->pages_offset, TARGET_PAGE_SIZE)) { in parse_ramblock_mapped_ram()
4040 block->idstr); in parse_ramblock_mapped_ram()
4059 qemu_set_offset(f, block->pages_offset + length, SEEK_SET); in parse_ramblock_mapped_ram()
4076 return -EINVAL; in parse_ramblock()
4082 error_report("block %s should not be migrated !", block->idstr); in parse_ramblock()
4083 return -EINVAL; in parse_ramblock()
4086 if (length != block->used_length) { in parse_ramblock()
4102 block->page_size != max_hg_page_size) { in parse_ramblock()
4104 if (remote_page_size != block->page_size) { in parse_ramblock()
4106 "(local) %zd != %" PRId64, block->idstr, in parse_ramblock()
4107 block->page_size, remote_page_size); in parse_ramblock()
4108 return -EINVAL; in parse_ramblock()
4114 block->mr->addr != addr) { in parse_ramblock()
4116 "%" PRId64 "!= %" PRId64, block->idstr, in parse_ramblock()
4117 (uint64_t)addr, (uint64_t)block->mr->addr); in parse_ramblock()
4118 return -EINVAL; in parse_ramblock()
4121 ret = rdma_block_notification_handle(f, block->idstr); in parse_ramblock()
4150 ret = -EINVAL; in parse_ramblocks()
4152 total_ram_bytes -= length; in parse_ramblocks()
4159 * ram_load_precopy: load pages in precopy case
4161 * Returns 0 for success or -errno in case of error
4208 ret = -EINVAL; in ram_load_precopy()
4219 * After going into COLO stage, we should not load the page in ram_load_precopy()
4223 * while we need to stop VM, which is a time-consuming process. in ram_load_precopy()
4224 * Here we optimize it by a trick, back-up every page while in in ram_load_precopy()
4227 * back-up all SVM'S memory in COLO preparing stage. in ram_load_precopy()
4243 ret = -EINVAL; in ram_load_precopy()
4250 trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host); in ram_load_precopy()
4257 * For mapped-ram migration (to a file) using multifd, we sync in ram_load_precopy()
4272 ret = -EINVAL; in ram_load_precopy()
4286 ret = -EINVAL; in ram_load_precopy()
4298 * Mapped-ram migration flushes once and for all after in ram_load_precopy()
4313 ret = -EINVAL; in ram_load_precopy()
4339 return -EINVAL; in ram_load()
4372 "is not supported now!", rb->idstr, rb->host); in ram_has_postcopy()
4384 QEMUFile *file = s->to_dst_file; in ram_dirty_bitmap_sync_all()
4388 qatomic_set(&rs->postcopy_bmap_sync_requested, 0); in ram_dirty_bitmap_sync_all()
4390 qemu_savevm_send_recv_bitmap(file, block->idstr); in ram_dirty_bitmap_sync_all()
4391 trace_ram_dirty_bitmap_request(block->idstr); in ram_dirty_bitmap_sync_all()
4392 qatomic_inc(&rs->postcopy_bmap_sync_requested); in ram_dirty_bitmap_sync_all()
4398 while (qatomic_read(&rs->postcopy_bmap_sync_requested)) { in ram_dirty_bitmap_sync_all()
4400 return -1; in ram_dirty_bitmap_sync_all()
4419 QEMUFile *file = s->rp_state.from_dst_file; in ram_dirty_bitmap_reload()
4421 unsigned long nbits = block->used_length >> TARGET_PAGE_BITS; in ram_dirty_bitmap_reload()
4426 trace_ram_dirty_bitmap_reload_begin(block->idstr); in ram_dirty_bitmap_reload()
4428 if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) { in ram_dirty_bitmap_reload()
4430 MigrationStatus_str(s->state)); in ram_dirty_bitmap_reload()
4448 " != 0x%"PRIx64")", block->idstr, size, local_size); in ram_dirty_bitmap_reload()
4458 block->idstr, local_size, size); in ram_dirty_bitmap_reload()
4464 block->idstr, end_mark); in ram_dirty_bitmap_reload()
4472 bitmap_from_le(block->bmap, le_bitmap, nbits); in ram_dirty_bitmap_reload()
4478 bitmap_complement(block->bmap, block->bmap, nbits); in ram_dirty_bitmap_reload()
4484 trace_ram_dirty_bitmap_reload_complete(block->idstr); in ram_dirty_bitmap_reload()
4486 qatomic_dec(&rs->postcopy_bmap_sync_requested); in ram_dirty_bitmap_reload()
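In the ram_dirty_bitmap_reload() fragments above, the destination's received bitmap is converted from little-endian and then complemented: pages the destination already holds need not be resent, so the inverted bitmap becomes, in effect, the source's dirty bitmap for postcopy recovery. A tiny sketch of that inversion (illustrative, not QEMU's bitmap_complement):

```c
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * 8)

/* dst = ~src for the words covering the first nbits bits; callers only ever
 * look at the first nbits, so trailing bits in the last word don't matter. */
static void bitmap_invert(unsigned long *dst, const unsigned long *src,
                          size_t nbits)
{
    size_t nwords = (nbits + BITS_PER_LONG - 1) / BITS_PER_LONG;

    for (size_t i = 0; i < nwords; i++) {
        dst[i] = ~src[i];
    }
}

int main(void)
{
    unsigned long received = 0;     /* destination reports pages 0 and 3 */
    unsigned long dirty;

    received |= 1UL << 0;
    received |= 1UL << 3;
    bitmap_invert(&dirty, &received, 4);
    printf("dirty mask: 0x%lx\n", dirty & 0xf);   /* pages 1 and 2 -> 0x6 */
    return 0;
}
```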
4510 ram_state_resume_prepare(rs, s->to_dst_file); in ram_resume_prepare()
4524 * The load of these pages must happen before switching to postcopy. in ram_save_postcopy_prepare()
4526 * threads is still non-atomic, so the load cannot happen with vCPUs in ram_save_postcopy_prepare()
4553 qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS); in postcopy_preempt_shutdown_file()
4554 qemu_fflush(s->postcopy_qemufile_src); in postcopy_preempt_shutdown_file()
4592 * changing at random points in time - especially after sending the in ram_mig_ram_block_resized()
4596 error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr); in ram_mig_ram_block_resized()
4606 * Update what ram_postcopy_incoming_init()->init_range() does at the in ram_mig_ram_block_resized()
4611 if (ram_discard_range(rb->idstr, old_size, new_size - old_size)) { in ram_mig_ram_block_resized()
4613 rb->idstr); in ram_mig_ram_block_resized()
4616 rb->postcopy_length = new_size; in ram_mig_ram_block_resized()
4629 rb->idstr, ps); in ram_mig_ram_block_resized()
4630 exit(-1); in ram_mig_ram_block_resized()