Lines Matching full:pages

88  * the pages region in the migration file at a time.
112 * right after the requested urgent pages.
124 /* used by the search for pages to send */
368 * PageSearchStatus structures for the channels when sending pages.
376 /* Last block that we have visited searching for dirty pages */
382 /* How many times we have dirtied too many pages */
389 /* number of dirty pages since start_time */
393 /* Number of xbzrle pages since the beginning of the period */
402 /* total handled target pages at the beginning of the period */
404 /* total handled target pages since start */
547 * which we can transfer pages to the destination then we should be
654 * Example: 2 pages (8KB) being encoded, first page encoding generates 2KB, in save_xbzrle_page()
660 xbzrle_counters.pages++; in save_xbzrle_page()
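The save_xbzrle_page() matches above concern XBZRLE, which sends a page as a delta against a cached copy rather than in full. A minimal, self-contained sketch of that idea follows; it only illustrates a skip/copy run encoding under simplified assumptions and is not QEMU's actual encoder or wire format.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/*
 * Illustrative only: encode the bytes of `cur` that differ from the cached
 * copy `old` as (skip, copy-len, bytes) records.  Returns the encoded size,
 * or -1 if the delta would not fit, in which case the caller would send the
 * page normally.
 */
static int xbzrle_like_encode(const uint8_t *old, const uint8_t *cur,
                              uint8_t *out, int out_max)
{
    int i = 0, o = 0;

    while (i < PAGE_SIZE) {
        int zrun = 0, nrun = 0;

        while (i + zrun < PAGE_SIZE && old[i + zrun] == cur[i + zrun]) {
            zrun++;                         /* unchanged run: just skip it */
        }
        i += zrun;
        while (i + nrun < PAGE_SIZE && old[i + nrun] != cur[i + nrun]) {
            nrun++;                         /* changed run: must be copied */
        }
        if (nrun == 0) {
            break;                          /* rest of the page is unchanged */
        }
        if (o + 4 + nrun > out_max) {
            return -1;                      /* delta too big, give up */
        }
        out[o++] = zrun >> 8;
        out[o++] = zrun & 0xff;
        out[o++] = nrun >> 8;
        out[o++] = nrun & 0xff;
        memcpy(out + o, cur + i, nrun);
        o += nrun;
        i += nrun;
    }
    return o;
}

int main(void)
{
    static uint8_t cached[PAGE_SIZE], current[PAGE_SIZE], enc[PAGE_SIZE];

    current[100] = 0xaa;                    /* dirty a single byte */
    int n = xbzrle_like_encode(cached, current, enc, sizeof(enc));
    printf("encoded %d bytes instead of %d\n", n, PAGE_SIZE);
    return 0;
}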
736 * If currently sending a host page, only look for dirty pages within the in pss_find_next_dirty()
761 * of the small chunk will always be 64 pages aligned so the in migration_clear_memory_region_dirty_bitmap()
784 * Clear pages from start to start + npages - 1, so the end boundary is in migration_clear_memory_region_dirty_bitmap_range()
793 * colo_bitmap_find_dirty: find contiguous dirty pages from start
799 * @rb: RAMBlock where to search for dirty pages
801 * @num: the number of contiguous dirty pages
885 * Exclude all dirty pages from migration that fall into a discarded range as
889 * Discarded pages ("logically unplugged") have undefined content and must
890 * not get migrated, because even reading these pages for migration might
952 * For VMs with just normal pages this is equivalent to the host page
953 * size. If it's got some huge pages then it's the OR of all the
972 xbzrle_counters.pages; in ram_get_total_transferred_pages()
994 unencoded_size = (xbzrle_counters.pages - rs->xbzrle_pages_prev) * in migration_update_rates()
997 if (xbzrle_counters.pages == rs->xbzrle_pages_prev || !encoded_size) { in migration_update_rates()
1002 rs->xbzrle_pages_prev = xbzrle_counters.pages; in migration_update_rates()
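The migration_update_rates() matches above derive a per-period xbzrle compression rate by comparing how many bytes the encoded pages would have occupied raw against the bytes actually produced. A standalone sketch of that calculation, using illustrative names rather than QEMU's fields:

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_SIZE 4096

/*
 * pages_now/pages_prev and bytes_now/bytes_prev are hypothetical counters
 * sampled at the end and at the start of the period.
 */
static double xbzrle_compression_rate(uint64_t pages_now, uint64_t pages_prev,
                                      uint64_t bytes_now, uint64_t bytes_prev)
{
    uint64_t unencoded = (pages_now - pages_prev) * TARGET_PAGE_SIZE;
    uint64_t encoded = bytes_now - bytes_prev;

    /* Guard against dividing by zero when nothing was encoded this period. */
    if (pages_now == pages_prev || encoded == 0) {
        return 0.0;
    }
    return (double)unencoded / encoded;
}

int main(void)
{
    printf("compression rate = %.2f\n",
           xbzrle_compression_rate(1000, 200, 400000, 100000));
    return 0;
}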
1146 * Returns the number of pages written.
1170 /* zero pages are not transferred with mapped-ram */ in save_zero_page()
1197 * Returns the number of pages written.
1233 * Returns the number of pages written.
1235 * >=0 - Number of pages written - this might legally be 0
1244 int pages = -1; in ram_save_page() local
1256 pages = save_xbzrle_page(rs, pss, &p, current_addr, in ram_save_page()
1267 if (pages == -1) { in ram_save_page()
1268 pages = save_normal_page(pss, block, offset, p, send_async); in ram_save_page()
1273 return pages; in ram_save_page()
1426 * a range of pages has been saved
1542 * ram_block_populate_read: preallocate page tables and populate pages in the
1553 * Skip populating all pages that fall into a discarded range as managed by in ram_block_populate_read()
1557 * modifications via userfaultfd WP reliably, because these pages will in ram_block_populate_read()
1594 * Populate pages of the RAM block before enabling userfault_fd in ram_write_tracking_prepare()
1599 * pages with pte_none() entries in page table. in ram_write_tracking_prepare()
1775 * Skips pages that have already been sent (!dirty)
1814 * when we have vcpus blocked by the write-protected pages. in get_queued_page()
1822 * since the guest is likely to want other pages near to the page in get_queued_page()
1839 * migration_page_queue_free: drop any remaining pages in the ram
2017 /* How many guest pages are there in one host page? */ in pss_host_page_prepare()
2034 * The host page spans multiple guest pages, we send them in pss_host_page_prepare()
2137 * Starting at *offset, send pages up to the end of the current host
2140 * Only dirty target pages are sent. Note that the host page size may
2150 * Returns the number of pages written or negative on error
2158 int tmppages, pages = 0; in ram_save_host_page() local
2175 /* Check if the page is dirty and if it is send it */ in ram_save_host_page()
2187 pages += tmppages; in ram_save_host_page()
2189 * Allow rate limiting to happen in the middle of huge pages if in ram_save_host_page()
2214 return (res < 0 ? res : pages); in ram_save_host_page()
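The ram_save_host_page() matches above show a loop over the target pages of one host page that accumulates the number of pages written and propagates errors as a negative return. A simplified sketch of that control flow; save_one_target_page() is a hypothetical stand-in, not a QEMU function.

#include <stdio.h>

/* Pretend every target page is dirty and writing it succeeds. */
static int save_one_target_page(int idx)
{
    (void)idx;
    return 1;
}

static int save_host_page(int target_pages_per_host_page)
{
    int pages = 0;

    for (int i = 0; i < target_pages_per_host_page; i++) {
        int tmppages = save_one_target_page(i);
        if (tmppages < 0) {
            return tmppages;        /* error: propagate it unchanged */
        }
        pages += tmppages;          /* count pages actually sent */
    }
    return pages;                   /* 0 means nothing was dirty */
}

int main(void)
{
    /* e.g. a 2M huge page made of 512 4K target pages */
    printf("pages written: %d\n", save_host_page(512));
    return 0;
}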
2246 * Returns the number of pages written where zero means no dirty pages,
2252 * pages in a host page that are dirty.
2259 int pages = 0; in ram_find_and_save_block() local
2263 return pages; in ram_find_and_save_block()
2297 pages = res; in ram_find_and_save_block()
2304 pages = ram_save_host_page(rs, pss); in ram_find_and_save_block()
2305 if (pages) { in ram_find_and_save_block()
2313 return pages; in ram_find_and_save_block()
2522 * Deal with TPS != HPS and huge pages. It discards any partially sent in postcopy_each_ram_send_discard()
2546 * Postcopy requires that all target pages in a hostpage are dirty or
2557 unsigned long pages = block->used_length >> TARGET_PAGE_BITS; in postcopy_chunk_hostpages_pass() local
2566 run_start = find_next_bit(bitmap, pages, 0); in postcopy_chunk_hostpages_pass()
2568 while (run_start < pages) { in postcopy_chunk_hostpages_pass()
2571 * If the start of this run of pages is in the middle of a host in postcopy_chunk_hostpages_pass()
2576 run_start = find_next_zero_bit(bitmap, pages, run_start + 1); in postcopy_chunk_hostpages_pass()
2594 * Remark them as dirty, updating the count for any pages in postcopy_chunk_hostpages_pass()
2602 run_start = find_next_bit(bitmap, pages, run_start); in postcopy_chunk_hostpages_pass()
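The postcopy_chunk_hostpages_pass() matches above scan the dirty bitmap and re-dirty partial host pages, because postcopy needs each host page to be discarded or kept as a whole. A self-contained sketch of that pass, with trivial bit helpers standing in for QEMU's bitmap API:

#include <stdbool.h>
#include <stdio.h>

#define PAGES              64   /* target pages tracked in this toy bitmap */
#define PAGES_PER_HOSTPAGE 4    /* e.g. a 16K host page of 4K target pages */

static unsigned char bitmap[PAGES];

static bool test_bit(int i) { return bitmap[i]; }
static void set_bit(int i)  { bitmap[i] = 1; }

/* Mark the whole host page dirty whenever any target page within it is. */
static void chunk_hostpages_pass(void)
{
    for (int run_start = 0; run_start < PAGES; run_start++) {
        if (!test_bit(run_start)) {
            continue;                                 /* clean page: skip */
        }
        int host_start = run_start - (run_start % PAGES_PER_HOSTPAGE);
        for (int i = host_start; i < host_start + PAGES_PER_HOSTPAGE; i++) {
            set_bit(i);                               /* remark as dirty */
        }
        run_start = host_start + PAGES_PER_HOSTPAGE - 1;
    }
}

int main(void)
{
    set_bit(5);                 /* one dirty page midway into a host page */
    chunk_hostpages_pass();
    for (int i = 4; i < 8; i++) {
        printf("target page %d dirty=%d\n", i, test_bit(i));
    }
    return 0;
}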
2609 * Transmit the set of pages to be discarded after precopy to the target;
2610 * these are pages that:
2612 * b) Pages that have never been transmitted; this ensures that
2613 * any pages on the destination that have been mapped by background
2614 * tasks get discarded (transparent huge pages is the specific concern)
2639 * ram_discard_range: discard dirtied pages at the beginning of postcopy
2741 * Count the total number of pages used by ram blocks not including any in ram_state_init()
2755 unsigned long pages; in ram_list_init_bitmaps() local
2772 pages = block->max_length >> TARGET_PAGE_BITS; in ram_list_init_bitmaps()
2782 block->bmap = bitmap_new(pages); in ram_list_init_bitmaps()
2783 bitmap_set(block->bmap, 0, pages); in ram_list_init_bitmaps()
2785 block->file_bmap = bitmap_new(pages); in ram_list_init_bitmaps()
2788 block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift)); in ram_list_init_bitmaps()
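The ram_list_init_bitmaps() matches above size each block's dirty bitmap from the block length in target pages and start with every bit set, so all RAM is treated as dirty for the first pass. A minimal sketch of that setup; bitmap_new() and bitmap_set() here are simplified stand-ins for QEMU's helpers.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define TARGET_PAGE_BITS 12     /* assume 4K target pages for this example */
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static unsigned long *bitmap_new(unsigned long nbits)
{
    size_t words = (nbits + BITS_PER_LONG - 1) / BITS_PER_LONG;
    return calloc(words, sizeof(unsigned long));    /* all bits clear */
}

static void bitmap_set(unsigned long *map, unsigned long start,
                       unsigned long count)
{
    for (unsigned long i = start; i < start + count; i++) {
        map[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
    }
}

int main(void)
{
    unsigned long max_length = 256UL * 1024 * 1024;   /* a 256M RAM block */
    unsigned long pages = max_length >> TARGET_PAGE_BITS;

    unsigned long *bmap = bitmap_new(pages);
    bitmap_set(bmap, 0, pages);     /* everything starts out dirty */

    printf("%lu pages tracked\n", pages);
    free(bmap);
    return 0;
}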
2795 unsigned long pages; in migration_bitmap_clear_discarded_pages() local
2801 pages = ramblock_dirty_bitmap_clear_discarded_pages(rb); in migration_bitmap_clear_discarded_pages()
2802 rs->migration_dirty_pages -= pages; in migration_bitmap_clear_discarded_pages()
2833 * containing all 1s to exclude any discarded pages from migration. in ram_init_bitmaps()
2860 uint64_t pages = 0; in ram_state_resume_prepare() local
2869 pages += bitmap_count_one(block->bmap, in ram_state_resume_prepare()
2874 rs->migration_dirty_pages = pages; in ram_state_resume_prepare()
2881 trace_ram_state_resume_prepare(pages); in ram_state_resume_prepare()
2885 * This function clears bits of the free pages reported by the caller from the
2887 * start of the contiguous guest free pages, and @len is the total bytes of
2888 * those pages.
2924 * The skipped free pages are equivalent to being sent from clear_bmap's in qemu_guest_free_page_hint()
2926 * are initially set. Otherwise those skipped pages will be sent in in qemu_guest_free_page_hint()
2941 * The target's page size, so we know how many pages are in the
2946 * The offset in the migration file where the pages bitmap is
2951 * The offset in the migration file where the actual pages (data)
2971 * Save the file offsets of where the bitmap and the pages should in mapped_ram_setup_ramblock()
3114 * multifd recv threads won't start processing guest pages early before in ram_save_setup()
3151 * with multifd channels. No channels should be sending pages in ram_save_file_bmap()
3211 int pages; in ram_save_iterate() local
3217 pages = ram_find_and_save_block(rs); in ram_save_iterate()
3218 /* no more pages to send */ in ram_save_iterate()
3219 if (pages == 0) { in ram_save_iterate()
3224 if (pages < 0) { in ram_save_iterate()
3225 qemu_file_set_error(f, pages); in ram_save_iterate()
3229 rs->target_page_count += pages; in ram_save_iterate()
3315 int pages; in ram_save_complete() local
3317 pages = ram_find_and_save_block(rs); in ram_save_complete()
3319 if (pages == 0) { in ram_save_complete()
3322 if (pages < 0) { in ram_save_complete()
3324 return pages; in ram_save_complete()
3513 void colo_record_bitmap(RAMBlock *block, ram_addr_t *normal, uint32_t pages) in colo_record_bitmap() argument
3516 for (int i = 0; i < pages; i++) { in colo_record_bitmap()
3538 * During colo checkpoint, we need a bitmap of these migrated pages. in colo_cache_from_block_offset()
3539 * It helps us to decide which pages in ram cache should be flushed in colo_cache_from_block_offset()
3607 * Record the dirty pages sent by PVM; we use this dirty bitmap together in colo_init_ram_cache()
3613 unsigned long pages = block->max_length >> TARGET_PAGE_BITS; in colo_init_ram_cache() local
3614 block->bmap = bitmap_new(pages); in colo_init_ram_cache()
3774 * We might place pages beyond used_length in case RAM was shrunk in ram_load_postcopy()
3786 * Postcopy requires that we place whole host pages atomically; in ram_load_postcopy()
3787 * these may be huge pages for RAMBlocks that are backed by in ram_load_postcopy()
3791 * The migration protocol uses, possibly smaller, target-pages in ram_load_postcopy()
3845 /* For huge pages, we always use temporary buffer */ in ram_load_postcopy()
3849 * For small pages that match the target page size, we in ram_load_postcopy()
3897 * Only flush the pages that were dirtied by PVM or SVM or both.
4032 * Check the alignment of the file region that contains pages. We in parse_ramblock_mapped_ram()
4039 "Error reading ramblock %s pages, region has bad alignment", in parse_ramblock_mapped_ram()
4058 /* Skip pages array */ in parse_ramblock_mapped_ram()
4159 * ram_load_precopy: load pages in precopy case
4231 /* In COLO stage, put all pages into cache temporarily */ in ram_load_precopy()
4236 * Put all pages into both cache and SVM's memory. in ram_load_precopy()
4260 * (including all the guest memory pages within) are fully in ram_load_precopy()
4522 * pages queued before postcopy starts have been flushed. in ram_save_postcopy_prepare()
4524 * The load of these pages must happen before switching to postcopy. in ram_save_postcopy_prepare()
4525 * It's because loading of guest pages (so far) in multifd recv in ram_save_postcopy_prepare()
4529 * This flush and sync will guarantee that those pages are loaded in ram_save_postcopy_prepare()
4536 * all previous guest pages queued in the multifd channels are in ram_save_postcopy_prepare()