/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef SYSTEM_RAM_ADDR_H
#define SYSTEM_RAM_ADDR_H

#include "system/xen.h"
#include "system/tcg.h"
#include "exec/cputlb.h"
#include "exec/ramlist.h"
#include "system/ramblock.h"
#include "exec/exec-all.h"
#include "system/memory.h"
#include "exec/target_page.h"
#include "qemu/rcu.h"

#include "exec/hwaddr.h"
#include "exec/cpu-common.h"

extern uint64_t total_dirty_pages;

/**
 * clear_bmap_size: calculate clear bitmap size
 *
 * @pages: number of guest pages
 * @shift: guest page number shift
 *
 * Returns: number of bits for the clear bitmap
 */
static inline long clear_bmap_size(uint64_t pages, uint8_t shift)
{
    return DIV_ROUND_UP(pages, 1UL << shift);
}
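/*
 * Granularity sketch (illustrative; the shift value is hypothetical):
 * one bit of the clear bitmap covers 1UL << shift guest pages.  With
 * 4KiB target pages and shift == 18, a bit spans 2^18 pages == 1GiB,
 * so a 4GiB guest (1048576 pages) needs
 *
 *     clear_bmap_size(1048576, 18) == DIV_ROUND_UP(1048576, 262144) == 4
 *
 * bits in total.
 */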
/**
 * clear_bmap_set: set clear bitmap for the page range.  Must be called
 *                 with bitmap_mutex held.
 *
 * @rb: the ramblock to operate on
 * @start: the start page number
 * @npages: number of pages to set in the bitmap
 *
 * Returns: None
 */
static inline void clear_bmap_set(RAMBlock *rb, uint64_t start,
                                  uint64_t npages)
{
    uint8_t shift = rb->clear_bmap_shift;

    bitmap_set(rb->clear_bmap, start >> shift, clear_bmap_size(npages, shift));
}

/**
 * clear_bmap_test_and_clear: test the clear bitmap for the page; clear
 *                            the bit if set.  Must be called with
 *                            bitmap_mutex held.
 *
 * @rb: the ramblock to operate on
 * @page: the page number to check
 *
 * Returns: true if the bit was set, false otherwise
 */
static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
{
    uint8_t shift = rb->clear_bmap_shift;

    return bitmap_test_and_clear(rb->clear_bmap, page >> shift, 1);
}

static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return b && b->host && offset < b->used_length;
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}

static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
                                                            RAMBlock *rb)
{
    uint64_t host_addr_offset =
        (uint64_t)(uintptr_t)(host_addr - (void *)rb->host);
    return host_addr_offset >> TARGET_PAGE_BITS;
}

bool ramblock_is_pmem(RAMBlock *rb);
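/*
 * Usage sketch (illustrative; "block", "offset", "data" and "len" are
 * hypothetical): ramblock_ptr() asserts on out-of-range offsets, so
 * callers that are not certain the offset is valid check first.
 *
 *     if (offset_in_ramblock(block, offset)) {
 *         void *host = ramblock_ptr(block, offset);
 *         memcpy(host, data, len);  // len must stay within used_length
 *     }
 */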
/**
 * qemu_ram_alloc_from_file,
 * qemu_ram_alloc_from_fd: Allocate a ram block from the specified backing
 *                         file or device
 *
 * Parameters:
 *  @size: the size in bytes of the ram block
 *  @max_size: the maximum size of the block after resizing
 *  @mr: the memory region where the ram block is
 *  @resized: callback after calls to qemu_ram_resize
 *  @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *              RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *              RAM_READONLY_FD, RAM_GUEST_MEMFD
 *  @mem_path or @fd: specify the backing file or device
 *  @offset: offset into the target file
 *  @grow: extend the file if necessary (an empty file is always extended)
 *  @errp: pointer to Error*, to store an error if it happens
 *
 * Return:
 *  On success, return a pointer to the ram block.
 *  On failure, return NULL.
 */
typedef void (*qemu_ram_resize_cb)(const char *, uint64_t length, void *host);

RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   uint32_t ram_flags, const char *mem_path,
                                   off_t offset, Error **errp);
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, ram_addr_t max_size,
                                 qemu_ram_resize_cb resized, MemoryRegion *mr,
                                 uint32_t ram_flags, int fd, off_t offset,
                                 bool grow,
                                 Error **errp);

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags, MemoryRegion *mr,
                         Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    qemu_ram_resize_cb resized,
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);

void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length);

/* Write the whole block of memory back to its backing storage */
static inline void qemu_ram_block_writeback(RAMBlock *block)
{
    qemu_ram_msync(block, 0, block->used_length);
}
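/*
 * Usage sketch (illustrative; the path is hypothetical and error
 * handling is minimal): back a RAM block with a shared file.
 *
 *     Error *err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc_from_file(size, mr, RAM_SHARED,
 *                                             "/dev/shm/guest-ram", 0,
 *                                             &err);
 *     if (!rb) {
 *         error_report_err(err);
 *     }
 */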
#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    WITH_RCU_READ_LOCK_GUARD() {
        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);

        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        base = page - offset;
        while (page < end) {
            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
            unsigned long num = next - base;
            unsigned long found = find_next_bit(blocks->blocks[idx],
                                                num, offset);
            if (found < num) {
                dirty = true;
                break;
            }

            page = next;
            idx++;
            offset = 0;
            base += DIRTY_MEMORY_BLOCK_SIZE;
        }
    }

    return dirty;
}

static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    RCU_READ_LOCK_GUARD();

    blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num,
                                                 offset);
        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    return dirty;
}
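/*
 * Index arithmetic sketch (illustrative restatement of the loops above):
 * each client's dirty bitmap is carved into blocks of
 * DIRTY_MEMORY_BLOCK_SIZE bits, so a guest page number decomposes as
 *
 *     idx    = page / DIRTY_MEMORY_BLOCK_SIZE;  // which block
 *     offset = page % DIRTY_MEMORY_BLOCK_SIZE;  // bit within that block
 *     base   = page - offset;                   // first page of the block
 *
 * The walk then advances block by block, with offset reset to 0 after
 * the first (possibly partial) block.
 */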
static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}

static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    RCU_READ_LOCK_GUARD();

    blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);
}
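/*
 * Usage sketch (illustrative; "addr" and "len" are hypothetical): the
 * value returned by cpu_physical_memory_range_includes_clean() is the
 * subset of @mask whose clients still have at least one clean page in
 * the range, so an all-dirty range yields 0 and lets the caller skip
 * re-dirtying entirely.
 *
 *     if (cpu_physical_memory_range_includes_clean(addr, len,
 *                                                  DIRTY_CLIENTS_NOCODE)) {
 *         // at least one of VGA/migration has a clean page in the range
 *     }
 */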
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    WITH_RCU_READ_LOCK_GUARD() {
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = qatomic_rcu_read(&ram_list.dirty_memory[i]);
        }

        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        base = page - offset;
        while (page < end) {
            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

            if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
                bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                                  offset, next - page);
            }
            if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
                bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                                  offset, next - page);
            }
            if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
                bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                                  offset, next - page);
            }

            page = next;
            idx++;
            offset = 0;
            base += DIRTY_MEMORY_BLOCK_SIZE;
        }
    }

    if (xen_enabled()) {
        xen_hvm_modified_memory(start, length);
    }
}
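/*
 * Usage sketch (illustrative, modeled on how writers to guest RAM
 * combine the helpers above; "mr", "addr" and "length" are
 * hypothetical): narrow the region's dirty-log mask to the clients
 * that still have clean pages, then dirty only those.
 *
 *     uint8_t mask = memory_region_get_dirty_log_mask(mr);
 *     mask = cpu_physical_memory_range_includes_clean(addr, length, mask);
 *     if (mask) {
 *         cpu_physical_memory_set_dirty_range(addr, length, mask);
 *     }
 */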
#if !defined(_WIN32)

/*
 * Contrary to cpu_physical_memory_sync_dirty_bitmap(), this function returns
 * the total number of dirty pages in the @bitmap passed as argument, whereas
 * cpu_physical_memory_sync_dirty_bitmap() returns only the newly dirtied
 * pages, i.e. those that were not already set in the global migration bitmap.
 */
static inline
uint64_t cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                ram_addr_t start,
                                                ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c, nbits;
    hwaddr addr;
    ram_addr_t ram_addr;
    uint64_t num_dirty = 0;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = qemu_real_host_page_size() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* Is the start address aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        WITH_RCU_READ_LOCK_GUARD() {
            for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
                blocks[i] =
                    qatomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
            }

            for (k = 0; k < nr; k++) {
                if (bitmap[k]) {
                    unsigned long temp = leul_to_cpu(bitmap[k]);

                    nbits = ctpopl(temp);
                    qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);

                    if (global_dirty_tracking) {
                        qatomic_or(
                            &blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
                            temp);
                        if (unlikely(
                            global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
                            total_dirty_pages += nbits;
                        }
                    }

                    num_dirty += nbits;

                    if (tcg_enabled()) {
                        qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
                                   temp);
                    }
                }

                if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                    offset = 0;
                    idx++;
                }
            }
        }

        if (xen_enabled()) {
            xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
        }
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL
                                        : DIRTY_CLIENTS_NOCODE;

        if (!global_dirty_tracking) {
            clients &= ~(1 << DIRTY_MEMORY_MIGRATION);
        }

        /*
         * bitmap-traveling is faster than memory-traveling (for addr...),
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                nbits = ctpopl(c);
                if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
                    total_dirty_pages += nbits;
                }
                num_dirty += nbits;
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }

    return num_dirty;
}
#endif /* not _WIN32 */

static inline void cpu_physical_memory_dirty_bits_cleared(ram_addr_t start,
                                                          ram_addr_t length)
{
    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client);

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length,
                                             DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}
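/*
 * Usage sketch (illustrative, patterned on how display code consumes
 * the snapshot API; "mr", "size", "lines" and "stride" are
 * hypothetical): snapshot the VGA dirty bits once, then query the
 * frozen copy per scanline.
 *
 *     DirtyBitmapSnapshot *snap =
 *         cpu_physical_memory_snapshot_and_clear_dirty(mr, 0, size,
 *                                                      DIRTY_MEMORY_VGA);
 *     for (int y = 0; y < lines; y++) {
 *         if (cpu_physical_memory_snapshot_get_dirty(snap, y * stride,
 *                                                    stride)) {
 *             // redraw scanline y
 *         }
 *     }
 *     g_free(snap);
 */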
/* Called within an RCU critical section */
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                               ram_addr_t start,
                                               ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;
    unsigned long *dest = rb->bmap;

    /* Are the start address and length aligned at the start of a word? */
    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
         (start + rb->offset) &&
        !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

        src = qatomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = qatomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }
        if (num_dirty) {
            cpu_physical_memory_dirty_bits_cleared(start, length);
        }

        if (rb->clear_bmap) {
            /*
             * Postpone the dirty bitmap clear to the point right before
             * we really send the pages; also, the clear-dirty procedure
             * will be split into smaller chunks.
             */
            clear_bmap_set(rb, start >> TARGET_PAGE_BITS,
                           length >> TARGET_PAGE_BITS);
        } else {
            /* Slow path: still clear the dirty bitmap in one huge chunk */
            memory_region_clear_dirty_bitmap(rb->mr, start, length);
        }
    } else {
        ram_addr_t offset = rb->offset;

        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr + offset,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}

#endif