/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"

ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp);
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
int qemu_get_ram_fd(ram_addr_t addr);
void *qemu_get_ram_block_host_ptr(ram_addr_t addr);
void *qemu_get_ram_ptr(ram_addr_t addr);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);

/* Return true if any page in [start, start + length) is dirty for @client. */
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    unsigned long end, page, next;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    next = find_next_bit(ram_list.dirty_memory[client], end, page);

    return next < end;
}

/* Return true if the page containing @addr is dirty for @client. */
static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

/* Return true unless the page is dirty for all of the VGA, CODE and
 * MIGRATION clients at once. */
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

/* Mark the page containing @addr dirty for @client. */
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    assert(client < DIRTY_MEMORY_NUM);
    set_bit(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
}

/* Mark a range dirty for the MIGRATION and VGA clients only. */
static inline void cpu_physical_memory_set_dirty_range_nocode(ram_addr_t start,
                                                              ram_addr_t length)
{
    unsigned long end, page;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
}

/* Mark a range dirty for all clients and notify Xen. */
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length)
{
    unsigned long end, page;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_CODE], page, end - page);
    xen_modified_memory(start, length);
}

#if !defined(_WIN32)
/* Merge a host-provided little-endian dirty bitmap, one bit per host page,
 * into the per-client dirty bitmaps for the region starting at @start. */
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        long k;
        long nr = BITS_TO_LONGS(pages);

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION][page + k] |= temp;
                ram_list.dirty_memory[DIRTY_MEMORY_VGA][page + k] |= temp;
                ram_list.dirty_memory[DIRTY_MEMORY_CODE][page + k] |= temp;
            }
        }
        xen_modified_memory(start, pages);
    } else {
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                                        TARGET_PAGE_SIZE * hpratio);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */

/* Clear the dirty bits of @client for every page in [start, start + length). */
static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length,
                                                         unsigned client)
{
    unsigned long end, page;

    assert(client < DIRTY_MEMORY_NUM);
    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    bitmap_clear(ram_list.dirty_memory[client], page, end - page);
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client);

#endif
#endif