/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"
#include "exec/ramlist.h"

struct RAMBlock {
    struct rcu_head rcu;
    struct MemoryRegion *mr;
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t used_length;
    ram_addr_t max_length;
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;
    /* Protected by iothread lock.  */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
    QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
    int fd;
    size_t page_size;
};

static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return (b && b->host && offset < b->used_length) ? true : false;
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}

long qemu_getrampagesize(void);
ram_addr_t last_ram_offset(void);
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);

#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = true;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

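/*
 * Illustrative sketch only (not a declaration of this header): each dirty
 * client's bitmap is split into blocks of DIRTY_MEMORY_BLOCK_SIZE pages,
 * so a single-page lookup reduces to a block index plus a bit offset,
 * roughly:
 *
 *     unsigned long page   = addr >> TARGET_PAGE_BITS;
 *     unsigned long idx    = page / DIRTY_MEMORY_BLOCK_SIZE;
 *     unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
 *     bool dirty = test_bit(offset, blocks->blocks[idx]);
 *
 * The helpers above and below additionally take the RCU read lock around
 * the atomic_rcu_read() of ram_list.dirty_memory[] so that the blocks
 * array cannot be freed while it is being scanned.
 */
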
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx],
                                                 num, offset);
        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}

static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);

    rcu_read_unlock();
}

static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
    }

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                              offset, next - page);
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    xen_hvm_modified_memory(start, length);
}

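/*
 * Usage sketch (illustrative only, not new API): code that has written
 * guest RAM behind the back of the memory core would typically flag the
 * written range for every dirty client, or exclude the CODE client when
 * TCG self-modifying-code tracking is not needed:
 *
 *     cpu_physical_memory_set_dirty_range(addr, len, DIRTY_CLIENTS_ALL);
 *     cpu_physical_memory_set_dirty_range(addr, len, DIRTY_CLIENTS_NOCODE);
 *
 * Both mask constants are defined earlier in this header.
 */
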
#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
        }

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
                if (tcg_enabled()) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();

        xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */

bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}

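/*
 * Usage sketch (illustrative only): a consumer such as display emulation
 * can poll a region and reset its own client's bookkeeping in one step:
 *
 *     if (cpu_physical_memory_test_and_clear_dirty(start, length,
 *                                                  DIRTY_MEMORY_VGA)) {
 *         ... the range was written since the last call, refresh it ...
 *     }
 *
 * cpu_physical_memory_clear_dirty_range() above simply makes the same
 * call for all three clients and ignores the result.
 */
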
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
                                               ram_addr_t start,
                                               ram_addr_t length,
                                               int64_t *real_dirty_pages)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (page * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((page * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        src = atomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = atomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                *real_dirty_pages += ctpopl(bits);
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                *real_dirty_pages += 1;
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}

void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
#endif
#endif