/* xref: /openbmc/qemu/include/exec/ram_addr.h (revision c964b660) */
/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"

struct RAMBlock {
    struct rcu_head rcu;
    struct MemoryRegion *mr;
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t used_length;
    ram_addr_t max_length;
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;
    /* Protected by iothread lock.  */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
    int fd;
};

static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return b && b->host && offset < b->used_length;
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}
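
/*
 * Usage sketch (illustrative only; "block", "off" and the function name
 * are hypothetical caller-side choices): translate a guest offset into a
 * host pointer and touch guest memory through it:
 *
 *   void write_guest_byte(RAMBlock *block, ram_addr_t off, uint8_t val)
 *   {
 *       if (offset_in_ramblock(block, off)) {
 *           uint8_t *p = ramblock_ptr(block, off);
 *           *p = val;
 *       }
 *   }
 */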

/* The dirty memory bitmap is split into fixed-size blocks to allow growth
 * under RCU.  The bitmap for a block can be accessed as follows:
 *
 *   rcu_read_lock();
 *
 *   DirtyMemoryBlocks *blocks =
 *       atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
 *
 *   ram_addr_t idx = (addr >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
 *   unsigned long *block = blocks->blocks[idx];
 *   ...access block bitmap...
 *
 *   rcu_read_unlock();
 *
 * Remember to check for the end of the block when accessing a range of
 * addresses.  Move on to the next block if you reach the end.
 *
 * Organization into blocks allows dirty memory to grow (but not shrink) under
 * RCU.  When adding new RAMBlocks requires the dirty memory to grow, a new
 * DirtyMemoryBlocks array is allocated with pointers to existing blocks kept
 * the same.  Other threads can safely access existing blocks while dirty
 * memory is being grown.  When no threads are using the old DirtyMemoryBlocks
 * anymore it is freed by RCU (but the underlying blocks stay because they are
 * pointed to from the new DirtyMemoryBlocks).
 */
#define DIRTY_MEMORY_BLOCK_SIZE ((ram_addr_t)256 * 1024 * 8)
typedef struct {
    struct rcu_head rcu;
    unsigned long *blocks[];
} DirtyMemoryBlocks;
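
/*
 * A minimal sketch of the access pattern described above, testing one
 * page's dirty bit for one client.  The real helpers below add range
 * handling; this is illustration only:
 *
 *   static bool page_dirty_example(ram_addr_t addr, unsigned client)
 *   {
 *       ram_addr_t page = addr >> TARGET_PAGE_BITS;
 *       unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
 *       unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
 *       bool dirty;
 *
 *       rcu_read_lock();
 *       dirty = test_bit(offset,
 *                        atomic_rcu_read(&ram_list.dirty_memory[client])
 *                            ->blocks[idx]);
 *       rcu_read_unlock();
 *       return dirty;
 *   }
 */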

typedef struct RAMList {
    QemuMutex mutex;
    RAMBlock *mru_block;
    /* RCU-enabled, writes protected by the ramlist lock. */
    QLIST_HEAD(, RAMBlock) blocks;
    DirtyMemoryBlocks *dirty_memory[DIRTY_MEMORY_NUM];
    uint32_t version;
} RAMList;
extern RAMList ram_list;

ram_addr_t last_ram_offset(void);
void qemu_mutex_lock_ramlist(void);
void qemu_mutex_unlock_ramlist(void);

ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp);
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp);
int qemu_get_ram_fd(ram_addr_t addr);
void qemu_set_ram_fd(ram_addr_t addr, int fd);
void *qemu_get_ram_block_host_ptr(ram_addr_t addr);
void qemu_ram_free(ram_addr_t addr);

int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp);

#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
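
/*
 * Illustrative use of the client masks (the call below is hypothetical
 * context): a writer that must not touch the CODE client, e.g. because
 * TB invalidation is handled separately, passes DIRTY_CLIENTS_NOCODE:
 *
 *   cpu_physical_memory_set_dirty_range(addr, size, DIRTY_CLIENTS_NOCODE);
 */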

static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = true;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx],
                                                 num, offset);
        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}
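
/*
 * Usage sketch (names hypothetical): for display emulation, get_dirty()
 * answers "does anything in this range need redrawing?", while
 * all_dirty() answers "is the whole range already marked?":
 *
 *   bool redraw = cpu_physical_memory_get_dirty(fb_addr, line_len,
 *                                               DIRTY_MEMORY_VGA);
 *   bool all    = cpu_physical_memory_all_dirty(fb_addr, line_len,
 *                                               DIRTY_MEMORY_VGA);
 */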

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);

    /* The page counts as clean unless it is dirty for every client.  */
    return !(vga && code && migration);
}

static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}
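
/*
 * Usage sketch (hypothetical caller): before writing to guest memory,
 * narrow the dirty-log mask to the clients that still see clean pages,
 * then dirty only those:
 *
 *   uint8_t dirty_log_mask = DIRTY_CLIENTS_ALL;
 *   dirty_log_mask = cpu_physical_memory_range_includes_clean(addr, len,
 *                                                             dirty_log_mask);
 *   if (dirty_log_mask) {
 *       cpu_physical_memory_set_dirty_range(addr, len, dirty_log_mask);
 *   }
 */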

static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);

    rcu_read_unlock();
}

static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
    }

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                              offset, next - page);
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    xen_modified_memory(start, length);
}
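
/*
 * Example (illustrative): a single page dirtied for one client vs. a
 * byte range dirtied for every client at once:
 *
 *   cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
 *   cpu_physical_memory_set_dirty_range(addr, TARGET_PAGE_SIZE,
 *                                       DIRTY_CLIENTS_ALL);
 */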

#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
        }

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
                if (tcg_enabled()) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();

        xen_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL
                                        : DIRTY_CLIENTS_NOCODE;
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */
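
/*
 * Usage sketch (names hypothetical; assumes a dirty-log buffer such as
 * one filled in by KVM_GET_DIRTY_LOG): fold a little-endian bitmap
 * covering "npages" pages starting at guest address "start" into the
 * global dirty bitmaps:
 *
 *   cpu_physical_memory_set_dirty_lebitmap((unsigned long *)log_buf,
 *                                          start, npages);
 */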

bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length,
                                             DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}


static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
                                               ram_addr_t start,
                                               ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (page * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((page * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        src = atomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = atomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}
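
/*
 * Usage sketch (names hypothetical): migration keeps its own bitmap and
 * periodically pulls the MIGRATION client's bits into it, counting newly
 * dirtied pages for rate limiting:
 *
 *   uint64_t newly_dirty =
 *       cpu_physical_memory_sync_dirty_bitmap(migration_bitmap, 0,
 *                                             last_ram_offset());
 *   migration_dirty_pages += newly_dirty;
 */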

void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
#endif /* !CONFIG_USER_ONLY */
#endif /* RAM_ADDR_H */