/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"
#include "exec/ramlist.h"
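/*
 * A RAMBlock describes one contiguous block of guest RAM: the host
 * memory that backs it, its position and size in the ram_addr_t
 * address space, and the dirty-tracking state used by migration.
 */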
struct RAMBlock {
    struct rcu_head rcu;
    struct MemoryRegion *mr;
    /* Pointer to the host memory backing this block */
    uint8_t *host;
    /* Offset of this block within the ram_addr_t address space */
    ram_addr_t offset;
    /* Currently used length; max_length is the upper bound for resizeable blocks */
    ram_addr_t used_length;
    ram_addr_t max_length;
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;
    /* Protected by iothread lock.  */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
    QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
    /* File descriptor of the backing file, or -1 if not file-backed */
    int fd;
    /* Page size of the backing memory */
    size_t page_size;
    /* Dirty bitmap used during migration */
    unsigned long *bmap;
    /* Bitmap of pages that have not been sent even once.  Currently
     * only maintained and used by postcopy, which uses it to send the
     * dirty bitmap at the start of the postcopy phase.
     */
    unsigned long *unsentmap;
};
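/* Return true if offset falls within the used portion of a RAMBlock
 * that has host memory mapped.
 */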
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return (b && b->host && offset < b->used_length) ? true : false;
}
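/* Return a host pointer to the byte at offset within block; the offset
 * must lie inside the block's used length.
 */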
static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}

long qemu_getrampagesize(void);
unsigned long last_ram_page(void);
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp);
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                 bool share, int fd,
                                 Error **errp);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);

#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
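/* Return true if any page in [start, start + length) is dirty for the
 * given client.  The dirty bitmap is walked one DirtyMemoryBlocks block
 * at a time under the RCU read lock.
 */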
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = true;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}
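/* Return true only if every page in [start, start + length) is dirty
 * for the given client.
 */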
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}
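/* A page is reported clean unless it is dirty for all of the VGA, CODE
 * and MIGRATION clients.
 */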
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}
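/* Return the subset of mask for which at least one page in
 * [start, start + length) is still clean.
 */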
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}
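/* Mark the page containing addr dirty for the given client. */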
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);

    rcu_read_unlock();
}
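/* Mark every page in [start, start + length) dirty for each client set
 * in mask, then notify Xen of the modified range.
 */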
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
    }

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                              offset, next - page);
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    xen_hvm_modified_memory(start, length);
}

#if !defined(_WIN32)
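/* OR a little-endian dirty bitmap (for example, one retrieved from the
 * KVM dirty log) covering 'pages' target pages starting at 'start' into
 * the global dirty bitmaps.  A word-at-a-time fast path is used when the
 * start address is aligned to a bitmap word and the host page size
 * matches the target page size; otherwise each dirty page is handled via
 * cpu_physical_memory_set_dirty_range().
 */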
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
    /* Is the start address aligned to a bitmap word, and is hpratio 1? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
        }

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
                if (tcg_enabled()) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();

        xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * Walking the bitmap is faster than walking memory address by
         * address, especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */

bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (ram_addr_t start, ram_addr_t length, unsigned client);

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}
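/* Transfer dirty bits for [start, start + length) of rb from the global
 * DIRTY_MEMORY_MIGRATION bitmap into the block's own bmap, clearing them
 * in the global bitmap.  The total number of pages found dirty is added
 * to *real_dirty_pages; the return value counts only the pages that were
 * not already set in bmap.
 */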
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                               ram_addr_t start,
                                               ram_addr_t length,
                                               uint64_t *real_dirty_pages)
{
    ram_addr_t addr;
    unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;
    unsigned long *dest = rb->bmap;
    /* Is the start address aligned to a bitmap word? */
    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
         (start + rb->offset)) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

        rcu_read_lock();

        src = atomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = atomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                *real_dirty_pages += ctpopl(bits);
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();
    } else {
        ram_addr_t offset = rb->offset;

        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr + offset,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                *real_dirty_pages += 1;
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}
#endif /* !CONFIG_USER_ONLY */
#endif /* RAM_ADDR_H */