/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"
#include "exec/ramlist.h"

struct RAMBlock {
    struct rcu_head rcu;
    struct MemoryRegion *mr;
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t used_length;
    ram_addr_t max_length;
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;
    /* Protected by iothread lock.  */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
    QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
    int fd;
    size_t page_size;
    /* dirty bitmap used during migration */
    unsigned long *bmap;
    /* Bitmap of pages that haven't been sent even once; currently only
     * maintained and used in postcopy, where it is used to send the
     * dirtymap at the start of the postcopy phase.
     */
    unsigned long *unsentmap;
    /* bitmap of already received pages in postcopy */
    unsigned long *receivedmap;
};
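
/*
 * Iteration sketch (illustrative, not part of this header): RAMBlocks
 * sit on an RCU-managed list, so readers walk them inside an RCU
 * critical section.  Assumes the RAMBLOCK_FOREACH() helper from
 * "exec/ramlist.h"; total_ram() is a hypothetical example.
 *
 *     static ram_addr_t total_ram(void)
 *     {
 *         RAMBlock *block;
 *         ram_addr_t total = 0;
 *
 *         rcu_read_lock();
 *         RAMBLOCK_FOREACH(block) {
 *             total += block->used_length;
 *         }
 *         rcu_read_unlock();
 *         return total;
 *     }
 */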

static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return b && b->host && offset < b->used_length;
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}
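
/*
 * Example (sketch): bounds-checked read of guest RAM through a block.
 * dump_byte() is a hypothetical helper, not part of this header.
 *
 *     static uint8_t dump_byte(RAMBlock *block, ram_addr_t offset)
 *     {
 *         if (!offset_in_ramblock(block, offset)) {
 *             return 0;
 *         }
 *         return *(uint8_t *)ramblock_ptr(block, offset);
 *     }
 */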

/* Byte offset of host_addr within rb, expressed as a page index
 * suitable for indexing rb->receivedmap.
 */
static inline unsigned long ramblock_recv_bitmap_offset(void *host_addr,
                                                        RAMBlock *rb)
{
    uint64_t host_addr_offset =
            (uint64_t)(uintptr_t)(host_addr - (void *)rb->host);
    return host_addr_offset >> TARGET_PAGE_BITS;
}
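
/*
 * Sketch of the intended postcopy use: translate a faulted host address
 * into a page index and mark that page received.  set_bit_atomic() is
 * the qemu bitops helper; the wrapper name below is hypothetical.
 *
 *     static void mark_page_received(RAMBlock *rb, void *host_addr)
 *     {
 *         set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb),
 *                        rb->receivedmap);
 *     }
 */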

long qemu_getrampagesize(void);
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp);
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                 bool share, int fd,
                                 Error **errp);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share, MemoryRegion *mr,
                         Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);

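/*
 * Allocation sketch (illustrative): backing an existing MemoryRegion
 * "mr" with anonymous RAM and releasing it again.  Real callers
 * normally go through the memory_region_init_ram*() wrappers rather
 * than calling these functions directly.
 *
 *     Error *err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc(64 * 1024 * 1024, false, mr, &err);
 *
 *     if (err) {
 *         error_report_err(err);
 *     } else {
 *         ...
 *         qemu_ram_free(rb);
 *     }
 */
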
#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
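
/*
 * DIRTY_CLIENTS_ALL covers every dirty-memory client (VGA, CODE,
 * MIGRATION); DIRTY_CLIENTS_NOCODE drops DIRTY_MEMORY_CODE, for writes
 * that cannot require TCG translated-code invalidation.
 */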

void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end);

static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = true;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}
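
/*
 * Example (sketch): ask whether anything in a range was written since
 * the bits were last cleared, e.g. for display updates.  The variables
 * surface_addr and surface_len are hypothetical.
 *
 *     if (cpu_physical_memory_get_dirty(surface_addr, surface_len,
 *                                       DIRTY_MEMORY_VGA)) {
 *         ... redraw ...
 *     }
 */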

static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx],
                                                 num, offset);
        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

/* Note: "clean" here means not dirty for *all* clients; as long as any
 * client still needs to see the write, the page must keep taking the
 * slow (dirty-tracking) write path.
 */
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

/* Return the subset of "mask" whose clients still have at least one
 * clean page in the range, i.e. the clients that must be notified
 * before the range is written.
 */
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}
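
/*
 * Sketch: before writing to guest memory, narrow a dirty-log mask down
 * to the clients that still have clean pages in the range and so must
 * be told about the write (the mask would typically come from
 * memory_region_get_dirty_log_mask()):
 *
 *     mask = cpu_physical_memory_range_includes_clean(addr, len, mask);
 *     if (mask) {
 *         ... take the slow path that records the dirtying ...
 *     }
 */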

static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);

    rcu_read_unlock();
}

static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
    }

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                              offset, next - page);
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    xen_hvm_modified_memory(start, length);
}
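
/*
 * Example (sketch): after device DMA into guest RAM, mark the range
 * dirty for all interested clients:
 *
 *     cpu_physical_memory_set_dirty_range(ram_addr, size,
 *                                         DIRTY_CLIENTS_NOCODE);
 *
 * Pass DIRTY_CLIENTS_ALL instead when the write could overlap
 * translated code that TCG must notice.
 */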

#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
        }

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
                if (tcg_enabled()) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();

        xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */
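
/*
 * Sketch of the intended caller: syncing a KVM dirty log.  "bitmap" is
 * the little-endian page bitmap returned by KVM_GET_DIRTY_LOG for a
 * memory slot starting at start_addr and covering npages pages (names
 * illustrative):
 *
 *     cpu_physical_memory_set_dirty_lebitmap(bitmap, start_addr, npages);
 */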

bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (ram_addr_t start, ram_addr_t length, unsigned client);

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length);
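
/*
 * Snapshot usage sketch, the pattern display code relies on: grab and
 * clear the dirty state once, then query the stable snapshot piecewise
 * without racing against new writes.  Free the snapshot with g_free().
 *
 *     DirtyBitmapSnapshot *snap;
 *
 *     snap = cpu_physical_memory_snapshot_and_clear_dirty(base, size,
 *                                                         DIRTY_MEMORY_VGA);
 *     for (... each scanline at addr/len ...) {
 *         if (cpu_physical_memory_snapshot_get_dirty(snap, addr, len)) {
 *             ... redraw that line ...
 *         }
 *     }
 *     g_free(snap);
 */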

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}


static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                               ram_addr_t start,
                                               ram_addr_t length,
                                               uint64_t *real_dirty_pages)
{
    ram_addr_t addr;
    unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;
    unsigned long *dest = rb->bmap;

    /* start address and length are aligned at the start of a word? */
    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
         (start + rb->offset) &&
        !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

        rcu_read_lock();

        src = atomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = atomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                *real_dirty_pages += ctpopl(bits);
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();
    } else {
        ram_addr_t offset = rb->offset;

        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr + offset,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                *real_dirty_pages += 1;
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}
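
/*
 * Migration-side sketch: fold the MIGRATION client's bits for a whole
 * block into rb->bmap.  The real caller (migration/ram.c) does this
 * under its bitmap mutex for every block; the names below are
 * illustrative.
 *
 *     uint64_t real_dirty = 0;
 *
 *     migration_dirty_pages +=
 *         cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length,
 *                                               &real_dirty);
 */
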
#endif /* !CONFIG_USER_ONLY */
#endif /* RAM_ADDR_H */