/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"
#include "exec/ramlist.h"

struct RAMBlock {
    struct rcu_head rcu;
    struct MemoryRegion *mr;
    uint8_t *host;
    uint8_t *colo_cache; /* For COLO, the VM's RAM cache */
    ram_addr_t offset;
    ram_addr_t used_length;
    ram_addr_t max_length;
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;
    /* Protected by iothread lock.  */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
    QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
    int fd;
    size_t page_size;
    /* dirty bitmap used during migration */
    unsigned long *bmap;
    /* bitmap of pages that have not been sent even once;
     * currently only maintained and used in postcopy, where it is
     * used to send the dirty bitmap at the start of the postcopy
     * phase
     */
    unsigned long *unsentmap;
    /* bitmap of already received pages in postcopy */
    unsigned long *receivedmap;
};

static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return b && b->host && offset < b->used_length;
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}
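
/*
 * Illustrative sketch only (hypothetical names, not part of this header):
 * callers are expected to validate an offset against the block before
 * touching the host mapping, e.g.
 *
 *     if (offset_in_ramblock(block, off)) {
 *         uint8_t *p = ramblock_ptr(block, off);
 *         memcpy(p, buf, len);
 *     }
 */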

/* Map a host address inside @rb to a page index in its receive bitmap. */
static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
                                                            RAMBlock *rb)
{
    uint64_t host_addr_offset =
            (uint64_t)(uintptr_t)(host_addr - (void *)rb->host);
    return host_addr_offset >> TARGET_PAGE_BITS;
}

/* Return whether @rb is backed by persistent memory (see RAM_PMEM below). */
bool ramblock_is_pmem(RAMBlock *rb);

long qemu_minrampagesize(void);
long qemu_maxrampagesize(void);

/**
 * qemu_ram_alloc_from_file,
 * qemu_ram_alloc_from_fd:  Allocate a ram block from the specified backing
 *                          file or device
 *
 * Parameters:
 *  @size: the size in bytes of the ram block
 *  @mr: the memory region where the ram block is
 *  @ram_flags: specify the properties of the ram block, which can be one
 *              of the following values, or a bitwise OR of them:
 *              - RAM_SHARED: mmap the backing file or device with MAP_SHARED
 *              - RAM_PMEM: the backend @mem_path or @fd is persistent memory
 *              Other bits are ignored.
 *  @mem_path or @fd: specify the backing file or device
 *  @errp: pointer to Error*, to store an error if it happens
 *
 * Return:
 *  On success, return a pointer to the ram block.
 *  On failure, return NULL.
 */
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   uint32_t ram_flags, const char *mem_path,
                                   Error **errp);
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                 uint32_t ram_flags, int fd,
                                 Error **errp);
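
/*
 * Illustrative sketch only (hypothetical names and path): a caller wanting a
 * shared, file-backed block would typically do something like
 *
 *     Error *local_err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc_from_file(size, mr, RAM_SHARED,
 *                                             "/path/to/backing-file",
 *                                             &local_err);
 *     if (!rb) {
 *         error_report_err(local_err);
 *     }
 */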

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share, MemoryRegion *mr,
                         Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);

/* Masks of dirty memory clients: all, and all except DIRTY_MEMORY_CODE. */
#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end);

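/*
 * Return true if any page in [start, start + length) is dirty for @client
 * (one of the DIRTY_MEMORY_* clients).
 */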
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = true;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

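/*
 * Return true only if every page in [start, start + length) is dirty for
 * @client.
 */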
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

/*
 * A page is reported clean as soon as at least one client still sees it
 * clean, i.e. it is not dirty for all clients at once.
 */
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

/*
 * Return the subset of @mask whose clients do not yet see the whole
 * [start, start + length) range as dirty.
 */
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}
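
/*
 * Illustrative sketch only (hypothetical names): a typical write path first
 * asks which clients still need the range dirtied and then marks only those,
 * e.g.
 *
 *     uint8_t dirty_mask =
 *         cpu_physical_memory_range_includes_clean(addr, len,
 *                                                  DIRTY_CLIENTS_NOCODE);
 *     if (dirty_mask) {
 *         cpu_physical_memory_set_dirty_range(addr, len, dirty_mask);
 *     }
 */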

/* Mark the single page containing @addr dirty for @client. */
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);

    rcu_read_unlock();
}

/*
 * Mark every page in [start, start + length) dirty for the clients in @mask.
 */
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
    }

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                              offset, next - page);
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    xen_hvm_modified_memory(start, length);
}

#if !defined(_WIN32)
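/*
 * Feed a little-endian dirty bitmap covering @pages target pages starting at
 * @start into the global dirty bitmaps (migration, VGA and, under TCG, code).
 */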
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* Is the start address aligned to the start of a bitmap word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
        }

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
                if (tcg_enabled()) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();

        xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */

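/*
 * Clear the dirty bits for @client in [start, start + length) and return
 * true if any page in the range was dirty.
 */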
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (ram_addr_t start, ram_addr_t length, unsigned client);

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}

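/*
 * Copy the migration dirty bits for [start, start + length) within @rb from
 * the global dirty bitmap into rb->bmap, clearing them in the global bitmap.
 * Returns the number of pages newly marked dirty in rb->bmap; the total
 * number of dirty pages seen is accumulated into *real_dirty_pages.
 *
 * Illustrative sketch only (hypothetical names): migration code would
 * typically sync a whole block at a time, e.g.
 *
 *     uint64_t seen_dirty = 0;
 *     uint64_t newly_dirty =
 *         cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length,
 *                                               &seen_dirty);
 */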
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                               ram_addr_t start,
                                               ram_addr_t length,
                                               uint64_t *real_dirty_pages)
{
    ram_addr_t addr;
    unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;
    unsigned long *dest = rb->bmap;

    /* Are the start address and length aligned to whole bitmap words? */
    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
         (start + rb->offset) &&
        !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

        rcu_read_lock();

        src = atomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = atomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                *real_dirty_pages += ctpopl(bits);
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();
    } else {
        ram_addr_t offset = rb->offset;

        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr + offset,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                *real_dirty_pages += 1;
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}
#endif /* !CONFIG_USER_ONLY */
#endif /* RAM_ADDR_H */