xref: /openbmc/qemu/include/exec/ram_addr.h (revision ac1d8878)
/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"

struct RAMBlock {
    struct rcu_head rcu;
    /* MemoryRegion that this block backs */
    struct MemoryRegion *mr;
    /* Host virtual address where the block is mapped */
    uint8_t *host;
    /* Offset of the block in the ram_addr_t address space */
    ram_addr_t offset;
    /* Currently used size; may grow up to max_length for resizable blocks */
    ram_addr_t used_length;
    ram_addr_t max_length;
    /* Notifier called after qemu_ram_resize() changes used_length */
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;
    /* Protected by iothread lock.  */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
    /* File descriptor backing the block, or -1 if not file-backed */
    int fd;
};

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset < block->used_length);
    assert(block->host);
    return (char *)block->host + offset;
}

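/*
 * Illustrative sketch, not part of the original header: how a caller might
 * use ramblock_ptr() to read a byte of guest RAM.  The helper name
 * example_read_ram_byte is hypothetical.
 */
static inline uint8_t example_read_ram_byte(RAMBlock *block, ram_addr_t offset)
{
    /* ramblock_ptr() asserts that offset lies within used_length and that
     * the block has a host mapping, so the dereference below is safe. */
    uint8_t *p = ramblock_ptr(block, offset);
    return *p;
}
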
typedef struct RAMList {
    QemuMutex mutex;
    /* Protected by the iothread lock.  */
    unsigned long *dirty_memory[DIRTY_MEMORY_NUM];
    RAMBlock *mru_block;
    /* RCU-enabled, writes protected by the ramlist lock. */
    QLIST_HEAD(, RAMBlock) blocks;
    uint32_t version;
} RAMList;
extern RAMList ram_list;

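/*
 * Illustrative sketch, not part of the original header: walking the block
 * list.  Readers traverse under rcu_read_lock(); writers take the ramlist
 * mutex via qemu_mutex_lock_ramlist().  Assumes "qemu/rcu.h" and
 * "qemu/rcu_queue.h" are available; example_total_ram_bytes is hypothetical.
 */
static inline uint64_t example_total_ram_bytes(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        total += block->used_length;
    }
    rcu_read_unlock();
    return total;
}
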
ram_addr_t last_ram_offset(void);
void qemu_mutex_lock_ramlist(void);
void qemu_mutex_unlock_ramlist(void);

ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp);
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp);
int qemu_get_ram_fd(ram_addr_t addr);
void *qemu_get_ram_block_host_ptr(ram_addr_t addr);
void qemu_ram_free(ram_addr_t addr);

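/*
 * Illustrative sketch, not part of the original header: the shape of a
 * "resized" notifier passed to qemu_ram_alloc_resizeable().  The names
 * example_resized_cb and example_alloc_resizeable_ram are hypothetical.
 */
static inline void example_resized_cb(const char *idstr, uint64_t length,
                                      void *host)
{
    /* Invoked after qemu_ram_resize() changes the used length of the block
     * named @idstr; @host is the (unchanged) host mapping. */
}

static inline ram_addr_t example_alloc_resizeable_ram(ram_addr_t initial,
                                                      ram_addr_t max,
                                                      MemoryRegion *mr,
                                                      Error **errp)
{
    return qemu_ram_alloc_resizeable(initial, max, example_resized_cb,
                                     mr, errp);
}
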
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp);

#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

/* Returns true if any page in [start, start + length) is dirty for @client */
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    unsigned long end, page, next;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    next = find_next_bit(ram_list.dirty_memory[client], end, page);

    return next < end;
}

/* Returns true if every page in [start, start + length) is dirty for @client */
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    unsigned long end, page, next;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    next = find_next_zero_bit(ram_list.dirty_memory[client], end, page);

    return next >= end;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

/*
 * A page is "clean" unless it is dirty for every client; as long as at
 * least one client still needs to observe writes, the page must remain
 * write-protected.
 */
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

/*
 * Returns the subset of @mask whose clients still have at least one clean
 * page in [start, start + length).
 */
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}

static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    assert(client < DIRTY_MEMORY_NUM);
    set_bit_atomic(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
}

static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    unsigned long end, page;
    unsigned long **d = ram_list.dirty_memory;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
        bitmap_set_atomic(d[DIRTY_MEMORY_MIGRATION], page, end - page);
    }
    if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
        bitmap_set_atomic(d[DIRTY_MEMORY_VGA], page, end - page);
    }
    if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
        bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
    }
    xen_modified_memory(start, length);
}

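/*
 * Illustrative sketch, not part of the original header: marking part of a
 * framebuffer dirty so that the VGA client will repaint it.  In practice
 * device code usually goes through memory_region_set_dirty(), which ends
 * up here; example_mark_fb_dirty and its parameters are hypothetical.
 */
static inline void example_mark_fb_dirty(ram_addr_t fb_base,
                                         ram_addr_t offset,
                                         ram_addr_t len)
{
    cpu_physical_memory_set_dirty_range(fb_base + offset, len,
                                        1 << DIRTY_MEMORY_VGA);
}
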
#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        long k;
        long nr = BITS_TO_LONGS(pages);

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);
                unsigned long **d = ram_list.dirty_memory;

                atomic_or(&d[DIRTY_MEMORY_MIGRATION][page + k], temp);
                atomic_or(&d[DIRTY_MEMORY_VGA][page + k], temp);
                if (tcg_enabled()) {
                    atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
                }
            }
        }
        xen_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
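
/*
 * Illustrative sketch, not part of the original header: feeding a
 * little-endian dirty bitmap, e.g. one harvested from a hypervisor
 * dirty-log interface, into the global dirty bitmaps above.  The helper
 * name and parameters are hypothetical.
 */
static inline void example_sync_dirty_log(unsigned long *le_bitmap,
                                          ram_addr_t region_start,
                                          ram_addr_t region_size)
{
    ram_addr_t pages = region_size >> TARGET_PAGE_BITS;

    cpu_physical_memory_set_dirty_lebitmap(le_bitmap, region_start, pages);
}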
#endif /* not _WIN32 */

bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}

static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
                                               ram_addr_t start,
                                               ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long bits = atomic_xchg(&src[k], 0);
                unsigned long new_dirty;
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }
        }
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}

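/*
 * Illustrative sketch, not part of the original header: how a migration
 * pass might harvest the MIGRATION dirty bits for one block.  @dest_bitmap
 * is indexed by absolute page number, so it must cover all of guest RAM
 * (last_ram_offset() >> TARGET_PAGE_BITS bits), not just this block.
 * example_migration_pass is a hypothetical name.
 */
static inline uint64_t example_migration_pass(unsigned long *dest_bitmap,
                                              RAMBlock *block)
{
    /* Returns the number of pages that were newly set in dest_bitmap. */
    return cpu_physical_memory_sync_dirty_bitmap(dest_bitmap, block->offset,
                                                 block->used_length);
}
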
void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
#endif /* !CONFIG_USER_ONLY */
#endif /* RAM_ADDR_H */