/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it
 * elsewhere.  The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"

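/*
 * A RAMBlock describes one contiguous chunk of guest RAM.  Informally:
 *   host        - host virtual address the memory is mapped at
 *   offset      - base of the block within the global ram_addr_t space
 *   used_length - bytes currently exposed to the guest
 *   max_length  - bytes reserved for a resizeable block (>= used_length)
 *   resized     - callback run when a resizeable block changes size
 *   fd          - backing file descriptor (-1 for anonymous memory)
 */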
struct RAMBlock {
    struct rcu_head rcu;
    struct MemoryRegion *mr;
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t used_length;
    ram_addr_t max_length;
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;
    /* Protected by iothread lock.  */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
    int fd;
};

static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return b && b->host && offset < b->used_length;
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}
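
/*
 * ramblock_ptr() asserts that the offset is valid, so untrusted offsets
 * should be checked with offset_in_ramblock() first.  A sketch of a
 * hypothetical caller:
 *
 *     if (offset_in_ramblock(block, offset)) {
 *         void *p = ramblock_ptr(block, offset);
 *         ... access the page through p ...
 *     }
 */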

typedef struct RAMList {
    QemuMutex mutex;
    /* Protected by the iothread lock.  */
    unsigned long *dirty_memory[DIRTY_MEMORY_NUM];
    RAMBlock *mru_block;
    /* RCU-enabled, writes protected by the ramlist lock. */
    QLIST_HEAD(, RAMBlock) blocks;
    uint32_t version;
} RAMList;
extern RAMList ram_list;

ram_addr_t last_ram_offset(void);
void qemu_mutex_lock_ramlist(void);
void qemu_mutex_unlock_ramlist(void);

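/*
 * The qemu_ram_alloc* functions below create a new RAMBlock for @mr and
 * return its base offset in the ram_addr_t space, setting @errp on failure.
 * qemu_ram_free() releases a block identified by that same offset.
 */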
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp);
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp);
int qemu_get_ram_fd(ram_addr_t addr);
void qemu_set_ram_fd(ram_addr_t addr, int fd);
void *qemu_get_ram_block_host_ptr(ram_addr_t addr);
void qemu_ram_free(ram_addr_t addr);

int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp);

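/*
 * Each dirty memory client (DIRTY_MEMORY_VGA, DIRTY_MEMORY_CODE,
 * DIRTY_MEMORY_MIGRATION) has its own dirty bitmap; the masks below select
 * either every client, or every client except the TCG code bitmap.
 */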
#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

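/*
 * Returns true if any page in [start, start + length) is dirty for @client.
 */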
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    unsigned long end, page, next;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    next = find_next_bit(ram_list.dirty_memory[client], end, page);

    return next < end;
}

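/*
 * Returns true only if every page in [start, start + length) is dirty
 * for @client.
 */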
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    unsigned long end, page, next;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    next = find_next_zero_bit(ram_list.dirty_memory[client], end, page);

    return next >= end;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

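/*
 * A page counts as "clean" here unless it is dirty for all three clients
 * at once, i.e. this returns false only when the VGA, CODE and MIGRATION
 * bitmaps all have the bit set for @addr.
 */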
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

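/*
 * Returns the subset of @mask whose clients still have at least one clean
 * page in [start, start + length); a zero result means the range is fully
 * dirty for every requested client.
 */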
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}

static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    assert(client < DIRTY_MEMORY_NUM);
    set_bit_atomic(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
}

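/*
 * Marks every page in [start, start + length) dirty for the clients in
 * @mask, using atomic bitmap updates, and forwards the range to Xen.
 * A caller reacting to a guest store might look like this (illustrative
 * sketch only):
 *
 *     cpu_physical_memory_set_dirty_range(ram_addr, size,
 *                                         DIRTY_CLIENTS_NOCODE);
 */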
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    unsigned long end, page;
    unsigned long **d = ram_list.dirty_memory;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
        bitmap_set_atomic(d[DIRTY_MEMORY_MIGRATION], page, end - page);
    }
    if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
        bitmap_set_atomic(d[DIRTY_MEMORY_VGA], page, end - page);
    }
    if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
        bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
    }
    xen_modified_memory(start, length);
}

#if !defined(_WIN32)
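/*
 * Imports a little-endian dirty bitmap covering @pages pages starting at
 * @start (the layout produced by the KVM dirty log) into the global dirty
 * bitmaps.  The word-aligned case is handled with whole-word atomic ORs;
 * otherwise each set bit is expanded page by page, scaled by the
 * host/target page size ratio.
 */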
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        long k;
        long nr = BITS_TO_LONGS(pages);

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);
                unsigned long **d = ram_list.dirty_memory;

                atomic_or(&d[DIRTY_MEMORY_MIGRATION][page + k], temp);
                atomic_or(&d[DIRTY_MEMORY_VGA][page + k], temp);
                if (tcg_enabled()) {
                    atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
                }
            }
        }
        xen_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */

bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}


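/*
 * Transfers the MIGRATION dirty bits for [start, start + length) into
 * @dest, clearing them in the global bitmap, and returns the number of
 * pages that were newly marked dirty in @dest.
 */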
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
                                               ram_addr_t start,
                                               ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long bits = atomic_xchg(&src[k], 0);
                unsigned long new_dirty;
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }
        }
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}

void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
#endif /* !CONFIG_USER_ONLY */
#endif /* RAM_ADDR_H */