1 /*
2 * Physical memory management
3 *
4 * Copyright 2011 Red Hat, Inc. and/or its affiliates
5 *
6 * Authors:
7 * Avi Kivity <avi@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 * Contributions after 2012-01-13 are licensed under the terms of the
13 * GNU GPL, version 2 or (at your option) any later version.
14 */
15
16 #include "qemu/osdep.h"
17 #include "qemu/log.h"
18 #include "qapi/error.h"
19 #include "exec/memory.h"
20 #include "qapi/visitor.h"
21 #include "qemu/bitops.h"
22 #include "qemu/error-report.h"
23 #include "qemu/main-loop.h"
24 #include "qemu/qemu-print.h"
25 #include "qom/object.h"
26 #include "trace.h"
27
28 #include "exec/memory-internal.h"
29 #include "exec/ram_addr.h"
30 #include "sysemu/kvm.h"
31 #include "sysemu/runstate.h"
32 #include "sysemu/tcg.h"
33 #include "qemu/accel.h"
34 #include "hw/boards.h"
35 #include "migration/vmstate.h"
36 #include "exec/address-spaces.h"
37
38 //#define DEBUG_UNASSIGNED
39
40 static unsigned memory_region_transaction_depth;
41 static bool memory_region_update_pending;
42 static bool ioeventfd_update_pending;
43 unsigned int global_dirty_tracking;
44
45 static QTAILQ_HEAD(, MemoryListener) memory_listeners
46 = QTAILQ_HEAD_INITIALIZER(memory_listeners);
47
48 static QTAILQ_HEAD(, AddressSpace) address_spaces
49 = QTAILQ_HEAD_INITIALIZER(address_spaces);
50
51 static GHashTable *flat_views;
52
53 typedef struct AddrRange AddrRange;
54
55 /*
56 * Note that signed integers are needed for negative offsetting in aliases
57 * (large MemoryRegion::alias_offset).
58 */
59 struct AddrRange {
60 Int128 start;
61 Int128 size;
62 };
63
64 static AddrRange addrrange_make(Int128 start, Int128 size)
65 {
66 return (AddrRange) { start, size };
67 }
68
69 static bool addrrange_equal(AddrRange r1, AddrRange r2)
70 {
71 return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
72 }
73
74 static Int128 addrrange_end(AddrRange r)
75 {
76 return int128_add(r.start, r.size);
77 }
78
79 static AddrRange addrrange_shift(AddrRange range, Int128 delta)
80 {
81 int128_addto(&range.start, delta);
82 return range;
83 }
84
85 static bool addrrange_contains(AddrRange range, Int128 addr)
86 {
87 return int128_ge(addr, range.start)
88 && int128_lt(addr, addrrange_end(range));
89 }
90
91 static bool addrrange_intersects(AddrRange r1, AddrRange r2)
92 {
93 return addrrange_contains(r1, r2.start)
94 || addrrange_contains(r2, r1.start);
95 }
96
97 static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
98 {
99 Int128 start = int128_max(r1.start, r2.start);
100 Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
101 return addrrange_make(start, int128_sub(end, start));
102 }
103
104 enum ListenerDirection { Forward, Reverse };
105
106 #define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...) \
107 do { \
108 MemoryListener *_listener; \
109 \
110 switch (_direction) { \
111 case Forward: \
112 QTAILQ_FOREACH(_listener, &memory_listeners, link) { \
113 if (_listener->_callback) { \
114 _listener->_callback(_listener, ##_args); \
115 } \
116 } \
117 break; \
118 case Reverse: \
119 QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
120 if (_listener->_callback) { \
121 _listener->_callback(_listener, ##_args); \
122 } \
123 } \
124 break; \
125 default: \
126 abort(); \
127 } \
128 } while (0)
129
130 #define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
131 do { \
132 MemoryListener *_listener; \
133 \
134 switch (_direction) { \
135 case Forward: \
136 QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) { \
137 if (_listener->_callback) { \
138 _listener->_callback(_listener, _section, ##_args); \
139 } \
140 } \
141 break; \
142 case Reverse: \
143 QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
144 if (_listener->_callback) { \
145 _listener->_callback(_listener, _section, ##_args); \
146 } \
147 } \
148 break; \
149 default: \
150 abort(); \
151 } \
152 } while (0)
153
154 /* No need to ref/unref .mr, the FlatRange keeps it alive. */
155 #define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \
156 do { \
157 MemoryRegionSection mrs = section_from_flat_range(fr, \
158 address_space_to_flatview(as)); \
159 MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args); \
160 } while (0)
161
162 struct CoalescedMemoryRange {
163 AddrRange addr;
164 QTAILQ_ENTRY(CoalescedMemoryRange) link;
165 };
166
167 struct MemoryRegionIoeventfd {
168 AddrRange addr;
169 bool match_data;
170 uint64_t data;
171 EventNotifier *e;
172 };
173
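/*
 * Ordering relation for ioeventfds: compare the start address, then the
 * size, then match_data/data, then the notifier.  The sorted-merge walk in
 * address_space_add_del_ioeventfds() relies on both fd arrays being ordered
 * by this relation.
 */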
174 static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
175 MemoryRegionIoeventfd *b)
176 {
177 if (int128_lt(a->addr.start, b->addr.start)) {
178 return true;
179 } else if (int128_gt(a->addr.start, b->addr.start)) {
180 return false;
181 } else if (int128_lt(a->addr.size, b->addr.size)) {
182 return true;
183 } else if (int128_gt(a->addr.size, b->addr.size)) {
184 return false;
185 } else if (a->match_data < b->match_data) {
186 return true;
187 } else if (a->match_data > b->match_data) {
188 return false;
189 } else if (a->match_data) {
190 if (a->data < b->data) {
191 return true;
192 } else if (a->data > b->data) {
193 return false;
194 }
195 }
196 if (a->e < b->e) {
197 return true;
198 } else if (a->e > b->e) {
199 return false;
200 }
201 return false;
202 }
203
204 static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
205 MemoryRegionIoeventfd *b)
206 {
207 if (int128_eq(a->addr.start, b->addr.start) &&
208 (!int128_nz(a->addr.size) || !int128_nz(b->addr.size) ||
209 (int128_eq(a->addr.size, b->addr.size) &&
210 (a->match_data == b->match_data) &&
211 ((a->match_data && (a->data == b->data)) || !a->match_data) &&
212 (a->e == b->e))))
213 return true;
214
215 return false;
216 }
217
218 /* Range of memory in the global map. Addresses are absolute. */
219 struct FlatRange {
220 MemoryRegion *mr;
221 hwaddr offset_in_region;
222 AddrRange addr;
223 uint8_t dirty_log_mask;
224 bool romd_mode;
225 bool readonly;
226 bool nonvolatile;
227 bool unmergeable;
228 };
229
230 #define FOR_EACH_FLAT_RANGE(var, view) \
231 for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
232
233 static inline MemoryRegionSection
234 section_from_flat_range(FlatRange *fr, FlatView *fv)
235 {
236 return (MemoryRegionSection) {
237 .mr = fr->mr,
238 .fv = fv,
239 .offset_within_region = fr->offset_in_region,
240 .size = fr->addr.size,
241 .offset_within_address_space = int128_get64(fr->addr.start),
242 .readonly = fr->readonly,
243 .nonvolatile = fr->nonvolatile,
244 .unmergeable = fr->unmergeable,
245 };
246 }
247
248 static bool flatrange_equal(FlatRange *a, FlatRange *b)
249 {
250 return a->mr == b->mr
251 && addrrange_equal(a->addr, b->addr)
252 && a->offset_in_region == b->offset_in_region
253 && a->romd_mode == b->romd_mode
254 && a->readonly == b->readonly
255 && a->nonvolatile == b->nonvolatile
256 && a->unmergeable == b->unmergeable;
257 }
258
259 static FlatView *flatview_new(MemoryRegion *mr_root)
260 {
261 FlatView *view;
262
263 view = g_new0(FlatView, 1);
264 view->ref = 1;
265 view->root = mr_root;
266 memory_region_ref(mr_root);
267 trace_flatview_new(view, mr_root);
268
269 return view;
270 }
271
272 /* Insert a range into a given position. Caller is responsible for maintaining
273 * sorting order.
274 */
275 static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
276 {
277 if (view->nr == view->nr_allocated) {
278 view->nr_allocated = MAX(2 * view->nr, 10);
279 view->ranges = g_realloc(view->ranges,
280 view->nr_allocated * sizeof(*view->ranges));
281 }
282 memmove(view->ranges + pos + 1, view->ranges + pos,
283 (view->nr - pos) * sizeof(FlatRange));
284 view->ranges[pos] = *range;
285 memory_region_ref(range->mr);
286 ++view->nr;
287 }
288
289 static void flatview_destroy(FlatView *view)
290 {
291 int i;
292
293 trace_flatview_destroy(view, view->root);
294 if (view->dispatch) {
295 address_space_dispatch_free(view->dispatch);
296 }
297 for (i = 0; i < view->nr; i++) {
298 memory_region_unref(view->ranges[i].mr);
299 }
300 g_free(view->ranges);
301 memory_region_unref(view->root);
302 g_free(view);
303 }
304
305 static bool flatview_ref(FlatView *view)
306 {
307 return qatomic_fetch_inc_nonzero(&view->ref) > 0;
308 }
309
310 void flatview_unref(FlatView *view)
311 {
312 if (qatomic_fetch_dec(&view->ref) == 1) {
313 trace_flatview_destroy_rcu(view, view->root);
314 assert(view->root);
315 call_rcu(view, flatview_destroy, rcu);
316 }
317 }
318
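/*
 * Two FlatRanges can be merged when they are contiguous both in the address
 * space and within the underlying MemoryRegion, and all remaining attributes
 * (dirty logging, ROMD mode, readonly, nonvolatile) match.
 */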
319 static bool can_merge(FlatRange *r1, FlatRange *r2)
320 {
321 return int128_eq(addrrange_end(r1->addr), r2->addr.start)
322 && r1->mr == r2->mr
323 && int128_eq(int128_add(int128_make64(r1->offset_in_region),
324 r1->addr.size),
325 int128_make64(r2->offset_in_region))
326 && r1->dirty_log_mask == r2->dirty_log_mask
327 && r1->romd_mode == r2->romd_mode
328 && r1->readonly == r2->readonly
329 && r1->nonvolatile == r2->nonvolatile
330 && !r1->unmergeable && !r2->unmergeable;
331 }
332
333 /* Attempt to simplify a view by merging adjacent ranges */
334 static void flatview_simplify(FlatView *view)
335 {
336 unsigned i, j, k;
337
338 i = 0;
339 while (i < view->nr) {
340 j = i + 1;
341 while (j < view->nr
342 && can_merge(&view->ranges[j-1], &view->ranges[j])) {
343 int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
344 ++j;
345 }
346 ++i;
347 for (k = i; k < j; k++) {
348 memory_region_unref(view->ranges[k].mr);
349 }
350 memmove(&view->ranges[i], &view->ranges[j],
351 (view->nr - j) * sizeof(view->ranges[j]));
352 view->nr -= j - i;
353 }
354 }
355
356 static bool memory_region_big_endian(MemoryRegion *mr)
357 {
358 #if TARGET_BIG_ENDIAN
359 return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
360 #else
361 return mr->ops->endianness == DEVICE_BIG_ENDIAN;
362 #endif
363 }
364
365 static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
366 {
367 if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
368 switch (op & MO_SIZE) {
369 case MO_8:
370 break;
371 case MO_16:
372 *data = bswap16(*data);
373 break;
374 case MO_32:
375 *data = bswap32(*data);
376 break;
377 case MO_64:
378 *data = bswap64(*data);
379 break;
380 default:
381 g_assert_not_reached();
382 }
383 }
384 }
385
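/* Fold the device-sized partial result @tmp into *@value at bit offset
 * @shift (negative shifts move it towards bit 0), applying @mask first.
 */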
386 static inline void memory_region_shift_read_access(uint64_t *value,
387 signed shift,
388 uint64_t mask,
389 uint64_t tmp)
390 {
391 if (shift >= 0) {
392 *value |= (tmp & mask) << shift;
393 } else {
394 *value |= (tmp & mask) >> -shift;
395 }
396 }
397
398 static inline uint64_t memory_region_shift_write_access(uint64_t *value,
399 signed shift,
400 uint64_t mask)
401 {
402 uint64_t tmp;
403
404 if (shift >= 0) {
405 tmp = (*value >> shift) & mask;
406 } else {
407 tmp = (*value << -shift) & mask;
408 }
409
410 return tmp;
411 }
412
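/* Walk up the container chain to turn an offset within @mr into an absolute
 * address; only used by the MMIO tracepoints below.
 */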
413 static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
414 {
415 MemoryRegion *root;
416 hwaddr abs_addr = offset;
417
418 abs_addr += mr->addr;
419 for (root = mr; root->container; ) {
420 root = root->container;
421 abs_addr += root->addr;
422 }
423
424 return abs_addr;
425 }
426
427 static int get_cpu_index(void)
428 {
429 if (current_cpu) {
430 return current_cpu->cpu_index;
431 }
432 return -1;
433 }
434
435 static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
436 hwaddr addr,
437 uint64_t *value,
438 unsigned size,
439 signed shift,
440 uint64_t mask,
441 MemTxAttrs attrs)
442 {
443 uint64_t tmp;
444
445 tmp = mr->ops->read(mr->opaque, addr, size);
446 if (mr->subpage) {
447 trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
448 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
449 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
450 trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
451 memory_region_name(mr));
452 }
453 memory_region_shift_read_access(value, shift, mask, tmp);
454 return MEMTX_OK;
455 }
456
457 static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
458 hwaddr addr,
459 uint64_t *value,
460 unsigned size,
461 signed shift,
462 uint64_t mask,
463 MemTxAttrs attrs)
464 {
465 uint64_t tmp = 0;
466 MemTxResult r;
467
468 r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
469 if (mr->subpage) {
470 trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
471 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
472 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
473 trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
474 memory_region_name(mr));
475 }
476 memory_region_shift_read_access(value, shift, mask, tmp);
477 return r;
478 }
479
480 static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
481 hwaddr addr,
482 uint64_t *value,
483 unsigned size,
484 signed shift,
485 uint64_t mask,
486 MemTxAttrs attrs)
487 {
488 uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
489
490 if (mr->subpage) {
491 trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
492 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
493 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
494 trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
495 memory_region_name(mr));
496 }
497 mr->ops->write(mr->opaque, addr, tmp, size);
498 return MEMTX_OK;
499 }
500
501 static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
502 hwaddr addr,
503 uint64_t *value,
504 unsigned size,
505 signed shift,
506 uint64_t mask,
507 MemTxAttrs attrs)
508 {
509 uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
510
511 if (mr->subpage) {
512 trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
513 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
514 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
515 trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
516 memory_region_name(mr));
517 }
518 return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
519 }
520
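/*
 * Split an access of @size bytes at @addr into pieces the device can handle
 * (clamped to [access_size_min, access_size_max]), call @access_fn on each
 * piece and OR the MemTxResults together.  The optional per-device
 * re-entrancy guard rejects MMIO that re-enters the same device.
 */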
521 static MemTxResult access_with_adjusted_size_aligned(hwaddr addr,
522 uint64_t *value,
523 unsigned size,
524 unsigned access_size_min,
525 unsigned access_size_max,
526 MemTxResult (*access_fn)
527 (MemoryRegion *mr,
528 hwaddr addr,
529 uint64_t *value,
530 unsigned size,
531 signed shift,
532 uint64_t mask,
533 MemTxAttrs attrs),
534 MemoryRegion *mr,
535 MemTxAttrs attrs)
536 {
537 uint64_t access_mask;
538 unsigned access_size;
539 unsigned i;
540 MemTxResult r = MEMTX_OK;
541 bool reentrancy_guard_applied = false;
542
543 if (!access_size_min) {
544 access_size_min = 1;
545 }
546 if (!access_size_max) {
547 access_size_max = 4;
548 }
549
550 /* Do not allow more than one simultaneous access to a device's IO Regions */
551 if (mr->dev && !mr->disable_reentrancy_guard &&
552 !mr->ram_device && !mr->ram && !mr->rom_device && !mr->readonly) {
553 if (mr->dev->mem_reentrancy_guard.engaged_in_io) {
554 warn_report_once("Blocked re-entrant IO on MemoryRegion: "
555 "%s at addr: 0x%" HWADDR_PRIX,
556 memory_region_name(mr), addr);
557 return MEMTX_ACCESS_ERROR;
558 }
559 mr->dev->mem_reentrancy_guard.engaged_in_io = true;
560 reentrancy_guard_applied = true;
561 }
562
563 access_size = MAX(MIN(size, access_size_max), access_size_min);
564 access_mask = MAKE_64BIT_MASK(0, access_size * 8);
565 if (memory_region_big_endian(mr)) {
566 for (i = 0; i < size; i += access_size) {
567 r |= access_fn(mr, addr + i, value, access_size,
568 (size - access_size - i) * 8, access_mask, attrs);
569 }
570 } else {
571 for (i = 0; i < size; i += access_size) {
572 r |= access_fn(mr, addr + i, value, access_size, i * 8,
573 access_mask, attrs);
574 }
575 }
576 if (mr->dev && reentrancy_guard_applied) {
577 mr->dev->mem_reentrancy_guard.engaged_in_io = false;
578 }
579 return r;
580 }
581
582 /* Assume power-of-two size */
583 #define align_down(addr, size) ((addr) & ~((size) - 1))
584 #define align_up(addr, size) \
585 ({ typeof(size) __size = size; \
586 align_down((addr) + (__size) - 1, (__size)); })
587
588 static MemTxResult access_with_adjusted_size_unaligned(hwaddr addr,
589 uint64_t *value,
590 unsigned size,
591 unsigned access_size_min,
592 unsigned access_size_max,
593 bool unaligned,
594 MemTxResult (*access)(MemoryRegion *mr,
595 hwaddr addr,
596 uint64_t *value,
597 unsigned size,
598 signed shift,
599 uint64_t mask,
600 MemTxAttrs attrs),
601 MemoryRegion *mr,
602 MemTxAttrs attrs)
603 {
604 uint64_t access_value = 0;
605 MemTxResult r = MEMTX_OK;
606 hwaddr access_addr[2];
607 uint64_t access_mask;
608 unsigned access_size;
609
610 if (unlikely(!access_size_min)) {
611 access_size_min = 1;
612 }
613 if (unlikely(!access_size_max)) {
614 access_size_max = 4;
615 }
616
617 access_size = MAX(MIN(size, access_size_max), access_size_min);
618 access_addr[0] = align_down(addr, access_size);
619 access_addr[1] = align_up(addr + size, access_size);
620
621 if (memory_region_big_endian(mr)) {
622 hwaddr cur;
623
624 /* XXX: Big-endian path is untested... */
625
626 for (cur = access_addr[0]; cur < access_addr[1]; cur += access_size) {
627 uint64_t mask_bounds[2];
628
629 mask_bounds[0] = MAX(addr, cur) - cur;
630 mask_bounds[1] =
631 MIN(addr + size, align_up(cur + 1, access_size)) - cur;
632
633 access_mask = (-1ULL << mask_bounds[0] * 8) &
634 (-1ULL >> (64 - mask_bounds[1] * 8));
635
636 r |= access(mr, cur, &access_value, access_size,
637 (size - access_size - (MAX(addr, cur) - addr)),
638 access_mask, attrs);
639
640 /* XXX: Can't do this hack for writes */
641 access_value >>= mask_bounds[0] * 8;
642 }
643 } else {
644 hwaddr cur;
645
646 for (cur = access_addr[0]; cur < access_addr[1]; cur += access_size) {
647 uint64_t mask_bounds[2];
648
649 mask_bounds[0] = MAX(addr, cur) - cur;
650 mask_bounds[1] =
651 MIN(addr + size, align_up(cur + 1, access_size)) - cur;
652
653 access_mask = (-1ULL << mask_bounds[0] * 8) &
654 (-1ULL >> (64 - mask_bounds[1] * 8));
655
656 r |= access(mr, cur, &access_value, access_size,
657 (MAX(addr, cur) - addr), access_mask, attrs);
658
659 /* XXX: Can't do this hack for writes */
660 access_value >>= mask_bounds[0] * 8;
661 }
662 }
663
664 *value = access_value;
665
666 return r;
667 }
668
669 static inline MemTxResult access_with_adjusted_size(hwaddr addr,
670 uint64_t *value,
671 unsigned size,
672 unsigned access_size_min,
673 unsigned access_size_max,
674 bool unaligned,
675 MemTxResult (*access)(MemoryRegion *mr,
676 hwaddr addr,
677 uint64_t *value,
678 unsigned size,
679 signed shift,
680 uint64_t mask,
681 MemTxAttrs attrs),
682 MemoryRegion *mr,
683 MemTxAttrs attrs)
684 {
685 unsigned access_size;
686
687 if (!access_size_min) {
688 access_size_min = 1;
689 }
690 if (!access_size_max) {
691 access_size_max = 4;
692 }
693
694 access_size = MAX(MIN(size, access_size_max), access_size_min);
695
696 /* Handle unaligned accesses if the model only supports natural alignment */
697 if (unlikely((addr & (access_size - 1)) && !unaligned)) {
698 return access_with_adjusted_size_unaligned(addr, value, size,
699 access_size_min, access_size_max, unaligned, access, mr, attrs);
700 }
701
702 /*
703 * Otherwise, if the access is aligned or the model specifies it can handle
704 * unaligned accesses, use the 'aligned' handler
705 */
706 return access_with_adjusted_size_aligned(addr, value, size,
707 access_size_min, access_size_max, access, mr, attrs);
708 }
709
710 static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
711 {
712 AddressSpace *as;
713
714 while (mr->container) {
715 mr = mr->container;
716 }
717 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
718 if (mr == as->root) {
719 return as;
720 }
721 }
722 return NULL;
723 }
724
725 /* Render a memory region into the global view. Ranges in @view obscure
726 * ranges in @mr.
727 */
728 static void render_memory_region(FlatView *view,
729 MemoryRegion *mr,
730 Int128 base,
731 AddrRange clip,
732 bool readonly,
733 bool nonvolatile,
734 bool unmergeable)
735 {
736 MemoryRegion *subregion;
737 unsigned i;
738 hwaddr offset_in_region;
739 Int128 remain;
740 Int128 now;
741 FlatRange fr;
742 AddrRange tmp;
743
744 if (!mr->enabled) {
745 return;
746 }
747
748 int128_addto(&base, int128_make64(mr->addr));
749 readonly |= mr->readonly;
750 nonvolatile |= mr->nonvolatile;
751 unmergeable |= mr->unmergeable;
752
753 tmp = addrrange_make(base, mr->size);
754
755 if (!addrrange_intersects(tmp, clip)) {
756 return;
757 }
758
759 clip = addrrange_intersection(tmp, clip);
760
761 if (mr->alias) {
762 int128_subfrom(&base, int128_make64(mr->alias->addr));
763 int128_subfrom(&base, int128_make64(mr->alias_offset));
764 render_memory_region(view, mr->alias, base, clip,
765 readonly, nonvolatile, unmergeable);
766 return;
767 }
768
769 /* Render subregions in priority order. */
770 QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
771 render_memory_region(view, subregion, base, clip,
772 readonly, nonvolatile, unmergeable);
773 }
774
775 if (!mr->terminates) {
776 return;
777 }
778
779 offset_in_region = int128_get64(int128_sub(clip.start, base));
780 base = clip.start;
781 remain = clip.size;
782
783 fr.mr = mr;
784 fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
785 fr.romd_mode = mr->romd_mode;
786 fr.readonly = readonly;
787 fr.nonvolatile = nonvolatile;
788 fr.unmergeable = unmergeable;
789
790 /* Render the region itself into any gaps left by the current view. */
791 for (i = 0; i < view->nr && int128_nz(remain); ++i) {
792 if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
793 continue;
794 }
795 if (int128_lt(base, view->ranges[i].addr.start)) {
796 now = int128_min(remain,
797 int128_sub(view->ranges[i].addr.start, base));
798 fr.offset_in_region = offset_in_region;
799 fr.addr = addrrange_make(base, now);
800 flatview_insert(view, i, &fr);
801 ++i;
802 int128_addto(&base, now);
803 offset_in_region += int128_get64(now);
804 int128_subfrom(&remain, now);
805 }
806 now = int128_sub(int128_min(int128_add(base, remain),
807 addrrange_end(view->ranges[i].addr)),
808 base);
809 int128_addto(&base, now);
810 offset_in_region += int128_get64(now);
811 int128_subfrom(&remain, now);
812 }
813 if (int128_nz(remain)) {
814 fr.offset_in_region = offset_in_region;
815 fr.addr = addrrange_make(base, remain);
816 flatview_insert(view, i, &fr);
817 }
818 }
819
820 void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque)
821 {
822 FlatRange *fr;
823
824 assert(fv);
825 assert(cb);
826
827 FOR_EACH_FLAT_RANGE(fr, fv) {
828 if (cb(fr->addr.start, fr->addr.size, fr->mr,
829 fr->offset_in_region, opaque)) {
830 break;
831 }
832 }
833 }
834
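/*
 * Find the region that actually determines the flat view for a root: follow
 * aliases that cover the aliased region entirely, and descend into a sole
 * enabled child that starts at offset 0, so that roots with identical
 * contents can share a FlatView.  Returns NULL if nothing is visible.
 */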
835 static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
836 {
837 while (mr->enabled) {
838 if (mr->alias) {
839 if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
840 /* The alias is included in its entirety. Use it as
841 * the "real" root, so that we can share more FlatViews.
842 */
843 mr = mr->alias;
844 continue;
845 }
846 } else if (!mr->terminates) {
847 unsigned int found = 0;
848 MemoryRegion *child, *next = NULL;
849 QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
850 if (child->enabled) {
851 if (++found > 1) {
852 next = NULL;
853 break;
854 }
855 if (!child->addr && int128_ge(mr->size, child->size)) {
856 /* A child is included in its entirety. If it's the only
857 * enabled one, use it in the hope of finding an alias down the
858 * way. This will also let us share FlatViews.
859 */
860 next = child;
861 }
862 }
863 }
864 if (found == 0) {
865 return NULL;
866 }
867 if (next) {
868 mr = next;
869 continue;
870 }
871 }
872
873 return mr;
874 }
875
876 return NULL;
877 }
878
879 /* Render a memory topology into a list of disjoint absolute ranges. */
880 static FlatView *generate_memory_topology(MemoryRegion *mr)
881 {
882 int i;
883 FlatView *view;
884
885 view = flatview_new(mr);
886
887 if (mr) {
888 render_memory_region(view, mr, int128_zero(),
889 addrrange_make(int128_zero(), int128_2_64()),
890 false, false, false);
891 }
892 flatview_simplify(view);
893
894 view->dispatch = address_space_dispatch_new(view);
895 for (i = 0; i < view->nr; i++) {
896 MemoryRegionSection mrs =
897 section_from_flat_range(&view->ranges[i], view);
898 flatview_add_to_dispatch(view, &mrs);
899 }
900 address_space_dispatch_compact(view->dispatch);
901 g_hash_table_replace(flat_views, mr, view);
902
903 return view;
904 }
905
906 static void address_space_add_del_ioeventfds(AddressSpace *as,
907 MemoryRegionIoeventfd *fds_new,
908 unsigned fds_new_nb,
909 MemoryRegionIoeventfd *fds_old,
910 unsigned fds_old_nb)
911 {
912 unsigned iold, inew;
913 MemoryRegionIoeventfd *fd;
914 MemoryRegionSection section;
915
916 /* Generate a symmetric difference of the old and new fd sets, adding
917 * and deleting as necessary.
918 */
919
920 iold = inew = 0;
921 while (iold < fds_old_nb || inew < fds_new_nb) {
922 if (iold < fds_old_nb
923 && (inew == fds_new_nb
924 || memory_region_ioeventfd_before(&fds_old[iold],
925 &fds_new[inew]))) {
926 fd = &fds_old[iold];
927 section = (MemoryRegionSection) {
928 .fv = address_space_to_flatview(as),
929 .offset_within_address_space = int128_get64(fd->addr.start),
930 .size = fd->addr.size,
931 };
932 MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
933 fd->match_data, fd->data, fd->e);
934 ++iold;
935 } else if (inew < fds_new_nb
936 && (iold == fds_old_nb
937 || memory_region_ioeventfd_before(&fds_new[inew],
938 &fds_old[iold]))) {
939 fd = &fds_new[inew];
940 section = (MemoryRegionSection) {
941 .fv = address_space_to_flatview(as),
942 .offset_within_address_space = int128_get64(fd->addr.start),
943 .size = fd->addr.size,
944 };
945 MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
946 fd->match_data, fd->data, fd->e);
947 ++inew;
948 } else {
949 ++iold;
950 ++inew;
951 }
952 }
953 }
954
955 FlatView *address_space_get_flatview(AddressSpace *as)
956 {
957 FlatView *view;
958
959 RCU_READ_LOCK_GUARD();
960 do {
961 view = address_space_to_flatview(as);
962 /* If somebody has replaced as->current_map concurrently,
963 * flatview_ref returns false.
964 */
965 } while (!flatview_ref(view));
966 return view;
967 }
968
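/* Recompute the ioeventfds visible through @as from its current FlatView and
 * notify listeners about the ones that were added or removed.
 */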
969 static void address_space_update_ioeventfds(AddressSpace *as)
970 {
971 FlatView *view;
972 FlatRange *fr;
973 unsigned ioeventfd_nb = 0;
974 unsigned ioeventfd_max;
975 MemoryRegionIoeventfd *ioeventfds;
976 AddrRange tmp;
977 unsigned i;
978
979 if (!as->ioeventfd_notifiers) {
980 return;
981 }
982
983 /*
984 * It is likely that the number of ioeventfds hasn't changed much, so use
985 * the previous size as the starting value, with some headroom to avoid
986 * gratuitous reallocations.
987 */
988 ioeventfd_max = QEMU_ALIGN_UP(as->ioeventfd_nb, 4);
989 ioeventfds = g_new(MemoryRegionIoeventfd, ioeventfd_max);
990
991 view = address_space_get_flatview(as);
992 FOR_EACH_FLAT_RANGE(fr, view) {
993 for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
994 tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
995 int128_sub(fr->addr.start,
996 int128_make64(fr->offset_in_region)));
997 if (addrrange_intersects(fr->addr, tmp)) {
998 ++ioeventfd_nb;
999 if (ioeventfd_nb > ioeventfd_max) {
1000 ioeventfd_max = MAX(ioeventfd_max * 2, 4);
1001 ioeventfds = g_realloc(ioeventfds,
1002 ioeventfd_max * sizeof(*ioeventfds));
1003 }
1004 ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
1005 ioeventfds[ioeventfd_nb-1].addr = tmp;
1006 }
1007 }
1008 }
1009
1010 address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
1011 as->ioeventfds, as->ioeventfd_nb);
1012
1013 g_free(as->ioeventfds);
1014 as->ioeventfds = ioeventfds;
1015 as->ioeventfd_nb = ioeventfd_nb;
1016 flatview_unref(view);
1017 }
1018
1019 /*
1020 * Notify the memory listeners about the coalesced IO change events of
1021 * range `cmr'. Only the part that intersects the specified
1022 * FlatRange will be sent.
1023 */
1024 static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
1025 CoalescedMemoryRange *cmr, bool add)
1026 {
1027 AddrRange tmp;
1028
1029 tmp = addrrange_shift(cmr->addr,
1030 int128_sub(fr->addr.start,
1031 int128_make64(fr->offset_in_region)));
1032 if (!addrrange_intersects(tmp, fr->addr)) {
1033 return;
1034 }
1035 tmp = addrrange_intersection(tmp, fr->addr);
1036
1037 if (add) {
1038 MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
1039 int128_get64(tmp.start),
1040 int128_get64(tmp.size));
1041 } else {
1042 MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
1043 int128_get64(tmp.start),
1044 int128_get64(tmp.size));
1045 }
1046 }
1047
1048 static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
1049 {
1050 CoalescedMemoryRange *cmr;
1051
1052 QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
1053 flat_range_coalesced_io_notify(fr, as, cmr, false);
1054 }
1055 }
1056
1057 static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
1058 {
1059 MemoryRegion *mr = fr->mr;
1060 CoalescedMemoryRange *cmr;
1061
1062 if (QTAILQ_EMPTY(&mr->coalesced)) {
1063 return;
1064 }
1065
1066 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
1067 flat_range_coalesced_io_notify(fr, as, cmr, true);
1068 }
1069 }
1070
1071 static void address_space_update_topology_pass(AddressSpace *as,
1072 const FlatView *old_view,
1073 const FlatView *new_view,
1074 bool adding)
1075 {
1076 unsigned iold, inew;
1077 FlatRange *frold, *frnew;
1078
1079 /* Generate a symmetric difference of the old and new memory maps.
1080 * Kill ranges in the old map, and instantiate ranges in the new map.
1081 */
1082 iold = inew = 0;
1083 while (iold < old_view->nr || inew < new_view->nr) {
1084 if (iold < old_view->nr) {
1085 frold = &old_view->ranges[iold];
1086 } else {
1087 frold = NULL;
1088 }
1089 if (inew < new_view->nr) {
1090 frnew = &new_view->ranges[inew];
1091 } else {
1092 frnew = NULL;
1093 }
1094
1095 if (frold
1096 && (!frnew
1097 || int128_lt(frold->addr.start, frnew->addr.start)
1098 || (int128_eq(frold->addr.start, frnew->addr.start)
1099 && !flatrange_equal(frold, frnew)))) {
1100 /* In old but not in new, or in both but attributes changed. */
1101
1102 if (!adding) {
1103 flat_range_coalesced_io_del(frold, as);
1104 MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
1105 }
1106
1107 ++iold;
1108 } else if (frold && frnew && flatrange_equal(frold, frnew)) {
1109 /* In both and unchanged (except logging may have changed) */
1110
1111 if (adding) {
1112 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
1113 if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
1114 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
1115 frold->dirty_log_mask,
1116 frnew->dirty_log_mask);
1117 }
1118 if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
1119 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
1120 frold->dirty_log_mask,
1121 frnew->dirty_log_mask);
1122 }
1123 }
1124
1125 ++iold;
1126 ++inew;
1127 } else {
1128 /* In new */
1129
1130 if (adding) {
1131 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
1132 flat_range_coalesced_io_add(frnew, as);
1133 }
1134
1135 ++inew;
1136 }
1137 }
1138 }
1139
1140 static void flatviews_init(void)
1141 {
1142 static FlatView *empty_view;
1143
1144 if (flat_views) {
1145 return;
1146 }
1147
1148 flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
1149 (GDestroyNotify) flatview_unref);
1150 if (!empty_view) {
1151 empty_view = generate_memory_topology(NULL);
1152 /* We keep it alive forever in the global variable. */
1153 flatview_ref(empty_view);
1154 } else {
1155 g_hash_table_replace(flat_views, NULL, empty_view);
1156 flatview_ref(empty_view);
1157 }
1158 }
1159
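/* Drop all cached FlatViews and build a fresh one for every unique flatview
 * root among the registered address spaces.
 */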
1160 static void flatviews_reset(void)
1161 {
1162 AddressSpace *as;
1163
1164 if (flat_views) {
1165 g_hash_table_unref(flat_views);
1166 flat_views = NULL;
1167 }
1168 flatviews_init();
1169
1170 /* Render unique FVs */
1171 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1172 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1173
1174 if (g_hash_table_lookup(flat_views, physmr)) {
1175 continue;
1176 }
1177
1178 generate_memory_topology(physmr);
1179 }
1180 }
1181
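/* Switch @as to the FlatView generated for its root, replaying the
 * difference between the old and the new view to the listeners of @as.
 */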
1182 static void address_space_set_flatview(AddressSpace *as)
1183 {
1184 FlatView *old_view = address_space_to_flatview(as);
1185 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1186 FlatView *new_view = g_hash_table_lookup(flat_views, physmr);
1187
1188 assert(new_view);
1189
1190 if (old_view == new_view) {
1191 return;
1192 }
1193
1194 if (old_view) {
1195 flatview_ref(old_view);
1196 }
1197
1198 flatview_ref(new_view);
1199
1200 if (!QTAILQ_EMPTY(&as->listeners)) {
1201 FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;
1202
1203 if (!old_view2) {
1204 old_view2 = &tmpview;
1205 }
1206 address_space_update_topology_pass(as, old_view2, new_view, false);
1207 address_space_update_topology_pass(as, old_view2, new_view, true);
1208 }
1209
1210 /* Writes are protected by the BQL. */
1211 qatomic_rcu_set(&as->current_map, new_view);
1212 if (old_view) {
1213 flatview_unref(old_view);
1214 }
1215
1216 /* Note that all the old MemoryRegions are still alive up to this
1217 * point. This relieves most MemoryListeners from the need to
1218 * ref/unref the MemoryRegions they get---unless they use them
1219 * outside the iothread mutex, in which case precise reference
1220 * counting is necessary.
1221 */
1222 if (old_view) {
1223 flatview_unref(old_view);
1224 }
1225 }
1226
1227 static void address_space_update_topology(AddressSpace *as)
1228 {
1229 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1230
1231 flatviews_init();
1232 if (!g_hash_table_lookup(flat_views, physmr)) {
1233 generate_memory_topology(physmr);
1234 }
1235 address_space_set_flatview(as);
1236 }
1237
1238 void memory_region_transaction_begin(void)
1239 {
1240 qemu_flush_coalesced_mmio_buffer();
1241 ++memory_region_transaction_depth;
1242 }
1243
1244 void memory_region_transaction_commit(void)
1245 {
1246 AddressSpace *as;
1247
1248 assert(memory_region_transaction_depth);
1249 assert(qemu_mutex_iothread_locked());
1250
1251 --memory_region_transaction_depth;
1252 if (!memory_region_transaction_depth) {
1253 if (memory_region_update_pending) {
1254 flatviews_reset();
1255
1256 MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
1257
1258 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1259 address_space_set_flatview(as);
1260 address_space_update_ioeventfds(as);
1261 }
1262 memory_region_update_pending = false;
1263 ioeventfd_update_pending = false;
1264 MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
1265 } else if (ioeventfd_update_pending) {
1266 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1267 address_space_update_ioeventfds(as);
1268 }
1269 ioeventfd_update_pending = false;
1270 }
1271 }
1272 }
1273
1274 static void memory_region_destructor_none(MemoryRegion *mr)
1275 {
1276 }
1277
1278 static void memory_region_destructor_ram(MemoryRegion *mr)
1279 {
1280 qemu_ram_free(mr->ram_block);
1281 }
1282
1283 static bool memory_region_need_escape(char c)
1284 {
1285 return c == '/' || c == '[' || c == '\\' || c == ']';
1286 }
1287
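/* Escape '/', '[', ']' and '\' as \xNN so a region name can be used as a QOM
 * child property name.
 */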
1288 static char *memory_region_escape_name(const char *name)
1289 {
1290 const char *p;
1291 char *escaped, *q;
1292 uint8_t c;
1293 size_t bytes = 0;
1294
1295 for (p = name; *p; p++) {
1296 bytes += memory_region_need_escape(*p) ? 4 : 1;
1297 }
1298 if (bytes == p - name) {
1299 return g_memdup(name, bytes + 1);
1300 }
1301
1302 escaped = g_malloc(bytes + 1);
1303 for (p = name, q = escaped; *p; p++) {
1304 c = *p;
1305 if (unlikely(memory_region_need_escape(c))) {
1306 *q++ = '\\';
1307 *q++ = 'x';
1308 *q++ = "0123456789abcdef"[c >> 4];
1309 c = "0123456789abcdef"[c & 15];
1310 }
1311 *q++ = c;
1312 }
1313 *q = 0;
1314 return escaped;
1315 }
1316
1317 static void memory_region_do_init(MemoryRegion *mr,
1318 Object *owner,
1319 const char *name,
1320 uint64_t size)
1321 {
1322 mr->size = int128_make64(size);
1323 if (size == UINT64_MAX) {
1324 mr->size = int128_2_64();
1325 }
1326 mr->name = g_strdup(name);
1327 mr->owner = owner;
1328 mr->dev = (DeviceState *) object_dynamic_cast(mr->owner, TYPE_DEVICE);
1329 mr->ram_block = NULL;
1330
1331 if (name) {
1332 char *escaped_name = memory_region_escape_name(name);
1333 char *name_array = g_strdup_printf("%s[*]", escaped_name);
1334
1335 if (!owner) {
1336 owner = container_get(qdev_get_machine(), "/unattached");
1337 }
1338
1339 object_property_add_child(owner, name_array, OBJECT(mr));
1340 object_unref(OBJECT(mr));
1341 g_free(name_array);
1342 g_free(escaped_name);
1343 }
1344 }
1345
1346 void memory_region_init(MemoryRegion *mr,
1347 Object *owner,
1348 const char *name,
1349 uint64_t size)
1350 {
1351 object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
1352 memory_region_do_init(mr, owner, name, size);
1353 }
1354
1355 static void memory_region_get_container(Object *obj, Visitor *v,
1356 const char *name, void *opaque,
1357 Error **errp)
1358 {
1359 MemoryRegion *mr = MEMORY_REGION(obj);
1360 char *path = (char *)"";
1361
1362 if (mr->container) {
1363 path = object_get_canonical_path(OBJECT(mr->container));
1364 }
1365 visit_type_str(v, name, &path, errp);
1366 if (mr->container) {
1367 g_free(path);
1368 }
1369 }
1370
1371 static Object *memory_region_resolve_container(Object *obj, void *opaque,
1372 const char *part)
1373 {
1374 MemoryRegion *mr = MEMORY_REGION(obj);
1375
1376 return OBJECT(mr->container);
1377 }
1378
1379 static void memory_region_get_priority(Object *obj, Visitor *v,
1380 const char *name, void *opaque,
1381 Error **errp)
1382 {
1383 MemoryRegion *mr = MEMORY_REGION(obj);
1384 int32_t value = mr->priority;
1385
1386 visit_type_int32(v, name, &value, errp);
1387 }
1388
1389 static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
1390 void *opaque, Error **errp)
1391 {
1392 MemoryRegion *mr = MEMORY_REGION(obj);
1393 uint64_t value = memory_region_size(mr);
1394
1395 visit_type_uint64(v, name, &value, errp);
1396 }
1397
1398 static void memory_region_initfn(Object *obj)
1399 {
1400 MemoryRegion *mr = MEMORY_REGION(obj);
1401 ObjectProperty *op;
1402
1403 mr->ops = &unassigned_mem_ops;
1404 mr->enabled = true;
1405 mr->romd_mode = true;
1406 mr->destructor = memory_region_destructor_none;
1407 QTAILQ_INIT(&mr->subregions);
1408 QTAILQ_INIT(&mr->coalesced);
1409
1410 op = object_property_add(OBJECT(mr), "container",
1411 "link<" TYPE_MEMORY_REGION ">",
1412 memory_region_get_container,
1413 NULL, /* memory_region_set_container */
1414 NULL, NULL);
1415 op->resolve = memory_region_resolve_container;
1416
1417 object_property_add_uint64_ptr(OBJECT(mr), "addr",
1418 &mr->addr, OBJ_PROP_FLAG_READ);
1419 object_property_add(OBJECT(mr), "priority", "uint32",
1420 memory_region_get_priority,
1421 NULL, /* memory_region_set_priority */
1422 NULL, NULL);
1423 object_property_add(OBJECT(mr), "size", "uint64",
1424 memory_region_get_size,
1425 NULL, /* memory_region_set_size, */
1426 NULL, NULL);
1427 }
1428
1429 static void iommu_memory_region_initfn(Object *obj)
1430 {
1431 MemoryRegion *mr = MEMORY_REGION(obj);
1432
1433 mr->is_iommu = true;
1434 }
1435
1436 static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
1437 unsigned size)
1438 {
1439 #ifdef DEBUG_UNASSIGNED
1440 printf("Unassigned mem read " HWADDR_FMT_plx "\n", addr);
1441 #endif
1442 return 0;
1443 }
1444
1445 static void unassigned_mem_write(void *opaque, hwaddr addr,
1446 uint64_t val, unsigned size)
1447 {
1448 #ifdef DEBUG_UNASSIGNED
1449 printf("Unassigned mem write " HWADDR_FMT_plx " = 0x%"PRIx64"\n", addr, val);
1450 #endif
1451 }
1452
1453 static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
1454 unsigned size, bool is_write,
1455 MemTxAttrs attrs)
1456 {
1457 return false;
1458 }
1459
1460 const MemoryRegionOps unassigned_mem_ops = {
1461 .valid.accepts = unassigned_mem_accepts,
1462 .endianness = DEVICE_NATIVE_ENDIAN,
1463 };
1464
1465 static uint64_t memory_region_ram_device_read(void *opaque,
1466 hwaddr addr, unsigned size)
1467 {
1468 MemoryRegion *mr = opaque;
1469 uint64_t data = ldn_he_p(mr->ram_block->host + addr, size);
1470
1471 trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
1472
1473 return data;
1474 }
1475
1476 static void memory_region_ram_device_write(void *opaque, hwaddr addr,
1477 uint64_t data, unsigned size)
1478 {
1479 MemoryRegion *mr = opaque;
1480
1481 trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
1482
1483 stn_he_p(mr->ram_block->host + addr, size, data);
1484 }
1485
1486 static const MemoryRegionOps ram_device_mem_ops = {
1487 .read = memory_region_ram_device_read,
1488 .write = memory_region_ram_device_write,
1489 .endianness = DEVICE_HOST_ENDIAN,
1490 .valid = {
1491 .min_access_size = 1,
1492 .max_access_size = 8,
1493 .unaligned = true,
1494 },
1495 .impl = {
1496 .min_access_size = 1,
1497 .max_access_size = 8,
1498 .unaligned = true,
1499 },
1500 };
1501
1502 bool memory_region_access_valid(MemoryRegion *mr,
1503 hwaddr addr,
1504 unsigned size,
1505 bool is_write,
1506 MemTxAttrs attrs)
1507 {
1508 if (mr->ops->valid.accepts
1509 && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
1510 qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
1511 ", size %u, region '%s', reason: rejected\n",
1512 is_write ? "write" : "read",
1513 addr, size, memory_region_name(mr));
1514 return false;
1515 }
1516
1517 if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
1518 qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
1519 ", size %u, region '%s', reason: unaligned\n",
1520 is_write ? "write" : "read",
1521 addr, size, memory_region_name(mr));
1522 return false;
1523 }
1524
1525 /* A max_access_size of zero means any access size is valid (compatibility) */
1526 if (!mr->ops->valid.max_access_size) {
1527 return true;
1528 }
1529
1530 if (size > mr->ops->valid.max_access_size
1531 || size < mr->ops->valid.min_access_size) {
1532 qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
1533 ", size %u, region '%s', reason: invalid size "
1534 "(min:%u max:%u)\n",
1535 is_write ? "write" : "read",
1536 addr, size, memory_region_name(mr),
1537 mr->ops->valid.min_access_size,
1538 mr->ops->valid.max_access_size);
1539 return false;
1540 }
1541 return true;
1542 }
1543
1544 static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1545 hwaddr addr,
1546 uint64_t *pval,
1547 unsigned size,
1548 MemTxAttrs attrs)
1549 {
1550 *pval = 0;
1551
1552 if (mr->ops->read) {
1553 return access_with_adjusted_size(addr, pval, size,
1554 mr->ops->impl.min_access_size,
1555 mr->ops->impl.max_access_size,
1556 mr->ops->impl.unaligned,
1557 memory_region_read_accessor,
1558 mr, attrs);
1559 } else {
1560 return access_with_adjusted_size(addr, pval, size,
1561 mr->ops->impl.min_access_size,
1562 mr->ops->impl.max_access_size,
1563 mr->ops->impl.unaligned,
1564 memory_region_read_with_attrs_accessor,
1565 mr, attrs);
1566 }
1567 }
1568
1569 MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1570 hwaddr addr,
1571 uint64_t *pval,
1572 MemOp op,
1573 MemTxAttrs attrs)
1574 {
1575 unsigned size = memop_size(op);
1576 MemTxResult r;
1577
1578 if (mr->alias) {
1579 return memory_region_dispatch_read(mr->alias,
1580 mr->alias_offset + addr,
1581 pval, op, attrs);
1582 }
1583 if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
1584 *pval = unassigned_mem_read(mr, addr, size);
1585 return MEMTX_DECODE_ERROR;
1586 }
1587
1588 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
1589 adjust_endianness(mr, pval, op);
1590 return r;
1591 }
1592
1593 /* Return true if an eventfd was signalled */
1594 static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1595 hwaddr addr,
1596 uint64_t data,
1597 unsigned size,
1598 MemTxAttrs attrs)
1599 {
1600 MemoryRegionIoeventfd ioeventfd = {
1601 .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1602 .data = data,
1603 };
1604 unsigned i;
1605
1606 for (i = 0; i < mr->ioeventfd_nb; i++) {
1607 ioeventfd.match_data = mr->ioeventfds[i].match_data;
1608 ioeventfd.e = mr->ioeventfds[i].e;
1609
1610 if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
1611 event_notifier_set(ioeventfd.e);
1612 return true;
1613 }
1614 }
1615
1616 return false;
1617 }
1618
1619 MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1620 hwaddr addr,
1621 uint64_t data,
1622 MemOp op,
1623 MemTxAttrs attrs)
1624 {
1625 unsigned size = memop_size(op);
1626
1627 if (mr->alias) {
1628 return memory_region_dispatch_write(mr->alias,
1629 mr->alias_offset + addr,
1630 data, op, attrs);
1631 }
1632 if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
1633 unassigned_mem_write(mr, addr, data, size);
1634 return MEMTX_DECODE_ERROR;
1635 }
1636
1637 adjust_endianness(mr, &data, op);
1638
1639 /*
1640 * FIXME: it's not clear why under KVM the write would be processed
1641 * directly, instead of going through eventfd. This probably should
1642 * test "tcg_enabled() || qtest_enabled()", or should just go away.
1643 */
1644 if (!kvm_enabled() &&
1645 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1646 return MEMTX_OK;
1647 }
1648
1649 if (mr->ops->write) {
1650 return access_with_adjusted_size_aligned(addr, &data, size,
1651 mr->ops->impl.min_access_size,
1652 mr->ops->impl.max_access_size,
1653 memory_region_write_accessor, mr,
1654 attrs);
1655 } else {
1656 return
1657 access_with_adjusted_size_aligned(addr, &data, size,
1658 mr->ops->impl.min_access_size,
1659 mr->ops->impl.max_access_size,
1660 memory_region_write_with_attrs_accessor,
1661 mr, attrs);
1662 }
1663 }
1664
1665 void memory_region_init_io(MemoryRegion *mr,
1666 Object *owner,
1667 const MemoryRegionOps *ops,
1668 void *opaque,
1669 const char *name,
1670 uint64_t size)
1671 {
1672 memory_region_init(mr, owner, name, size);
1673 mr->ops = ops ? ops : &unassigned_mem_ops;
1674 mr->opaque = opaque;
1675 mr->terminates = true;
1676 }
1677
1678 void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1679 Object *owner,
1680 const char *name,
1681 uint64_t size,
1682 Error **errp)
1683 {
1684 memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp);
1685 }
1686
1687 void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
1688 Object *owner,
1689 const char *name,
1690 uint64_t size,
1691 uint32_t ram_flags,
1692 Error **errp)
1693 {
1694 Error *err = NULL;
1695 memory_region_init(mr, owner, name, size);
1696 mr->ram = true;
1697 mr->terminates = true;
1698 mr->destructor = memory_region_destructor_ram;
1699 mr->ram_block = qemu_ram_alloc(size, ram_flags, mr, &err);
1700 if (err) {
1701 mr->size = int128_zero();
1702 object_unparent(OBJECT(mr));
1703 error_propagate(errp, err);
1704 }
1705 }
1706
1707 void memory_region_init_resizeable_ram(MemoryRegion *mr,
1708 Object *owner,
1709 const char *name,
1710 uint64_t size,
1711 uint64_t max_size,
1712 void (*resized)(const char*,
1713 uint64_t length,
1714 void *host),
1715 Error **errp)
1716 {
1717 Error *err = NULL;
1718 memory_region_init(mr, owner, name, size);
1719 mr->ram = true;
1720 mr->terminates = true;
1721 mr->destructor = memory_region_destructor_ram;
1722 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
1723 mr, &err);
1724 if (err) {
1725 mr->size = int128_zero();
1726 object_unparent(OBJECT(mr));
1727 error_propagate(errp, err);
1728 }
1729 }
1730
1731 #ifdef CONFIG_POSIX
1732 void memory_region_init_ram_from_file(MemoryRegion *mr,
1733 Object *owner,
1734 const char *name,
1735 uint64_t size,
1736 uint64_t align,
1737 uint32_t ram_flags,
1738 const char *path,
1739 ram_addr_t offset,
1740 Error **errp)
1741 {
1742 Error *err = NULL;
1743 memory_region_init(mr, owner, name, size);
1744 mr->ram = true;
1745 mr->readonly = !!(ram_flags & RAM_READONLY);
1746 mr->terminates = true;
1747 mr->destructor = memory_region_destructor_ram;
1748 mr->align = align;
1749 mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path,
1750 offset, &err);
1751 if (err) {
1752 mr->size = int128_zero();
1753 object_unparent(OBJECT(mr));
1754 error_propagate(errp, err);
1755 }
1756 }
1757
1758 void memory_region_init_ram_from_fd(MemoryRegion *mr,
1759 Object *owner,
1760 const char *name,
1761 uint64_t size,
1762 uint32_t ram_flags,
1763 int fd,
1764 ram_addr_t offset,
1765 Error **errp)
1766 {
1767 Error *err = NULL;
1768 memory_region_init(mr, owner, name, size);
1769 mr->ram = true;
1770 mr->readonly = !!(ram_flags & RAM_READONLY);
1771 mr->terminates = true;
1772 mr->destructor = memory_region_destructor_ram;
1773 mr->ram_block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, offset,
1774 &err);
1775 if (err) {
1776 mr->size = int128_zero();
1777 object_unparent(OBJECT(mr));
1778 error_propagate(errp, err);
1779 }
1780 }
1781 #endif
1782
1783 void memory_region_init_ram_ptr(MemoryRegion *mr,
1784 Object *owner,
1785 const char *name,
1786 uint64_t size,
1787 void *ptr)
1788 {
1789 memory_region_init(mr, owner, name, size);
1790 mr->ram = true;
1791 mr->terminates = true;
1792 mr->destructor = memory_region_destructor_ram;
1793
1794 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1795 assert(ptr != NULL);
1796 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_abort);
1797 }
1798
1799 void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1800 Object *owner,
1801 const char *name,
1802 uint64_t size,
1803 void *ptr)
1804 {
1805 memory_region_init(mr, owner, name, size);
1806 mr->ram = true;
1807 mr->terminates = true;
1808 mr->ram_device = true;
1809 mr->ops = &ram_device_mem_ops;
1810 mr->opaque = mr;
1811 mr->destructor = memory_region_destructor_ram;
1812
1813 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1814 assert(ptr != NULL);
1815 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_abort);
1816 }
1817
1818 void memory_region_init_alias(MemoryRegion *mr,
1819 Object *owner,
1820 const char *name,
1821 MemoryRegion *orig,
1822 hwaddr offset,
1823 uint64_t size)
1824 {
1825 memory_region_init(mr, owner, name, size);
1826 mr->alias = orig;
1827 mr->alias_offset = offset;
1828 }
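/*
 * Editor's usage sketch: an alias is a window into another region rather
 * than a new allocation.  Here a hypothetical "lowmem" alias re-exposes the
 * first 1 MiB of a system RAM region at a second address; the names and
 * addresses are made up.
 *
 *     memory_region_init_alias(&lowmem_alias, OBJECT(machine), "lowmem",
 *                              system_ram, 0, 0x100000);
 *     memory_region_add_subregion(get_system_memory(), 0x100000000ULL,
 *                                 &lowmem_alias);
 */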
1829
1830 void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1831 Object *owner,
1832 const char *name,
1833 uint64_t size,
1834 Error **errp)
1835 {
1836 memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp);
1837 mr->readonly = true;
1838 }
1839
1840 void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1841 Object *owner,
1842 const MemoryRegionOps *ops,
1843 void *opaque,
1844 const char *name,
1845 uint64_t size,
1846 Error **errp)
1847 {
1848 Error *err = NULL;
1849 assert(ops);
1850 memory_region_init(mr, owner, name, size);
1851 mr->ops = ops;
1852 mr->opaque = opaque;
1853 mr->terminates = true;
1854 mr->rom_device = true;
1855 mr->destructor = memory_region_destructor_ram;
1856 mr->ram_block = qemu_ram_alloc(size, 0, mr, &err);
1857 if (err) {
1858 mr->size = int128_zero();
1859 object_unparent(OBJECT(mr));
1860 error_propagate(errp, err);
1861 }
1862 }
1863
1864 void memory_region_init_iommu(void *_iommu_mr,
1865 size_t instance_size,
1866 const char *mrtypename,
1867 Object *owner,
1868 const char *name,
1869 uint64_t size)
1870 {
1871 struct IOMMUMemoryRegion *iommu_mr;
1872 struct MemoryRegion *mr;
1873
1874 object_initialize(_iommu_mr, instance_size, mrtypename);
1875 mr = MEMORY_REGION(_iommu_mr);
1876 memory_region_do_init(mr, owner, name, size);
1877 iommu_mr = IOMMU_MEMORY_REGION(mr);
1878 mr->terminates = true; /* then re-forwards */
1879 QLIST_INIT(&iommu_mr->iommu_notify);
1880 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
1881 }
1882
1883 static void memory_region_finalize(Object *obj)
1884 {
1885 MemoryRegion *mr = MEMORY_REGION(obj);
1886
1887 assert(!mr->container);
1888
1889 /* We know the region is not visible in any address space (it
1890 * does not have a container and cannot be a root either because
1891 * it has no references), so we can blindly clear mr->enabled.
1892 * Calling memory_region_set_enabled instead could trigger a
1893 * transaction and cause an infinite loop.
1894 */
1895 mr->enabled = false;
1896 memory_region_transaction_begin();
1897 while (!QTAILQ_EMPTY(&mr->subregions)) {
1898 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1899 memory_region_del_subregion(mr, subregion);
1900 }
1901 memory_region_transaction_commit();
1902
1903 mr->destructor(mr);
1904 memory_region_clear_coalescing(mr);
1905 g_free((char *)mr->name);
1906 g_free(mr->ioeventfds);
1907 }
1908
1909 Object *memory_region_owner(MemoryRegion *mr)
1910 {
1911 Object *obj = OBJECT(mr);
1912 return obj->parent;
1913 }
1914
1915 void memory_region_ref(MemoryRegion *mr)
1916 {
1917 /* MMIO callbacks most likely will access data that belongs
1918 * to the owner, hence the need to ref/unref the owner whenever
1919 * the memory region is in use.
1920 *
1921 * The memory region is a child of its owner. As long as the
1922 * owner doesn't call unparent itself on the memory region,
1923 * ref-ing the owner will also keep the memory region alive.
1924 * Memory regions without an owner are supposed to never go away;
1925 * we do not ref/unref them because it would slow down DMA noticeably.
1926 */
1927 if (mr && mr->owner) {
1928 object_ref(mr->owner);
1929 }
1930 }
1931
1932 void memory_region_unref(MemoryRegion *mr)
1933 {
1934 if (mr && mr->owner) {
1935 object_unref(mr->owner);
1936 }
1937 }
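/*
 * Editor's note: a typical pattern for code that keeps a MemoryRegion in
 * use across an MMIO dispatch or DMA transaction is to bracket the access
 * with a reference on the region (and hence on its owner):
 *
 *     memory_region_ref(mr);
 *     ... access guest memory or MMIO described by mr ...
 *     memory_region_unref(mr);
 */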
1938
1939 uint64_t memory_region_size(MemoryRegion *mr)
1940 {
1941 if (int128_eq(mr->size, int128_2_64())) {
1942 return UINT64_MAX;
1943 }
1944 return int128_get64(mr->size);
1945 }
1946
1947 const char *memory_region_name(const MemoryRegion *mr)
1948 {
1949 if (!mr->name) {
1950 ((MemoryRegion *)mr)->name =
1951 g_strdup(object_get_canonical_path_component(OBJECT(mr)));
1952 }
1953 return mr->name;
1954 }
1955
1956 bool memory_region_is_ram_device(MemoryRegion *mr)
1957 {
1958 return mr->ram_device;
1959 }
1960
1961 bool memory_region_is_protected(MemoryRegion *mr)
1962 {
1963 return mr->ram && (mr->ram_block->flags & RAM_PROTECTED);
1964 }
1965
1966 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
1967 {
1968 uint8_t mask = mr->dirty_log_mask;
1969 RAMBlock *rb = mr->ram_block;
1970
1971 if (global_dirty_tracking && ((rb && qemu_ram_is_migratable(rb)) ||
1972 memory_region_is_iommu(mr))) {
1973 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1974 }
1975
1976 if (tcg_enabled() && rb) {
1977 /* TCG only cares about dirty memory logging for RAM, not IOMMU. */
1978 mask |= (1 << DIRTY_MEMORY_CODE);
1979 }
1980 return mask;
1981 }
1982
1983 bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1984 {
1985 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1986 }
1987
1988 static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr,
1989 Error **errp)
1990 {
1991 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1992 IOMMUNotifier *iommu_notifier;
1993 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1994 int ret = 0;
1995
1996 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
1997 flags |= iommu_notifier->notifier_flags;
1998 }
1999
2000 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
2001 ret = imrc->notify_flag_changed(iommu_mr,
2002 iommu_mr->iommu_notify_flags,
2003 flags, errp);
2004 }
2005
2006 if (!ret) {
2007 iommu_mr->iommu_notify_flags = flags;
2008 }
2009 return ret;
2010 }
2011
2012 int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
2013 uint64_t page_size_mask,
2014 Error **errp)
2015 {
2016 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2017 int ret = 0;
2018
2019 if (imrc->iommu_set_page_size_mask) {
2020 ret = imrc->iommu_set_page_size_mask(iommu_mr, page_size_mask, errp);
2021 }
2022 return ret;
2023 }
2024
2025 int memory_region_iommu_set_iova_ranges(IOMMUMemoryRegion *iommu_mr,
2026 GList *iova_ranges,
2027 Error **errp)
2028 {
2029 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2030 int ret = 0;
2031
2032 if (imrc->iommu_set_iova_ranges) {
2033 ret = imrc->iommu_set_iova_ranges(iommu_mr, iova_ranges, errp);
2034 }
2035 return ret;
2036 }
2037
2038 int memory_region_register_iommu_notifier(MemoryRegion *mr,
2039 IOMMUNotifier *n, Error **errp)
2040 {
2041 IOMMUMemoryRegion *iommu_mr;
2042 int ret;
2043
2044 if (mr->alias) {
2045 return memory_region_register_iommu_notifier(mr->alias, n, errp);
2046 }
2047
2048 /* We need to register for at least one bitfield */
2049 iommu_mr = IOMMU_MEMORY_REGION(mr);
2050 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
2051 assert(n->start <= n->end);
2052 assert(n->iommu_idx >= 0 &&
2053 n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
2054
2055 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
2056 ret = memory_region_update_iommu_notify_flags(iommu_mr, errp);
2057 if (ret) {
2058 QLIST_REMOVE(n, node);
2059 }
2060 return ret;
2061 }
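/*
 * Editor's usage sketch, assuming the iommu_notifier_init() helper declared
 * in the memory API headers: a vhost/vfio-style consumer registering a
 * hypothetical callback for both MAP and UNMAP events over the full range.
 *
 *     IOMMUNotifier n;
 *     Error *local_err = NULL;
 *
 *     iommu_notifier_init(&n, my_iommu_notify, IOMMU_NOTIFIER_ALL,
 *                         0, HWADDR_MAX, 0);
 *     if (memory_region_register_iommu_notifier(mr, &n, &local_err)) {
 *         error_report_err(local_err);
 *     }
 */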
2062
2063 uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
2064 {
2065 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2066
2067 if (imrc->get_min_page_size) {
2068 return imrc->get_min_page_size(iommu_mr);
2069 }
2070 return TARGET_PAGE_SIZE;
2071 }
2072
2073 void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
2074 {
2075 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
2076 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2077 hwaddr addr, granularity;
2078 IOMMUTLBEntry iotlb;
2079
2080 /* If the IOMMU has its own replay callback, override */
2081 if (imrc->replay) {
2082 imrc->replay(iommu_mr, n);
2083 return;
2084 }
2085
2086 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
2087
2088 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
2089 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
2090 if (iotlb.perm != IOMMU_NONE) {
2091 n->notify(n, &iotlb);
2092 }
2093
2094 /* if (2^64 - MR size) < granularity, it's possible to get an
2095 * infinite loop here. This should catch such a wraparound */
2096 if ((addr + granularity) < addr) {
2097 break;
2098 }
2099 }
2100 }
2101
2102 void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
2103 IOMMUNotifier *n)
2104 {
2105 IOMMUMemoryRegion *iommu_mr;
2106
2107 if (mr->alias) {
2108 memory_region_unregister_iommu_notifier(mr->alias, n);
2109 return;
2110 }
2111 QLIST_REMOVE(n, node);
2112 iommu_mr = IOMMU_MEMORY_REGION(mr);
2113 memory_region_update_iommu_notify_flags(iommu_mr, NULL);
2114 }
2115
2116 void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
2117 IOMMUTLBEvent *event)
2118 {
2119 IOMMUTLBEntry *entry = &event->entry;
2120 hwaddr entry_end = entry->iova + entry->addr_mask;
2121 IOMMUTLBEntry tmp = *entry;
2122
2123 if (event->type == IOMMU_NOTIFIER_UNMAP) {
2124 assert(entry->perm == IOMMU_NONE);
2125 }
2126
2127 /*
2128 * Skip the notification if it does not overlap
2129 * with the registered range.
2130 */
2131 if (notifier->start > entry_end || notifier->end < entry->iova) {
2132 return;
2133 }
2134
2135 if (notifier->notifier_flags & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
2136 /* Crop (iova, addr_mask) to range */
2137 tmp.iova = MAX(tmp.iova, notifier->start);
2138 tmp.addr_mask = MIN(entry_end, notifier->end) - tmp.iova;
2139 } else {
2140 assert(entry->iova >= notifier->start && entry_end <= notifier->end);
2141 }
2142
2143 if (event->type & notifier->notifier_flags) {
2144 notifier->notify(notifier, &tmp);
2145 }
2146 }
2147
2148 void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier)
2149 {
2150 IOMMUTLBEvent event;
2151
2152 event.type = IOMMU_NOTIFIER_UNMAP;
2153 event.entry.target_as = &address_space_memory;
2154 event.entry.iova = notifier->start;
2155 event.entry.perm = IOMMU_NONE;
2156 event.entry.addr_mask = notifier->end - notifier->start;
2157
2158 memory_region_notify_iommu_one(notifier, &event);
2159 }
2160
2161 void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
2162 int iommu_idx,
2163 IOMMUTLBEvent event)
2164 {
2165 IOMMUNotifier *iommu_notifier;
2166
2167 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
2168
2169 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
2170 if (iommu_notifier->iommu_idx == iommu_idx) {
2171 memory_region_notify_iommu_one(iommu_notifier, &event);
2172 }
2173 }
2174 }
2175
2176 int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
2177 enum IOMMUMemoryRegionAttr attr,
2178 void *data)
2179 {
2180 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2181
2182 if (!imrc->get_attr) {
2183 return -EINVAL;
2184 }
2185
2186 return imrc->get_attr(iommu_mr, attr, data);
2187 }
2188
2189 int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
2190 MemTxAttrs attrs)
2191 {
2192 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2193
2194 if (!imrc->attrs_to_index) {
2195 return 0;
2196 }
2197
2198 return imrc->attrs_to_index(iommu_mr, attrs);
2199 }
2200
2201 int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
2202 {
2203 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2204
2205 if (!imrc->num_indexes) {
2206 return 1;
2207 }
2208
2209 return imrc->num_indexes(iommu_mr);
2210 }
2211
2212 RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr)
2213 {
2214 if (!memory_region_is_ram(mr)) {
2215 return NULL;
2216 }
2217 return mr->rdm;
2218 }
2219
2220 void memory_region_set_ram_discard_manager(MemoryRegion *mr,
2221 RamDiscardManager *rdm)
2222 {
2223 g_assert(memory_region_is_ram(mr));
2224 g_assert(!rdm || !mr->rdm);
2225 mr->rdm = rdm;
2226 }
2227
2228 uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
2229 const MemoryRegion *mr)
2230 {
2231 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2232
2233 g_assert(rdmc->get_min_granularity);
2234 return rdmc->get_min_granularity(rdm, mr);
2235 }
2236
2237 bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
2238 const MemoryRegionSection *section)
2239 {
2240 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2241
2242 g_assert(rdmc->is_populated);
2243 return rdmc->is_populated(rdm, section);
2244 }
2245
2246 int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
2247 MemoryRegionSection *section,
2248 ReplayRamPopulate replay_fn,
2249 void *opaque)
2250 {
2251 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2252
2253 g_assert(rdmc->replay_populated);
2254 return rdmc->replay_populated(rdm, section, replay_fn, opaque);
2255 }
2256
2257 void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
2258 MemoryRegionSection *section,
2259 ReplayRamDiscard replay_fn,
2260 void *opaque)
2261 {
2262 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2263
2264 g_assert(rdmc->replay_discarded);
2265 rdmc->replay_discarded(rdm, section, replay_fn, opaque);
2266 }
2267
2268 void ram_discard_manager_register_listener(RamDiscardManager *rdm,
2269 RamDiscardListener *rdl,
2270 MemoryRegionSection *section)
2271 {
2272 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2273
2274 g_assert(rdmc->register_listener);
2275 rdmc->register_listener(rdm, rdl, section);
2276 }
2277
2278 void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
2279 RamDiscardListener *rdl)
2280 {
2281 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2282
2283 g_assert(rdmc->unregister_listener);
2284 rdmc->unregister_listener(rdm, rdl);
2285 }
2286
2287 /* Called with rcu_read_lock held. */
2288 bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
2289 ram_addr_t *ram_addr, bool *read_only,
2290 bool *mr_has_discard_manager)
2291 {
2292 MemoryRegion *mr;
2293 hwaddr xlat;
2294 hwaddr len = iotlb->addr_mask + 1;
2295 bool writable = iotlb->perm & IOMMU_WO;
2296
2297 if (mr_has_discard_manager) {
2298 *mr_has_discard_manager = false;
2299 }
2300 /*
2301 * The IOMMU TLB entry we have covers only the translation through
2302 * this IOMMU to its immediate target. We need to translate
2303 * it the rest of the way through to memory.
2304 */
2305 mr = address_space_translate(&address_space_memory, iotlb->translated_addr,
2306 &xlat, &len, writable, MEMTXATTRS_UNSPECIFIED);
2307 if (!memory_region_is_ram(mr)) {
2308 error_report("iommu map to non memory area %" HWADDR_PRIx "", xlat);
2309 return false;
2310 } else if (memory_region_has_ram_discard_manager(mr)) {
2311 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
2312 MemoryRegionSection tmp = {
2313 .mr = mr,
2314 .offset_within_region = xlat,
2315 .size = int128_make64(len),
2316 };
2317 if (mr_has_discard_manager) {
2318 *mr_has_discard_manager = true;
2319 }
2320 /*
2321 * Malicious VMs can map memory into the IOMMU, which is expected
2322 * to remain discarded. vfio will pin all pages, populating memory.
2323 * Disallow that. vmstate priorities make sure any RamDiscardManager
2324 * state was already restored before IOMMUs are restored.
2325 */
2326 if (!ram_discard_manager_is_populated(rdm, &tmp)) {
2327 error_report("iommu map to discarded memory (e.g., unplugged via"
2328 " virtio-mem): %" HWADDR_PRIx "",
2329 iotlb->translated_addr);
2330 return false;
2331 }
2332 }
2333
2334 /*
2335 * Translation truncates length to the IOMMU page size,
2336 * check that it did not truncate too much.
2337 */
2338 if (len & iotlb->addr_mask) {
2339 error_report("iommu has granularity incompatible with target AS");
2340 return false;
2341 }
2342
2343 if (vaddr) {
2344 *vaddr = memory_region_get_ram_ptr(mr) + xlat;
2345 }
2346
2347 if (ram_addr) {
2348 *ram_addr = memory_region_get_ram_addr(mr) + xlat;
2349 }
2350
2351 if (read_only) {
2352 *read_only = !writable || mr->readonly;
2353 }
2354
2355 return true;
2356 }
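/*
 * Editor's usage sketch: a caller (e.g. vhost- or vfio-style code) turning
 * an IOMMU translation result into a host pointer, under the RCU read lock
 * as required above.  "iotlb" stands for a hypothetical IOMMUTLBEntry.
 *
 *     void *vaddr;
 *     bool read_only;
 *
 *     WITH_RCU_READ_LOCK_GUARD() {
 *         if (memory_get_xlat_addr(&iotlb, &vaddr, NULL, &read_only, NULL)) {
 *             ... map vaddr into the device, honouring read_only ...
 *         }
 *     }
 */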
2357
2358 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
2359 {
2360 uint8_t mask = 1 << client;
2361 uint8_t old_logging;
2362
2363 assert(client == DIRTY_MEMORY_VGA);
2364 old_logging = mr->vga_logging_count;
2365 mr->vga_logging_count += log ? 1 : -1;
2366 if (!!old_logging == !!mr->vga_logging_count) {
2367 return;
2368 }
2369
2370 memory_region_transaction_begin();
2371 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
2372 memory_region_update_pending |= mr->enabled;
2373 memory_region_transaction_commit();
2374 }
2375
2376 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2377 hwaddr size)
2378 {
2379 assert(mr->ram_block);
2380 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
2381 size,
2382 memory_region_get_dirty_log_mask(mr));
2383 }
2384
2385 /*
2386 * If memory region `mr' is NULL, do global sync. Otherwise, sync
2387 * dirty bitmap for the specified memory region.
2388 */
2389 static void memory_region_sync_dirty_bitmap(MemoryRegion *mr, bool last_stage)
2390 {
2391 MemoryListener *listener;
2392 AddressSpace *as;
2393 FlatView *view;
2394 FlatRange *fr;
2395
2396 /* If the same address space has multiple log_sync listeners, we
2397 * visit that address space's FlatView multiple times. But because
2398 * log_sync listeners are rare, it's still cheaper than walking each
2399 * address space once.
2400 */
2401 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2402 if (listener->log_sync) {
2403 as = listener->address_space;
2404 view = address_space_get_flatview(as);
2405 FOR_EACH_FLAT_RANGE(fr, view) {
2406 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
2407 MemoryRegionSection mrs = section_from_flat_range(fr, view);
2408 listener->log_sync(listener, &mrs);
2409 }
2410 }
2411 flatview_unref(view);
2412 trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 0);
2413 } else if (listener->log_sync_global) {
2414 /*
2415 * Whether or not an MR is specified, all we can do here
2416 * is a global sync, because we are not capable of
2417 * syncing at a finer granularity.
2418 */
2419 listener->log_sync_global(listener, last_stage);
2420 trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 1);
2421 }
2422 }
2423 }
2424
2425 void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
2426 hwaddr len)
2427 {
2428 MemoryRegionSection mrs;
2429 MemoryListener *listener;
2430 AddressSpace *as;
2431 FlatView *view;
2432 FlatRange *fr;
2433 hwaddr sec_start, sec_end, sec_size;
2434
2435 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2436 if (!listener->log_clear) {
2437 continue;
2438 }
2439 as = listener->address_space;
2440 view = address_space_get_flatview(as);
2441 FOR_EACH_FLAT_RANGE(fr, view) {
2442 if (!fr->dirty_log_mask || fr->mr != mr) {
2443 /*
2444 * The clear-dirty-bitmap operation only applies to regions
2445 * whose dirty logging is enabled in the first place
2446 */
2447 continue;
2448 }
2449
2450 mrs = section_from_flat_range(fr, view);
2451
2452 sec_start = MAX(mrs.offset_within_region, start);
2453 sec_end = mrs.offset_within_region + int128_get64(mrs.size);
2454 sec_end = MIN(sec_end, start + len);
2455
2456 if (sec_start >= sec_end) {
2457 /*
2458 * If this memory region section has no intersection
2459 * with the requested range, skip.
2460 */
2461 continue;
2462 }
2463
2464 /* Valid case; shrink the section if needed */
2465 mrs.offset_within_address_space +=
2466 sec_start - mrs.offset_within_region;
2467 mrs.offset_within_region = sec_start;
2468 sec_size = sec_end - sec_start;
2469 mrs.size = int128_make64(sec_size);
2470 listener->log_clear(listener, &mrs);
2471 }
2472 flatview_unref(view);
2473 }
2474 }
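/*
 * Editor's worked example for the clamping above: if a flat-range section
 * covers region offsets [0x1000, 0x3000) and the caller asks to clear
 * [0x2000, 0x4000), then sec_start = MAX(0x1000, 0x2000) = 0x2000 and
 * sec_end = MIN(0x3000, 0x4000) = 0x3000, so the listener's log_clear sees
 * a shrunken section of size 0x1000 starting at region offset 0x2000.
 */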
2475
2476 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2477 hwaddr addr,
2478 hwaddr size,
2479 unsigned client)
2480 {
2481 DirtyBitmapSnapshot *snapshot;
2482 assert(mr->ram_block);
2483 memory_region_sync_dirty_bitmap(mr, false);
2484 snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
2485 memory_global_after_dirty_log_sync();
2486 return snapshot;
2487 }
2488
2489 bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
2490 hwaddr addr, hwaddr size)
2491 {
2492 assert(mr->ram_block);
2493 return cpu_physical_memory_snapshot_get_dirty(snap,
2494 memory_region_get_ram_addr(mr) + addr, size);
2495 }
2496
2497 void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2498 {
2499 if (mr->readonly != readonly) {
2500 memory_region_transaction_begin();
2501 mr->readonly = readonly;
2502 memory_region_update_pending |= mr->enabled;
2503 memory_region_transaction_commit();
2504 }
2505 }
2506
2507 void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
2508 {
2509 if (mr->nonvolatile != nonvolatile) {
2510 memory_region_transaction_begin();
2511 mr->nonvolatile = nonvolatile;
2512 memory_region_update_pending |= mr->enabled;
2513 memory_region_transaction_commit();
2514 }
2515 }
2516
2517 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
2518 {
2519 if (mr->romd_mode != romd_mode) {
2520 memory_region_transaction_begin();
2521 mr->romd_mode = romd_mode;
2522 memory_region_update_pending |= mr->enabled;
2523 memory_region_transaction_commit();
2524 }
2525 }
2526
2527 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2528 hwaddr size, unsigned client)
2529 {
2530 assert(mr->ram_block);
2531 cpu_physical_memory_test_and_clear_dirty(
2532 memory_region_get_ram_addr(mr) + addr, size, client);
2533 }
2534
2535 int memory_region_get_fd(MemoryRegion *mr)
2536 {
2537 RCU_READ_LOCK_GUARD();
2538 while (mr->alias) {
2539 mr = mr->alias;
2540 }
2541 return mr->ram_block->fd;
2542 }
2543
2544 void *memory_region_get_ram_ptr(MemoryRegion *mr)
2545 {
2546 uint64_t offset = 0;
2547
2548 RCU_READ_LOCK_GUARD();
2549 while (mr->alias) {
2550 offset += mr->alias_offset;
2551 mr = mr->alias;
2552 }
2553 assert(mr->ram_block);
2554 return qemu_map_ram_ptr(mr->ram_block, offset);
2555 }
2556
2557 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2558 {
2559 RAMBlock *block;
2560
2561 block = qemu_ram_block_from_host(ptr, false, offset);
2562 if (!block) {
2563 return NULL;
2564 }
2565
2566 return block->mr;
2567 }
2568
2569 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2570 {
2571 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2572 }
2573
2574 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2575 {
2576 assert(mr->ram_block);
2577
2578 qemu_ram_resize(mr->ram_block, newsize, errp);
2579 }
2580
2581 void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size)
2582 {
2583 if (mr->ram_block) {
2584 qemu_ram_msync(mr->ram_block, addr, size);
2585 }
2586 }
2587
2588 void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size)
2589 {
2590 /*
2591 * This might be extended, if needed, to cover
2592 * different types of memory regions.
2593 */
2594 if (mr->dirty_log_mask) {
2595 memory_region_msync(mr, addr, size);
2596 }
2597 }
2598
2599 /*
2600 * Call proper memory listeners about the change on the newly
2601 * added/removed CoalescedMemoryRange.
2602 */
2603 static void memory_region_update_coalesced_range(MemoryRegion *mr,
2604 CoalescedMemoryRange *cmr,
2605 bool add)
2606 {
2607 AddressSpace *as;
2608 FlatView *view;
2609 FlatRange *fr;
2610
2611 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2612 view = address_space_get_flatview(as);
2613 FOR_EACH_FLAT_RANGE(fr, view) {
2614 if (fr->mr == mr) {
2615 flat_range_coalesced_io_notify(fr, as, cmr, add);
2616 }
2617 }
2618 flatview_unref(view);
2619 }
2620 }
2621
2622 void memory_region_set_coalescing(MemoryRegion *mr)
2623 {
2624 memory_region_clear_coalescing(mr);
2625 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
2626 }
2627
2628 void memory_region_add_coalescing(MemoryRegion *mr,
2629 hwaddr offset,
2630 uint64_t size)
2631 {
2632 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
2633
2634 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
2635 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2636 memory_region_update_coalesced_range(mr, cmr, true);
2637 memory_region_set_flush_coalesced(mr);
2638 }
2639
2640 void memory_region_clear_coalescing(MemoryRegion *mr)
2641 {
2642 CoalescedMemoryRange *cmr;
2643
2644 if (QTAILQ_EMPTY(&mr->coalesced)) {
2645 return;
2646 }
2647
2648 qemu_flush_coalesced_mmio_buffer();
2649 mr->flush_coalesced_mmio = false;
2650
2651 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2652 cmr = QTAILQ_FIRST(&mr->coalesced);
2653 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
2654 memory_region_update_coalesced_range(mr, cmr, false);
2655 g_free(cmr);
2656 }
2657 }
2658
2659 void memory_region_set_flush_coalesced(MemoryRegion *mr)
2660 {
2661 mr->flush_coalesced_mmio = true;
2662 }
2663
2664 void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2665 {
2666 qemu_flush_coalesced_mmio_buffer();
2667 if (QTAILQ_EMPTY(&mr->coalesced)) {
2668 mr->flush_coalesced_mmio = false;
2669 }
2670 }
2671
2672 void memory_region_add_eventfd(MemoryRegion *mr,
2673 hwaddr addr,
2674 unsigned size,
2675 bool match_data,
2676 uint64_t data,
2677 EventNotifier *e)
2678 {
2679 MemoryRegionIoeventfd mrfd = {
2680 .addr.start = int128_make64(addr),
2681 .addr.size = int128_make64(size),
2682 .match_data = match_data,
2683 .data = data,
2684 .e = e,
2685 };
2686 unsigned i;
2687
2688 if (size) {
2689 adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
2690 }
2691 memory_region_transaction_begin();
2692 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2693 if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
2694 break;
2695 }
2696 }
2697 ++mr->ioeventfd_nb;
2698 mr->ioeventfds = g_realloc(mr->ioeventfds,
2699 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2700 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2701 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2702 mr->ioeventfds[i] = mrfd;
2703 ioeventfd_update_pending |= mr->enabled;
2704 memory_region_transaction_commit();
2705 }
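/*
 * Editor's usage sketch: a virtio-style device wiring a doorbell at offset
 * 0x10 of its MMIO region to an eventfd, so that a 2-byte write of value 0
 * is signalled without taking the full MMIO dispatch path.  The notifier,
 * region and offsets are illustrative.
 *
 *     EventNotifier notifier;
 *
 *     event_notifier_init(&notifier, 0);
 *     memory_region_add_eventfd(mmio_mr, 0x10, 2, true, 0, &notifier);
 *     ...
 *     memory_region_del_eventfd(mmio_mr, 0x10, 2, true, 0, &notifier);
 */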
2706
2707 void memory_region_del_eventfd(MemoryRegion *mr,
2708 hwaddr addr,
2709 unsigned size,
2710 bool match_data,
2711 uint64_t data,
2712 EventNotifier *e)
2713 {
2714 MemoryRegionIoeventfd mrfd = {
2715 .addr.start = int128_make64(addr),
2716 .addr.size = int128_make64(size),
2717 .match_data = match_data,
2718 .data = data,
2719 .e = e,
2720 };
2721 unsigned i;
2722
2723 if (size) {
2724 adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
2725 }
2726 memory_region_transaction_begin();
2727 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2728 if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
2729 break;
2730 }
2731 }
2732 assert(i != mr->ioeventfd_nb);
2733 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2734 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2735 --mr->ioeventfd_nb;
2736 mr->ioeventfds = g_realloc(mr->ioeventfds,
2737 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
2738 ioeventfd_update_pending |= mr->enabled;
2739 memory_region_transaction_commit();
2740 }
2741
2742 static void memory_region_update_container_subregions(MemoryRegion *subregion)
2743 {
2744 MemoryRegion *mr = subregion->container;
2745 MemoryRegion *other;
2746
2747 memory_region_transaction_begin();
2748
2749 memory_region_ref(subregion);
2750 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
2751 if (subregion->priority >= other->priority) {
2752 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2753 goto done;
2754 }
2755 }
2756 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2757 done:
2758 memory_region_update_pending |= mr->enabled && subregion->enabled;
2759 memory_region_transaction_commit();
2760 }
2761
2762 static void memory_region_add_subregion_common(MemoryRegion *mr,
2763 hwaddr offset,
2764 MemoryRegion *subregion)
2765 {
2766 MemoryRegion *alias;
2767
2768 assert(!subregion->container);
2769 subregion->container = mr;
2770 for (alias = subregion->alias; alias; alias = alias->alias) {
2771 alias->mapped_via_alias++;
2772 }
2773 subregion->addr = offset;
2774 memory_region_update_container_subregions(subregion);
2775 }
2776
2777 void memory_region_add_subregion(MemoryRegion *mr,
2778 hwaddr offset,
2779 MemoryRegion *subregion)
2780 {
2781 subregion->priority = 0;
2782 memory_region_add_subregion_common(mr, offset, subregion);
2783 }
2784
2785 void memory_region_add_subregion_overlap(MemoryRegion *mr,
2786 hwaddr offset,
2787 MemoryRegion *subregion,
2788 int priority)
2789 {
2790 subregion->priority = priority;
2791 memory_region_add_subregion_common(mr, offset, subregion);
2792 }
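/*
 * Editor's usage sketch of region composition: the priority given to
 * memory_region_add_subregion_overlap() decides which region wins where
 * subregions overlap, matching the insertion order maintained above.
 * The names and addresses are made up.
 *
 *     memory_region_add_subregion(system_memory, 0, machine_ram);
 *     memory_region_add_subregion_overlap(system_memory, 0xe0000,
 *                                         isa_bios_alias, 1);
 */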
2793
2794 void memory_region_del_subregion(MemoryRegion *mr,
2795 MemoryRegion *subregion)
2796 {
2797 MemoryRegion *alias;
2798
2799 memory_region_transaction_begin();
2800 assert(subregion->container == mr);
2801 subregion->container = NULL;
2802 for (alias = subregion->alias; alias; alias = alias->alias) {
2803 alias->mapped_via_alias--;
2804 assert(alias->mapped_via_alias >= 0);
2805 }
2806 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
2807 memory_region_unref(subregion);
2808 memory_region_update_pending |= mr->enabled && subregion->enabled;
2809 memory_region_transaction_commit();
2810 }
2811
2812 void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2813 {
2814 if (enabled == mr->enabled) {
2815 return;
2816 }
2817 memory_region_transaction_begin();
2818 mr->enabled = enabled;
2819 memory_region_update_pending = true;
2820 memory_region_transaction_commit();
2821 }
2822
2823 void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2824 {
2825 Int128 s = int128_make64(size);
2826
2827 if (size == UINT64_MAX) {
2828 s = int128_2_64();
2829 }
2830 if (int128_eq(s, mr->size)) {
2831 return;
2832 }
2833 memory_region_transaction_begin();
2834 mr->size = s;
2835 memory_region_update_pending = true;
2836 memory_region_transaction_commit();
2837 }
2838
2839 static void memory_region_readd_subregion(MemoryRegion *mr)
2840 {
2841 MemoryRegion *container = mr->container;
2842
2843 if (container) {
2844 memory_region_transaction_begin();
2845 memory_region_ref(mr);
2846 memory_region_del_subregion(container, mr);
2847 memory_region_add_subregion_common(container, mr->addr, mr);
2848 memory_region_unref(mr);
2849 memory_region_transaction_commit();
2850 }
2851 }
2852
2853 void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2854 {
2855 if (addr != mr->addr) {
2856 mr->addr = addr;
2857 memory_region_readd_subregion(mr);
2858 }
2859 }
2860
2861 void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
2862 {
2863 assert(mr->alias);
2864
2865 if (offset == mr->alias_offset) {
2866 return;
2867 }
2868
2869 memory_region_transaction_begin();
2870 mr->alias_offset = offset;
2871 memory_region_update_pending |= mr->enabled;
2872 memory_region_transaction_commit();
2873 }
2874
2875 void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable)
2876 {
2877 if (unmergeable == mr->unmergeable) {
2878 return;
2879 }
2880
2881 memory_region_transaction_begin();
2882 mr->unmergeable = unmergeable;
2883 memory_region_update_pending |= mr->enabled;
2884 memory_region_transaction_commit();
2885 }
2886
2887 uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2888 {
2889 return mr->align;
2890 }
2891
2892 static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2893 {
2894 const AddrRange *addr = addr_;
2895 const FlatRange *fr = fr_;
2896
2897 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2898 return -1;
2899 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2900 return 1;
2901 }
2902 return 0;
2903 }
2904
2905 static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
2906 {
2907 return bsearch(&addr, view->ranges, view->nr,
2908 sizeof(FlatRange), cmp_flatrange_addr);
2909 }
2910
2911 bool memory_region_is_mapped(MemoryRegion *mr)
2912 {
2913 return !!mr->container || mr->mapped_via_alias;
2914 }
2915
2916 /* Same as memory_region_find, but it does not add a reference to the
2917 * returned region. It must be called from an RCU critical section.
2918 */
2919 static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2920 hwaddr addr, uint64_t size)
2921 {
2922 MemoryRegionSection ret = { .mr = NULL };
2923 MemoryRegion *root;
2924 AddressSpace *as;
2925 AddrRange range;
2926 FlatView *view;
2927 FlatRange *fr;
2928
2929 addr += mr->addr;
2930 for (root = mr; root->container; ) {
2931 root = root->container;
2932 addr += root->addr;
2933 }
2934
2935 as = memory_region_to_address_space(root);
2936 if (!as) {
2937 return ret;
2938 }
2939 range = addrrange_make(int128_make64(addr), int128_make64(size));
2940
2941 view = address_space_to_flatview(as);
2942 fr = flatview_lookup(view, range);
2943 if (!fr) {
2944 return ret;
2945 }
2946
2947 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
2948 --fr;
2949 }
2950
2951 ret.mr = fr->mr;
2952 ret.fv = view;
2953 range = addrrange_intersection(range, fr->addr);
2954 ret.offset_within_region = fr->offset_in_region;
2955 ret.offset_within_region += int128_get64(int128_sub(range.start,
2956 fr->addr.start));
2957 ret.size = range.size;
2958 ret.offset_within_address_space = int128_get64(range.start);
2959 ret.readonly = fr->readonly;
2960 ret.nonvolatile = fr->nonvolatile;
2961 return ret;
2962 }
2963
2964 MemoryRegionSection memory_region_find(MemoryRegion *mr,
2965 hwaddr addr, uint64_t size)
2966 {
2967 MemoryRegionSection ret;
2968 RCU_READ_LOCK_GUARD();
2969 ret = memory_region_find_rcu(mr, addr, size);
2970 if (ret.mr) {
2971 memory_region_ref(ret.mr);
2972 }
2973 return ret;
2974 }
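/*
 * Editor's note: unlike memory_region_find_rcu(), memory_region_find()
 * takes a reference on the returned region, so callers are expected to
 * drop it when done.  The address below is an arbitrary example.
 *
 *     MemoryRegionSection sec =
 *         memory_region_find(get_system_memory(), 0xfee00000, 0x1000);
 *     if (sec.mr) {
 *         ... use sec.offset_within_region, sec.size ...
 *         memory_region_unref(sec.mr);
 *     }
 */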
2975
2976 MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s)
2977 {
2978 MemoryRegionSection *tmp = g_new(MemoryRegionSection, 1);
2979
2980 *tmp = *s;
2981 if (tmp->mr) {
2982 memory_region_ref(tmp->mr);
2983 }
2984 if (tmp->fv) {
2985 bool ret = flatview_ref(tmp->fv);
2986
2987 g_assert(ret);
2988 }
2989 return tmp;
2990 }
2991
2992 void memory_region_section_free_copy(MemoryRegionSection *s)
2993 {
2994 if (s->fv) {
2995 flatview_unref(s->fv);
2996 }
2997 if (s->mr) {
2998 memory_region_unref(s->mr);
2999 }
3000 g_free(s);
3001 }
3002
3003 bool memory_region_present(MemoryRegion *container, hwaddr addr)
3004 {
3005 MemoryRegion *mr;
3006
3007 RCU_READ_LOCK_GUARD();
3008 mr = memory_region_find_rcu(container, addr, 1).mr;
3009 return mr && mr != container;
3010 }
3011
3012 void memory_global_dirty_log_sync(bool last_stage)
3013 {
3014 memory_region_sync_dirty_bitmap(NULL, last_stage);
3015 }
3016
3017 void memory_global_after_dirty_log_sync(void)
3018 {
3019 MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
3020 }
3021
3022 /*
3023 * Dirty tracking stop flags that are postponed because the VM is stopped. Should
3024 * only be used within vmstate_change hook.
3025 */
3026 static unsigned int postponed_stop_flags;
3027 static VMChangeStateEntry *vmstate_change;
3028 static void memory_global_dirty_log_stop_postponed_run(void);
3029
3030 void memory_global_dirty_log_start(unsigned int flags)
3031 {
3032 unsigned int old_flags;
3033
3034 assert(flags && !(flags & (~GLOBAL_DIRTY_MASK)));
3035
3036 if (vmstate_change) {
3037 /* If there is postponed stop(), operate on it first */
3038 postponed_stop_flags &= ~flags;
3039 memory_global_dirty_log_stop_postponed_run();
3040 }
3041
3042 flags &= ~global_dirty_tracking;
3043 if (!flags) {
3044 return;
3045 }
3046
3047 old_flags = global_dirty_tracking;
3048 global_dirty_tracking |= flags;
3049 trace_global_dirty_changed(global_dirty_tracking);
3050
3051 if (!old_flags) {
3052 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
3053 memory_region_transaction_begin();
3054 memory_region_update_pending = true;
3055 memory_region_transaction_commit();
3056 }
3057 }
3058
3059 static void memory_global_dirty_log_do_stop(unsigned int flags)
3060 {
3061 assert(flags && !(flags & (~GLOBAL_DIRTY_MASK)));
3062 assert((global_dirty_tracking & flags) == flags);
3063 global_dirty_tracking &= ~flags;
3064
3065 trace_global_dirty_changed(global_dirty_tracking);
3066
3067 if (!global_dirty_tracking) {
3068 memory_region_transaction_begin();
3069 memory_region_update_pending = true;
3070 memory_region_transaction_commit();
3071 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
3072 }
3073 }
3074
3075 /*
3076 * Execute the postponed dirty log stop operations, if any, then reset
3077 * everything (including the flags and the vmstate change hook).
3078 */
3079 static void memory_global_dirty_log_stop_postponed_run(void)
3080 {
3081 /* This must be called with the vmstate handler registered */
3082 assert(vmstate_change);
3083
3084 /* Note: postponed_stop_flags can be cleared in log start routine */
3085 if (postponed_stop_flags) {
3086 memory_global_dirty_log_do_stop(postponed_stop_flags);
3087 postponed_stop_flags = 0;
3088 }
3089
3090 qemu_del_vm_change_state_handler(vmstate_change);
3091 vmstate_change = NULL;
3092 }
3093
3094 static void memory_vm_change_state_handler(void *opaque, bool running,
3095 RunState state)
3096 {
3097 if (running) {
3098 memory_global_dirty_log_stop_postponed_run();
3099 }
3100 }
3101
3102 void memory_global_dirty_log_stop(unsigned int flags)
3103 {
3104 if (!runstate_is_running()) {
3105 /* Postpone the dirty log stop, e.g., to when VM starts again */
3106 if (vmstate_change) {
3107 /* Batch with previous postponed flags */
3108 postponed_stop_flags |= flags;
3109 } else {
3110 postponed_stop_flags = flags;
3111 vmstate_change = qemu_add_vm_change_state_handler(
3112 memory_vm_change_state_handler, NULL);
3113 }
3114 return;
3115 }
3116
3117 memory_global_dirty_log_do_stop(flags);
3118 }
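/*
 * Editor's usage sketch: migration-style callers bracket a dirty-tracking
 * phase with one of the GLOBAL_DIRTY_* flag bits and sync before reading
 * the bitmap; as described above, the stop may be postponed if the VM is
 * not running.
 *
 *     memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
 *     ...
 *     memory_global_dirty_log_sync(false);
 *     ... consume the per-page dirty bits ...
 *     memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
 */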
3119
3120 static void listener_add_address_space(MemoryListener *listener,
3121 AddressSpace *as)
3122 {
3123 FlatView *view;
3124 FlatRange *fr;
3125
3126 if (listener->begin) {
3127 listener->begin(listener);
3128 }
3129 if (global_dirty_tracking) {
3130 if (listener->log_global_start) {
3131 listener->log_global_start(listener);
3132 }
3133 }
3134
3135 view = address_space_get_flatview(as);
3136 FOR_EACH_FLAT_RANGE(fr, view) {
3137 MemoryRegionSection section = section_from_flat_range(fr, view);
3138
3139 if (listener->region_add) {
3140 listener->region_add(listener, &section);
3141 }
3142 if (fr->dirty_log_mask && listener->log_start) {
3143 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
3144 }
3145 }
3146 if (listener->commit) {
3147 listener->commit(listener);
3148 }
3149 flatview_unref(view);
3150 }
3151
3152 static void listener_del_address_space(MemoryListener *listener,
3153 AddressSpace *as)
3154 {
3155 FlatView *view;
3156 FlatRange *fr;
3157
3158 if (listener->begin) {
3159 listener->begin(listener);
3160 }
3161 view = address_space_get_flatview(as);
3162 FOR_EACH_FLAT_RANGE(fr, view) {
3163 MemoryRegionSection section = section_from_flat_range(fr, view);
3164
3165 if (fr->dirty_log_mask && listener->log_stop) {
3166 listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
3167 }
3168 if (listener->region_del) {
3169 listener->region_del(listener, &section);
3170 }
3171 }
3172 if (listener->commit) {
3173 listener->commit(listener);
3174 }
3175 flatview_unref(view);
3176 }
3177
3178 void memory_listener_register(MemoryListener *listener, AddressSpace *as)
3179 {
3180 MemoryListener *other = NULL;
3181
3182 /* Only one of them can be defined for a listener */
3183 assert(!(listener->log_sync && listener->log_sync_global));
3184
3185 listener->address_space = as;
3186 if (QTAILQ_EMPTY(&memory_listeners)
3187 || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
3188 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
3189 } else {
3190 QTAILQ_FOREACH(other, &memory_listeners, link) {
3191 if (listener->priority < other->priority) {
3192 break;
3193 }
3194 }
3195 QTAILQ_INSERT_BEFORE(other, listener, link);
3196 }
3197
3198 if (QTAILQ_EMPTY(&as->listeners)
3199 || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
3200 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
3201 } else {
3202 QTAILQ_FOREACH(other, &as->listeners, link_as) {
3203 if (listener->priority < other->priority) {
3204 break;
3205 }
3206 }
3207 QTAILQ_INSERT_BEFORE(other, listener, link_as);
3208 }
3209
3210 listener_add_address_space(listener, as);
3211
3212 if (listener->eventfd_add || listener->eventfd_del) {
3213 as->ioeventfd_notifiers++;
3214 }
3215 }
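/*
 * Editor's usage sketch: a minimal listener that is told about every range
 * mapped into the system address space.  The callback and priority value
 * are hypothetical.
 *
 *     static MemoryListener my_listener = {
 *         .name = "my-listener",
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 *     ...
 *     memory_listener_unregister(&my_listener);
 */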
3216
3217 void memory_listener_unregister(MemoryListener *listener)
3218 {
3219 if (!listener->address_space) {
3220 return;
3221 }
3222
3223 if (listener->eventfd_add || listener->eventfd_del) {
3224 listener->address_space->ioeventfd_notifiers--;
3225 }
3226
3227 listener_del_address_space(listener, listener->address_space);
3228 QTAILQ_REMOVE(&memory_listeners, listener, link);
3229 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
3230 listener->address_space = NULL;
3231 }
3232
3233 void address_space_remove_listeners(AddressSpace *as)
3234 {
3235 while (!QTAILQ_EMPTY(&as->listeners)) {
3236 memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
3237 }
3238 }
3239
3240 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
3241 {
3242 memory_region_ref(root);
3243 as->root = root;
3244 as->current_map = NULL;
3245 as->ioeventfd_nb = 0;
3246 as->ioeventfds = NULL;
3247 QTAILQ_INIT(&as->listeners);
3248 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
3249 as->name = g_strdup(name ? name : "anonymous");
3250 address_space_update_topology(as);
3251 address_space_update_ioeventfds(as);
3252 }
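/*
 * Editor's usage sketch: a DMA-capable device typically wraps its
 * bus-master view of memory in an address space of its own; the name is
 * only used for debugging output.
 *
 *     AddressSpace dma_as;
 *
 *     address_space_init(&dma_as, get_system_memory(), "mydev-dma");
 *     ...
 *     address_space_destroy(&dma_as);
 */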
3253
3254 static void do_address_space_destroy(AddressSpace *as)
3255 {
3256 assert(QTAILQ_EMPTY(&as->listeners));
3257
3258 flatview_unref(as->current_map);
3259 g_free(as->name);
3260 g_free(as->ioeventfds);
3261 memory_region_unref(as->root);
3262 }
3263
3264 void address_space_destroy(AddressSpace *as)
3265 {
3266 MemoryRegion *root = as->root;
3267
3268 /* Flush out anything from MemoryListeners listening in on this */
3269 memory_region_transaction_begin();
3270 as->root = NULL;
3271 memory_region_transaction_commit();
3272 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
3273
3274 /* At this point, as->dispatch and as->current_map are dummy
3275 * entries that the guest should never use. Wait for the old
3276 * values to expire before freeing the data.
3277 */
3278 as->root = root;
3279 call_rcu(as, do_address_space_destroy, rcu);
3280 }
3281
3282 static const char *memory_region_type(MemoryRegion *mr)
3283 {
3284 if (mr->alias) {
3285 return memory_region_type(mr->alias);
3286 }
3287 if (memory_region_is_ram_device(mr)) {
3288 return "ramd";
3289 } else if (memory_region_is_romd(mr)) {
3290 return "romd";
3291 } else if (memory_region_is_rom(mr)) {
3292 return "rom";
3293 } else if (memory_region_is_ram(mr)) {
3294 return "ram";
3295 } else {
3296 return "i/o";
3297 }
3298 }
3299
3300 typedef struct MemoryRegionList MemoryRegionList;
3301
3302 struct MemoryRegionList {
3303 const MemoryRegion *mr;
3304 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
3305 };
3306
3307 typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;
3308
3309 #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
3310 int128_sub((size), int128_one())) : 0)
3311 #define MTREE_INDENT " "
3312
3313 static void mtree_expand_owner(const char *label, Object *obj)
3314 {
3315 DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);
3316
3317 qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
3318 if (dev && dev->id) {
3319 qemu_printf(" id=%s", dev->id);
3320 } else {
3321 char *canonical_path = object_get_canonical_path(obj);
3322 if (canonical_path) {
3323 qemu_printf(" path=%s", canonical_path);
3324 g_free(canonical_path);
3325 } else {
3326 qemu_printf(" type=%s", object_get_typename(obj));
3327 }
3328 }
3329 qemu_printf("}");
3330 }
3331
3332 static void mtree_print_mr_owner(const MemoryRegion *mr)
3333 {
3334 Object *owner = mr->owner;
3335 Object *parent = memory_region_owner((MemoryRegion *)mr);
3336
3337 if (!owner && !parent) {
3338 qemu_printf(" orphan");
3339 return;
3340 }
3341 if (owner) {
3342 mtree_expand_owner("owner", owner);
3343 }
3344 if (parent && parent != owner) {
3345 mtree_expand_owner("parent", parent);
3346 }
3347 }
3348
3349 static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
3350 hwaddr base,
3351 MemoryRegionListHead *alias_print_queue,
3352 bool owner, bool display_disabled)
3353 {
3354 MemoryRegionList *new_ml, *ml, *next_ml;
3355 MemoryRegionListHead submr_print_queue;
3356 const MemoryRegion *submr;
3357 unsigned int i;
3358 hwaddr cur_start, cur_end;
3359
3360 if (!mr) {
3361 return;
3362 }
3363
3364 cur_start = base + mr->addr;
3365 cur_end = cur_start + MR_SIZE(mr->size);
3366
3367 /*
3368 * Try to detect overflow of the memory region. This should never
3369 * happen normally; when it does, print a marker to warn the
3370 * user who is observing this output.
3371 */
3372 if (cur_start < base || cur_end < cur_start) {
3373 qemu_printf("[DETECTED OVERFLOW!] ");
3374 }
3375
3376 if (mr->alias) {
3377 bool found = false;
3378
3379 /* check if the alias is already in the queue */
3380 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
3381 if (ml->mr == mr->alias) {
3382 found = true;
3383 }
3384 }
3385
3386 if (!found) {
3387 ml = g_new(MemoryRegionList, 1);
3388 ml->mr = mr->alias;
3389 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
3390 }
3391 if (mr->enabled || display_disabled) {
3392 for (i = 0; i < level; i++) {
3393 qemu_printf(MTREE_INDENT);
3394 }
3395 qemu_printf(HWADDR_FMT_plx "-" HWADDR_FMT_plx
3396 " (prio %d, %s%s): alias %s @%s " HWADDR_FMT_plx
3397 "-" HWADDR_FMT_plx "%s",
3398 cur_start, cur_end,
3399 mr->priority,
3400 mr->nonvolatile ? "nv-" : "",
3401 memory_region_type((MemoryRegion *)mr),
3402 memory_region_name(mr),
3403 memory_region_name(mr->alias),
3404 mr->alias_offset,
3405 mr->alias_offset + MR_SIZE(mr->size),
3406 mr->enabled ? "" : " [disabled]");
3407 if (owner) {
3408 mtree_print_mr_owner(mr);
3409 }
3410 qemu_printf("\n");
3411 }
3412 } else {
3413 if (mr->enabled || display_disabled) {
3414 for (i = 0; i < level; i++) {
3415 qemu_printf(MTREE_INDENT);
3416 }
3417 qemu_printf(HWADDR_FMT_plx "-" HWADDR_FMT_plx
3418 " (prio %d, %s%s): %s%s",
3419 cur_start, cur_end,
3420 mr->priority,
3421 mr->nonvolatile ? "nv-" : "",
3422 memory_region_type((MemoryRegion *)mr),
3423 memory_region_name(mr),
3424 mr->enabled ? "" : " [disabled]");
3425 if (owner) {
3426 mtree_print_mr_owner(mr);
3427 }
3428 qemu_printf("\n");
3429 }
3430 }
3431
3432 QTAILQ_INIT(&submr_print_queue);
3433
3434 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
3435 new_ml = g_new(MemoryRegionList, 1);
3436 new_ml->mr = submr;
3437 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
3438 if (new_ml->mr->addr < ml->mr->addr ||
3439 (new_ml->mr->addr == ml->mr->addr &&
3440 new_ml->mr->priority > ml->mr->priority)) {
3441 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
3442 new_ml = NULL;
3443 break;
3444 }
3445 }
3446 if (new_ml) {
3447 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
3448 }
3449 }
3450
3451 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
3452 mtree_print_mr(ml->mr, level + 1, cur_start,
3453 alias_print_queue, owner, display_disabled);
3454 }
3455
3456 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
3457 g_free(ml);
3458 }
3459 }
3460
3461 struct FlatViewInfo {
3462 int counter;
3463 bool dispatch_tree;
3464 bool owner;
3465 AccelClass *ac;
3466 };
3467
3468 static void mtree_print_flatview(gpointer key, gpointer value,
3469 gpointer user_data)
3470 {
3471 FlatView *view = key;
3472 GArray *fv_address_spaces = value;
3473 struct FlatViewInfo *fvi = user_data;
3474 FlatRange *range = &view->ranges[0];
3475 MemoryRegion *mr;
3476 int n = view->nr;
3477 int i;
3478 AddressSpace *as;
3479
3480 qemu_printf("FlatView #%d\n", fvi->counter);
3481 ++fvi->counter;
3482
3483 for (i = 0; i < fv_address_spaces->len; ++i) {
3484 as = g_array_index(fv_address_spaces, AddressSpace*, i);
3485 qemu_printf(" AS \"%s\", root: %s",
3486 as->name, memory_region_name(as->root));
3487 if (as->root->alias) {
3488 qemu_printf(", alias %s", memory_region_name(as->root->alias));
3489 }
3490 qemu_printf("\n");
3491 }
3492
3493 qemu_printf(" Root memory region: %s\n",
3494 view->root ? memory_region_name(view->root) : "(none)");
3495
3496 if (n <= 0) {
3497 qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
3498 return;
3499 }
3500
3501 while (n--) {
3502 mr = range->mr;
3503 if (range->offset_in_region) {
3504 qemu_printf(MTREE_INDENT HWADDR_FMT_plx "-" HWADDR_FMT_plx
3505 " (prio %d, %s%s): %s @" HWADDR_FMT_plx,
3506 int128_get64(range->addr.start),
3507 int128_get64(range->addr.start)
3508 + MR_SIZE(range->addr.size),
3509 mr->priority,
3510 range->nonvolatile ? "nv-" : "",
3511 range->readonly ? "rom" : memory_region_type(mr),
3512 memory_region_name(mr),
3513 range->offset_in_region);
3514 } else {
3515 qemu_printf(MTREE_INDENT HWADDR_FMT_plx "-" HWADDR_FMT_plx
3516 " (prio %d, %s%s): %s",
3517 int128_get64(range->addr.start),
3518 int128_get64(range->addr.start)
3519 + MR_SIZE(range->addr.size),
3520 mr->priority,
3521 range->nonvolatile ? "nv-" : "",
3522 range->readonly ? "rom" : memory_region_type(mr),
3523 memory_region_name(mr));
3524 }
3525 if (fvi->owner) {
3526 mtree_print_mr_owner(mr);
3527 }
3528
3529 if (fvi->ac) {
3530 for (i = 0; i < fv_address_spaces->len; ++i) {
3531 as = g_array_index(fv_address_spaces, AddressSpace*, i);
3532 if (fvi->ac->has_memory(current_machine, as,
3533 int128_get64(range->addr.start),
3534 MR_SIZE(range->addr.size) + 1)) {
3535 qemu_printf(" %s", fvi->ac->name);
3536 }
3537 }
3538 }
3539 qemu_printf("\n");
3540 range++;
3541 }
3542
3543 #if !defined(CONFIG_USER_ONLY)
3544 if (fvi->dispatch_tree && view->root) {
3545 mtree_print_dispatch(view->dispatch, view->root);
3546 }
3547 #endif
3548
3549 qemu_printf("\n");
3550 }
3551
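/* GHashTable callback: drop the references taken while gathering FlatViews. */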
static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
                                         gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;

    g_array_unref(fv_address_spaces);
    flatview_unref(view);

    return true;
}

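/*
 * Flatview variant of the memory-tree dump: deduplicate the FlatViews of
 * all address spaces into a hash table, print each one once, then release
 * them.
 */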
static void mtree_info_flatview(bool dispatch_tree, bool owner)
{
    struct FlatViewInfo fvi = {
        .counter = 0,
        .dispatch_tree = dispatch_tree,
        .owner = owner,
    };
    AddressSpace *as;
    FlatView *view;
    GArray *fv_address_spaces;
    GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
    AccelClass *ac = ACCEL_GET_CLASS(current_accel());

    if (ac->has_memory) {
        fvi.ac = ac;
    }

    /* Gather all FVs in one table */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        view = address_space_get_flatview(as);

        fv_address_spaces = g_hash_table_lookup(views, view);
        if (!fv_address_spaces) {
            fv_address_spaces = g_array_new(false, false, sizeof(as));
            g_hash_table_insert(views, view, fv_address_spaces);
        }

        g_array_append_val(fv_address_spaces, as);
    }

    /* Print */
    g_hash_table_foreach(views, mtree_print_flatview, &fvi);

    /* Free */
    g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
    g_hash_table_unref(views);
}

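/* Per-walk state for printing address spaces grouped by their root region. */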
struct AddressSpaceInfo {
    MemoryRegionListHead *ml_head;
    bool owner;
    bool disabled;
};

/* Returns negative value if a < b; zero if a = b; positive value if a > b. */
static gint address_space_compare_name(gconstpointer a, gconstpointer b)
{
    const AddressSpace *as_a = a;
    const AddressSpace *as_b = b;

    return g_strcmp0(as_a->name, as_b->name);
}

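/* GSList callback: print the "address-space:" header for one address space. */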
static void mtree_print_as_name(gpointer data, gpointer user_data)
{
    AddressSpace *as = data;

    qemu_printf("address-space: %s\n", as->name);
}

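/*
 * GHashTable callback: print every address space sharing the same root
 * memory region (the key), followed by the region tree itself.
 */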
static void mtree_print_as(gpointer key, gpointer value, gpointer user_data)
{
    MemoryRegion *mr = key;
    GSList *as_same_root_mr_list = value;
    struct AddressSpaceInfo *asi = user_data;

    g_slist_foreach(as_same_root_mr_list, mtree_print_as_name, NULL);
    mtree_print_mr(mr, 1, 0, asi->ml_head, asi->owner, asi->disabled);
    qemu_printf("\n");
}

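/* GHashTable callback: free the per-root list of address spaces. */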
static gboolean mtree_info_as_free(gpointer key, gpointer value,
                                   gpointer user_data)
{
    GSList *as_same_root_mr_list = value;

    g_slist_free(as_same_root_mr_list);

    return true;
}

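/*
 * Default (non-flatview) memory-tree dump: group address spaces by root
 * memory region, print each group's tree, then print the aliased regions
 * queued up by mtree_print_mr() along the way.
 */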
static void mtree_info_as(bool dispatch_tree, bool owner, bool disabled)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;
    GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
    GSList *as_same_root_mr_list;
    struct AddressSpaceInfo asi = {
        .ml_head = &ml_head,
        .owner = owner,
        .disabled = disabled,
    };

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        /* Create hashtable, key=AS root MR, value = list of AS */
        as_same_root_mr_list = g_hash_table_lookup(views, as->root);
        as_same_root_mr_list = g_slist_insert_sorted(as_same_root_mr_list, as,
                                                     address_space_compare_name);
        g_hash_table_insert(views, as->root, as_same_root_mr_list);
    }

    /* print address spaces */
    g_hash_table_foreach(views, mtree_print_as, &asi);
    g_hash_table_foreach_remove(views, mtree_info_as_free, 0);
    g_hash_table_unref(views);

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(ml->mr, 1, 0, &ml_head, owner, disabled);
        qemu_printf("\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}

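/* Entry point used by the 'info mtree' monitor command. */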
void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled)
{
    if (flatview) {
        mtree_info_flatview(dispatch_tree, owner);
    } else {
        mtree_info_as(dispatch_tree, owner, disabled);
    }
}

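/*
 * The memory_region_init_ram/rom/rom_device wrappers below differ from
 * their _nomigrate variants only in that they also register the backing
 * RAM for migration under the owner device's name.
 */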
void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

/*
 * Support system builds with CONFIG_FUZZ using a weak symbol and a stub for
 * the fuzz_dma_read_cb callback
 */
#ifdef CONFIG_FUZZ
void __attribute__((weak)) fuzz_dma_read_cb(size_t addr,
                                            size_t len,
                                            MemoryRegion *mr)
{
}
#endif

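/*
 * QOM type registration for memory regions, IOMMU memory regions and the
 * RamDiscardManager interface.
 */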
static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .class_size         = sizeof(MemoryRegionClass),
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent             = TYPE_MEMORY_REGION,
    .name               = TYPE_IOMMU_MEMORY_REGION,
    .class_size         = sizeof(IOMMUMemoryRegionClass),
    .instance_size      = sizeof(IOMMUMemoryRegion),
    .instance_init      = iommu_memory_region_initfn,
    .abstract           = true,
};

static const TypeInfo ram_discard_manager_info = {
    .parent             = TYPE_INTERFACE,
    .name               = TYPE_RAM_DISCARD_MANAGER,
    .class_size         = sizeof(RamDiscardManagerClass),
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
    type_register_static(&ram_discard_manager_info);
}

type_init(memory_register_types)