1 /*
2 * Physical memory management
3 *
4 * Copyright 2011 Red Hat, Inc. and/or its affiliates
5 *
6 * Authors:
7 * Avi Kivity <avi@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 * Contributions after 2012-01-13 are licensed under the terms of the
13 * GNU GPL, version 2 or (at your option) any later version.
14 */
15
16 #include "qemu/osdep.h"
17 #include "qemu/log.h"
18 #include "qapi/error.h"
19 #include "system/memory.h"
20 #include "qapi/visitor.h"
21 #include "qemu/bitops.h"
22 #include "qemu/error-report.h"
23 #include "qemu/main-loop.h"
24 #include "qemu/qemu-print.h"
25 #include "qemu/target-info.h"
26 #include "qom/object.h"
27 #include "trace.h"
28 #include "system/physmem.h"
29 #include "system/ram_addr.h"
30 #include "system/kvm.h"
31 #include "system/runstate.h"
32 #include "system/tcg.h"
33 #include "qemu/accel.h"
34 #include "accel/accel-ops.h"
35 #include "hw/boards.h"
36 #include "migration/vmstate.h"
37 #include "system/address-spaces.h"
38
39 #include "memory-internal.h"
40
41 //#define DEBUG_UNASSIGNED
42
43 static unsigned memory_region_transaction_depth;
44 static bool memory_region_update_pending;
45 static bool ioeventfd_update_pending;
46 unsigned int global_dirty_tracking;
47
48 static QTAILQ_HEAD(, MemoryListener) memory_listeners
49 = QTAILQ_HEAD_INITIALIZER(memory_listeners);
50
51 static QTAILQ_HEAD(, AddressSpace) address_spaces
52 = QTAILQ_HEAD_INITIALIZER(address_spaces);
53
54 static GHashTable *flat_views;
55
56 typedef struct AddrRange AddrRange;
57
58 /*
59 * Note that signed integers are needed for negative offsetting in aliases
60 * (large MemoryRegion::alias_offset).
61 */
62 struct AddrRange {
63 Int128 start;
64 Int128 size;
65 };
66
67 static AddrRange addrrange_make(Int128 start, Int128 size)
68 {
69 return (AddrRange) { start, size };
70 }
71
72 static bool addrrange_equal(AddrRange r1, AddrRange r2)
73 {
74 return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
75 }
76
77 static Int128 addrrange_end(AddrRange r)
78 {
79 return int128_add(r.start, r.size);
80 }
81
82 static AddrRange addrrange_shift(AddrRange range, Int128 delta)
83 {
84 int128_addto(&range.start, delta);
85 return range;
86 }
87
88 static bool addrrange_contains(AddrRange range, Int128 addr)
89 {
90 return int128_ge(addr, range.start)
91 && int128_lt(addr, addrrange_end(range));
92 }
93
94 static bool addrrange_intersects(AddrRange r1, AddrRange r2)
95 {
96 return addrrange_contains(r1, r2.start)
97 || addrrange_contains(r2, r1.start);
98 }
99
100 static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
101 {
102 Int128 start = int128_max(r1.start, r2.start);
103 Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
104 return addrrange_make(start, int128_sub(end, start));
105 }
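/*
 * Example: with r1 = {0x1000, 0x1000} and r2 = {0x1800, 0x1000}
 * (start/size pairs), addrrange_intersects() is true and
 * addrrange_intersection() yields {0x1800, 0x800}.  Ranges are
 * half-open: the address returned by addrrange_end() is itself
 * outside the range.
 */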
106
107 enum ListenerDirection { Forward, Reverse };
108
109 #define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...) \
110 do { \
111 MemoryListener *_listener; \
112 \
113 switch (_direction) { \
114 case Forward: \
115 QTAILQ_FOREACH(_listener, &memory_listeners, link) { \
116 if (_listener->_callback) { \
117 _listener->_callback(_listener, ##_args); \
118 } \
119 } \
120 break; \
121 case Reverse: \
122 QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
123 if (_listener->_callback) { \
124 _listener->_callback(_listener, ##_args); \
125 } \
126 } \
127 break; \
128 default: \
129 abort(); \
130 } \
131 } while (0)
132
133 #define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
134 do { \
135 MemoryListener *_listener; \
136 \
137 switch (_direction) { \
138 case Forward: \
139 QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) { \
140 if (_listener->_callback) { \
141 _listener->_callback(_listener, _section, ##_args); \
142 } \
143 } \
144 break; \
145 case Reverse: \
146 QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
147 if (_listener->_callback) { \
148 _listener->_callback(_listener, _section, ##_args); \
149 } \
150 } \
151 break; \
152 default: \
153 abort(); \
154 } \
155 } while (0)
156
157 /* No need to ref/unref .mr, the FlatRange keeps it alive. */
158 #define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \
159 do { \
160 MemoryRegionSection mrs = section_from_flat_range(fr, \
161 address_space_to_flatview(as)); \
162 MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args); \
163 } while(0)
164
165 struct CoalescedMemoryRange {
166 AddrRange addr;
167 QTAILQ_ENTRY(CoalescedMemoryRange) link;
168 };
169
170 struct MemoryRegionIoeventfd {
171 AddrRange addr;
172 bool match_data;
173 uint64_t data;
174 EventNotifier *e;
175 };
176
177 static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
178 MemoryRegionIoeventfd *b)
179 {
180 if (int128_lt(a->addr.start, b->addr.start)) {
181 return true;
182 } else if (int128_gt(a->addr.start, b->addr.start)) {
183 return false;
184 } else if (int128_lt(a->addr.size, b->addr.size)) {
185 return true;
186 } else if (int128_gt(a->addr.size, b->addr.size)) {
187 return false;
188 } else if (a->match_data < b->match_data) {
189 return true;
190 } else if (a->match_data > b->match_data) {
191 return false;
192 } else if (a->match_data) {
193 if (a->data < b->data) {
194 return true;
195 } else if (a->data > b->data) {
196 return false;
197 }
198 }
199 if (a->e < b->e) {
200 return true;
201 } else if (a->e > b->e) {
202 return false;
203 }
204 return false;
205 }
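/*
 * This comparison defines a total order on ioeventfds (by start address,
 * then size, then match_data/data, and finally the notifier pointer).
 * It is used to keep ioeventfd arrays sorted so that old and new sets
 * can be walked pairwise, e.g. in address_space_add_del_ioeventfds()
 * below.
 */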
206
207 static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
208 MemoryRegionIoeventfd *b)
209 {
210 if (int128_eq(a->addr.start, b->addr.start) &&
211 (!int128_nz(a->addr.size) || !int128_nz(b->addr.size) ||
212 (int128_eq(a->addr.size, b->addr.size) &&
213 (a->match_data == b->match_data) &&
214 ((a->match_data && (a->data == b->data)) || !a->match_data) &&
215 (a->e == b->e))))
216 return true;
217
218 return false;
219 }
220
221 /* Range of memory in the global map. Addresses are absolute. */
222 struct FlatRange {
223 MemoryRegion *mr;
224 hwaddr offset_in_region;
225 AddrRange addr;
226 uint8_t dirty_log_mask;
227 bool romd_mode;
228 bool readonly;
229 bool nonvolatile;
230 bool unmergeable;
231 };
232
233 #define FOR_EACH_FLAT_RANGE(var, view) \
234 for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
235
236 static inline MemoryRegionSection
237 section_from_flat_range(FlatRange *fr, FlatView *fv)
238 {
239 return (MemoryRegionSection) {
240 .mr = fr->mr,
241 .fv = fv,
242 .offset_within_region = fr->offset_in_region,
243 .size = fr->addr.size,
244 .offset_within_address_space = int128_get64(fr->addr.start),
245 .readonly = fr->readonly,
246 .nonvolatile = fr->nonvolatile,
247 .unmergeable = fr->unmergeable,
248 };
249 }
250
251 static bool flatrange_equal(FlatRange *a, FlatRange *b)
252 {
253 return a->mr == b->mr
254 && addrrange_equal(a->addr, b->addr)
255 && a->offset_in_region == b->offset_in_region
256 && a->romd_mode == b->romd_mode
257 && a->readonly == b->readonly
258 && a->nonvolatile == b->nonvolatile
259 && a->unmergeable == b->unmergeable;
260 }
261
262 static FlatView *flatview_new(MemoryRegion *mr_root)
263 {
264 FlatView *view;
265
266 view = g_new0(FlatView, 1);
267 view->ref = 1;
268 view->root = mr_root;
269 memory_region_ref(mr_root);
270 trace_flatview_new(view, mr_root);
271
272 return view;
273 }
274
275 /* Insert a range into a given position. Caller is responsible for maintaining
276 * sorting order.
277 */
278 static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
279 {
280 if (view->nr == view->nr_allocated) {
281 view->nr_allocated = MAX(2 * view->nr, 10);
282 view->ranges = g_realloc(view->ranges,
283 view->nr_allocated * sizeof(*view->ranges));
284 }
285 memmove(view->ranges + pos + 1, view->ranges + pos,
286 (view->nr - pos) * sizeof(FlatRange));
287 view->ranges[pos] = *range;
288 memory_region_ref(range->mr);
289 ++view->nr;
290 }
291
292 static void flatview_destroy(FlatView *view)
293 {
294 int i;
295
296 trace_flatview_destroy(view, view->root);
297 if (view->dispatch) {
298 address_space_dispatch_free(view->dispatch);
299 }
300 for (i = 0; i < view->nr; i++) {
301 memory_region_unref(view->ranges[i].mr);
302 }
303 g_free(view->ranges);
304 memory_region_unref(view->root);
305 g_free(view);
306 }
307
308 static bool flatview_ref(FlatView *view)
309 {
310 return qatomic_fetch_inc_nonzero(&view->ref) > 0;
311 }
312
313 void flatview_unref(FlatView *view)
314 {
315 if (qatomic_fetch_dec(&view->ref) == 1) {
316 trace_flatview_destroy_rcu(view, view->root);
317 assert(view->root);
318 call_rcu(view, flatview_destroy, rcu);
319 }
320 }
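/*
 * FlatViews are reference counted and reclaimed through RCU: readers take
 * a reference under the RCU read lock (see address_space_get_flatview()),
 * and once the last reference is dropped the view is destroyed after a
 * grace period via call_rcu().
 */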
321
322 static bool can_merge(FlatRange *r1, FlatRange *r2)
323 {
324 return int128_eq(addrrange_end(r1->addr), r2->addr.start)
325 && r1->mr == r2->mr
326 && int128_eq(int128_add(int128_make64(r1->offset_in_region),
327 r1->addr.size),
328 int128_make64(r2->offset_in_region))
329 && r1->dirty_log_mask == r2->dirty_log_mask
330 && r1->romd_mode == r2->romd_mode
331 && r1->readonly == r2->readonly
332 && r1->nonvolatile == r2->nonvolatile
333 && !r1->unmergeable && !r2->unmergeable;
334 }
335
336 /* Attempt to simplify a view by merging adjacent ranges */
337 static void flatview_simplify(FlatView *view)
338 {
339 unsigned i, j, k;
340
341 i = 0;
342 while (i < view->nr) {
343 j = i + 1;
344 while (j < view->nr
345 && can_merge(&view->ranges[j-1], &view->ranges[j])) {
346 int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
347 ++j;
348 }
349 ++i;
350 for (k = i; k < j; k++) {
351 memory_region_unref(view->ranges[k].mr);
352 }
353 memmove(&view->ranges[i], &view->ranges[j],
354 (view->nr - j) * sizeof(view->ranges[j]));
355 view->nr -= j - i;
356 }
357 }
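/*
 * Example: two ranges covering [0x0, 0x1000) and [0x1000, 0x2000) of the
 * same MemoryRegion, with contiguous offset_in_region and identical
 * attributes, are merged into a single [0x0, 0x2000) range.
 */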
358
359 static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
360 {
361 if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
362 switch (op & MO_SIZE) {
363 case MO_8:
364 break;
365 case MO_16:
366 *data = bswap16(*data);
367 break;
368 case MO_32:
369 *data = bswap32(*data);
370 break;
371 case MO_64:
372 *data = bswap64(*data);
373 break;
374 default:
375 g_assert_not_reached();
376 }
377 }
378 }
379
380 static inline void memory_region_shift_read_access(uint64_t *value,
381 signed shift,
382 uint64_t mask,
383 uint64_t tmp)
384 {
385 if (shift >= 0) {
386 *value |= (tmp & mask) << shift;
387 } else {
388 *value |= (tmp & mask) >> -shift;
389 }
390 }
391
392 static inline uint64_t memory_region_shift_write_access(uint64_t *value,
393 signed shift,
394 uint64_t mask)
395 {
396 uint64_t tmp;
397
398 if (shift >= 0) {
399 tmp = (*value >> shift) & mask;
400 } else {
401 tmp = (*value << -shift) & mask;
402 }
403
404 return tmp;
405 }
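/*
 * A negative shift is used when the device access is wider than the
 * requested one (impl.min_access_size > size): for a big-endian device
 * the requested bytes then live in the high-order part of the value read
 * from the device and must be shifted down, and conversely for writes.
 */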
406
407 static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
408 {
409 MemoryRegion *root;
410 hwaddr abs_addr = offset;
411
412 abs_addr += mr->addr;
413 for (root = mr; root->container; ) {
414 root = root->container;
415 abs_addr += root->addr;
416 }
417
418 return abs_addr;
419 }
420
421 static int get_cpu_index(void)
422 {
423 if (current_cpu) {
424 return current_cpu->cpu_index;
425 }
426 return -1;
427 }
428
429 static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
430 hwaddr addr,
431 uint64_t *value,
432 unsigned size,
433 signed shift,
434 uint64_t mask,
435 MemTxAttrs attrs)
436 {
437 uint64_t tmp;
438
439 tmp = mr->ops->read(mr->opaque, addr, size);
440 if (mr->subpage) {
441 trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
442 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
443 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
444 trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
445 memory_region_name(mr));
446 }
447 memory_region_shift_read_access(value, shift, mask, tmp);
448 return MEMTX_OK;
449 }
450
451 static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
452 hwaddr addr,
453 uint64_t *value,
454 unsigned size,
455 signed shift,
456 uint64_t mask,
457 MemTxAttrs attrs)
458 {
459 uint64_t tmp = 0;
460 MemTxResult r;
461
462 r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
463 if (mr->subpage) {
464 trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
465 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
466 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
467 trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
468 memory_region_name(mr));
469 }
470 memory_region_shift_read_access(value, shift, mask, tmp);
471 return r;
472 }
473
474 static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
475 hwaddr addr,
476 uint64_t *value,
477 unsigned size,
478 signed shift,
479 uint64_t mask,
480 MemTxAttrs attrs)
481 {
482 uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
483
484 if (mr->subpage) {
485 trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
486 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
487 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
488 trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
489 memory_region_name(mr));
490 }
491 mr->ops->write(mr->opaque, addr, tmp, size);
492 return MEMTX_OK;
493 }
494
495 static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
496 hwaddr addr,
497 uint64_t *value,
498 unsigned size,
499 signed shift,
500 uint64_t mask,
501 MemTxAttrs attrs)
502 {
503 uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
504
505 if (mr->subpage) {
506 trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
507 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
508 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
509 trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
510 memory_region_name(mr));
511 }
512 return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
513 }
514
515 static MemTxResult access_with_adjusted_size_aligned(hwaddr addr,
516 uint64_t *value,
517 unsigned size,
518 unsigned access_size_min,
519 unsigned access_size_max,
520 MemTxResult (*access_fn)
521 (MemoryRegion *mr,
522 hwaddr addr,
523 uint64_t *value,
524 unsigned size,
525 signed shift,
526 uint64_t mask,
527 MemTxAttrs attrs),
528 MemoryRegion *mr,
529 MemTxAttrs attrs)
530 {
531 uint64_t access_mask;
532 unsigned access_size;
533 unsigned i;
534 MemTxResult r = MEMTX_OK;
535 bool reentrancy_guard_applied = false;
536
537 if (!access_size_min) {
538 access_size_min = 1;
539 }
540 if (!access_size_max) {
541 access_size_max = 4;
542 }
543
544 /* Do not allow more than one simultaneous access to a device's IO Regions */
545 if (mr->dev && !mr->disable_reentrancy_guard &&
546 !mr->ram_device && !mr->ram && !mr->rom_device && !mr->readonly) {
547 if (mr->dev->mem_reentrancy_guard.engaged_in_io) {
548 warn_report_once("Blocked re-entrant IO on MemoryRegion: "
549 "%s at addr: 0x%" HWADDR_PRIX,
550 memory_region_name(mr), addr);
551 return MEMTX_ACCESS_ERROR;
552 }
553 mr->dev->mem_reentrancy_guard.engaged_in_io = true;
554 reentrancy_guard_applied = true;
555 }
556
557 access_size = MAX(MIN(size, access_size_max), access_size_min);
558 access_mask = MAKE_64BIT_MASK(0, access_size * 8);
559 if (devend_big_endian(mr->ops->endianness)) {
560 for (i = 0; i < size; i += access_size) {
561 r |= access_fn(mr, addr + i, value, access_size,
562 (size - access_size - i) * 8, access_mask, attrs);
563 }
564 } else {
565 for (i = 0; i < size; i += access_size) {
566 r |= access_fn(mr, addr + i, value, access_size, i * 8,
567 access_mask, attrs);
568 }
569 }
570 if (mr->dev && reentrancy_guard_applied) {
571 mr->dev->mem_reentrancy_guard.engaged_in_io = false;
572 }
573 return r;
574 }
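/*
 * Example: an aligned 8-byte read from a device with
 * .impl.max_access_size = 4 is performed as two 4-byte reads; for a
 * little-endian device the chunk at addr is merged in at bit 0 and the
 * chunk at addr + 4 at bit 32, while a big-endian device gets the
 * opposite placement.
 */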
575
576 /* Assume power-of-two size */
577 #define align_down(addr, size) ((addr) & ~((size) - 1))
578 #define align_up(addr, size) \
579 ({ typeof(size) __size = size; \
580 align_down((addr) + (__size) - 1, (__size)); })
581
582 static MemTxResult access_with_adjusted_size_unaligned(hwaddr addr,
583 uint64_t *value,
584 unsigned size,
585 unsigned access_size_min,
586 unsigned access_size_max,
587 bool unaligned,
588 MemTxResult (*access)(MemoryRegion *mr,
589 hwaddr addr,
590 uint64_t *value,
591 unsigned size,
592 signed shift,
593 uint64_t mask,
594 MemTxAttrs attrs),
595 MemoryRegion *mr,
596 MemTxAttrs attrs)
597 {
598 uint64_t access_value = 0;
599 MemTxResult r = MEMTX_OK;
600 hwaddr access_addr[2];
601 uint64_t access_mask;
602 unsigned access_size;
603
604 if (unlikely(!access_size_min)) {
605 access_size_min = 1;
606 }
607 if (unlikely(!access_size_max)) {
608 access_size_max = 4;
609 }
610
611 access_size = MAX(MIN(size, access_size_max), access_size_min);
612 access_addr[0] = align_down(addr, access_size);
613 access_addr[1] = align_up(addr + size, access_size);
614
615 if (devend_big_endian(mr->ops->endianness)) {
616 hwaddr cur;
617
618 /* XXX: Big-endian path is untested... */
619
620 for (cur = access_addr[0]; cur < access_addr[1]; cur += access_size) {
621 uint64_t mask_bounds[2];
622
623 mask_bounds[0] = MAX(addr, cur) - cur;
624 mask_bounds[1] =
625 MIN(addr + size, align_up(cur + 1, access_size)) - cur;
626
627 access_mask = (-1ULL << mask_bounds[0] * 8) &
628 (-1ULL >> (64 - mask_bounds[1] * 8));
629
630 r |= access(mr, cur, &access_value, access_size,
631 (size - access_size - (MAX(addr, cur) - addr)),
632 access_mask, attrs);
633
634 /* XXX: Can't do this hack for writes */
635 access_value >>= mask_bounds[0] * 8;
636 }
637 } else {
638 hwaddr cur;
639
640 for (cur = access_addr[0]; cur < access_addr[1]; cur += access_size) {
641 uint64_t mask_bounds[2];
642
643 mask_bounds[0] = MAX(addr, cur) - cur;
644 mask_bounds[1] =
645 MIN(addr + size, align_up(cur + 1, access_size)) - cur;
646
647 access_mask = (-1ULL << mask_bounds[0] * 8) &
648 (-1ULL >> (64 - mask_bounds[1] * 8));
649
650 r |= access(mr, cur, &access_value, access_size,
651 (MAX(addr, cur) - addr), access_mask, attrs);
652
653 /* XXX: Can't do this hack for writes */
654 access_value >>= mask_bounds[0] * 8;
655 }
656 }
657
658 *value = access_value;
659
660 return r;
661 }
662
663 static inline MemTxResult access_with_adjusted_size(hwaddr addr,
664 uint64_t *value,
665 unsigned size,
666 unsigned access_size_min,
667 unsigned access_size_max,
668 bool unaligned,
669 MemTxResult (*access)(MemoryRegion *mr,
670 hwaddr addr,
671 uint64_t *value,
672 unsigned size,
673 signed shift,
674 uint64_t mask,
675 MemTxAttrs attrs),
676 MemoryRegion *mr,
677 MemTxAttrs attrs)
678 {
679 unsigned access_size;
680
681 if (!access_size_min) {
682 access_size_min = 1;
683 }
684 if (!access_size_max) {
685 access_size_max = 4;
686 }
687
688 access_size = MAX(MIN(size, access_size_max), access_size_min);
689
690 /* Handle unaligned accesses if the model only supports natural alignment */
691 if (unlikely((addr & (access_size - 1)) && !unaligned)) {
692 return access_with_adjusted_size_unaligned(addr, value, size,
693 access_size_min, access_size_max, unaligned, access, mr, attrs);
694 }
695
696 /*
697 * Otherwise, if the access is aligned or the model specifies it can handle
698 * unaligned accesses, use the 'aligned' handler
699 */
700 return access_with_adjusted_size_aligned(addr, value, size,
701 access_size_min, access_size_max, access, mr, attrs);
702 }
703
704 static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
705 {
706 AddressSpace *as;
707
708 while (mr->container) {
709 mr = mr->container;
710 }
711 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
712 if (mr == as->root) {
713 return as;
714 }
715 }
716 return NULL;
717 }
718
719 /* Render a memory region into the global view. Ranges in @view obscure
720 * ranges in @mr.
721 */
722 static void render_memory_region(FlatView *view,
723 MemoryRegion *mr,
724 Int128 base,
725 AddrRange clip,
726 bool readonly,
727 bool nonvolatile,
728 bool unmergeable)
729 {
730 MemoryRegion *subregion;
731 unsigned i;
732 hwaddr offset_in_region;
733 Int128 remain;
734 Int128 now;
735 FlatRange fr;
736 AddrRange tmp;
737
738 if (!mr->enabled) {
739 return;
740 }
741
742 int128_addto(&base, int128_make64(mr->addr));
743 readonly |= mr->readonly;
744 nonvolatile |= mr->nonvolatile;
745 unmergeable |= mr->unmergeable;
746
747 tmp = addrrange_make(base, mr->size);
748
749 if (!addrrange_intersects(tmp, clip)) {
750 return;
751 }
752
753 clip = addrrange_intersection(tmp, clip);
754
755 if (mr->alias) {
756 int128_subfrom(&base, int128_make64(mr->alias->addr));
757 int128_subfrom(&base, int128_make64(mr->alias_offset));
758 render_memory_region(view, mr->alias, base, clip,
759 readonly, nonvolatile, unmergeable);
760 return;
761 }
762
763 /* Render subregions in priority order. */
764 QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
765 render_memory_region(view, subregion, base, clip,
766 readonly, nonvolatile, unmergeable);
767 }
768
769 if (!mr->terminates) {
770 return;
771 }
772
773 offset_in_region = int128_get64(int128_sub(clip.start, base));
774 base = clip.start;
775 remain = clip.size;
776
777 fr.mr = mr;
778 fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
779 fr.romd_mode = mr->romd_mode;
780 fr.readonly = readonly;
781 fr.nonvolatile = nonvolatile;
782 fr.unmergeable = unmergeable;
783
784 /* Render the region itself into any gaps left by the current view. */
785 for (i = 0; i < view->nr && int128_nz(remain); ++i) {
786 if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
787 continue;
788 }
789 if (int128_lt(base, view->ranges[i].addr.start)) {
790 now = int128_min(remain,
791 int128_sub(view->ranges[i].addr.start, base));
792 fr.offset_in_region = offset_in_region;
793 fr.addr = addrrange_make(base, now);
794 flatview_insert(view, i, &fr);
795 ++i;
796 int128_addto(&base, now);
797 offset_in_region += int128_get64(now);
798 int128_subfrom(&remain, now);
799 }
800 now = int128_sub(int128_min(int128_add(base, remain),
801 addrrange_end(view->ranges[i].addr)),
802 base);
803 int128_addto(&base, now);
804 offset_in_region += int128_get64(now);
805 int128_subfrom(&remain, now);
806 }
807 if (int128_nz(remain)) {
808 fr.offset_in_region = offset_in_region;
809 fr.addr = addrrange_make(base, remain);
810 flatview_insert(view, i, &fr);
811 }
812 }
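/*
 * Example: a container holding RAM covering [0x0, 0x4000) and a
 * higher-priority 4KiB MMIO subregion at 0x1000 renders as three flat
 * ranges: RAM [0x0, 0x1000), MMIO [0x1000, 0x2000), RAM [0x2000, 0x4000).
 * Subregions rendered earlier (higher priority) obscure the ranges they
 * cover; later regions only fill the remaining gaps.
 */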
813
814 void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque)
815 {
816 FlatRange *fr;
817
818 assert(fv);
819 assert(cb);
820
821 FOR_EACH_FLAT_RANGE(fr, fv) {
822 if (cb(fr->addr.start, fr->addr.size, fr->mr,
823 fr->offset_in_region, opaque)) {
824 break;
825 }
826 }
827 }
828
829 static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
830 {
831 while (mr->enabled) {
832 if (mr->alias) {
833 if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
834 /* The alias is included in its entirety. Use it as
835 * the "real" root, so that we can share more FlatViews.
836 */
837 mr = mr->alias;
838 continue;
839 }
840 } else if (!mr->terminates) {
841 unsigned int found = 0;
842 MemoryRegion *child, *next = NULL;
843 QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
844 if (child->enabled) {
845 if (++found > 1) {
846 next = NULL;
847 break;
848 }
849 if (!child->addr && int128_ge(mr->size, child->size)) {
850 /* A child is included in its entirety. If it's the only
851 * enabled one, use it in the hope of finding an alias down the
852 * way. This will also let us share FlatViews.
853 */
854 next = child;
855 }
856 }
857 }
858 if (found == 0) {
859 return NULL;
860 }
861 if (next) {
862 mr = next;
863 continue;
864 }
865 }
866
867 return mr;
868 }
869
870 return NULL;
871 }
872
873 /* Render a memory topology into a list of disjoint absolute ranges. */
874 static FlatView *generate_memory_topology(MemoryRegion *mr)
875 {
876 int i;
877 FlatView *view;
878
879 view = flatview_new(mr);
880
881 if (mr) {
882 render_memory_region(view, mr, int128_zero(),
883 addrrange_make(int128_zero(), int128_2_64()),
884 false, false, false);
885 }
886 flatview_simplify(view);
887
888 view->dispatch = address_space_dispatch_new(view);
889 for (i = 0; i < view->nr; i++) {
890 MemoryRegionSection mrs =
891 section_from_flat_range(&view->ranges[i], view);
892 flatview_add_to_dispatch(view, &mrs);
893 }
894 address_space_dispatch_compact(view->dispatch);
895 g_hash_table_replace(flat_views, mr, view);
896
897 return view;
898 }
899
900 static void address_space_add_del_ioeventfds(AddressSpace *as,
901 MemoryRegionIoeventfd *fds_new,
902 unsigned fds_new_nb,
903 MemoryRegionIoeventfd *fds_old,
904 unsigned fds_old_nb)
905 {
906 unsigned iold, inew;
907 MemoryRegionIoeventfd *fd;
908 MemoryRegionSection section;
909
910 /* Generate a symmetric difference of the old and new fd sets, adding
911 * and deleting as necessary.
912 */
913
914 iold = inew = 0;
915 while (iold < fds_old_nb || inew < fds_new_nb) {
916 if (iold < fds_old_nb
917 && (inew == fds_new_nb
918 || memory_region_ioeventfd_before(&fds_old[iold],
919 &fds_new[inew]))) {
920 fd = &fds_old[iold];
921 section = (MemoryRegionSection) {
922 .fv = address_space_to_flatview(as),
923 .offset_within_address_space = int128_get64(fd->addr.start),
924 .size = fd->addr.size,
925 };
926 MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
927 fd->match_data, fd->data, fd->e);
928 ++iold;
929 } else if (inew < fds_new_nb
930 && (iold == fds_old_nb
931 || memory_region_ioeventfd_before(&fds_new[inew],
932 &fds_old[iold]))) {
933 fd = &fds_new[inew];
934 section = (MemoryRegionSection) {
935 .fv = address_space_to_flatview(as),
936 .offset_within_address_space = int128_get64(fd->addr.start),
937 .size = fd->addr.size,
938 };
939 MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
940 fd->match_data, fd->data, fd->e);
941 ++inew;
942 } else {
943 ++iold;
944 ++inew;
945 }
946 }
947 }
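/*
 * Example: with the old set {A, B} and the new set {B, C} (in sort
 * order), the walk above emits eventfd_del for A, leaves B untouched,
 * and emits eventfd_add for C.
 */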
948
949 FlatView *address_space_get_flatview(AddressSpace *as)
950 {
951 FlatView *view;
952
953 RCU_READ_LOCK_GUARD();
954 do {
955 view = address_space_to_flatview(as);
956 /* If somebody has replaced as->current_map concurrently,
957 * flatview_ref returns false.
958 */
959 } while (!flatview_ref(view));
960 return view;
961 }
962
963 static void address_space_update_ioeventfds(AddressSpace *as)
964 {
965 FlatView *view;
966 FlatRange *fr;
967 unsigned ioeventfd_nb = 0;
968 unsigned ioeventfd_max;
969 MemoryRegionIoeventfd *ioeventfds;
970 AddrRange tmp;
971 unsigned i;
972
973 if (!as->ioeventfd_notifiers) {
974 return;
975 }
976
977 /*
978 * It is likely that the number of ioeventfds hasn't changed much, so use
979 * the previous size as the starting value, with some headroom to avoid
980 * gratuitous reallocations.
981 */
982 ioeventfd_max = QEMU_ALIGN_UP(as->ioeventfd_nb, 4);
983 ioeventfds = g_new(MemoryRegionIoeventfd, ioeventfd_max);
984
985 view = address_space_get_flatview(as);
986 FOR_EACH_FLAT_RANGE(fr, view) {
987 for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
988 tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
989 int128_sub(fr->addr.start,
990 int128_make64(fr->offset_in_region)));
991 if (addrrange_intersects(fr->addr, tmp)) {
992 ++ioeventfd_nb;
993 if (ioeventfd_nb > ioeventfd_max) {
994 ioeventfd_max = MAX(ioeventfd_max * 2, 4);
995 ioeventfds = g_realloc(ioeventfds,
996 ioeventfd_max * sizeof(*ioeventfds));
997 }
998 ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
999 ioeventfds[ioeventfd_nb-1].addr = tmp;
1000 }
1001 }
1002 }
1003
1004 address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
1005 as->ioeventfds, as->ioeventfd_nb);
1006
1007 g_free(as->ioeventfds);
1008 as->ioeventfds = ioeventfds;
1009 as->ioeventfd_nb = ioeventfd_nb;
1010 flatview_unref(view);
1011 }
1012
1013 /*
1014 * Notify the memory listeners about the coalesced IO change events of
1015 * range `cmr'. Only the part that has intersection of the specified
1016 * FlatRange will be sent.
1017 */
1018 static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
1019 CoalescedMemoryRange *cmr, bool add)
1020 {
1021 AddrRange tmp;
1022
1023 tmp = addrrange_shift(cmr->addr,
1024 int128_sub(fr->addr.start,
1025 int128_make64(fr->offset_in_region)));
1026 if (!addrrange_intersects(tmp, fr->addr)) {
1027 return;
1028 }
1029 tmp = addrrange_intersection(tmp, fr->addr);
1030
1031 if (add) {
1032 MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
1033 int128_get64(tmp.start),
1034 int128_get64(tmp.size));
1035 } else {
1036 MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
1037 int128_get64(tmp.start),
1038 int128_get64(tmp.size));
1039 }
1040 }
1041
1042 static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
1043 {
1044 CoalescedMemoryRange *cmr;
1045
1046 QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
1047 flat_range_coalesced_io_notify(fr, as, cmr, false);
1048 }
1049 }
1050
1051 static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
1052 {
1053 MemoryRegion *mr = fr->mr;
1054 CoalescedMemoryRange *cmr;
1055
1056 if (QTAILQ_EMPTY(&mr->coalesced)) {
1057 return;
1058 }
1059
1060 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
1061 flat_range_coalesced_io_notify(fr, as, cmr, true);
1062 }
1063 }
1064
1065 static void
1066 flat_range_coalesced_io_notify_listener_add_del(FlatRange *fr,
1067 MemoryRegionSection *mrs,
1068 MemoryListener *listener,
1069 AddressSpace *as, bool add)
1070 {
1071 CoalescedMemoryRange *cmr;
1072 MemoryRegion *mr = fr->mr;
1073 AddrRange tmp;
1074
1075 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
1076 tmp = addrrange_shift(cmr->addr,
1077 int128_sub(fr->addr.start,
1078 int128_make64(fr->offset_in_region)));
1079
1080 if (!addrrange_intersects(tmp, fr->addr)) {
1081 return;
1082 }
1083 tmp = addrrange_intersection(tmp, fr->addr);
1084
1085 if (add && listener->coalesced_io_add) {
1086 listener->coalesced_io_add(listener, mrs,
1087 int128_get64(tmp.start),
1088 int128_get64(tmp.size));
1089 } else if (!add && listener->coalesced_io_del) {
1090 listener->coalesced_io_del(listener, mrs,
1091 int128_get64(tmp.start),
1092 int128_get64(tmp.size));
1093 }
1094 }
1095 }
1096
1097 static void address_space_update_topology_pass(AddressSpace *as,
1098 const FlatView *old_view,
1099 const FlatView *new_view,
1100 bool adding)
1101 {
1102 unsigned iold, inew;
1103 FlatRange *frold, *frnew;
1104
1105 /* Generate a symmetric difference of the old and new memory maps.
1106 * Kill ranges in the old map, and instantiate ranges in the new map.
1107 */
1108 iold = inew = 0;
1109 while (iold < old_view->nr || inew < new_view->nr) {
1110 if (iold < old_view->nr) {
1111 frold = &old_view->ranges[iold];
1112 } else {
1113 frold = NULL;
1114 }
1115 if (inew < new_view->nr) {
1116 frnew = &new_view->ranges[inew];
1117 } else {
1118 frnew = NULL;
1119 }
1120
1121 if (frold
1122 && (!frnew
1123 || int128_lt(frold->addr.start, frnew->addr.start)
1124 || (int128_eq(frold->addr.start, frnew->addr.start)
1125 && !flatrange_equal(frold, frnew)))) {
1126 /* In old but not in new, or in both but attributes changed. */
1127
1128 if (!adding) {
1129 flat_range_coalesced_io_del(frold, as);
1130 MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
1131 }
1132
1133 ++iold;
1134 } else if (frold && frnew && flatrange_equal(frold, frnew)) {
1135 /* In both and unchanged (except logging may have changed) */
1136
1137 if (adding) {
1138 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
1139 if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
1140 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
1141 frold->dirty_log_mask,
1142 frnew->dirty_log_mask);
1143 }
1144 if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
1145 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
1146 frold->dirty_log_mask,
1147 frnew->dirty_log_mask);
1148 }
1149 }
1150
1151 ++iold;
1152 ++inew;
1153 } else {
1154 /* In new */
1155
1156 if (adding) {
1157 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
1158 flat_range_coalesced_io_add(frnew, as);
1159 }
1160
1161 ++inew;
1162 }
1163 }
1164 }
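/*
 * The pass above is run twice per topology change: first with
 * adding == false to report ranges that disappear or change (region_del
 * plus coalesced-IO removal), then with adding == true to instantiate
 * new or changed ranges (region_add, log_start/log_stop).  Deletions are
 * therefore always reported before any overlapping additions.
 */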
1165
1166 static void flatviews_init(void)
1167 {
1168 static FlatView *empty_view;
1169
1170 if (flat_views) {
1171 return;
1172 }
1173
1174 flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
1175 (GDestroyNotify) flatview_unref);
1176 if (!empty_view) {
1177 empty_view = generate_memory_topology(NULL);
1178 /* We keep it alive forever in the global variable. */
1179 flatview_ref(empty_view);
1180 } else {
1181 g_hash_table_replace(flat_views, NULL, empty_view);
1182 flatview_ref(empty_view);
1183 }
1184 }
1185
1186 static void flatviews_reset(void)
1187 {
1188 AddressSpace *as;
1189
1190 if (flat_views) {
1191 g_hash_table_unref(flat_views);
1192 flat_views = NULL;
1193 }
1194 flatviews_init();
1195
1196 /* Render unique FVs */
1197 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1198 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1199
1200 if (g_hash_table_lookup(flat_views, physmr)) {
1201 continue;
1202 }
1203
1204 generate_memory_topology(physmr);
1205 }
1206 }
1207
1208 static void address_space_set_flatview(AddressSpace *as)
1209 {
1210 FlatView *old_view = address_space_to_flatview(as);
1211 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1212 FlatView *new_view = g_hash_table_lookup(flat_views, physmr);
1213
1214 assert(new_view);
1215
1216 if (old_view == new_view) {
1217 return;
1218 }
1219
1220 if (old_view) {
1221 flatview_ref(old_view);
1222 }
1223
1224 flatview_ref(new_view);
1225
1226 if (!QTAILQ_EMPTY(&as->listeners)) {
1227 FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;
1228
1229 if (!old_view2) {
1230 old_view2 = &tmpview;
1231 }
1232 address_space_update_topology_pass(as, old_view2, new_view, false);
1233 address_space_update_topology_pass(as, old_view2, new_view, true);
1234 }
1235
1236 /* Writes are protected by the BQL. */
1237 qatomic_rcu_set(&as->current_map, new_view);
1238 if (old_view) {
1239 flatview_unref(old_view);
1240 }
1241
1242 /* Note that all the old MemoryRegions are still alive up to this
1243 * point. This relieves most MemoryListeners from the need to
1244 * ref/unref the MemoryRegions they get---unless they use them
1245 * outside the iothread mutex, in which case precise reference
1246 * counting is necessary.
1247 */
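/*
 * This second unref drops the temporary reference taken at the top of
 * this function; the unref above released the reference that was held
 * through as->current_map.
 */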
1248 if (old_view) {
1249 flatview_unref(old_view);
1250 }
1251 }
1252
1253 static void address_space_update_topology(AddressSpace *as)
1254 {
1255 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1256
1257 flatviews_init();
1258 if (!g_hash_table_lookup(flat_views, physmr)) {
1259 generate_memory_topology(physmr);
1260 }
1261 address_space_set_flatview(as);
1262 }
1263
1264 void memory_region_transaction_begin(void)
1265 {
1266 qemu_flush_coalesced_mmio_buffer();
1267 ++memory_region_transaction_depth;
1268 }
1269
1270 void memory_region_transaction_commit(void)
1271 {
1272 AddressSpace *as;
1273
1274 assert(memory_region_transaction_depth);
1275 assert(bql_locked());
1276
1277 --memory_region_transaction_depth;
1278 if (!memory_region_transaction_depth) {
1279 if (memory_region_update_pending) {
1280 flatviews_reset();
1281
1282 MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
1283
1284 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1285 address_space_set_flatview(as);
1286 address_space_update_ioeventfds(as);
1287 }
1288 memory_region_update_pending = false;
1289 ioeventfd_update_pending = false;
1290 MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
1291 } else if (ioeventfd_update_pending) {
1292 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1293 address_space_update_ioeventfds(as);
1294 }
1295 ioeventfd_update_pending = false;
1296 }
1297 }
1298 }
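/*
 * Typical usage batches several layout changes so that flat views and
 * listeners are only updated once, roughly (names illustrative):
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(old_mr, false);
 *     memory_region_add_subregion(container, 0x1000, new_mr);
 *     memory_region_transaction_commit();
 *
 * Transactions nest; only the outermost commit triggers the update.
 */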
1299
1300 static void memory_region_destructor_none(MemoryRegion *mr)
1301 {
1302 }
1303
1304 static void memory_region_destructor_ram(MemoryRegion *mr)
1305 {
1306 qemu_ram_free(mr->ram_block);
1307 }
1308
1309 static bool memory_region_need_escape(char c)
1310 {
1311 return c == '/' || c == '[' || c == '\\' || c == ']';
1312 }
1313
1314 static char *memory_region_escape_name(const char *name)
1315 {
1316 const char *p;
1317 char *escaped, *q;
1318 uint8_t c;
1319 size_t bytes = 0;
1320
1321 for (p = name; *p; p++) {
1322 bytes += memory_region_need_escape(*p) ? 4 : 1;
1323 }
1324 if (bytes == p - name) {
1325 return g_memdup(name, bytes + 1);
1326 }
1327
1328 escaped = g_malloc(bytes + 1);
1329 for (p = name, q = escaped; *p; p++) {
1330 c = *p;
1331 if (unlikely(memory_region_need_escape(c))) {
1332 *q++ = '\\';
1333 *q++ = 'x';
1334 *q++ = "0123456789abcdef"[c >> 4];
1335 c = "0123456789abcdef"[c & 15];
1336 }
1337 *q++ = c;
1338 }
1339 *q = 0;
1340 return escaped;
1341 }
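/*
 * Example: "pci/mem" becomes "pci\x2fmem", so that the result can be used
 * as a QOM child property name without clashing with path syntax.
 */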
1342
1343 static void memory_region_do_init(MemoryRegion *mr,
1344 Object *owner,
1345 const char *name,
1346 uint64_t size)
1347 {
1348 mr->size = int128_make64(size);
1349 if (size == UINT64_MAX) {
1350 mr->size = int128_2_64();
1351 }
1352 mr->name = g_strdup(name);
1353 mr->owner = owner;
1354 mr->dev = (DeviceState *) object_dynamic_cast(mr->owner, TYPE_DEVICE);
1355 mr->ram_block = NULL;
1356
1357 if (name) {
1358 char *escaped_name = memory_region_escape_name(name);
1359 char *name_array = g_strdup_printf("%s[*]", escaped_name);
1360
1361 if (!owner) {
1362 owner = machine_get_container("unattached");
1363 }
1364
1365 object_property_add_child(owner, name_array, OBJECT(mr));
1366 object_unref(OBJECT(mr));
1367 g_free(name_array);
1368 g_free(escaped_name);
1369 }
1370 }
1371
1372 void memory_region_init(MemoryRegion *mr,
1373 Object *owner,
1374 const char *name,
1375 uint64_t size)
1376 {
1377 object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
1378 memory_region_do_init(mr, owner, name, size);
1379 }
1380
1381 static void memory_region_get_container(Object *obj, Visitor *v,
1382 const char *name, void *opaque,
1383 Error **errp)
1384 {
1385 MemoryRegion *mr = MEMORY_REGION(obj);
1386 char *path = (char *)"";
1387
1388 if (mr->container) {
1389 path = object_get_canonical_path(OBJECT(mr->container));
1390 }
1391 visit_type_str(v, name, &path, errp);
1392 if (mr->container) {
1393 g_free(path);
1394 }
1395 }
1396
1397 static Object *memory_region_resolve_container(Object *obj, void *opaque,
1398 const char *part)
1399 {
1400 MemoryRegion *mr = MEMORY_REGION(obj);
1401
1402 return OBJECT(mr->container);
1403 }
1404
1405 static void memory_region_get_priority(Object *obj, Visitor *v,
1406 const char *name, void *opaque,
1407 Error **errp)
1408 {
1409 MemoryRegion *mr = MEMORY_REGION(obj);
1410 int32_t value = mr->priority;
1411
1412 visit_type_int32(v, name, &value, errp);
1413 }
1414
1415 static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
1416 void *opaque, Error **errp)
1417 {
1418 MemoryRegion *mr = MEMORY_REGION(obj);
1419 uint64_t value = memory_region_size(mr);
1420
1421 visit_type_uint64(v, name, &value, errp);
1422 }
1423
1424 static void memory_region_initfn(Object *obj)
1425 {
1426 MemoryRegion *mr = MEMORY_REGION(obj);
1427 ObjectProperty *op;
1428
1429 mr->ops = &unassigned_mem_ops;
1430 mr->enabled = true;
1431 mr->romd_mode = true;
1432 mr->destructor = memory_region_destructor_none;
1433 QTAILQ_INIT(&mr->subregions);
1434 QTAILQ_INIT(&mr->coalesced);
1435
1436 op = object_property_add(OBJECT(mr), "container",
1437 "link<" TYPE_MEMORY_REGION ">",
1438 memory_region_get_container,
1439 NULL, /* memory_region_set_container */
1440 NULL, NULL);
1441 op->resolve = memory_region_resolve_container;
1442
1443 object_property_add_uint64_ptr(OBJECT(mr), "addr",
1444 &mr->addr, OBJ_PROP_FLAG_READ);
1445 object_property_add(OBJECT(mr), "priority", "uint32",
1446 memory_region_get_priority,
1447 NULL, /* memory_region_set_priority */
1448 NULL, NULL);
1449 object_property_add(OBJECT(mr), "size", "uint64",
1450 memory_region_get_size,
1451 NULL, /* memory_region_set_size, */
1452 NULL, NULL);
1453 }
1454
1455 static void iommu_memory_region_initfn(Object *obj)
1456 {
1457 MemoryRegion *mr = MEMORY_REGION(obj);
1458
1459 mr->is_iommu = true;
1460 }
1461
1462 static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
1463 unsigned size)
1464 {
1465 #ifdef DEBUG_UNASSIGNED
1466 printf("Unassigned mem read " HWADDR_FMT_plx "\n", addr);
1467 #endif
1468 return 0;
1469 }
1470
1471 static void unassigned_mem_write(void *opaque, hwaddr addr,
1472 uint64_t val, unsigned size)
1473 {
1474 #ifdef DEBUG_UNASSIGNED
1475 printf("Unassigned mem write " HWADDR_FMT_plx " = 0x%"PRIx64"\n", addr, val);
1476 #endif
1477 }
1478
1479 static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
1480 unsigned size, bool is_write,
1481 MemTxAttrs attrs)
1482 {
1483 return false;
1484 }
1485
1486 const MemoryRegionOps unassigned_mem_ops = {
1487 .valid.accepts = unassigned_mem_accepts,
1488 .endianness = DEVICE_NATIVE_ENDIAN,
1489 };
1490
1491 static uint64_t memory_region_ram_device_read(void *opaque,
1492 hwaddr addr, unsigned size)
1493 {
1494 MemoryRegion *mr = opaque;
1495 uint64_t data = ldn_he_p(mr->ram_block->host + addr, size);
1496
1497 trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
1498
1499 return data;
1500 }
1501
1502 static void memory_region_ram_device_write(void *opaque, hwaddr addr,
1503 uint64_t data, unsigned size)
1504 {
1505 MemoryRegion *mr = opaque;
1506
1507 trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
1508
1509 stn_he_p(mr->ram_block->host + addr, size, data);
1510 }
1511
1512 static const MemoryRegionOps ram_device_mem_ops = {
1513 .read = memory_region_ram_device_read,
1514 .write = memory_region_ram_device_write,
1515 .endianness = HOST_BIG_ENDIAN ? DEVICE_BIG_ENDIAN : DEVICE_LITTLE_ENDIAN,
1516 .valid = {
1517 .min_access_size = 1,
1518 .max_access_size = 8,
1519 .unaligned = true,
1520 },
1521 .impl = {
1522 .min_access_size = 1,
1523 .max_access_size = 8,
1524 .unaligned = true,
1525 },
1526 };
1527
1528 bool memory_region_access_valid(MemoryRegion *mr,
1529 hwaddr addr,
1530 unsigned size,
1531 bool is_write,
1532 MemTxAttrs attrs)
1533 {
1534 if (mr->ops->valid.accepts
1535 && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
1536 qemu_log_mask(LOG_INVALID_MEM, "Invalid %s at addr 0x%" HWADDR_PRIX
1537 ", size %u, region '%s', reason: rejected\n",
1538 is_write ? "write" : "read",
1539 addr, size, memory_region_name(mr));
1540 return false;
1541 }
1542
1543 if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
1544 qemu_log_mask(LOG_INVALID_MEM, "Invalid %s at addr 0x%" HWADDR_PRIX
1545 ", size %u, region '%s', reason: unaligned\n",
1546 is_write ? "write" : "read",
1547 addr, size, memory_region_name(mr));
1548 return false;
1549 }
1550
1551 /* Treat zero as compatibility all valid */
1552 if (!mr->ops->valid.max_access_size) {
1553 return true;
1554 }
1555
1556 if (size > mr->ops->valid.max_access_size
1557 || size < mr->ops->valid.min_access_size) {
1558 qemu_log_mask(LOG_INVALID_MEM, "Invalid %s at addr 0x%" HWADDR_PRIX
1559 ", size %u, region '%s', reason: invalid size "
1560 "(min:%u max:%u)\n",
1561 is_write ? "write" : "read",
1562 addr, size, memory_region_name(mr),
1563 mr->ops->valid.min_access_size,
1564 mr->ops->valid.max_access_size);
1565 return false;
1566 }
1567 return true;
1568 }
1569
1570 static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1571 hwaddr addr,
1572 uint64_t *pval,
1573 unsigned size,
1574 MemTxAttrs attrs)
1575 {
1576 *pval = 0;
1577
1578 if (mr->ops->read) {
1579 return access_with_adjusted_size(addr, pval, size,
1580 mr->ops->impl.min_access_size,
1581 mr->ops->impl.max_access_size,
1582 mr->ops->impl.unaligned,
1583 memory_region_read_accessor,
1584 mr, attrs);
1585 } else {
1586 return access_with_adjusted_size(addr, pval, size,
1587 mr->ops->impl.min_access_size,
1588 mr->ops->impl.max_access_size,
1589 mr->ops->impl.unaligned,
1590 memory_region_read_with_attrs_accessor,
1591 mr, attrs);
1592 }
1593 }
1594
1595 MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1596 hwaddr addr,
1597 uint64_t *pval,
1598 MemOp op,
1599 MemTxAttrs attrs)
1600 {
1601 unsigned size = memop_size(op);
1602 MemTxResult r;
1603
1604 if (mr->alias) {
1605 return memory_region_dispatch_read(mr->alias,
1606 mr->alias_offset + addr,
1607 pval, op, attrs);
1608 }
1609 if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
1610 *pval = unassigned_mem_read(mr, addr, size);
1611 return MEMTX_DECODE_ERROR;
1612 }
1613
1614 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
1615 adjust_endianness(mr, pval, op);
1616 return r;
1617 }
1618
1619 /* Return true if an eventfd was signalled */
1620 static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1621 hwaddr addr,
1622 uint64_t data,
1623 unsigned size,
1624 MemTxAttrs attrs)
1625 {
1626 MemoryRegionIoeventfd ioeventfd = {
1627 .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1628 .data = data,
1629 };
1630 unsigned i;
1631
1632 for (i = 0; i < mr->ioeventfd_nb; i++) {
1633 ioeventfd.match_data = mr->ioeventfds[i].match_data;
1634 ioeventfd.e = mr->ioeventfds[i].e;
1635
1636 if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
1637 event_notifier_set(ioeventfd.e);
1638 return true;
1639 }
1640 }
1641
1642 return false;
1643 }
1644
1645 MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1646 hwaddr addr,
1647 uint64_t data,
1648 MemOp op,
1649 MemTxAttrs attrs)
1650 {
1651 unsigned size = memop_size(op);
1652
1653 if (mr->alias) {
1654 return memory_region_dispatch_write(mr->alias,
1655 mr->alias_offset + addr,
1656 data, op, attrs);
1657 }
1658 if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
1659 unassigned_mem_write(mr, addr, data, size);
1660 return MEMTX_DECODE_ERROR;
1661 }
1662
1663 adjust_endianness(mr, &data, op);
1664
1665 /*
1666 * FIXME: it's not clear why under KVM the write would be processed
1667 * directly, instead of going through eventfd. This probably should
1668 * test "tcg_enabled() || qtest_enabled()", or should just go away.
1669 */
1670 if (!kvm_enabled() &&
1671 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1672 return MEMTX_OK;
1673 }
1674
1675 if (mr->ops->write) {
1676 return access_with_adjusted_size_aligned(addr, &data, size,
1677 mr->ops->impl.min_access_size,
1678 mr->ops->impl.max_access_size,
1679 memory_region_write_accessor, mr,
1680 attrs);
1681 } else {
1682 return
1683 access_with_adjusted_size_aligned(addr, &data, size,
1684 mr->ops->impl.min_access_size,
1685 mr->ops->impl.max_access_size,
1686 memory_region_write_with_attrs_accessor,
1687 mr, attrs);
1688 }
1689 }
1690
1691 void memory_region_init_io(MemoryRegion *mr,
1692 Object *owner,
1693 const MemoryRegionOps *ops,
1694 void *opaque,
1695 const char *name,
1696 uint64_t size)
1697 {
1698 memory_region_init(mr, owner, name, size);
1699 mr->ops = ops ? ops : &unassigned_mem_ops;
1700 mr->opaque = opaque;
1701 mr->terminates = true;
1702 }
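/*
 * A rough usage sketch (names are illustrative):
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_LITTLE_ENDIAN,
 *         .impl.min_access_size = 4,
 *         .impl.max_access_size = 4,
 *     };
 *
 *     memory_region_init_io(&s->iomem, OBJECT(dev), &mydev_ops, s,
 *                           "mydev-mmio", 0x1000);
 *
 * The region still needs to be mapped, e.g. with
 * memory_region_add_subregion() on a container region.
 */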
1703
1704 bool memory_region_init_ram_nomigrate(MemoryRegion *mr,
1705 Object *owner,
1706 const char *name,
1707 uint64_t size,
1708 Error **errp)
1709 {
1710 return memory_region_init_ram_flags_nomigrate(mr, owner, name,
1711 size, 0, errp);
1712 }
1713
1714 bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
1715 Object *owner,
1716 const char *name,
1717 uint64_t size,
1718 uint32_t ram_flags,
1719 Error **errp)
1720 {
1721 Error *err = NULL;
1722 memory_region_init(mr, owner, name, size);
1723 mr->ram = true;
1724 mr->terminates = true;
1725 mr->destructor = memory_region_destructor_ram;
1726 mr->ram_block = qemu_ram_alloc(size, ram_flags, mr, &err);
1727 if (err) {
1728 mr->size = int128_zero();
1729 object_unparent(OBJECT(mr));
1730 error_propagate(errp, err);
1731 return false;
1732 }
1733 return true;
1734 }
1735
1736 bool memory_region_init_resizeable_ram(MemoryRegion *mr,
1737 Object *owner,
1738 const char *name,
1739 uint64_t size,
1740 uint64_t max_size,
1741 void (*resized)(const char*,
1742 uint64_t length,
1743 void *host),
1744 Error **errp)
1745 {
1746 Error *err = NULL;
1747 memory_region_init(mr, owner, name, size);
1748 mr->ram = true;
1749 mr->terminates = true;
1750 mr->destructor = memory_region_destructor_ram;
1751 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
1752 mr, &err);
1753 if (err) {
1754 mr->size = int128_zero();
1755 object_unparent(OBJECT(mr));
1756 error_propagate(errp, err);
1757 return false;
1758 }
1759 return true;
1760 }
1761
1762 #if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN)
1763 bool memory_region_init_ram_from_file(MemoryRegion *mr,
1764 Object *owner,
1765 const char *name,
1766 uint64_t size,
1767 uint64_t align,
1768 uint32_t ram_flags,
1769 const char *path,
1770 ram_addr_t offset,
1771 Error **errp)
1772 {
1773 Error *err = NULL;
1774 memory_region_init(mr, owner, name, size);
1775 mr->ram = true;
1776 mr->readonly = !!(ram_flags & RAM_READONLY);
1777 mr->terminates = true;
1778 mr->destructor = memory_region_destructor_ram;
1779 mr->align = align;
1780 mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path,
1781 offset, &err);
1782 if (err) {
1783 mr->size = int128_zero();
1784 object_unparent(OBJECT(mr));
1785 error_propagate(errp, err);
1786 return false;
1787 }
1788 return true;
1789 }
1790
1791 bool memory_region_init_ram_from_fd(MemoryRegion *mr,
1792 Object *owner,
1793 const char *name,
1794 uint64_t size,
1795 uint32_t ram_flags,
1796 int fd,
1797 ram_addr_t offset,
1798 Error **errp)
1799 {
1800 Error *err = NULL;
1801 memory_region_init(mr, owner, name, size);
1802 mr->ram = true;
1803 mr->readonly = !!(ram_flags & RAM_READONLY);
1804 mr->terminates = true;
1805 mr->destructor = memory_region_destructor_ram;
1806 mr->ram_block = qemu_ram_alloc_from_fd(size, size, NULL, mr, ram_flags, fd,
1807 offset, false, &err);
1808 if (err) {
1809 mr->size = int128_zero();
1810 object_unparent(OBJECT(mr));
1811 error_propagate(errp, err);
1812 return false;
1813 }
1814 return true;
1815 }
1816 #endif
1817
1818 void memory_region_init_ram_ptr(MemoryRegion *mr,
1819 Object *owner,
1820 const char *name,
1821 uint64_t size,
1822 void *ptr)
1823 {
1824 memory_region_init(mr, owner, name, size);
1825 mr->ram = true;
1826 mr->terminates = true;
1827 mr->destructor = memory_region_destructor_ram;
1828
1829 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1830 assert(ptr != NULL);
1831 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_abort);
1832 }
1833
1834 void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1835 Object *owner,
1836 const char *name,
1837 uint64_t size,
1838 void *ptr)
1839 {
1840 memory_region_init(mr, owner, name, size);
1841 mr->ram = true;
1842 mr->terminates = true;
1843 mr->ram_device = true;
1844 mr->ops = &ram_device_mem_ops;
1845 mr->opaque = mr;
1846 mr->destructor = memory_region_destructor_ram;
1847
1848 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1849 assert(ptr != NULL);
1850 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_abort);
1851 }
1852
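/*
 * Illustrative usage (not part of this file; names are hypothetical): an
 * alias exposes a window into another region, e.g. re-mapping the first
 * 1 MiB of a RAM region at a second guest-physical address:
 *
 *     memory_region_init_alias(&ram_alias, owner, "ram-alias", ram, 0, 0x100000);
 *     memory_region_add_subregion(system_memory, 0x80000000, &ram_alias);
 */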
1853 void memory_region_init_alias(MemoryRegion *mr,
1854 Object *owner,
1855 const char *name,
1856 MemoryRegion *orig,
1857 hwaddr offset,
1858 uint64_t size)
1859 {
1860 memory_region_init(mr, owner, name, size);
1861 mr->alias = orig;
1862 mr->alias_offset = offset;
1863 }
1864
1865 bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
1866 Object *owner,
1867 const char *name,
1868 uint64_t size,
1869 Error **errp)
1870 {
1871 if (!memory_region_init_ram_flags_nomigrate(mr, owner, name,
1872 size, 0, errp)) {
1873 return false;
1874 }
1875 mr->readonly = true;
1876
1877 return true;
1878 }
1879
1880 bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1881 Object *owner,
1882 const MemoryRegionOps *ops,
1883 void *opaque,
1884 const char *name,
1885 uint64_t size,
1886 Error **errp)
1887 {
1888 Error *err = NULL;
1889 assert(ops);
1890 memory_region_init(mr, owner, name, size);
1891 mr->ops = ops;
1892 mr->opaque = opaque;
1893 mr->terminates = true;
1894 mr->rom_device = true;
1895 mr->destructor = memory_region_destructor_ram;
1896 mr->ram_block = qemu_ram_alloc(size, 0, mr, &err);
1897 if (err) {
1898 mr->size = int128_zero();
1899 object_unparent(OBJECT(mr));
1900 error_propagate(errp, err);
1901 return false;
1902 }
1903 return true;
1904 }
1905
1906 void memory_region_init_iommu(void *_iommu_mr,
1907 size_t instance_size,
1908 const char *mrtypename,
1909 Object *owner,
1910 const char *name,
1911 uint64_t size)
1912 {
1913 struct IOMMUMemoryRegion *iommu_mr;
1914 struct MemoryRegion *mr;
1915
1916 object_initialize(_iommu_mr, instance_size, mrtypename);
1917 mr = MEMORY_REGION(_iommu_mr);
1918 memory_region_do_init(mr, owner, name, size);
1919 iommu_mr = IOMMU_MEMORY_REGION(mr);
1920 mr->terminates = true; /* then re-forwards */
1921 QLIST_INIT(&iommu_mr->iommu_notify);
1922 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
1923 }
1924
1925 static void memory_region_finalize(Object *obj)
1926 {
1927 MemoryRegion *mr = MEMORY_REGION(obj);
1928
1929 /*
1930 * Each memory region (that can be freed) must have an owner, and it
1931 * always has the same lifecycle as its owner. This means that when we
1932 * reach here, the memory region's owner's refcount is zero.
1933 *
1934 * Here it is possible that the MR has:
1935 *
1936 * (1) mr->container set, which means this MR is a subregion of a
1937 * container MR. In this case it must share the same owner as the
1938 * container (otherwise the container should have kept a refcount
1939 * of this MR's owner).
1940 *
1941 * (2) mr->subregions non-empty, which means this MR is a container of
1942 * one or more other MRs (which might have the same owner as this
1943 * MR, or a different owner).
1944 *
1945 * We know the MR, or any MR that is attached to this one as either
1946 * container or children, is not visible in any address space, because
1947 * otherwise the address space should have taken at least one refcount
1948 * of this MR's owner. So we can blindly clear mr->enabled.
1949 *
1950 * memory_region_set_enabled instead could trigger a transaction and
1951 * cause an infinite loop.
1952 */
1953 mr->enabled = false;
1954 memory_region_transaction_begin();
1955 if (mr->container) {
1956 /* Must share the owner; see above comments */
1957 assert(mr->container->owner == mr->owner);
1958 memory_region_del_subregion(mr->container, mr);
1959 }
1960 while (!QTAILQ_EMPTY(&mr->subregions)) {
1961 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1962 memory_region_del_subregion(mr, subregion);
1963 }
1964 memory_region_transaction_commit();
1965
1966 mr->destructor(mr);
1967 memory_region_clear_coalescing(mr);
1968 g_free((char *)mr->name);
1969 g_free(mr->ioeventfds);
1970 }
1971
1972 Object *memory_region_owner(MemoryRegion *mr)
1973 {
1974 Object *obj = OBJECT(mr);
1975 return obj->parent;
1976 }
1977
1978 void memory_region_ref(MemoryRegion *mr)
1979 {
1980 /* MMIO callbacks most likely will access data that belongs
1981 * to the owner, hence the need to ref/unref the owner whenever
1982 * the memory region is in use.
1983 *
1984 * The memory region is a child of its owner. As long as the
1985 * owner doesn't call unparent itself on the memory region,
1986 * ref-ing the owner will also keep the memory region alive.
1987 * Memory regions without an owner are supposed to never go away;
1988 * we do not ref/unref them because doing so would measurably slow down DMA.
1989 */
1990 if (mr && mr->owner) {
1991 object_ref(mr->owner);
1992 }
1993 }
1994
1995 void memory_region_unref(MemoryRegion *mr)
1996 {
1997 if (mr && mr->owner) {
1998 object_unref(mr->owner);
1999 }
2000 }
2001
2002 uint64_t memory_region_size(MemoryRegion *mr)
2003 {
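/* int128_2_64() represents 2^64, which cannot be expressed in 64 bits. */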
2004 if (int128_eq(mr->size, int128_2_64())) {
2005 return UINT64_MAX;
2006 }
2007 return int128_get64(mr->size);
2008 }
2009
2010 const char *memory_region_name(const MemoryRegion *mr)
2011 {
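/* Lazily cache the QOM canonical path component as the region's name. */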
2012 if (!mr->name) {
2013 ((MemoryRegion *)mr)->name =
2014 g_strdup(object_get_canonical_path_component(OBJECT(mr)));
2015 }
2016 return mr->name;
2017 }
2018
2019 bool memory_region_is_ram_device(MemoryRegion *mr)
2020 {
2021 return mr->ram_device;
2022 }
2023
2024 bool memory_region_is_protected(MemoryRegion *mr)
2025 {
2026 return mr->ram && (mr->ram_block->flags & RAM_PROTECTED);
2027 }
2028
2029 bool memory_region_has_guest_memfd(MemoryRegion *mr)
2030 {
2031 return mr->ram_block && mr->ram_block->guest_memfd >= 0;
2032 }
2033
2034 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
2035 {
2036 uint8_t mask = mr->dirty_log_mask;
2037 RAMBlock *rb = mr->ram_block;
2038
2039 if (global_dirty_tracking && ((rb && qemu_ram_is_migratable(rb)) ||
2040 memory_region_is_iommu(mr))) {
2041 mask |= (1 << DIRTY_MEMORY_MIGRATION);
2042 }
2043
2044 if (tcg_enabled() && rb) {
2045 /* TCG only cares about dirty memory logging for RAM, not IOMMU. */
2046 mask |= (1 << DIRTY_MEMORY_CODE);
2047 }
2048 return mask;
2049 }
2050
2051 bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
2052 {
2053 return memory_region_get_dirty_log_mask(mr) & (1 << client);
2054 }
2055
2056 static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr,
2057 Error **errp)
2058 {
2059 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
2060 IOMMUNotifier *iommu_notifier;
2061 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2062 int ret = 0;
2063
2064 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
2065 flags |= iommu_notifier->notifier_flags;
2066 }
2067
2068 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
2069 ret = imrc->notify_flag_changed(iommu_mr,
2070 iommu_mr->iommu_notify_flags,
2071 flags, errp);
2072 }
2073
2074 if (!ret) {
2075 iommu_mr->iommu_notify_flags = flags;
2076 }
2077 return ret;
2078 }
2079
2080 int memory_region_register_iommu_notifier(MemoryRegion *mr,
2081 IOMMUNotifier *n, Error **errp)
2082 {
2083 IOMMUMemoryRegion *iommu_mr;
2084 int ret;
2085
2086 if (mr->alias) {
2087 return memory_region_register_iommu_notifier(mr->alias, n, errp);
2088 }
2089
2090 /* We need to register for at least one bitfield */
2091 iommu_mr = IOMMU_MEMORY_REGION(mr);
2092 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
2093 assert(n->start <= n->end);
2094 assert(n->iommu_idx >= 0 &&
2095 n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
2096
2097 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
2098 ret = memory_region_update_iommu_notify_flags(iommu_mr, errp);
2099 if (ret) {
2100 QLIST_REMOVE(n, node);
2101 }
2102 return ret;
2103 }
2104
2105 uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
2106 {
2107 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2108
2109 if (imrc->get_min_page_size) {
2110 return imrc->get_min_page_size(iommu_mr);
2111 }
2112 return TARGET_PAGE_SIZE;
2113 }
2114
2115 void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
2116 {
2117 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
2118 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2119 hwaddr addr, granularity;
2120 IOMMUTLBEntry iotlb;
2121
2122 /* If the IOMMU has its own replay callback, override */
2123 if (imrc->replay) {
2124 imrc->replay(iommu_mr, n);
2125 return;
2126 }
2127
2128 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
2129
2130 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
2131 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
2132 if (iotlb.perm != IOMMU_NONE) {
2133 n->notify(n, &iotlb);
2134 }
2135
2136 /* If (2^64 - MR size) < granularity, it's possible to get an
2137 * infinite loop here. This check catches such a wraparound. */
2138 if ((addr + granularity) < addr) {
2139 break;
2140 }
2141 }
2142 }
2143
2144 void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
2145 IOMMUNotifier *n)
2146 {
2147 IOMMUMemoryRegion *iommu_mr;
2148
2149 if (mr->alias) {
2150 memory_region_unregister_iommu_notifier(mr->alias, n);
2151 return;
2152 }
2153 QLIST_REMOVE(n, node);
2154 iommu_mr = IOMMU_MEMORY_REGION(mr);
2155 memory_region_update_iommu_notify_flags(iommu_mr, NULL);
2156 }
2157
2158 void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
2159 const IOMMUTLBEvent *event)
2160 {
2161 const IOMMUTLBEntry *entry = &event->entry;
2162 hwaddr entry_end = entry->iova + entry->addr_mask;
2163 IOMMUTLBEntry tmp = *entry;
2164
2165 if (event->type == IOMMU_NOTIFIER_UNMAP) {
2166 assert(entry->perm == IOMMU_NONE);
2167 }
2168
2169 /*
2170 * Skip the notification if it does not overlap with the
2171 * registered range.
2172 */
2173 if (notifier->start > entry_end || notifier->end < entry->iova) {
2174 return;
2175 }
2176
2177 /* Crop (iova, addr_mask) to range */
2178 tmp.iova = MAX(tmp.iova, notifier->start);
2179 tmp.addr_mask = MIN(entry_end, notifier->end) - tmp.iova;
2180
2181 if (event->type & notifier->notifier_flags) {
2182 notifier->notify(notifier, &tmp);
2183 }
2184 }
2185
2186 void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier)
2187 {
2188 IOMMUTLBEvent event;
2189
2190 event.type = IOMMU_NOTIFIER_UNMAP;
2191 event.entry.target_as = &address_space_memory;
2192 event.entry.iova = notifier->start;
2193 event.entry.perm = IOMMU_NONE;
2194 event.entry.addr_mask = notifier->end - notifier->start;
2195
2196 memory_region_notify_iommu_one(notifier, &event);
2197 }
2198
2199 void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
2200 int iommu_idx,
2201 const IOMMUTLBEvent event)
2202 {
2203 IOMMUNotifier *iommu_notifier;
2204
2205 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
2206
2207 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
2208 if (iommu_notifier->iommu_idx == iommu_idx) {
2209 memory_region_notify_iommu_one(iommu_notifier, &event);
2210 }
2211 }
2212 }
2213
2214 int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
2215 enum IOMMUMemoryRegionAttr attr,
2216 void *data)
2217 {
2218 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2219
2220 if (!imrc->get_attr) {
2221 return -EINVAL;
2222 }
2223
2224 return imrc->get_attr(iommu_mr, attr, data);
2225 }
2226
2227 int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
2228 MemTxAttrs attrs)
2229 {
2230 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2231
2232 if (!imrc->attrs_to_index) {
2233 return 0;
2234 }
2235
2236 return imrc->attrs_to_index(iommu_mr, attrs);
2237 }
2238
2239 int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
2240 {
2241 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2242
2243 if (!imrc->num_indexes) {
2244 return 1;
2245 }
2246
2247 return imrc->num_indexes(iommu_mr);
2248 }
2249
2250 RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr)
2251 {
2252 if (!memory_region_is_ram(mr)) {
2253 return NULL;
2254 }
2255 return mr->rdm;
2256 }
2257
2258 int memory_region_set_ram_discard_manager(MemoryRegion *mr,
2259 RamDiscardManager *rdm)
2260 {
2261 g_assert(memory_region_is_ram(mr));
2262 if (mr->rdm && rdm) {
2263 return -EBUSY;
2264 }
2265
2266 mr->rdm = rdm;
2267 return 0;
2268 }
2269
2270 uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
2271 const MemoryRegion *mr)
2272 {
2273 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2274
2275 g_assert(rdmc->get_min_granularity);
2276 return rdmc->get_min_granularity(rdm, mr);
2277 }
2278
2279 bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
2280 const MemoryRegionSection *section)
2281 {
2282 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2283
2284 g_assert(rdmc->is_populated);
2285 return rdmc->is_populated(rdm, section);
2286 }
2287
2288 int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
2289 MemoryRegionSection *section,
2290 ReplayRamDiscardState replay_fn,
2291 void *opaque)
2292 {
2293 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2294
2295 g_assert(rdmc->replay_populated);
2296 return rdmc->replay_populated(rdm, section, replay_fn, opaque);
2297 }
2298
2299 int ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
2300 MemoryRegionSection *section,
2301 ReplayRamDiscardState replay_fn,
2302 void *opaque)
2303 {
2304 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2305
2306 g_assert(rdmc->replay_discarded);
2307 return rdmc->replay_discarded(rdm, section, replay_fn, opaque);
2308 }
2309
2310 void ram_discard_manager_register_listener(RamDiscardManager *rdm,
2311 RamDiscardListener *rdl,
2312 MemoryRegionSection *section)
2313 {
2314 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2315
2316 g_assert(rdmc->register_listener);
2317 rdmc->register_listener(rdm, rdl, section);
2318 }
2319
2320 void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
2321 RamDiscardListener *rdl)
2322 {
2323 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2324
2325 g_assert(rdmc->unregister_listener);
2326 rdmc->unregister_listener(rdm, rdl);
2327 }
2328
2329 /* Called with rcu_read_lock held. */
2330 MemoryRegion *memory_translate_iotlb(IOMMUTLBEntry *iotlb, hwaddr *xlat_p,
2331 Error **errp)
2332 {
2333 MemoryRegion *mr;
2334 hwaddr xlat;
2335 hwaddr len = iotlb->addr_mask + 1;
2336 bool writable = iotlb->perm & IOMMU_WO;
2337
2338 /*
2339 * The IOMMU TLB entry we have just covers translation through
2340 * this IOMMU to its immediate target. We need to translate
2341 * it the rest of the way through to memory.
2342 */
2343 mr = address_space_translate(&address_space_memory, iotlb->translated_addr,
2344 &xlat, &len, writable, MEMTXATTRS_UNSPECIFIED);
2345 if (!memory_region_is_ram(mr)) {
2346 error_setg(errp, "iommu map to non memory area %" HWADDR_PRIx "", xlat);
2347 return NULL;
2348 } else if (memory_region_has_ram_discard_manager(mr)) {
2349 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
2350 MemoryRegionSection tmp = {
2351 .mr = mr,
2352 .offset_within_region = xlat,
2353 .size = int128_make64(len),
2354 };
2355 /*
2356 * Malicious VMs can map memory into the IOMMU, which is expected
2357 * to remain discarded. vfio will pin all pages, populating memory.
2358 * Disallow that. vmstate priorities make sure any RamDiscardManager
2359 * was already restored before IOMMUs are restored.
2360 */
2361 if (!ram_discard_manager_is_populated(rdm, &tmp)) {
2362 error_setg(errp, "iommu map to discarded memory (e.g., unplugged"
2363 " via virtio-mem): %" HWADDR_PRIx "",
2364 iotlb->translated_addr);
2365 return NULL;
2366 }
2367 }
2368
2369 /*
2370 * Translation truncates length to the IOMMU page size,
2371 * check that it did not truncate too much.
2372 */
2373 if (len & iotlb->addr_mask) {
2374 error_setg(errp, "iommu has granularity incompatible with target AS");
2375 return NULL;
2376 }
2377
2378 *xlat_p = xlat;
2379 return mr;
2380 }
2381
2382 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
2383 {
2384 uint8_t mask = 1 << client;
2385 uint8_t old_logging;
2386
2387 assert(client == DIRTY_MEMORY_VGA);
2388 old_logging = mr->vga_logging_count;
2389 mr->vga_logging_count += log ? 1 : -1;
2390 if (!!old_logging == !!mr->vga_logging_count) {
2391 return;
2392 }
2393
2394 memory_region_transaction_begin();
2395 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
2396 memory_region_update_pending |= mr->enabled;
2397 memory_region_transaction_commit();
2398 }
2399
2400 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2401 hwaddr size)
2402 {
2403 assert(mr->ram_block);
2404 physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
2405 size,
2406 memory_region_get_dirty_log_mask(mr));
2407 }
2408
2409 /*
2410 * If memory region `mr' is NULL, do global sync. Otherwise, sync
2411 * dirty bitmap for the specified memory region.
2412 */
2413 static void memory_region_sync_dirty_bitmap(MemoryRegion *mr, bool last_stage)
2414 {
2415 MemoryListener *listener;
2416 AddressSpace *as;
2417 FlatView *view;
2418 FlatRange *fr;
2419
2420 /* If the same address space has multiple log_sync listeners, we
2421 * visit that address space's FlatView multiple times. But because
2422 * log_sync listeners are rare, it's still cheaper than walking each
2423 * address space once.
2424 */
2425 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2426 if (listener->log_sync) {
2427 as = listener->address_space;
2428 view = address_space_get_flatview(as);
2429 FOR_EACH_FLAT_RANGE(fr, view) {
2430 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
2431 MemoryRegionSection mrs = section_from_flat_range(fr, view);
2432 listener->log_sync(listener, &mrs);
2433 }
2434 }
2435 flatview_unref(view);
2436 trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 0);
2437 } else if (listener->log_sync_global) {
2438 /*
2439 * Whether or not an MR is specified, all we can do here is
2440 * a global sync, because we are not capable of syncing at a
2441 * finer granularity.
2442 */
2443 listener->log_sync_global(listener, last_stage);
2444 trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 1);
2445 }
2446 }
2447 }
2448
2449 void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
2450 hwaddr len)
2451 {
2452 MemoryRegionSection mrs;
2453 MemoryListener *listener;
2454 AddressSpace *as;
2455 FlatView *view;
2456 FlatRange *fr;
2457 hwaddr sec_start, sec_end, sec_size;
2458
2459 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2460 if (!listener->log_clear) {
2461 continue;
2462 }
2463 as = listener->address_space;
2464 view = address_space_get_flatview(as);
2465 FOR_EACH_FLAT_RANGE(fr, view) {
2466 if (!fr->dirty_log_mask || fr->mr != mr) {
2467 /*
2468 * Clear dirty bitmap operation only applies to those
2469 * regions whose dirty logging is at least enabled
2470 */
2471 continue;
2472 }
2473
2474 mrs = section_from_flat_range(fr, view);
2475
2476 sec_start = MAX(mrs.offset_within_region, start);
2477 sec_end = mrs.offset_within_region + int128_get64(mrs.size);
2478 sec_end = MIN(sec_end, start + len);
2479
2480 if (sec_start >= sec_end) {
2481 /*
2482 * If this memory region section has no intersection
2483 * with the requested range, skip.
2484 */
2485 continue;
2486 }
2487
2488 /* Valid case; shrink the section if needed */
2489 mrs.offset_within_address_space +=
2490 sec_start - mrs.offset_within_region;
2491 mrs.offset_within_region = sec_start;
2492 sec_size = sec_end - sec_start;
2493 mrs.size = int128_make64(sec_size);
2494 listener->log_clear(listener, &mrs);
2495 }
2496 flatview_unref(view);
2497 }
2498 }
2499
2500 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2501 hwaddr addr,
2502 hwaddr size,
2503 unsigned client)
2504 {
2505 DirtyBitmapSnapshot *snapshot;
2506 assert(mr->ram_block);
2507 memory_region_sync_dirty_bitmap(mr, false);
2508 snapshot = physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
2509 memory_global_after_dirty_log_sync();
2510 return snapshot;
2511 }
2512
2513 bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
2514 hwaddr addr, hwaddr size)
2515 {
2516 assert(mr->ram_block);
2517 return physical_memory_snapshot_get_dirty(snap,
2518 memory_region_get_ram_addr(mr) + addr, size);
2519 }
2520
2521 void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2522 {
2523 if (mr->readonly != readonly) {
2524 memory_region_transaction_begin();
2525 mr->readonly = readonly;
2526 memory_region_update_pending |= mr->enabled;
2527 memory_region_transaction_commit();
2528 }
2529 }
2530
2531 void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
2532 {
2533 if (mr->nonvolatile != nonvolatile) {
2534 memory_region_transaction_begin();
2535 mr->nonvolatile = nonvolatile;
2536 memory_region_update_pending |= mr->enabled;
2537 memory_region_transaction_commit();
2538 }
2539 }
2540
2541 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
2542 {
2543 if (mr->romd_mode != romd_mode) {
2544 memory_region_transaction_begin();
2545 mr->romd_mode = romd_mode;
2546 memory_region_update_pending |= mr->enabled;
2547 memory_region_transaction_commit();
2548 }
2549 }
2550
2551 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2552 hwaddr size, unsigned client)
2553 {
2554 assert(mr->ram_block);
2555 physical_memory_test_and_clear_dirty(
2556 memory_region_get_ram_addr(mr) + addr, size, client);
2557 }
2558
2559 int memory_region_get_fd(MemoryRegion *mr)
2560 {
2561 RCU_READ_LOCK_GUARD();
2562 while (mr->alias) {
2563 mr = mr->alias;
2564 }
2565 return mr->ram_block->fd;
2566 }
2567
2568 void *memory_region_get_ram_ptr(MemoryRegion *mr)
2569 {
2570 uint64_t offset = 0;
2571
2572 RCU_READ_LOCK_GUARD();
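/* Follow the alias chain, accumulating offsets, to the terminating RAM region. */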
2573 while (mr->alias) {
2574 offset += mr->alias_offset;
2575 mr = mr->alias;
2576 }
2577 assert(mr->ram_block);
2578 return qemu_map_ram_ptr(mr->ram_block, offset);
2579 }
2580
2581 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2582 {
2583 RAMBlock *block;
2584
2585 block = qemu_ram_block_from_host(ptr, false, offset);
2586 if (!block) {
2587 return NULL;
2588 }
2589
2590 return block->mr;
2591 }
2592
2593 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2594 {
2595 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2596 }
2597
2598 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2599 {
2600 assert(mr->ram_block);
2601
2602 qemu_ram_resize(mr->ram_block, newsize, errp);
2603 }
2604
2605 void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size)
2606 {
2607 if (mr->ram_block) {
2608 qemu_ram_msync(mr->ram_block, addr, size);
2609 }
2610 }
2611
2612 void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size)
2613 {
2614 /*
2615 * This might need to be extended to cover
2616 * different types of memory regions.
2617 */
2618 if (mr->dirty_log_mask) {
2619 memory_region_msync(mr, addr, size);
2620 }
2621 }
2622
2623 /*
2624 * Call proper memory listeners about the change on the newly
2625 * added/removed CoalescedMemoryRange.
2626 */
2627 static void memory_region_update_coalesced_range(MemoryRegion *mr,
2628 CoalescedMemoryRange *cmr,
2629 bool add)
2630 {
2631 AddressSpace *as;
2632 FlatView *view;
2633 FlatRange *fr;
2634
2635 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2636 view = address_space_get_flatview(as);
2637 FOR_EACH_FLAT_RANGE(fr, view) {
2638 if (fr->mr == mr) {
2639 flat_range_coalesced_io_notify(fr, as, cmr, add);
2640 }
2641 }
2642 flatview_unref(view);
2643 }
2644 }
2645
2646 void memory_region_set_coalescing(MemoryRegion *mr)
2647 {
2648 memory_region_clear_coalescing(mr);
2649 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
2650 }
2651
2652 void memory_region_add_coalescing(MemoryRegion *mr,
2653 hwaddr offset,
2654 uint64_t size)
2655 {
2656 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
2657
2658 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
2659 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2660 memory_region_update_coalesced_range(mr, cmr, true);
2661 memory_region_set_flush_coalesced(mr);
2662 }
2663
2664 void memory_region_clear_coalescing(MemoryRegion *mr)
2665 {
2666 CoalescedMemoryRange *cmr;
2667
2668 if (QTAILQ_EMPTY(&mr->coalesced)) {
2669 return;
2670 }
2671
2672 qemu_flush_coalesced_mmio_buffer();
2673 mr->flush_coalesced_mmio = false;
2674
2675 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2676 cmr = QTAILQ_FIRST(&mr->coalesced);
2677 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
2678 memory_region_update_coalesced_range(mr, cmr, false);
2679 g_free(cmr);
2680 }
2681 }
2682
2683 void memory_region_set_flush_coalesced(MemoryRegion *mr)
2684 {
2685 mr->flush_coalesced_mmio = true;
2686 }
2687
2688 void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2689 {
2690 qemu_flush_coalesced_mmio_buffer();
2691 if (QTAILQ_EMPTY(&mr->coalesced)) {
2692 mr->flush_coalesced_mmio = false;
2693 }
2694 }
2695
2696 void memory_region_enable_lockless_io(MemoryRegion *mr)
2697 {
2698 mr->lockless_io = true;
2699 /*
2700 * The reentrancy_guard has per-device scope; when enabled, it
2701 * effectively prevents concurrent access to the device's I/O
2702 * MemoryRegion(s) by not calling the accessor callback.
2703 *
2704 * Turn it off for devices with lock-less I/O enabled, to allow
2705 * concurrent I/O.
2706 * TODO: remove this when reentrancy_guard becomes per transaction.
2707 */
2708 mr->disable_reentrancy_guard = true;
2709 }
2710
2711 void memory_region_add_eventfd(MemoryRegion *mr,
2712 hwaddr addr,
2713 unsigned size,
2714 bool match_data,
2715 uint64_t data,
2716 EventNotifier *e)
2717 {
2718 MemoryRegionIoeventfd mrfd = {
2719 .addr.start = int128_make64(addr),
2720 .addr.size = int128_make64(size),
2721 .match_data = match_data,
2722 .data = data,
2723 .e = e,
2724 };
2725 unsigned i;
2726
2727 if (size) {
2728 MemOp mop = (target_big_endian() ? MO_BE : MO_LE) | size_memop(size);
2729 adjust_endianness(mr, &mrfd.data, mop);
2730 }
2731 memory_region_transaction_begin();
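/* mr->ioeventfds is kept sorted (see memory_region_ioeventfd_before); find the insertion point. */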
2732 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2733 if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
2734 break;
2735 }
2736 }
2737 ++mr->ioeventfd_nb;
2738 mr->ioeventfds = g_realloc(mr->ioeventfds,
2739 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2740 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2741 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2742 mr->ioeventfds[i] = mrfd;
2743 ioeventfd_update_pending |= mr->enabled;
2744 memory_region_transaction_commit();
2745 }
2746
2747 void memory_region_del_eventfd(MemoryRegion *mr,
2748 hwaddr addr,
2749 unsigned size,
2750 bool match_data,
2751 uint64_t data,
2752 EventNotifier *e)
2753 {
2754 MemoryRegionIoeventfd mrfd = {
2755 .addr.start = int128_make64(addr),
2756 .addr.size = int128_make64(size),
2757 .match_data = match_data,
2758 .data = data,
2759 .e = e,
2760 };
2761 unsigned i;
2762
2763 if (size) {
2764 MemOp mop = (target_big_endian() ? MO_BE : MO_LE) | size_memop(size);
2765 adjust_endianness(mr, &mrfd.data, mop);
2766 }
2767 memory_region_transaction_begin();
2768 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2769 if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
2770 break;
2771 }
2772 }
2773 assert(i != mr->ioeventfd_nb);
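/* Close the gap left by the removed entry, then shrink the array. */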
2774 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2775 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2776 --mr->ioeventfd_nb;
2777 mr->ioeventfds = g_realloc(mr->ioeventfds,
2778 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
2779 ioeventfd_update_pending |= mr->enabled;
2780 memory_region_transaction_commit();
2781 }
2782
2783 static void memory_region_update_container_subregions(MemoryRegion *subregion)
2784 {
2785 MemoryRegion *mr = subregion->container;
2786 MemoryRegion *other;
2787
2788 memory_region_transaction_begin();
2789
2790 if (mr->owner != subregion->owner) {
2791 memory_region_ref(subregion);
2792 }
2793
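/* Keep subregions sorted by descending priority; earlier entries take precedence when the flat view is rendered. */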
2794 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
2795 if (subregion->priority >= other->priority) {
2796 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2797 goto done;
2798 }
2799 }
2800 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2801 done:
2802 memory_region_update_pending |= mr->enabled && subregion->enabled;
2803 memory_region_transaction_commit();
2804 }
2805
2806 static void memory_region_add_subregion_common(MemoryRegion *mr,
2807 hwaddr offset,
2808 MemoryRegion *subregion)
2809 {
2810 MemoryRegion *alias;
2811
2812 assert(!subregion->container);
2813 subregion->container = mr;
2814 for (alias = subregion->alias; alias; alias = alias->alias) {
2815 alias->mapped_via_alias++;
2816 }
2817 subregion->addr = offset;
2818 memory_region_update_container_subregions(subregion);
2819 }
2820
2821 void memory_region_add_subregion(MemoryRegion *mr,
2822 hwaddr offset,
2823 MemoryRegion *subregion)
2824 {
2825 subregion->priority = 0;
2826 memory_region_add_subregion_common(mr, offset, subregion);
2827 }
2828
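/*
 * Illustrative usage (not from this file; names are hypothetical): a device
 * window that should shadow overlapping system RAM can be mapped with a
 * higher priority, e.g.:
 *
 *     memory_region_add_subregion_overlap(system_memory, 0xa0000, &vga_lowmem, 1);
 */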
2829 void memory_region_add_subregion_overlap(MemoryRegion *mr,
2830 hwaddr offset,
2831 MemoryRegion *subregion,
2832 int priority)
2833 {
2834 subregion->priority = priority;
2835 memory_region_add_subregion_common(mr, offset, subregion);
2836 }
2837
2838 void memory_region_del_subregion(MemoryRegion *mr,
2839 MemoryRegion *subregion)
2840 {
2841 MemoryRegion *alias;
2842
2843 memory_region_transaction_begin();
2844 assert(subregion->container == mr);
2845 subregion->container = NULL;
2846 for (alias = subregion->alias; alias; alias = alias->alias) {
2847 alias->mapped_via_alias--;
2848 assert(alias->mapped_via_alias >= 0);
2849 }
2850 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
2851
2852 if (mr->owner != subregion->owner) {
2853 memory_region_unref(subregion);
2854 }
2855
2856 memory_region_update_pending |= mr->enabled && subregion->enabled;
2857 memory_region_transaction_commit();
2858 }
2859
2860 void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2861 {
2862 if (enabled == mr->enabled) {
2863 return;
2864 }
2865 memory_region_transaction_begin();
2866 mr->enabled = enabled;
2867 memory_region_update_pending = true;
2868 memory_region_transaction_commit();
2869 }
2870
2871 void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2872 {
2873 Int128 s = int128_make64(size);
2874
2875 if (size == UINT64_MAX) {
2876 s = int128_2_64();
2877 }
2878 if (int128_eq(s, mr->size)) {
2879 return;
2880 }
2881 memory_region_transaction_begin();
2882 mr->size = s;
2883 memory_region_update_pending = true;
2884 memory_region_transaction_commit();
2885 }
2886
2887 static void memory_region_readd_subregion(MemoryRegion *mr)
2888 {
2889 MemoryRegion *container = mr->container;
2890
2891 if (container) {
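/* Hold a reference so the region cannot be finalized while it is briefly unmapped. */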
2892 memory_region_transaction_begin();
2893 memory_region_ref(mr);
2894 memory_region_del_subregion(container, mr);
2895 memory_region_add_subregion_common(container, mr->addr, mr);
2896 memory_region_unref(mr);
2897 memory_region_transaction_commit();
2898 }
2899 }
2900
2901 void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2902 {
2903 if (addr != mr->addr) {
2904 mr->addr = addr;
2905 memory_region_readd_subregion(mr);
2906 }
2907 }
2908
2909 void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
2910 {
2911 assert(mr->alias);
2912
2913 if (offset == mr->alias_offset) {
2914 return;
2915 }
2916
2917 memory_region_transaction_begin();
2918 mr->alias_offset = offset;
2919 memory_region_update_pending |= mr->enabled;
2920 memory_region_transaction_commit();
2921 }
2922
2923 void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable)
2924 {
2925 if (unmergeable == mr->unmergeable) {
2926 return;
2927 }
2928
2929 memory_region_transaction_begin();
2930 mr->unmergeable = unmergeable;
2931 memory_region_update_pending |= mr->enabled;
2932 memory_region_transaction_commit();
2933 }
2934
2935 uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2936 {
2937 return mr->align;
2938 }
2939
2940 static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2941 {
2942 const AddrRange *addr = addr_;
2943 const FlatRange *fr = fr_;
2944
2945 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2946 return -1;
2947 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2948 return 1;
2949 }
2950 return 0;
2951 }
2952
2953 static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
2954 {
2955 return bsearch(&addr, view->ranges, view->nr,
2956 sizeof(FlatRange), cmp_flatrange_addr);
2957 }
2958
2959 bool memory_region_is_mapped(MemoryRegion *mr)
2960 {
2961 return !!mr->container || mr->mapped_via_alias;
2962 }
2963
2964 /* Same as memory_region_find, but it does not add a reference to the
2965 * returned region. It must be called from an RCU critical section.
2966 */
2967 static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2968 hwaddr addr, uint64_t size)
2969 {
2970 MemoryRegionSection ret = { .mr = NULL };
2971 MemoryRegion *root;
2972 AddressSpace *as;
2973 AddrRange range;
2974 FlatView *view;
2975 FlatRange *fr;
2976
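/* Convert addr into an absolute address within mr's root container. */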
2977 addr += mr->addr;
2978 for (root = mr; root->container; ) {
2979 root = root->container;
2980 addr += root->addr;
2981 }
2982
2983 as = memory_region_to_address_space(root);
2984 if (!as) {
2985 return ret;
2986 }
2987 range = addrrange_make(int128_make64(addr), int128_make64(size));
2988
2989 view = address_space_to_flatview(as);
2990 fr = flatview_lookup(view, range);
2991 if (!fr) {
2992 return ret;
2993 }
2994
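/* bsearch may return any intersecting range; step back to the first one. */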
2995 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
2996 --fr;
2997 }
2998
2999 ret.mr = fr->mr;
3000 ret.fv = view;
3001 range = addrrange_intersection(range, fr->addr);
3002 ret.offset_within_region = fr->offset_in_region;
3003 ret.offset_within_region += int128_get64(int128_sub(range.start,
3004 fr->addr.start));
3005 ret.size = range.size;
3006 ret.offset_within_address_space = int128_get64(range.start);
3007 ret.readonly = fr->readonly;
3008 ret.nonvolatile = fr->nonvolatile;
3009 return ret;
3010 }
3011
3012 MemoryRegionSection memory_region_find(MemoryRegion *mr,
3013 hwaddr addr, uint64_t size)
3014 {
3015 MemoryRegionSection ret;
3016 RCU_READ_LOCK_GUARD();
3017 ret = memory_region_find_rcu(mr, addr, size);
3018 if (ret.mr) {
3019 memory_region_ref(ret.mr);
3020 }
3021 return ret;
3022 }
3023
3024 MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s)
3025 {
3026 MemoryRegionSection *tmp = g_new(MemoryRegionSection, 1);
3027
3028 *tmp = *s;
3029 if (tmp->mr) {
3030 memory_region_ref(tmp->mr);
3031 }
3032 if (tmp->fv) {
3033 bool ret = flatview_ref(tmp->fv);
3034
3035 g_assert(ret);
3036 }
3037 return tmp;
3038 }
3039
3040 void memory_region_section_free_copy(MemoryRegionSection *s)
3041 {
3042 if (s->fv) {
3043 flatview_unref(s->fv);
3044 }
3045 if (s->mr) {
3046 memory_region_unref(s->mr);
3047 }
3048 g_free(s);
3049 }
3050
3051 bool memory_region_present(MemoryRegion *container, hwaddr addr)
3052 {
3053 MemoryRegion *mr;
3054
3055 RCU_READ_LOCK_GUARD();
3056 mr = memory_region_find_rcu(container, addr, 1).mr;
3057 return mr && mr != container;
3058 }
3059
3060 void memory_global_dirty_log_sync(bool last_stage)
3061 {
3062 memory_region_sync_dirty_bitmap(NULL, last_stage);
3063 }
3064
3065 void memory_global_after_dirty_log_sync(void)
3066 {
3067 MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
3068 }
3069
3070 /*
3071 * Dirty-tracking stop flags that are postponed due to the VM being stopped. Should
3072 * only be used within vmstate_change hook.
3073 */
3074 static unsigned int postponed_stop_flags;
3075 static VMChangeStateEntry *vmstate_change;
3076 static void memory_global_dirty_log_stop_postponed_run(void);
3077
3078 static bool memory_global_dirty_log_do_start(Error **errp)
3079 {
3080 MemoryListener *listener;
3081
3082 QTAILQ_FOREACH(listener, &memory_listeners, link) {
3083 if (listener->log_global_start) {
3084 if (!listener->log_global_start(listener, errp)) {
3085 goto err;
3086 }
3087 }
3088 }
3089 return true;
3090
3091 err:
3092 while ((listener = QTAILQ_PREV(listener, link)) != NULL) {
3093 if (listener->log_global_stop) {
3094 listener->log_global_stop(listener);
3095 }
3096 }
3097
3098 return false;
3099 }
3100
3101 bool memory_global_dirty_log_start(unsigned int flags, Error **errp)
3102 {
3103 unsigned int old_flags;
3104
3105 assert(flags && !(flags & (~GLOBAL_DIRTY_MASK)));
3106
3107 if (vmstate_change) {
3108 /* If there is postponed stop(), operate on it first */
3109 postponed_stop_flags &= ~flags;
3110 memory_global_dirty_log_stop_postponed_run();
3111 }
3112
3113 flags &= ~global_dirty_tracking;
3114 if (!flags) {
3115 return true;
3116 }
3117
3118 old_flags = global_dirty_tracking;
3119 global_dirty_tracking |= flags;
3120 trace_global_dirty_changed(global_dirty_tracking);
3121
3122 if (!old_flags) {
3123 if (!memory_global_dirty_log_do_start(errp)) {
3124 global_dirty_tracking &= ~flags;
3125 trace_global_dirty_changed(global_dirty_tracking);
3126 return false;
3127 }
3128
3129 memory_region_transaction_begin();
3130 memory_region_update_pending = true;
3131 memory_region_transaction_commit();
3132 }
3133 return true;
3134 }
3135
3136 static void memory_global_dirty_log_do_stop(unsigned int flags)
3137 {
3138 assert(flags && !(flags & (~GLOBAL_DIRTY_MASK)));
3139 assert((global_dirty_tracking & flags) == flags);
3140 global_dirty_tracking &= ~flags;
3141
3142 trace_global_dirty_changed(global_dirty_tracking);
3143
3144 if (!global_dirty_tracking) {
3145 memory_region_transaction_begin();
3146 memory_region_update_pending = true;
3147 memory_region_transaction_commit();
3148 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
3149 }
3150 }
3151
3152 /*
3153 * Execute the postponed dirty log stop operations, if any, then reset
3154 * everything (including the flags and the vmstate change hook).
3155 */
3156 static void memory_global_dirty_log_stop_postponed_run(void)
3157 {
3158 /* This must be called with the vmstate handler registered */
3159 assert(vmstate_change);
3160
3161 /* Note: postponed_stop_flags can be cleared in log start routine */
3162 if (postponed_stop_flags) {
3163 memory_global_dirty_log_do_stop(postponed_stop_flags);
3164 postponed_stop_flags = 0;
3165 }
3166
3167 qemu_del_vm_change_state_handler(vmstate_change);
3168 vmstate_change = NULL;
3169 }
3170
3171 static void memory_vm_change_state_handler(void *opaque, bool running,
3172 RunState state)
3173 {
3174 if (running) {
3175 memory_global_dirty_log_stop_postponed_run();
3176 }
3177 }
3178
3179 void memory_global_dirty_log_stop(unsigned int flags)
3180 {
3181 if (!runstate_is_running()) {
3182 /* Postpone the dirty log stop, e.g., to when VM starts again */
3183 if (vmstate_change) {
3184 /* Batch with previous postponed flags */
3185 postponed_stop_flags |= flags;
3186 } else {
3187 postponed_stop_flags = flags;
3188 vmstate_change = qemu_add_vm_change_state_handler(
3189 memory_vm_change_state_handler, NULL);
3190 }
3191 return;
3192 }
3193
3194 memory_global_dirty_log_do_stop(flags);
3195 }
3196
3197 static void listener_add_address_space(MemoryListener *listener,
3198 AddressSpace *as)
3199 {
3200 unsigned i;
3201 FlatView *view;
3202 FlatRange *fr;
3203 MemoryRegionIoeventfd *fd;
3204
3205 if (listener->begin) {
3206 listener->begin(listener);
3207 }
3208 if (global_dirty_tracking) {
3209 /*
3210 * Currently only VFIO can fail log_global_start(), and it's not
3211 * yet allowed to hotplug any PCI device during migration. So this
3212 * should never fail when invoked, guard it with error_abort. If
3213 * it can start to fail in the future, we need to be able to fail
3214 * the whole listener_add_address_space() and its callers.
3215 */
3216 if (listener->log_global_start) {
3217 listener->log_global_start(listener, &error_abort);
3218 }
3219 }
3220
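/* Replay the address space's current topology to the new listener. */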
3221 view = address_space_get_flatview(as);
3222 FOR_EACH_FLAT_RANGE(fr, view) {
3223 MemoryRegionSection section = section_from_flat_range(fr, view);
3224
3225 if (listener->region_add) {
3226 listener->region_add(listener, &section);
3227 }
3228
3229 /* send coalesced io add notifications */
3230 flat_range_coalesced_io_notify_listener_add_del(fr, &section,
3231 listener, as, true);
3232
3233 if (fr->dirty_log_mask && listener->log_start) {
3234 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
3235 }
3236 }
3237
3238 /*
3239 * register all eventfds for this address space for the newly registered
3240 * listener.
3241 */
3242 for (i = 0; i < as->ioeventfd_nb; i++) {
3243 fd = &as->ioeventfds[i];
3244 MemoryRegionSection section = (MemoryRegionSection) {
3245 .fv = view,
3246 .offset_within_address_space = int128_get64(fd->addr.start),
3247 .size = fd->addr.size,
3248 };
3249
3250 if (listener->eventfd_add) {
3251 listener->eventfd_add(listener, &section,
3252 fd->match_data, fd->data, fd->e);
3253 }
3254 }
3255
3256 if (listener->commit) {
3257 listener->commit(listener);
3258 }
3259 flatview_unref(view);
3260 }
3261
3262 static void listener_del_address_space(MemoryListener *listener,
3263 AddressSpace *as)
3264 {
3265 unsigned i;
3266 FlatView *view;
3267 FlatRange *fr;
3268 MemoryRegionIoeventfd *fd;
3269
3270 if (listener->begin) {
3271 listener->begin(listener);
3272 }
3273 view = address_space_get_flatview(as);
3274 FOR_EACH_FLAT_RANGE(fr, view) {
3275 MemoryRegionSection section = section_from_flat_range(fr, view);
3276
3277 if (fr->dirty_log_mask && listener->log_stop) {
3278 listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
3279 }
3280
3281 /* send coalesced io del notifications */
3282 flat_range_coalesced_io_notify_listener_add_del(fr, &section,
3283 listener, as, false);
3284 if (listener->region_del) {
3285 listener->region_del(listener, &section);
3286 }
3287 }
3288
3289 /*
3290 * de-register all eventfds for this address space for the current
3291 * listener.
3292 */
3293 for (i = 0; i < as->ioeventfd_nb; i++) {
3294 fd = &as->ioeventfds[i];
3295 MemoryRegionSection section = (MemoryRegionSection) {
3296 .fv = view,
3297 .offset_within_address_space = int128_get64(fd->addr.start),
3298 .size = fd->addr.size,
3299 };
3300
3301 if (listener->eventfd_del) {
3302 listener->eventfd_del(listener, &section,
3303 fd->match_data, fd->data, fd->e);
3304 }
3305 }
3306
3307 if (listener->commit) {
3308 listener->commit(listener);
3309 }
3310 flatview_unref(view);
3311 }
3312
3313 void memory_listener_register(MemoryListener *listener, AddressSpace *as)
3314 {
3315 MemoryListener *other = NULL;
3316
3317 /* Only one of log_sync and log_sync_global can be defined for a listener */
3318 assert(!(listener->log_sync && listener->log_sync_global));
3319
3320 listener->address_space = as;
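/* Both listener lists are kept sorted by ascending priority. */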
3321 if (QTAILQ_EMPTY(&memory_listeners)
3322 || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
3323 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
3324 } else {
3325 QTAILQ_FOREACH(other, &memory_listeners, link) {
3326 if (listener->priority < other->priority) {
3327 break;
3328 }
3329 }
3330 QTAILQ_INSERT_BEFORE(other, listener, link);
3331 }
3332
3333 if (QTAILQ_EMPTY(&as->listeners)
3334 || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
3335 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
3336 } else {
3337 QTAILQ_FOREACH(other, &as->listeners, link_as) {
3338 if (listener->priority < other->priority) {
3339 break;
3340 }
3341 }
3342 QTAILQ_INSERT_BEFORE(other, listener, link_as);
3343 }
3344
3345 listener_add_address_space(listener, as);
3346
3347 if (listener->eventfd_add || listener->eventfd_del) {
3348 as->ioeventfd_notifiers++;
3349 }
3350 }
3351
3352 void memory_listener_unregister(MemoryListener *listener)
3353 {
3354 if (!listener->address_space) {
3355 return;
3356 }
3357
3358 if (listener->eventfd_add || listener->eventfd_del) {
3359 listener->address_space->ioeventfd_notifiers--;
3360 }
3361
3362 listener_del_address_space(listener, listener->address_space);
3363 QTAILQ_REMOVE(&memory_listeners, listener, link);
3364 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
3365 listener->address_space = NULL;
3366 }
3367
3368 void address_space_remove_listeners(AddressSpace *as)
3369 {
3370 while (!QTAILQ_EMPTY(&as->listeners)) {
3371 memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
3372 }
3373 }
3374
3375 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
3376 {
3377 memory_region_ref(root);
3378 as->root = root;
3379 as->current_map = NULL;
3380 as->ioeventfd_nb = 0;
3381 as->ioeventfds = NULL;
3382 QTAILQ_INIT(&as->listeners);
3383 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
3384 as->max_bounce_buffer_size = DEFAULT_MAX_BOUNCE_BUFFER_SIZE;
3385 as->bounce_buffer_size = 0;
3386 qemu_mutex_init(&as->map_client_list_lock);
3387 QLIST_INIT(&as->map_client_list);
3388 as->name = g_strdup(name ? name : "anonymous");
3389 address_space_update_topology(as);
3390 address_space_update_ioeventfds(as);
3391 }
3392
3393 static void do_address_space_destroy(AddressSpace *as)
3394 {
3395 assert(qatomic_read(&as->bounce_buffer_size) == 0);
3396 assert(QLIST_EMPTY(&as->map_client_list));
3397 qemu_mutex_destroy(&as->map_client_list_lock);
3398
3399 assert(QTAILQ_EMPTY(&as->listeners));
3400
3401 flatview_unref(as->current_map);
3402 g_free(as->name);
3403 g_free(as->ioeventfds);
3404 memory_region_unref(as->root);
3405 }
3406
3407 static void do_address_space_destroy_free(AddressSpace *as)
3408 {
3409 do_address_space_destroy(as);
3410 g_free(as);
3411 }
3412
3413 /* Detach address space from global view, notify all listeners */
3414 static void address_space_detach(AddressSpace *as)
3415 {
3416 MemoryRegion *root = as->root;
3417
3418 /* Flush out anything from MemoryListeners listening in on this */
3419 memory_region_transaction_begin();
3420 as->root = NULL;
3421 memory_region_transaction_commit();
3422 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
3423
3424 /* At this point, as->dispatch and as->current_map are dummy
3425 * entries that the guest should never use. Wait for the old
3426 * values to expire before freeing the data.
3427 */
3428 as->root = root;
3429 }
3430
3431 void address_space_destroy(AddressSpace *as)
3432 {
3433 address_space_detach(as);
3434 call_rcu(as, do_address_space_destroy, rcu);
3435 }
3436
3437 void address_space_destroy_free(AddressSpace *as)
3438 {
3439 address_space_detach(as);
3440 call_rcu(as, do_address_space_destroy_free, rcu);
3441 }
3442
3443 static const char *memory_region_type(MemoryRegion *mr)
3444 {
3445 if (mr->alias) {
3446 return memory_region_type(mr->alias);
3447 }
3448 if (memory_region_is_ram_device(mr)) {
3449 return "ramd";
3450 } else if (memory_region_is_romd(mr)) {
3451 return "romd";
3452 } else if (memory_region_is_rom(mr)) {
3453 return "rom";
3454 } else if (memory_region_is_ram(mr)) {
3455 return "ram";
3456 } else {
3457 return "i/o";
3458 }
3459 }
3460
3461 typedef struct MemoryRegionList MemoryRegionList;
3462
3463 struct MemoryRegionList {
3464 const MemoryRegion *mr;
3465 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
3466 };
3467
3468 typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;
3469
3470 #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
3471 int128_sub((size), int128_one())) : 0)
3472 #define MTREE_INDENT " "
3473
3474 static void mtree_expand_owner(const char *label, Object *obj)
3475 {
3476 DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);
3477
3478 qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
3479 if (dev && dev->id) {
3480 qemu_printf(" id=%s", dev->id);
3481 } else {
3482 char *canonical_path = object_get_canonical_path(obj);
3483 if (canonical_path) {
3484 qemu_printf(" path=%s", canonical_path);
3485 g_free(canonical_path);
3486 } else {
3487 qemu_printf(" type=%s", object_get_typename(obj));
3488 }
3489 }
3490 qemu_printf("}");
3491 }
3492
3493 static void mtree_print_mr_owner(const MemoryRegion *mr)
3494 {
3495 Object *owner = mr->owner;
3496 Object *parent = memory_region_owner((MemoryRegion *)mr);
3497
3498 if (!owner && !parent) {
3499 qemu_printf(" orphan");
3500 return;
3501 }
3502 if (owner) {
3503 mtree_expand_owner("owner", owner);
3504 }
3505 if (parent && parent != owner) {
3506 mtree_expand_owner("parent", parent);
3507 }
3508 }
3509
3510 static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
3511 hwaddr base,
3512 MemoryRegionListHead *alias_print_queue,
3513 bool owner, bool display_disabled)
3514 {
3515 MemoryRegionList *new_ml, *ml, *next_ml;
3516 MemoryRegionListHead submr_print_queue;
3517 const MemoryRegion *submr;
3518 unsigned int i;
3519 hwaddr cur_start, cur_end;
3520
3521 if (!mr) {
3522 return;
3523 }
3524
3525 cur_start = base + mr->addr;
3526 cur_end = cur_start + MR_SIZE(mr->size);
3527
3528 /*
3529 * Try to detect overflow of the memory region's address range. This
3530 * should never happen normally; when it does, print a marker to warn
3531 * the user who is observing this.
3532 */
3533 if (cur_start < base || cur_end < cur_start) {
3534 qemu_printf("[DETECTED OVERFLOW!] ");
3535 }
3536
3537 if (mr->alias) {
3538 bool found = false;
3539
3540 /* check if the alias is already in the queue */
3541 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
3542 if (ml->mr == mr->alias) {
3543 found = true;
3544 }
3545 }
3546
3547 if (!found) {
3548 ml = g_new(MemoryRegionList, 1);
3549 ml->mr = mr->alias;
3550 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
3551 }
3552 if (mr->enabled || display_disabled) {
3553 for (i = 0; i < level; i++) {
3554 qemu_printf(MTREE_INDENT);
3555 }
3556 qemu_printf(HWADDR_FMT_plx "-" HWADDR_FMT_plx
3557 " (prio %d, %s%s): alias %s @%s " HWADDR_FMT_plx
3558 "-" HWADDR_FMT_plx "%s",
3559 cur_start, cur_end,
3560 mr->priority,
3561 mr->nonvolatile ? "nv-" : "",
3562 memory_region_type((MemoryRegion *)mr),
3563 memory_region_name(mr),
3564 memory_region_name(mr->alias),
3565 mr->alias_offset,
3566 mr->alias_offset + MR_SIZE(mr->size),
3567 mr->enabled ? "" : " [disabled]");
3568 if (owner) {
3569 mtree_print_mr_owner(mr);
3570 }
3571 qemu_printf("\n");
3572 }
3573 } else {
3574 if (mr->enabled || display_disabled) {
3575 for (i = 0; i < level; i++) {
3576 qemu_printf(MTREE_INDENT);
3577 }
3578 qemu_printf(HWADDR_FMT_plx "-" HWADDR_FMT_plx
3579 " (prio %d, %s%s): %s%s",
3580 cur_start, cur_end,
3581 mr->priority,
3582 mr->nonvolatile ? "nv-" : "",
3583 memory_region_type((MemoryRegion *)mr),
3584 memory_region_name(mr),
3585 mr->enabled ? "" : " [disabled]");
3586 if (owner) {
3587 mtree_print_mr_owner(mr);
3588 }
3589 qemu_printf("\n");
3590 }
3591 }
3592
3593 QTAILQ_INIT(&submr_print_queue);
3594
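    /*
     * Insertion-sort the children: ascending by address, and for equal
     * addresses the higher-priority region comes first, mirroring the
     * order in which overlapping subregions take effect.
     */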
3595 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
3596 new_ml = g_new(MemoryRegionList, 1);
3597 new_ml->mr = submr;
3598 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
3599 if (new_ml->mr->addr < ml->mr->addr ||
3600 (new_ml->mr->addr == ml->mr->addr &&
3601 new_ml->mr->priority > ml->mr->priority)) {
3602 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
3603 new_ml = NULL;
3604 break;
3605 }
3606 }
3607 if (new_ml) {
3608 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
3609 }
3610 }
3611
3612 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
3613 mtree_print_mr(ml->mr, level + 1, cur_start,
3614 alias_print_queue, owner, display_disabled);
3615 }
3616
3617 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
3618 g_free(ml);
3619 }
3620 }
3621
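/*
 * Per-invocation state for the FlatView dump: a running counter of the
 * views printed so far, flags selecting whether to dump the dispatch tree
 * and region owners, and, when the current accelerator implements
 * has_memory(), the AccelClass used to tag ranges that are backed by
 * accelerator memory (e.g. KVM memory slots).
 */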
3622 struct FlatViewInfo {
3623 int counter;
3624 bool dispatch_tree;
3625 bool owner;
3626 AccelClass *ac;
3627 };
3628
3629 static void mtree_print_flatview(gpointer key, gpointer value,
3630 gpointer user_data)
3631 {
3632 FlatView *view = key;
3633 GArray *fv_address_spaces = value;
3634 struct FlatViewInfo *fvi = user_data;
3635 FlatRange *range = &view->ranges[0];
3636 MemoryRegion *mr;
3637 int n = view->nr;
3638 int i;
3639 AddressSpace *as;
3640
3641 qemu_printf("FlatView #%d\n", fvi->counter);
3642 ++fvi->counter;
3643
3644 for (i = 0; i < fv_address_spaces->len; ++i) {
3645 as = g_array_index(fv_address_spaces, AddressSpace*, i);
3646 qemu_printf(" AS \"%s\", root: %s",
3647 as->name, memory_region_name(as->root));
3648 if (as->root->alias) {
3649 qemu_printf(", alias %s", memory_region_name(as->root->alias));
3650 }
3651 qemu_printf("\n");
3652 }
3653
3654 qemu_printf(" Root memory region: %s\n",
3655 view->root ? memory_region_name(view->root) : "(none)");
3656
3657 if (n <= 0) {
3658 qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
3659 return;
3660 }
3661
3662 while (n--) {
3663 mr = range->mr;
3664 if (range->offset_in_region) {
3665 qemu_printf(MTREE_INDENT HWADDR_FMT_plx "-" HWADDR_FMT_plx
3666 " (prio %d, %s%s): %s @" HWADDR_FMT_plx,
3667 int128_get64(range->addr.start),
3668 int128_get64(range->addr.start)
3669 + MR_SIZE(range->addr.size),
3670 mr->priority,
3671 range->nonvolatile ? "nv-" : "",
3672 range->readonly ? "rom" : memory_region_type(mr),
3673 memory_region_name(mr),
3674 range->offset_in_region);
3675 } else {
3676 qemu_printf(MTREE_INDENT HWADDR_FMT_plx "-" HWADDR_FMT_plx
3677 " (prio %d, %s%s): %s",
3678 int128_get64(range->addr.start),
3679 int128_get64(range->addr.start)
3680 + MR_SIZE(range->addr.size),
3681 mr->priority,
3682 range->nonvolatile ? "nv-" : "",
3683 range->readonly ? "rom" : memory_region_type(mr),
3684 memory_region_name(mr));
3685 }
3686 if (fvi->owner) {
3687 mtree_print_mr_owner(mr);
3688 }
3689
3690 if (fvi->ac) {
3691 for (i = 0; i < fv_address_spaces->len; ++i) {
3692 as = g_array_index(fv_address_spaces, AddressSpace*, i);
3693 if (fvi->ac->has_memory(current_machine->accelerator, as,
3694 int128_get64(range->addr.start),
3695 MR_SIZE(range->addr.size) + 1)) {
3696 qemu_printf(" %s", fvi->ac->name);
3697 }
3698 }
3699 }
3700 qemu_printf("\n");
3701 range++;
3702 }
3703
3704 #if !defined(CONFIG_USER_ONLY)
3705 if (fvi->dispatch_tree && view->root) {
3706 mtree_print_dispatch(view->dispatch, view->root);
3707 }
3708 #endif
3709
3710 qemu_printf("\n");
3711 }
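/*
 * Each rendered range comes out as one line, e.g. (illustrative values):
 *
 *     0000000000000000-000000000009ffff (prio 0, ram): pc.ram KVM
 *
 * where an "@offset" suffix reports a non-zero offset_in_region and the
 * trailing accelerator name appears only when the range is covered by
 * accelerator-owned memory.
 */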
3712
3713 static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
3714 gpointer user_data)
3715 {
3716 FlatView *view = key;
3717 GArray *fv_address_spaces = value;
3718
3719 g_array_unref(fv_address_spaces);
3720 flatview_unref(view);
3721
3722 return true;
3723 }
3724
3725 static void mtree_info_flatview(bool dispatch_tree, bool owner)
3726 {
3727 struct FlatViewInfo fvi = {
3728 .counter = 0,
3729 .dispatch_tree = dispatch_tree,
3730 .owner = owner,
3731 };
3732 AddressSpace *as;
3733 FlatView *view;
3734 GArray *fv_address_spaces;
3735 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
3736 AccelClass *ac = ACCEL_GET_CLASS(current_accel());
3737
3738 if (ac->has_memory) {
3739 fvi.ac = ac;
3740 }
3741
3742 /* Gather all FVs in one table */
3743 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3744 view = address_space_get_flatview(as);
3745
3746 fv_address_spaces = g_hash_table_lookup(views, view);
3747 if (!fv_address_spaces) {
3748 fv_address_spaces = g_array_new(false, false, sizeof(as));
3749 g_hash_table_insert(views, view, fv_address_spaces);
3750 }
3751
3752 g_array_append_val(fv_address_spaces, as);
3753 }
3754
3755 /* Print */
3756 g_hash_table_foreach(views, mtree_print_flatview, &fvi);
3757
3758 /* Free */
3759 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
3760 g_hash_table_unref(views);
3761 }
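/*
 * Address spaces that share the same rendered FlatView are grouped under a
 * single hash table entry above, so each distinct view is printed exactly
 * once with all of its users listed.
 */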
3762
3763 struct AddressSpaceInfo {
3764 MemoryRegionListHead *ml_head;
3765 bool owner;
3766 bool disabled;
3767 };
3768
3769 /* Returns negative value if a < b; zero if a = b; positive value if a > b. */
3770 static gint address_space_compare_name(gconstpointer a, gconstpointer b)
3771 {
3772 const AddressSpace *as_a = a;
3773 const AddressSpace *as_b = b;
3774
3775 return g_strcmp0(as_a->name, as_b->name);
3776 }
3777
3778 static void mtree_print_as_name(gpointer data, gpointer user_data)
3779 {
3780 AddressSpace *as = data;
3781
3782 qemu_printf("address-space: %s\n", as->name);
3783 }
3784
3785 static void mtree_print_as(gpointer key, gpointer value, gpointer user_data)
3786 {
3787 MemoryRegion *mr = key;
3788 GSList *as_same_root_mr_list = value;
3789 struct AddressSpaceInfo *asi = user_data;
3790
3791 g_slist_foreach(as_same_root_mr_list, mtree_print_as_name, NULL);
3792 mtree_print_mr(mr, 1, 0, asi->ml_head, asi->owner, asi->disabled);
3793 qemu_printf("\n");
3794 }
3795
3796 static gboolean mtree_info_as_free(gpointer key, gpointer value,
3797 gpointer user_data)
3798 {
3799 GSList *as_same_root_mr_list = value;
3800
3801 g_slist_free(as_same_root_mr_list);
3802
3803 return true;
3804 }
3805
3806 static void mtree_info_as(bool dispatch_tree, bool owner, bool disabled)
3807 {
3808 MemoryRegionListHead ml_head;
3809 MemoryRegionList *ml, *ml2;
3810 AddressSpace *as;
3811 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
3812 GSList *as_same_root_mr_list;
3813 struct AddressSpaceInfo asi = {
3814 .ml_head = &ml_head,
3815 .owner = owner,
3816 .disabled = disabled,
3817 };
3818
3819 QTAILQ_INIT(&ml_head);
3820
3821 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3822 /* Create hashtable, key=AS root MR, value = list of AS */
3823 as_same_root_mr_list = g_hash_table_lookup(views, as->root);
3824 as_same_root_mr_list = g_slist_insert_sorted(as_same_root_mr_list, as,
3825 address_space_compare_name);
3826 g_hash_table_insert(views, as->root, as_same_root_mr_list);
3827 }
3828
3829 /* print address spaces */
3830 g_hash_table_foreach(views, mtree_print_as, &asi);
3831 g_hash_table_foreach_remove(views, mtree_info_as_free, 0);
3832 g_hash_table_unref(views);
3833
3834 /* print aliased regions */
3835 QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
3836 qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
3837 mtree_print_mr(ml->mr, 1, 0, &ml_head, owner, disabled);
3838 qemu_printf("\n");
3839 }
3840
3841 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
3842 g_free(ml);
3843 }
3844 }
3845
3846 void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled)
3847 {
3848 if (flatview) {
3849 mtree_info_flatview(dispatch_tree, owner);
3850 } else {
3851 mtree_info_as(dispatch_tree, owner, disabled);
3852 }
3853 }
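/*
 * This is the entry point behind the HMP monitor command "info mtree":
 * the flatview/dispatch_tree/owner/disabled arguments correspond to its
 * -f, -d, -o and -D options respectively.
 */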
3854
3855 bool memory_region_init_ram(MemoryRegion *mr,
3856 Object *owner,
3857 const char *name,
3858 uint64_t size,
3859 Error **errp)
3860 {
3861 DeviceState *owner_dev;
3862
3863 if (!memory_region_init_ram_nomigrate(mr, owner, name, size, errp)) {
3864 return false;
3865 }
3866 /* This will assert if owner is neither NULL nor a DeviceState.
3867 * We only want the owner here for the purposes of defining a
3868 * unique name for migration. TODO: Ideally we should implement
3869 * a naming scheme for Objects which are not DeviceStates, in
3870 * which case we can relax this restriction.
3871 */
3872 owner_dev = DEVICE(owner);
3873 vmstate_register_ram(mr, owner_dev);
3874
3875 return true;
3876 }
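/*
 * Typical call pattern (a sketch; the device fields, size and base address
 * are hypothetical):
 *
 *     memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                            s->ram_size, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), base, &s->ram);
 *
 * Passing the owning DeviceState lets vmstate_register_ram() derive a
 * stable, unique RAM block name for migration.
 */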
3877
3878 bool memory_region_init_ram_guest_memfd(MemoryRegion *mr,
3879 Object *owner,
3880 const char *name,
3881 uint64_t size,
3882 Error **errp)
3883 {
3884 DeviceState *owner_dev;
3885
3886 if (!memory_region_init_ram_flags_nomigrate(mr, owner, name, size,
3887 RAM_GUEST_MEMFD, errp)) {
3888 return false;
3889 }
3890 /* This will assert if owner is neither NULL nor a DeviceState.
3891 * We only want the owner here for the purposes of defining a
3892 * unique name for migration. TODO: Ideally we should implement
3893 * a naming scheme for Objects which are not DeviceStates, in
3894 * which case we can relax this restriction.
3895 */
3896 owner_dev = DEVICE(owner);
3897 vmstate_register_ram(mr, owner_dev);
3898
3899 return true;
3900 }
3901
3902 bool memory_region_init_rom(MemoryRegion *mr,
3903 Object *owner,
3904 const char *name,
3905 uint64_t size,
3906 Error **errp)
3907 {
3908 DeviceState *owner_dev;
3909
3910 if (!memory_region_init_rom_nomigrate(mr, owner, name, size, errp)) {
3911 return false;
3912 }
3913 /* This will assert if owner is neither NULL nor a DeviceState.
3914 * We only want the owner here for the purposes of defining a
3915 * unique name for migration. TODO: Ideally we should implement
3916 * a naming scheme for Objects which are not DeviceStates, in
3917 * which case we can relax this restriction.
3918 */
3919 owner_dev = DEVICE(owner);
3920 vmstate_register_ram(mr, owner_dev);
3921
3922 return true;
3923 }
3924
3925 bool memory_region_init_rom_device(MemoryRegion *mr,
3926 Object *owner,
3927 const MemoryRegionOps *ops,
3928 void *opaque,
3929 const char *name,
3930 uint64_t size,
3931 Error **errp)
3932 {
3933 DeviceState *owner_dev;
3934
3935 if (!memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
3936 name, size, errp)) {
3937 return false;
3938 }
3939 /* This will assert if owner is neither NULL nor a DeviceState.
3940 * We only want the owner here for the purposes of defining a
3941 * unique name for migration. TODO: Ideally we should implement
3942 * a naming scheme for Objects which are not DeviceStates, in
3943 * which case we can relax this restriction.
3944 */
3945 owner_dev = DEVICE(owner);
3946 vmstate_register_ram(mr, owner_dev);
3947
3948 return true;
3949 }
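/*
 * For ROM device regions, guest reads are satisfied straight from the RAM
 * backing while writes are dispatched to the supplied MemoryRegionOps; the
 * callbacks only see reads as well once the region is switched out of ROMD
 * mode (see memory_region_rom_device_set_romd()).
 */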
3950
3951 /*
3952 * Support system builds with CONFIG_FUZZ using a weak symbol and a stub for
3953 * the fuzz_dma_read_cb callback
3954 */
3955 #ifdef CONFIG_FUZZ
3956 void __attribute__((weak)) fuzz_dma_read_cb(size_t addr,
3957 size_t len,
3958 MemoryRegion *mr)
3959 {
3960 }
3961 #endif
3962
3963 static const TypeInfo memory_region_info = {
3964 .parent = TYPE_OBJECT,
3965 .name = TYPE_MEMORY_REGION,
3966 .class_size = sizeof(MemoryRegionClass),
3967 .instance_size = sizeof(MemoryRegion),
3968 .instance_init = memory_region_initfn,
3969 .instance_finalize = memory_region_finalize,
3970 };
3971
3972 static const TypeInfo iommu_memory_region_info = {
3973 .parent = TYPE_MEMORY_REGION,
3974 .name = TYPE_IOMMU_MEMORY_REGION,
3975 .class_size = sizeof(IOMMUMemoryRegionClass),
3976 .instance_size = sizeof(IOMMUMemoryRegion),
3977 .instance_init = iommu_memory_region_initfn,
3978 .abstract = true,
3979 };
3980
3981 static const TypeInfo ram_discard_manager_info = {
3982 .parent = TYPE_INTERFACE,
3983 .name = TYPE_RAM_DISCARD_MANAGER,
3984 .class_size = sizeof(RamDiscardManagerClass),
3985 };
3986
3987 static void memory_register_types(void)
3988 {
3989 type_register_static(&memory_region_info);
3990 type_register_static(&iommu_memory_region_info);
3991 type_register_static(&ram_discard_manager_info);
3992 }
3993
3994 type_init(memory_register_types)
3995