/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "exec/memory.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qom/object.h"
#include "trace.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/tcg.h"
#include "qemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"
#include "exec/address-spaces.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
unsigned int global_dirty_tracking;

static QTAILQ_HEAD(, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) {     \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while (0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

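/*
 * Strict ordering on (start, size, match_data, data, e).  Used to keep
 * ioeventfd arrays sorted, so that the old and new sets can be compared
 * with a single linear walk in address_space_add_del_ioeventfds().
 */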
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    if (int128_eq(a->addr.start, b->addr.start) &&
        (!int128_nz(a->addr.size) || !int128_nz(b->addr.size) ||
         (int128_eq(a->addr.size, b->addr.size) &&
          (a->match_data == b->match_data) &&
          ((a->match_data && (a->data == b->data)) || !a->match_data) &&
          (a->e == b->e)))) {
        return true;
    }

    return false;
}

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
    bool unmergeable;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
        .unmergeable = fr->unmergeable,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile
        && a->unmergeable == b->unmergeable;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for
 * maintaining sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return qatomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (qatomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

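/*
 * Two FlatRanges can be merged when they are physically contiguous, map
 * consecutive offsets of the same region and agree on all attributes.
 */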
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile
        && !r1->unmergeable && !r2->unmergeable;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j, k;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        for (k = i; k < j; k++) {
            memory_region_unref(view->ranges[k].mr);
        }
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

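/*
 * True if sub-accesses of a larger access must be assembled MSB-first,
 * i.e. the device is big-endian from the target's point of view
 * (DEVICE_NATIVE_ENDIAN follows the target's endianness).
 */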
static bool memory_region_big_endian(MemoryRegion *mr)
{
#if TARGET_BIG_ENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
{
    if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
        switch (op & MO_SIZE) {
        case MO_8:
            break;
        case MO_16:
            *data = bswap16(*data);
            break;
        case MO_32:
            *data = bswap32(*data);
            break;
        case MO_64:
            *data = bswap64(*data);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}

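/*
 * Translate @offset within @mr to a guest-physical address by walking up
 * the container chain and accumulating each level's offset.  Only used
 * to compute addresses for tracing.
 */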
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               signed shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
                                     memory_region_name(mr));
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
                                     memory_region_name(mr));
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
                                      memory_region_name(mr));
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
                                      memory_region_name(mr));
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

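/*
 * Split an @size-byte access at @addr into pieces no smaller than
 * @access_size_min and no larger than @access_size_max, invoking
 * @access_fn for each piece with the shift and mask needed to assemble
 * the complete @value in the device's endianness.
 */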
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;
    bool reentrancy_guard_applied = false;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* Do not allow more than one simultaneous access to a device's IO Regions */
    if (mr->dev && !mr->disable_reentrancy_guard &&
        !mr->ram_device && !mr->ram && !mr->rom_device && !mr->readonly) {
        if (mr->dev->mem_reentrancy_guard.engaged_in_io) {
            warn_report_once("Blocked re-entrant IO on MemoryRegion: "
                             "%s at addr: 0x%" HWADDR_PRIX,
                             memory_region_name(mr), addr);
            return MEMTX_ACCESS_ERROR;
        }
        mr->dev->mem_reentrancy_guard.engaged_in_io = true;
        reentrancy_guard_applied = true;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    if (mr->dev && reentrancy_guard_applied) {
        mr->dev->mem_reentrancy_guard.engaged_in_io = false;
    }
    return r;
}

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly,
                                 bool nonvolatile,
                                 bool unmergeable)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;
    nonvolatile |= mr->nonvolatile;
    unmergeable |= mr->unmergeable;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip,
                             readonly, nonvolatile, unmergeable);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip,
                             readonly, nonvolatile, unmergeable);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;
    fr.nonvolatile = nonvolatile;
    fr.unmergeable = unmergeable;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque)
{
    FlatRange *fr;

    assert(fv);
    assert(cb);

    FOR_EACH_FLAT_RANGE(fr, fv) {
        if (cb(fr->addr.start, fr->addr.size, fr->mr,
               fr->offset_in_region, opaque)) {
            break;
        }
    }
}

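/*
 * Peel off aliases and single-child containers that cover the whole
 * region, so that address spaces whose roots resolve to the same "real"
 * root can share one FlatView.  Returns NULL if no enabled region is
 * reachable.
 */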
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way.  This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false, false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    RCU_READ_LOCK_GUARD();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    return view;
}

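/*
 * Recompute the flat list of ioeventfds visible in @as from the current
 * FlatView and notify listeners about additions and removals relative to
 * the previous list.
 */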
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    unsigned ioeventfd_max;
    MemoryRegionIoeventfd *ioeventfds;
    AddrRange tmp;
    unsigned i;

    if (!as->ioeventfd_notifiers) {
        return;
    }

    /*
     * It is likely that the number of ioeventfds hasn't changed much, so use
     * the previous size as the starting value, with some headroom to avoid
     * gratuitous reallocations.
     */
    ioeventfd_max = QEMU_ALIGN_UP(as->ioeventfd_nb, 4);
    ioeventfds = g_new(MemoryRegionIoeventfd, ioeventfd_max);

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                if (ioeventfd_nb > ioeventfd_max) {
                    ioeventfd_max = MAX(ioeventfd_max * 2, 4);
                    ioeventfds = g_realloc(ioeventfds,
                                           ioeventfd_max * sizeof(*ioeventfds));
                }
                ioeventfds[ioeventfd_nb - 1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb - 1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

/*
 * Notify the memory listeners about the coalesced IO change events of
 * range `cmr'.  Only the part that intersects the specified FlatRange
 * will be sent.
 */
static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
                                           CoalescedMemoryRange *cmr, bool add)
{
    AddrRange tmp;

    tmp = addrrange_shift(cmr->addr,
                          int128_sub(fr->addr.start,
                                     int128_make64(fr->offset_in_region)));
    if (!addrrange_intersects(tmp, fr->addr)) {
        return;
    }
    tmp = addrrange_intersection(tmp, fr->addr);

    if (add) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    } else {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    }
}

static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
{
    CoalescedMemoryRange *cmr;

    QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, false);
    }
}

static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
{
    MemoryRegion *mr = fr->mr;
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, true);
    }
}

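/*
 * One pass of the topology update.  With @adding false, ranges that
 * disappear from the view are torn down; with @adding true, new ranges
 * are instantiated.  The caller runs the delete pass before the add pass
 * so listeners see a consistent transition between the two views.
 */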
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                flat_range_coalesced_io_del(frold, as);
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
                flat_range_coalesced_io_add(frnew, as);
            }

            ++inew;
        }
    }
}

static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable.  */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

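/*
 * Switch @as to the FlatView previously generated for its root, telling
 * the address space's listeners about the differences between the old
 * and the new view.
 */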
static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL.  */
    qatomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

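/*
 * When the outermost transaction ends, regenerate the FlatViews and flush
 * any pending memory map and ioeventfd updates to the listeners.
 */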
void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(bql_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

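/*
 * Escape the characters that are special in QOM path components as
 * "\xNN" sequences, so that any memory region name can be used as a
 * child property name.
 */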
static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->dev = (DeviceState *) object_dynamic_cast(mr->owner, TYPE_DEVICE);
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr));
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    char *path = (char *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL);
    op->resolve = memory_region_resolve_container;

    object_property_add_uint64_ptr(OBJECT(mr), "addr",
                                   &mr->addr, OBJ_PROP_FLAG_READ);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " HWADDR_FMT_plx "\n", addr);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " HWADDR_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = ldn_he_p(mr->ram_block->host + addr, size);

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    stn_he_p(mr->ram_block->host + addr, size, data);
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    if (mr->ops->valid.accepts
        && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
                      ", size %u, region '%s', reason: rejected\n",
                      is_write ? "write" : "read",
                      addr, size, memory_region_name(mr));
        return false;
    }

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
                      ", size %u, region '%s', reason: unaligned\n",
                      is_write ? "write" : "read",
                      addr, size, memory_region_name(mr));
        return false;
    }

    /* Treat zero as "any access size is valid", for compatibility. */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
                      ", size %u, region '%s', reason: invalid size "
                      "(min:%u max:%u)\n",
                      is_write ? "write" : "read",
                      addr, size, memory_region_name(mr),
                      mr->ops->valid.min_access_size,
                      mr->ops->valid.max_access_size);
        return false;
    }
    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        MemOp op,
                                        MemTxAttrs attrs)
{
    unsigned size = memop_size(op);
    MemTxResult r;

    if (mr->alias) {
        return memory_region_dispatch_read(mr->alias,
                                           mr->alias_offset + addr,
                                           pval, op, attrs);
    }
    if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, op);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         MemOp op,
                                         MemTxAttrs attrs)
{
    unsigned size = memop_size(op);

    if (mr->alias) {
        return memory_region_dispatch_write(mr->alias,
                                            mr->alias_offset + addr,
                                            data, op, attrs);
    }
    if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, op);

    /*
     * FIXME: it's not clear why under KVM the write would be processed
     * directly, instead of going through eventfd.  This probably should
     * test "tcg_enabled() || qtest_enabled()", or should just go away.
     */
    if (!kvm_enabled() &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}

bool memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    return memory_region_init_ram_flags_nomigrate(mr, owner, name,
                                                  size, 0, errp);
}

bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
                                            Object *owner,
                                            const char *name,
                                            uint64_t size,
                                            uint32_t ram_flags,
                                            Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, ram_flags, mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
        return false;
    }
    return true;
}

bool memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
        return false;
    }
    return true;
}

#ifdef CONFIG_POSIX
bool memory_region_init_ram_from_file(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      ram_addr_t offset,
                                      Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = !!(ram_flags & RAM_READONLY);
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->align = align;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path,
                                             offset, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
        return false;
    }
    return true;
}

bool memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    uint32_t ram_flags,
                                    int fd,
                                    ram_addr_t offset,
                                    Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = !!(ram_flags & RAM_READONLY);
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, offset,
                                           &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
        return false;
    }
    return true;
}
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_abort);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
    mr->destructor = memory_region_destructor_ram;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_abort);
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    if (!memory_region_init_ram_flags_nomigrate(mr, owner, name,
                                                size, 0, errp)) {
        return false;
    }
    mr->readonly = true;

    return true;
}

bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    Error *err = NULL;
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, 0, mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
        return false;
    }
    return true;
}

void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA noticeably.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            g_strdup(object_get_canonical_path_component(OBJECT(mr)));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

bool memory_region_is_protected(MemoryRegion *mr)
{
    return mr->ram && (mr->ram_block->flags & RAM_PROTECTED);
}

bool memory_region_has_guest_memfd(MemoryRegion *mr)
{
    return mr->ram_block && mr->ram_block->guest_memfd >= 0;
}

memory_region_get_dirty_log_mask(MemoryRegion * mr)1858 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
1859 {
1860 uint8_t mask = mr->dirty_log_mask;
1861 RAMBlock *rb = mr->ram_block;
1862
1863 if (global_dirty_tracking && ((rb && qemu_ram_is_migratable(rb)) ||
1864 memory_region_is_iommu(mr))) {
1865 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1866 }
1867
1868 if (tcg_enabled() && rb) {
1869 /* TCG only cares about dirty memory logging for RAM, not IOMMU. */
1870 mask |= (1 << DIRTY_MEMORY_CODE);
1871 }
1872 return mask;
1873 }
1874
memory_region_is_logging(MemoryRegion * mr,uint8_t client)1875 bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1876 {
1877 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1878 }
1879
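/*
 * Recompute the union of all registered notifiers' flags and, if it
 * changed, inform the IOMMU implementation via notify_flag_changed() so
 * it can start or stop producing the corresponding events.
 */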
static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr,
                                                   Error **errp)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    int ret = 0;

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        ret = imrc->notify_flag_changed(iommu_mr,
                                        iommu_mr->iommu_notify_flags,
                                        flags, errp);
    }

    if (!ret) {
        iommu_mr->iommu_notify_flags = flags;
    }
    return ret;
}

int memory_region_register_iommu_notifier(MemoryRegion *mr,
                                          IOMMUNotifier *n, Error **errp)
{
    IOMMUMemoryRegion *iommu_mr;
    int ret;

    if (mr->alias) {
        return memory_region_register_iommu_notifier(mr->alias, n, errp);
    }

    /* We need to register for at least one bitfield */
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    assert(n->iommu_idx >= 0 &&
           n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));

    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    ret = memory_region_update_iommu_notify_flags(iommu_mr, errp);
    if (ret) {
        QLIST_REMOVE(n, node);
    }
    return ret;
}

uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr, NULL);
}

void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
                                    const IOMMUTLBEvent *event)
{
    const IOMMUTLBEntry *entry = &event->entry;
    hwaddr entry_end = entry->iova + entry->addr_mask;
    IOMMUTLBEntry tmp = *entry;

    if (event->type == IOMMU_NOTIFIER_UNMAP) {
        assert(entry->perm == IOMMU_NONE);
    }

    /*
     * Skip the notification if it does not overlap with the
     * registered range.
     */
    if (notifier->start > entry_end || notifier->end < entry->iova) {
        return;
    }

    if (notifier->notifier_flags & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
        /* Crop (iova, addr_mask) to range */
        tmp.iova = MAX(tmp.iova, notifier->start);
        tmp.addr_mask = MIN(entry_end, notifier->end) - tmp.iova;
    } else {
        assert(entry->iova >= notifier->start && entry_end <= notifier->end);
    }

    if (event->type & notifier->notifier_flags) {
        notifier->notify(notifier, &tmp);
    }
}

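/*
 * Synthesize a single UNMAP event spanning the notifier's whole
 * registered [start, end] range and deliver it through
 * memory_region_notify_iommu_one().
 */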
void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier)
{
    IOMMUTLBEvent event;

    event.type = IOMMU_NOTIFIER_UNMAP;
    event.entry.target_as = &address_space_memory;
    event.entry.iova = notifier->start;
    event.entry.perm = IOMMU_NONE;
    event.entry.addr_mask = notifier->end - notifier->start;

    memory_region_notify_iommu_one(notifier, &event);
}

void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                const IOMMUTLBEvent event)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        if (iommu_notifier->iommu_idx == iommu_idx) {
            memory_region_notify_iommu_one(iommu_notifier, &event);
        }
    }
}

int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->get_attr) {
        return -EINVAL;
    }

    return imrc->get_attr(iommu_mr, attr, data);
}

int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->attrs_to_index) {
        return 0;
    }

    return imrc->attrs_to_index(iommu_mr, attrs);
}

int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->num_indexes) {
        return 1;
    }

    return imrc->num_indexes(iommu_mr);
}

RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr)
{
    if (!memory_region_is_ram(mr)) {
        return NULL;
    }
    return mr->rdm;
}

void memory_region_set_ram_discard_manager(MemoryRegion *mr,
                                           RamDiscardManager *rdm)
{
    g_assert(memory_region_is_ram(mr));
    g_assert(!rdm || !mr->rdm);
    mr->rdm = rdm;
}

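/*
 * The ram_discard_manager_*() helpers below simply dispatch into the
 * RamDiscardManagerClass vtable; every callback they use is mandatory
 * for an implementation, hence the g_assert()s before each call.
 */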
uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
                                                 const MemoryRegion *mr)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->get_min_granularity);
    return rdmc->get_min_granularity(rdm, mr);
}

bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
                                      const MemoryRegionSection *section)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->is_populated);
    return rdmc->is_populated(rdm, section);
}

int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamPopulate replay_fn,
                                         void *opaque)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->replay_populated);
    return rdmc->replay_populated(rdm, section, replay_fn, opaque);
}

void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
                                          MemoryRegionSection *section,
                                          ReplayRamDiscard replay_fn,
                                          void *opaque)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->replay_discarded);
    rdmc->replay_discarded(rdm, section, replay_fn, opaque);
}

void ram_discard_manager_register_listener(RamDiscardManager *rdm,
                                           RamDiscardListener *rdl,
                                           MemoryRegionSection *section)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->register_listener);
    rdmc->register_listener(rdm, rdl, section);
}

void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                             RamDiscardListener *rdl)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->unregister_listener);
    rdmc->unregister_listener(rdm, rdl);
}

/* Called with rcu_read_lock held. */
bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                          ram_addr_t *ram_addr, bool *read_only,
                          bool *mr_has_discard_manager, Error **errp)
{
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    bool writable = iotlb->perm & IOMMU_WO;

    if (mr_has_discard_manager) {
        *mr_has_discard_manager = false;
    }
    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory, iotlb->translated_addr,
                                 &xlat, &len, writable, MEMTXATTRS_UNSPECIFIED);
    if (!memory_region_is_ram(mr)) {
        error_setg(errp, "iommu map to non memory area %" HWADDR_PRIx "", xlat);
        return false;
    } else if (memory_region_has_ram_discard_manager(mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
        MemoryRegionSection tmp = {
            .mr = mr,
            .offset_within_region = xlat,
            .size = int128_make64(len),
        };
        if (mr_has_discard_manager) {
            *mr_has_discard_manager = true;
        }
        /*
         * Malicious VMs can map memory into the IOMMU, which is expected
         * to remain discarded.  vfio will pin all pages, populating memory.
         * Disallow that.  vmstate priorities make sure any RamDiscardManager
         * state was already restored before IOMMUs are restored.
         */
        if (!ram_discard_manager_is_populated(rdm, &tmp)) {
            error_setg(errp, "iommu map to discarded memory (e.g., unplugged"
                       " via virtio-mem): %" HWADDR_PRIx "",
                       iotlb->translated_addr);
            return false;
        }
    }

    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_setg(errp, "iommu has granularity incompatible with target AS");
        return false;
    }

    if (vaddr) {
        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    }

    if (ram_addr) {
        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
    }

    if (read_only) {
        *read_only = !writable || mr->readonly;
    }

    return true;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}

/*
 * If memory region `mr' is NULL, do global sync.  Otherwise, sync
 * dirty bitmap for the specified memory region.
 */
static void memory_region_sync_dirty_bitmap(MemoryRegion *mr, bool last_stage)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (listener->log_sync) {
            as = listener->address_space;
            view = address_space_get_flatview(as);
            FOR_EACH_FLAT_RANGE(fr, view) {
                if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
                    MemoryRegionSection mrs = section_from_flat_range(fr, view);
                    listener->log_sync(listener, &mrs);
                }
            }
            flatview_unref(view);
            trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 0);
        } else if (listener->log_sync_global) {
            /*
             * No matter whether a MR is specified, all we can do here
             * is a global sync, because we are not capable of syncing
             * at a finer granularity.
             */
            listener->log_sync_global(listener, last_stage);
            trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 1);
        }
    }
}

void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
                                      hwaddr len)
{
    MemoryRegionSection mrs;
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;
    hwaddr sec_start, sec_end, sec_size;

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_clear) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (!fr->dirty_log_mask || fr->mr != mr) {
                /*
                 * Clear dirty bitmap operation only applies to those
                 * regions whose dirty logging is at least enabled
                 */
                continue;
            }

            mrs = section_from_flat_range(fr, view);

            sec_start = MAX(mrs.offset_within_region, start);
            sec_end = mrs.offset_within_region + int128_get64(mrs.size);
            sec_end = MIN(sec_end, start + len);

            if (sec_start >= sec_end) {
                /*
                 * If this memory region section has no intersection
                 * with the requested range, skip.
                 */
                continue;
            }

            /* Valid case; shrink the section if needed */
            mrs.offset_within_address_space +=
                sec_start - mrs.offset_within_region;
            mrs.offset_within_region = sec_start;
            sec_size = sec_end - sec_start;
            mrs.size = int128_make64(sec_size);
            listener->log_clear(listener, &mrs);
        }
        flatview_unref(view);
    }
}

DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client)
{
    DirtyBitmapSnapshot *snapshot;
    assert(mr->ram_block);
    memory_region_sync_dirty_bitmap(mr, false);
    snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
    memory_global_after_dirty_log_sync();
    return snapshot;
}

bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_get_dirty(snap,
                memory_region_get_ram_addr(mr) + addr, size);
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
{
    if (mr->nonvolatile != nonvolatile) {
        memory_region_transaction_begin();
        mr->nonvolatile = nonvolatile;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}

int memory_region_get_fd(MemoryRegion *mr)
{
    RCU_READ_LOCK_GUARD();
    while (mr->alias) {
        mr = mr->alias;
    }
    return mr->ram_block->fd;
}

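/*
 * Resolve an alias chain down to the terminating RAM region, accumulating
 * alias offsets along the way, and return a host pointer into its
 * RAMBlock.  Only valid for regions that are (aliases of) RAM, as the
 * assert below enforces.
 */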
void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    uint64_t offset = 0;

    RCU_READ_LOCK_GUARD();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    return qemu_map_ram_ptr(mr->ram_block, offset);
}

MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}

void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size)
{
    if (mr->ram_block) {
        qemu_ram_msync(mr->ram_block, addr, size);
    }
}

void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size)
{
    /*
     * Might need to be extended to cover different types
     * of memory regions.
     */
    if (mr->dirty_log_mask) {
        memory_region_msync(mr, addr, size);
    }
}

/*
 * Call proper memory listeners about the change on the newly
 * added/removed CoalescedMemoryRange.
 */
static void memory_region_update_coalesced_range(MemoryRegion *mr,
                                                 CoalescedMemoryRange *cmr,
                                                 bool add)
{
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                flat_range_coalesced_io_notify(fr, as, cmr, add);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr, cmr, true);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        memory_region_update_coalesced_range(mr, cmr, false);
        g_free(cmr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

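/*
 * Register an ioeventfd for this region: a guest write of width @size at
 * @addr (matching @data when @match_data is set) signals @e instead of
 * taking the slow MMIO dispatch path.  The mr->ioeventfds array is kept
 * sorted by address.  Illustrative doorbell registration (field names
 * hypothetical):
 *
 *     memory_region_add_eventfd(mr, 0x10, 2, false, 0, &s->doorbell_event);
 *
 * which fires s->doorbell_event on any 2-byte write at offset 0x10.
 */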
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

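/*
 * Keep mr->subregions sorted by descending priority: a new subregion is
 * inserted in front of the first existing entry whose priority it meets
 * or exceeds, so rendering can walk the list in visibility order.
 */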
static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *alias;

    assert(!subregion->container);
    subregion->container = mr;
    for (alias = subregion->alias; alias; alias = alias->alias) {
        alias->mapped_via_alias++;
    }
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

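/*
 * Like memory_region_add_subregion(), but with an explicit priority:
 * where subregions overlap, the higher-priority region is the one
 * rendered into the flat view for the overlapping range.
 */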
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    MemoryRegion *alias;

    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    for (alias = subregion->alias; alias; alias = alias->alias) {
        alias->mapped_via_alias--;
        assert(alias->mapped_via_alias >= 0);
    }
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        memory_region_add_subregion_common(container, mr->addr, mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable)
{
    if (unmergeable == mr->unmergeable) {
        return;
    }

    memory_region_transaction_begin();
    mr->unmergeable = unmergeable;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return !!mr->container || mr->mapped_via_alias;
}

/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = address_space_to_flatview(as);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.fv = view;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    ret.nonvolatile = fr->nonvolatile;
    return ret;
}

MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    RCU_READ_LOCK_GUARD();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    return ret;
}

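/*
 * Deep-copy a MemoryRegionSection: the copy takes its own reference on
 * the region and on the FlatView, so it remains valid until released
 * with memory_region_section_free_copy().
 */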
MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s)
{
    MemoryRegionSection *tmp = g_new(MemoryRegionSection, 1);

    *tmp = *s;
    if (tmp->mr) {
        memory_region_ref(tmp->mr);
    }
    if (tmp->fv) {
        bool ret = flatview_ref(tmp->fv);

        g_assert(ret);
    }
    return tmp;
}

void memory_region_section_free_copy(MemoryRegionSection *s)
{
    if (s->fv) {
        flatview_unref(s->fv);
    }
    if (s->mr) {
        memory_region_unref(s->mr);
    }
    g_free(s);
}

bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    RCU_READ_LOCK_GUARD();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    return mr && mr != container;
}

void memory_global_dirty_log_sync(bool last_stage)
{
    memory_region_sync_dirty_bitmap(NULL, last_stage);
}

void memory_global_after_dirty_log_sync(void)
{
    MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
}

/*
 * Dirty track stop flags that are postponed due to VM being stopped.  Should
 * only be used within vmstate_change hook.
 */
static unsigned int postponed_stop_flags;
static VMChangeStateEntry *vmstate_change;
static void memory_global_dirty_log_stop_postponed_run(void);

static bool memory_global_dirty_log_do_start(Error **errp)
{
    MemoryListener *listener;

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (listener->log_global_start) {
            if (!listener->log_global_start(listener, errp)) {
                goto err;
            }
        }
    }
    return true;

err:
    while ((listener = QTAILQ_PREV(listener, link)) != NULL) {
        if (listener->log_global_stop) {
            listener->log_global_stop(listener);
        }
    }

    return false;
}

bool memory_global_dirty_log_start(unsigned int flags, Error **errp)
{
    unsigned int old_flags;

    assert(flags && !(flags & (~GLOBAL_DIRTY_MASK)));

    if (vmstate_change) {
        /* If there is postponed stop(), operate on it first */
        postponed_stop_flags &= ~flags;
        memory_global_dirty_log_stop_postponed_run();
    }

    flags &= ~global_dirty_tracking;
    if (!flags) {
        return true;
    }

    old_flags = global_dirty_tracking;
    global_dirty_tracking |= flags;
    trace_global_dirty_changed(global_dirty_tracking);

    if (!old_flags) {
        if (!memory_global_dirty_log_do_start(errp)) {
            global_dirty_tracking &= ~flags;
            trace_global_dirty_changed(global_dirty_tracking);
            return false;
        }

        memory_region_transaction_begin();
        memory_region_update_pending = true;
        memory_region_transaction_commit();
    }
    return true;
}

static void memory_global_dirty_log_do_stop(unsigned int flags)
{
    assert(flags && !(flags & (~GLOBAL_DIRTY_MASK)));
    assert((global_dirty_tracking & flags) == flags);
    global_dirty_tracking &= ~flags;

    trace_global_dirty_changed(global_dirty_tracking);

    if (!global_dirty_tracking) {
        memory_region_transaction_begin();
        memory_region_update_pending = true;
        memory_region_transaction_commit();
        MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
    }
}

/*
 * Execute the postponed dirty log stop operations, if any, then reset
 * everything (including the flags and the vmstate change hook).
 */
static void memory_global_dirty_log_stop_postponed_run(void)
{
    /* This must be called with the vmstate handler registered */
    assert(vmstate_change);

    /* Note: postponed_stop_flags can be cleared in log start routine */
    if (postponed_stop_flags) {
        memory_global_dirty_log_do_stop(postponed_stop_flags);
        postponed_stop_flags = 0;
    }

    qemu_del_vm_change_state_handler(vmstate_change);
    vmstate_change = NULL;
}

static void memory_vm_change_state_handler(void *opaque, bool running,
                                           RunState state)
{
    if (running) {
        memory_global_dirty_log_stop_postponed_run();
    }
}

void memory_global_dirty_log_stop(unsigned int flags)
{
    if (!runstate_is_running()) {
        /* Postpone the dirty log stop, e.g., to when VM starts again */
        if (vmstate_change) {
            /* Batch with previous postponed flags */
            postponed_stop_flags |= flags;
        } else {
            postponed_stop_flags = flags;
            vmstate_change = qemu_add_vm_change_state_handler(
                memory_vm_change_state_handler, NULL);
        }
        return;
    }

    memory_global_dirty_log_do_stop(flags);
}

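/*
 * Replay the current memory topology to a newly registered listener:
 * region_add (and log_start, where dirty logging is active) is invoked
 * for every FlatRange of the address space's flat view, bracketed by
 * begin/commit just like a regular topology update.
 */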
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_tracking) {
        /*
         * Currently only VFIO can fail log_global_start(), and it's not
         * yet allowed to hotplug any PCI device during migration. So this
         * should never fail when invoked, guard it with error_abort.  If
         * it can start to fail in the future, we need to be able to fail
         * the whole listener_add_address_space() and its callers.
         */
        if (listener->log_global_start) {
            listener->log_global_start(listener, &error_abort);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

static void listener_del_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (fr->dirty_log_mask && listener->log_stop) {
            listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
        }
        if (listener->region_del) {
            listener->region_del(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    /* Only one of them can be defined for a listener */
    assert(!(listener->log_sync && listener->log_sync_global));

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);

    if (listener->eventfd_add || listener->eventfd_del) {
        as->ioeventfd_notifiers++;
    }
}

void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    if (listener->eventfd_add || listener->eventfd_del) {
        listener->address_space->ioeventfd_notifiers--;
    }

    listener_del_address_space(listener, listener->address_space);
    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}

void address_space_remove_listeners(AddressSpace *as)
{
    while (!QTAILQ_EMPTY(&as->listeners)) {
        memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
    }
}

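/*
 * Illustrative use: a device model wraps its view of memory as
 *
 *     address_space_init(&s->dma_as, root_mr, "mydev-dma");
 *
 * (names hypothetical), after which address_space_rw() and friends can
 * target it.
 */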
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    as->root = root;
    as->current_map = NULL;
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->max_bounce_buffer_size = DEFAULT_MAX_BOUNCE_BUFFER_SIZE;
    as->bounce_buffer_size = 0;
    qemu_mutex_init(&as->map_client_list_lock);
    QLIST_INIT(&as->map_client_list);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_update_topology(as);
    address_space_update_ioeventfds(as);
}

static void do_address_space_destroy(AddressSpace *as)
{
    assert(qatomic_read(&as->bounce_buffer_size) == 0);
    assert(QLIST_EMPTY(&as->map_client_list));
    qemu_mutex_destroy(&as->map_client_list_lock);

    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}

static const char *memory_region_type(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_type(mr->alias);
    }
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;

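/*
 * MR_SIZE() turns a region size into the offset of its last byte
 * (size - 1, guarding against the zero-sized case), so the mtree output
 * below prints inclusive start-end ranges.
 */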
#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "

static void mtree_expand_owner(const char *label, Object *obj)
{
    DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);

    qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
    if (dev && dev->id) {
        qemu_printf(" id=%s", dev->id);
    } else {
        char *canonical_path = object_get_canonical_path(obj);
        if (canonical_path) {
            qemu_printf(" path=%s", canonical_path);
            g_free(canonical_path);
        } else {
            qemu_printf(" type=%s", object_get_typename(obj));
        }
    }
    qemu_printf("}");
}

static void mtree_print_mr_owner(const MemoryRegion *mr)
{
    Object *owner = mr->owner;
    Object *parent = memory_region_owner((MemoryRegion *)mr);

    if (!owner && !parent) {
        qemu_printf(" orphan");
        return;
    }
    if (owner) {
        mtree_expand_owner("owner", owner);
    }
    if (parent && parent != owner) {
        mtree_expand_owner("parent", parent);
    }
}

static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue,
                           bool owner, bool display_disabled)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of memory region. This should never
     * happen normally. When it happens, we dump something to warn the
     * user who is observing this.
     */
    if (cur_start < base || cur_end < cur_start) {
        qemu_printf("[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        if (mr->enabled || display_disabled) {
            for (i = 0; i < level; i++) {
                qemu_printf(MTREE_INDENT);
            }
            qemu_printf(HWADDR_FMT_plx "-" HWADDR_FMT_plx
                        " (prio %d, %s%s): alias %s @%s " HWADDR_FMT_plx
                        "-" HWADDR_FMT_plx "%s",
                        cur_start, cur_end,
                        mr->priority,
                        mr->nonvolatile ? "nv-" : "",
                        memory_region_type((MemoryRegion *)mr),
                        memory_region_name(mr),
                        memory_region_name(mr->alias),
                        mr->alias_offset,
                        mr->alias_offset + MR_SIZE(mr->size),
                        mr->enabled ? "" : " [disabled]");
            if (owner) {
                mtree_print_mr_owner(mr);
            }
            qemu_printf("\n");
        }
    } else {
        if (mr->enabled || display_disabled) {
            for (i = 0; i < level; i++) {
                qemu_printf(MTREE_INDENT);
            }
            qemu_printf(HWADDR_FMT_plx "-" HWADDR_FMT_plx
                        " (prio %d, %s%s): %s%s",
                        cur_start, cur_end,
                        mr->priority,
                        mr->nonvolatile ? "nv-" : "",
                        memory_region_type((MemoryRegion *)mr),
                        memory_region_name(mr),
                        mr->enabled ? "" : " [disabled]");
            if (owner) {
                mtree_print_mr_owner(mr);
            }
            qemu_printf("\n");
        }
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(ml->mr, level + 1, cur_start,
                       alias_print_queue, owner, display_disabled);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}

struct FlatViewInfo {
    int counter;
    bool dispatch_tree;
    bool owner;
    AccelClass *ac;
};

static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
    int i;
    AddressSpace *as;

    qemu_printf("FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace*, i);
        qemu_printf(" AS \"%s\", root: %s",
                    as->name, memory_region_name(as->root));
        if (as->root->alias) {
            qemu_printf(", alias %s", memory_region_name(as->root->alias));
        }
        qemu_printf("\n");
    }

    qemu_printf(" Root memory region: %s\n",
                view->root ? memory_region_name(view->root) : "(none)");

    if (n <= 0) {
        qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            qemu_printf(MTREE_INDENT HWADDR_FMT_plx "-" HWADDR_FMT_plx
                        " (prio %d, %s%s): %s @" HWADDR_FMT_plx,
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr),
                        range->offset_in_region);
        } else {
            qemu_printf(MTREE_INDENT HWADDR_FMT_plx "-" HWADDR_FMT_plx
                        " (prio %d, %s%s): %s",
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr));
        }
        if (fvi->owner) {
            mtree_print_mr_owner(mr);
        }

        if (fvi->ac) {
            for (i = 0; i < fv_address_spaces->len; ++i) {
                as = g_array_index(fv_address_spaces, AddressSpace*, i);
                if (fvi->ac->has_memory(current_machine, as,
                                        int128_get64(range->addr.start),
                                        MR_SIZE(range->addr.size) + 1)) {
                    qemu_printf(" %s", fvi->ac->name);
                }
            }
        }
        qemu_printf("\n");
        range++;
    }

#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(view->dispatch, view->root);
    }
#endif

    qemu_printf("\n");
}

mtree_info_flatview_free(gpointer key,gpointer value,gpointer user_data)3462 static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
3463 gpointer user_data)
3464 {
3465 FlatView *view = key;
3466 GArray *fv_address_spaces = value;
3467
3468 g_array_unref(fv_address_spaces);
3469 flatview_unref(view);
3470
3471 return true;
3472 }
3473
mtree_info_flatview(bool dispatch_tree,bool owner)3474 static void mtree_info_flatview(bool dispatch_tree, bool owner)
3475 {
3476 struct FlatViewInfo fvi = {
3477 .counter = 0,
3478 .dispatch_tree = dispatch_tree,
3479 .owner = owner,
3480 };
3481 AddressSpace *as;
3482 FlatView *view;
3483 GArray *fv_address_spaces;
3484 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
3485 AccelClass *ac = ACCEL_GET_CLASS(current_accel());
3486
3487 if (ac->has_memory) {
3488 fvi.ac = ac;
3489 }
3490
3491 /* Gather all FVs in one table */
3492 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3493 view = address_space_get_flatview(as);
3494
3495 fv_address_spaces = g_hash_table_lookup(views, view);
3496 if (!fv_address_spaces) {
3497 fv_address_spaces = g_array_new(false, false, sizeof(as));
3498 g_hash_table_insert(views, view, fv_address_spaces);
3499 }
3500
3501 g_array_append_val(fv_address_spaces, as);
3502 }
3503
3504 /* Print */
3505 g_hash_table_foreach(views, mtree_print_flatview, &fvi);
3506
3507 /* Free */
3508 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
3509 g_hash_table_unref(views);
3510 }
3511
3512 struct AddressSpaceInfo {
3513 MemoryRegionListHead *ml_head;
3514 bool owner;
3515 bool disabled;
3516 };
3517
3518 /* Returns negative value if a < b; zero if a = b; positive value if a > b. */
address_space_compare_name(gconstpointer a,gconstpointer b)3519 static gint address_space_compare_name(gconstpointer a, gconstpointer b)
3520 {
3521 const AddressSpace *as_a = a;
3522 const AddressSpace *as_b = b;
3523
3524 return g_strcmp0(as_a->name, as_b->name);
3525 }
3526
mtree_print_as_name(gpointer data,gpointer user_data)3527 static void mtree_print_as_name(gpointer data, gpointer user_data)
3528 {
3529 AddressSpace *as = data;
3530
3531 qemu_printf("address-space: %s\n", as->name);
3532 }
3533
mtree_print_as(gpointer key,gpointer value,gpointer user_data)3534 static void mtree_print_as(gpointer key, gpointer value, gpointer user_data)
3535 {
3536 MemoryRegion *mr = key;
3537 GSList *as_same_root_mr_list = value;
3538 struct AddressSpaceInfo *asi = user_data;
3539
3540 g_slist_foreach(as_same_root_mr_list, mtree_print_as_name, NULL);
3541 mtree_print_mr(mr, 1, 0, asi->ml_head, asi->owner, asi->disabled);
3542 qemu_printf("\n");
3543 }

static gboolean mtree_info_as_free(gpointer key, gpointer value,
                                   gpointer user_data)
{
    GSList *as_same_root_mr_list = value;

    g_slist_free(as_same_root_mr_list);

    return true;
}

static void mtree_info_as(bool dispatch_tree, bool owner, bool disabled)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;
    GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
    GSList *as_same_root_mr_list;
    struct AddressSpaceInfo asi = {
        .ml_head = &ml_head,
        .owner = owner,
        .disabled = disabled,
    };

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        /*
         * Group address spaces by root MR: key = root MR,
         * value = name-sorted list of address spaces sharing that root.
         */
        as_same_root_mr_list = g_hash_table_lookup(views, as->root);
        as_same_root_mr_list = g_slist_insert_sorted(as_same_root_mr_list, as,
                                                     address_space_compare_name);
        g_hash_table_insert(views, as->root, as_same_root_mr_list);
    }

    /* Print address spaces */
    g_hash_table_foreach(views, mtree_print_as, &asi);
    g_hash_table_foreach_remove(views, mtree_info_as_free, NULL);
    g_hash_table_unref(views);

    /* Print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(ml->mr, 1, 0, &ml_head, owner, disabled);
        qemu_printf("\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}
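
/*
 * Illustrative output shape (abbreviated and hypothetical; the exact
 * regions, names and ranges are machine-dependent):
 *
 *     address-space: cpu-memory-0
 *     address-space: memory
 *       0000000000000000-ffffffffffffffff (prio 0, i/o): system
 *         0000000000000000-0000000007ffffff (prio 0, ram): pc.ram
 *       ...
 *
 *     memory-region: pc.ram
 *       ...
 */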

void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled)
{
    if (flatview) {
        mtree_info_flatview(dispatch_tree, owner);
    } else {
        mtree_info_as(dispatch_tree, owner, disabled);
    }
}
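
/*
 * This is the entry point behind the HMP "info mtree" monitor command; as
 * of this writing the flags map as -f (print flat views), -d (also dump
 * the dispatch tree, flat views only), -o (print owners) and -D (include
 * disabled regions), e.g.:
 *
 *     (qemu) info mtree -f -d
 */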

bool memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;

    if (!memory_region_init_ram_nomigrate(mr, owner, name, size, errp)) {
        return false;
    }
    /*
     * This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);

    return true;
}
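
/*
 * Typical use is from a device's realize method; a minimal sketch with a
 * hypothetical device "dev", field "s->ram", size and mapping address:
 *
 *     memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                            64 * KiB, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0x10000000, &s->ram);
 *
 * (KiB comes from "qemu/units.h"; &error_fatal aborts on failure, which is
 * appropriate for board-level RAM allocated at machine init.)
 */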

bool memory_region_init_ram_guest_memfd(MemoryRegion *mr,
                                        Object *owner,
                                        const char *name,
                                        uint64_t size,
                                        Error **errp)
{
    DeviceState *owner_dev;

    if (!memory_region_init_ram_flags_nomigrate(mr, owner, name, size,
                                                RAM_GUEST_MEMFD, errp)) {
        return false;
    }
    /*
     * This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);

    return true;
}
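
/*
 * RAM_GUEST_MEMFD requests that the region additionally be backed by a
 * kernel guest_memfd; confidential-guest setups (e.g. KVM-based ones) use
 * such backing for memory that the host must not access while it is
 * private to the guest.
 */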

bool memory_region_init_rom(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;

    if (!memory_region_init_rom_nomigrate(mr, owner, name, size, errp)) {
        return false;
    }
    /*
     * This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);

    return true;
}

bool memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;

    if (!memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                                 name, size, errp)) {
        return false;
    }
    /*
     * This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);

    return true;
}
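
/*
 * Sketch of a caller, with hypothetical names: a flash-like device whose
 * reads are satisfied directly from the RAM backing (while the region is
 * in its default "ROMD" mode) but whose writes trap through the supplied
 * MemoryRegionOps:
 *
 *     static void flash_write(void *opaque, hwaddr addr,
 *                             uint64_t data, unsigned size)
 *     {
 *         // e.g. interpret programming command cycles here
 *     }
 *
 *     static const MemoryRegionOps flash_ops = {
 *         .write = flash_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *     };
 *
 *     memory_region_init_rom_device(&s->flash, OBJECT(dev), &flash_ops,
 *                                   s, "mydev.flash", size, &error_fatal);
 */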

/*
 * Support system builds with CONFIG_FUZZ using a weak symbol and a stub for
 * the fuzz_dma_read_cb callback.
 */
#ifdef CONFIG_FUZZ
void __attribute__((weak)) fuzz_dma_read_cb(size_t addr,
                                            size_t len,
                                            MemoryRegion *mr)
{
}
#endif

static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .class_size         = sizeof(MemoryRegionClass),
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent             = TYPE_MEMORY_REGION,
    .name               = TYPE_IOMMU_MEMORY_REGION,
    .class_size         = sizeof(IOMMUMemoryRegionClass),
    .instance_size      = sizeof(IOMMUMemoryRegion),
    .instance_init      = iommu_memory_region_initfn,
    .abstract           = true,
};
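
/*
 * TYPE_IOMMU_MEMORY_REGION is abstract: IOMMU implementations register a
 * subtype and fill in the IOMMUMemoryRegionClass callbacks (most
 * importantly ->translate) from their class_init. A skeletal example with
 * hypothetical names:
 *
 *     static void my_iommu_mr_class_init(ObjectClass *klass, void *data)
 *     {
 *         IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
 *
 *         imrc->translate = my_iommu_translate;
 *     }
 *
 *     static const TypeInfo my_iommu_mr_info = {
 *         .parent = TYPE_IOMMU_MEMORY_REGION,
 *         .name = "my-iommu-memory-region",
 *         .class_init = my_iommu_mr_class_init,
 *     };
 */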

static const TypeInfo ram_discard_manager_info = {
    .parent             = TYPE_INTERFACE,
    .name               = TYPE_RAM_DISCARD_MANAGER,
    .class_size         = sizeof(RamDiscardManagerClass),
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
    type_register_static(&ram_discard_manager_info);
}

type_init(memory_register_types)