/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "exec/memory.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qom/object.h"
#include "trace.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/tcg.h"
#include "qemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"
#include "exec/address-spaces.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
unsigned int global_dirty_tracking;

static QTAILQ_HEAD(, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...) \
    do { \
        MemoryListener *_listener; \
        \
        switch (_direction) { \
        case Forward: \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) { \
                if (_listener->_callback) { \
                    _listener->_callback(_listener, ##_args); \
                } \
            } \
            break; \
        case Reverse: \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) { \
                    _listener->_callback(_listener, ##_args); \
                } \
            } \
            break; \
        default: \
            abort(); \
        } \
    } while (0)
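/*
 * Note on _direction: callers pick Forward or Reverse so that paired
 * set-up/tear-down notifications nest sensibly across listeners; for
 * example, later in this file region_add is delivered Forward while
 * region_del is delivered in Reverse.
 */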
#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do { \
        MemoryListener *_listener; \
        \
        switch (_direction) { \
        case Forward: \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) { \
                    _listener->_callback(_listener, _section, ##_args); \
                } \
            } \
            break; \
        case Reverse: \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) { \
                    _listener->_callback(_listener, _section, ##_args); \
                } \
            } \
            break; \
        default: \
            abort(); \
        } \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \
    do { \
        MemoryRegionSection mrs = section_from_flat_range(fr, \
                address_space_to_flatview(as)); \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args); \
    } while(0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    if (int128_eq(a->addr.start, b->addr.start) &&
        (!int128_nz(a->addr.size) || !int128_nz(b->addr.size) ||
         (int128_eq(a->addr.size, b->addr.size) &&
          (a->match_data == b->match_data) &&
          ((a->match_data && (a->data == b->data)) || !a->match_data) &&
          (a->e == b->e))))
        return true;

    return false;
}
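/*
 * Rough sketch of the flattening performed below (addresses invented):
 * a 4KiB MMIO subregion mapped at offset 0x1000 of a 64KiB RAM region
 * that is itself mapped at 0x0 renders to three disjoint FlatRanges:
 *
 *   [0x0000, 0x1000)  -> RAM
 *   [0x1000, 0x2000)  -> MMIO
 *   [0x2000, 0x10000) -> RAM
 *
 * i.e. the MemoryRegion tree becomes a sorted list of absolute ranges.
 */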
/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
    bool unmergeable;
};

#define FOR_EACH_FLAT_RANGE(var, view) \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
        .unmergeable = fr->unmergeable,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile
        && a->unmergeable == b->unmergeable;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return qatomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (qatomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile
        && !r1->unmergeable && !r2->unmergeable;
}
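/*
 * For illustration (values invented): two FlatRanges covering
 * [0x0, 0x1000) and [0x1000, 0x3000) of the same MemoryRegion, with equal
 * attributes and contiguous offset_in_region, satisfy can_merge() and are
 * collapsed by flatview_simplify() below into a single [0x0, 0x3000) range.
 */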
/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j, k;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        for (k = i; k < j; k++) {
            memory_region_unref(view->ranges[k].mr);
        }
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static bool memory_region_big_endian(MemoryRegion *mr)
{
#if TARGET_BIG_ENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
{
    if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
        switch (op & MO_SIZE) {
        case MO_8:
            break;
        case MO_16:
            *data = bswap16(*data);
            break;
        case MO_32:
            *data = bswap32(*data);
            break;
        case MO_64:
            *data = bswap64(*data);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               signed shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
                                     memory_region_name(mr));
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
                                     memory_region_name(mr));
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}
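/*
 * Worked example (sizes and values invented): an 8-byte read from a device
 * whose .impl.max_access_size is 4 is split by access_with_adjusted_size()
 * below into two 4-byte reads.  For a little-endian device the pieces are
 * recombined by memory_region_shift_read_access() as
 *
 *   *value |= (tmp0 & 0xffffffff) << 0;
 *   *value |= (tmp1 & 0xffffffff) << 32;
 *
 * and the write accessors extract partial values with the inverse shift.
 */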
static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
                                      memory_region_name(mr));
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
                                      memory_region_name(mr));
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;
    bool reentrancy_guard_applied = false;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* Do not allow more than one simultaneous access to a device's IO Regions */
    if (mr->dev && !mr->disable_reentrancy_guard &&
        !mr->ram_device && !mr->ram && !mr->rom_device && !mr->readonly) {
        if (mr->dev->mem_reentrancy_guard.engaged_in_io) {
            warn_report_once("Blocked re-entrant IO on MemoryRegion: "
                             "%s at addr: 0x%" HWADDR_PRIX,
                             memory_region_name(mr), addr);
            return MEMTX_ACCESS_ERROR;
        }
        mr->dev->mem_reentrancy_guard.engaged_in_io = true;
        reentrancy_guard_applied = true;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    if (mr->dev && reentrancy_guard_applied) {
        mr->dev->mem_reentrancy_guard.engaged_in_io = false;
    }
    return r;
}

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}
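/*
 * Illustration of the clipping done below (layout invented): if a
 * higher-priority sibling already occupies [0x2000, 0x3000) in the view, a
 * terminating region spanning [0x1000, 0x4000) only contributes the gaps
 * [0x1000, 0x2000) and [0x3000, 0x4000); the overlap stays with whichever
 * range was rendered first.
 */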
/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly,
                                 bool nonvolatile,
                                 bool unmergeable)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;
    nonvolatile |= mr->nonvolatile;
    unmergeable |= mr->unmergeable;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip,
                             readonly, nonvolatile, unmergeable);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip,
                             readonly, nonvolatile, unmergeable);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;
    fr.nonvolatile = nonvolatile;
    fr.unmergeable = unmergeable;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque)
{
    FlatRange *fr;

    assert(fv);
    assert(cb);

    FOR_EACH_FLAT_RANGE(fr, fv) {
        if (cb(fr->addr.start, fr->addr.size, fr->mr,
               fr->offset_in_region, opaque)) {
            break;
        }
    }
}
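/*
 * Hypothetical use of flatview_for_each_range() above: a callback matching
 * the flatview_cb signature that stops the walk at the first range backed by
 * a given region, e.g.
 *
 *   static bool find_mr_cb(Int128 start, Int128 len, const MemoryRegion *mr,
 *                          hwaddr offset_in_region, void *opaque)
 *   {
 *       return mr == opaque;   -- returning true stops the iteration
 *   }
 */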
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the
                         * only enabled one, use it in the hope of finding an
                         * alias down the way.  This will also let us share
                         * FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false, false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    RCU_READ_LOCK_GUARD();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    unsigned ioeventfd_max;
    MemoryRegionIoeventfd *ioeventfds;
    AddrRange tmp;
    unsigned i;

    if (!as->ioeventfd_notifiers) {
        return;
    }

    /*
     * It is likely that the number of ioeventfds hasn't changed much, so use
     * the previous size as the starting value, with some headroom to avoid
     * gratuitous reallocations.
     */
    ioeventfd_max = QEMU_ALIGN_UP(as->ioeventfd_nb, 4);
    ioeventfds = g_new(MemoryRegionIoeventfd, ioeventfd_max);

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                if (ioeventfd_nb > ioeventfd_max) {
                    ioeventfd_max = MAX(ioeventfd_max * 2, 4);
                    ioeventfds = g_realloc(ioeventfds,
                                           ioeventfd_max * sizeof(*ioeventfds));
                }
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

/*
 * Notify the memory listeners about the coalesced IO change events of
 * range `cmr'.  Only the part that intersects the specified FlatRange
 * will be sent.
 */
static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
                                           CoalescedMemoryRange *cmr, bool add)
{
    AddrRange tmp;

    tmp = addrrange_shift(cmr->addr,
                          int128_sub(fr->addr.start,
                                     int128_make64(fr->offset_in_region)));
    if (!addrrange_intersects(tmp, fr->addr)) {
        return;
    }
    tmp = addrrange_intersection(tmp, fr->addr);

    if (add) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    } else {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    }
}

static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
{
    CoalescedMemoryRange *cmr;

    QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, false);
    }
}

static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
{
    MemoryRegion *mr = fr->mr;
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, true);
    }
}
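/*
 * The pass below runs twice per topology change: once with adding=false
 * against the old view (ranges are torn down, region_del walks listeners in
 * Reverse) and once with adding=true against the new view (ranges are
 * instantiated, region_add walks Forward).  See address_space_set_flatview().
 */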
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                flat_range_coalesced_io_del(frold, as);
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
                flat_range_coalesced_io_add(frnew, as);
            }

            ++inew;
        }
    }
}

static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable. */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    qatomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}
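/*
 * Example (name invented): memory_region_escape_name("pci/mem[0]") returns
 * "pci\x2fmem\x5b0\x5d", so that '/', '[', ']' and '\\' in a region name
 * cannot clash with the QOM path syntax used for the child property.
 */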
static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->dev = (DeviceState *) object_dynamic_cast(mr->owner, TYPE_DEVICE);
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr));
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    char *path = (char *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL);
    op->resolve = memory_region_resolve_container;

    object_property_add_uint64_ptr(OBJECT(mr), "addr",
                                   &mr->addr, OBJ_PROP_FLAG_READ);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " HWADDR_FMT_plx "\n", addr);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " HWADDR_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    if (mr->ops->valid.accepts
        && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
                      ", size %u, region '%s', reason: rejected\n",
                      is_write ? "write" : "read",
                      addr, size, memory_region_name(mr));
        return false;
    }

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
                      ", size %u, region '%s', reason: unaligned\n",
                      is_write ? "write" : "read",
                      addr, size, memory_region_name(mr));
        return false;
    }

    /* Treat a zero max_access_size as "all access sizes are valid", for
     * compatibility with ops that do not fill in .valid.
     */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
                      ", size %u, region '%s', reason: invalid size "
                      "(min:%u max:%u)\n",
                      is_write ? "write" : "read",
                      addr, size, memory_region_name(mr),
                      mr->ops->valid.min_access_size,
                      mr->ops->valid.max_access_size);
        return false;
    }
    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        MemOp op,
                                        MemTxAttrs attrs)
{
    unsigned size = memop_size(op);
    MemTxResult r;

    if (mr->alias) {
        return memory_region_dispatch_read(mr->alias,
                                           mr->alias_offset + addr,
                                           pval, op, attrs);
    }
    if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, op);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         MemOp op,
                                         MemTxAttrs attrs)
{
    unsigned size = memop_size(op);

    if (mr->alias) {
        return memory_region_dispatch_write(mr->alias,
                                            mr->alias_offset + addr,
                                            data, op, attrs);
    }
    if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, op);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}

void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp);
}

void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
                                            Object *owner,
                                            const char *name,
                                            uint64_t size,
                                            uint32_t ram_flags,
                                            Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, ram_flags, mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

#ifdef CONFIG_POSIX
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      ram_addr_t offset,
                                      Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = !!(ram_flags & RAM_READONLY);
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->align = align;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path,
                                             offset, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    uint32_t ram_flags,
                                    int fd,
                                    ram_addr_t offset,
                                    Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = !!(ram_flags & RAM_READONLY);
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, offset,
                                           &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}
#endif
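/*
 * Typical usage sketch (device, ops and size invented): a device exposing a
 * 4KiB MMIO window usually does
 *
 *   memory_region_init_io(&s->iomem, OBJECT(s), &my_dev_ops, s,
 *                         "my-dev-mmio", 0x1000);
 *
 * and then maps it with memory_region_add_subregion(); the *_nomigrate RAM
 * initializers above leave it to the caller to arrange migration of the
 * contents (e.g. via vmstate_register_ram()).
 */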
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
    mr->destructor = memory_region_destructor_ram;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp);
    mr->readonly = true;
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    Error *err = NULL;
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, 0, mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA noticeably.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            g_strdup(object_get_canonical_path_component(OBJECT(mr)));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

bool memory_region_is_protected(MemoryRegion *mr)
{
    return mr->ram && (mr->ram_block->flags & RAM_PROTECTED);
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    RAMBlock *rb = mr->ram_block;

    if (global_dirty_tracking && ((rb && qemu_ram_is_migratable(rb)) ||
                                  memory_region_is_iommu(mr))) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }

    if (tcg_enabled() && rb) {
        /* TCG only cares about dirty memory logging for RAM, not IOMMU.  */
        mask |= (1 << DIRTY_MEMORY_CODE);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}

static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr,
                                                   Error **errp)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    int ret = 0;

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        ret = imrc->notify_flag_changed(iommu_mr,
                                        iommu_mr->iommu_notify_flags,
                                        flags, errp);
    }

    if (!ret) {
        iommu_mr->iommu_notify_flags = flags;
    }
    return ret;
}

int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
                                           uint64_t page_size_mask,
                                           Error **errp)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    int ret = 0;

    if (imrc->iommu_set_page_size_mask) {
        ret = imrc->iommu_set_page_size_mask(iommu_mr, page_size_mask, errp);
    }
    return ret;
}

int memory_region_register_iommu_notifier(MemoryRegion *mr,
                                          IOMMUNotifier *n, Error **errp)
{
    IOMMUMemoryRegion *iommu_mr;
    int ret;

    if (mr->alias) {
        return memory_region_register_iommu_notifier(mr->alias, n, errp);
    }

    /* We need to register for at least one bitfield */
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    assert(n->iommu_idx >= 0 &&
           n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));

    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    ret = memory_region_update_iommu_notify_flags(iommu_mr, errp);
    if (ret) {
        QLIST_REMOVE(n, node);
    }
    return ret;
}

uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr, NULL);
}

void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
                                    IOMMUTLBEvent *event)
{
    IOMMUTLBEntry *entry = &event->entry;
    hwaddr entry_end = entry->iova + entry->addr_mask;
    IOMMUTLBEntry tmp = *entry;

    if (event->type == IOMMU_NOTIFIER_UNMAP) {
        assert(entry->perm == IOMMU_NONE);
    }

    /*
     * Skip the notification if the notification does not overlap
     * with registered range.
     */
    if (notifier->start > entry_end || notifier->end < entry->iova) {
        return;
    }

    if (notifier->notifier_flags & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
        /* Crop (iova, addr_mask) to range */
        tmp.iova = MAX(tmp.iova, notifier->start);
        tmp.addr_mask = MIN(entry_end, notifier->end) - tmp.iova;
    } else {
        assert(entry->iova >= notifier->start && entry_end <= notifier->end);
    }

    if (event->type & notifier->notifier_flags) {
        notifier->notify(notifier, &tmp);
    }
}

void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier)
{
    IOMMUTLBEvent event;

    event.type = IOMMU_NOTIFIER_UNMAP;
    event.entry.target_as = &address_space_memory;
    event.entry.iova = notifier->start;
    event.entry.perm = IOMMU_NONE;
    event.entry.addr_mask = notifier->end - notifier->start;

    memory_region_notify_iommu_one(notifier, &event);
}

void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEvent event)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        if (iommu_notifier->iommu_idx == iommu_idx) {
            memory_region_notify_iommu_one(iommu_notifier, &event);
        }
    }
}

int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->get_attr) {
        return -EINVAL;
    }

    return imrc->get_attr(iommu_mr, attr, data);
}

int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->attrs_to_index) {
        return 0;
    }

    return imrc->attrs_to_index(iommu_mr, attrs);
}

int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->num_indexes) {
        return 1;
    }

    return imrc->num_indexes(iommu_mr);
}

RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr)
{
    if (!memory_region_is_ram(mr)) {
        return NULL;
    }
    return mr->rdm;
}
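/*
 * Contract enforced by the asserts below: a RamDiscardManager may only be
 * installed on a RAM region that does not already have one, and must be
 * cleared (set to NULL) before a different manager can be installed.
 */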
2105 g_assert(!rdm || !mr->rdm); 2106 mr->rdm = rdm; 2107 } 2108 2109 uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm, 2110 const MemoryRegion *mr) 2111 { 2112 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm); 2113 2114 g_assert(rdmc->get_min_granularity); 2115 return rdmc->get_min_granularity(rdm, mr); 2116 } 2117 2118 bool ram_discard_manager_is_populated(const RamDiscardManager *rdm, 2119 const MemoryRegionSection *section) 2120 { 2121 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm); 2122 2123 g_assert(rdmc->is_populated); 2124 return rdmc->is_populated(rdm, section); 2125 } 2126 2127 int ram_discard_manager_replay_populated(const RamDiscardManager *rdm, 2128 MemoryRegionSection *section, 2129 ReplayRamPopulate replay_fn, 2130 void *opaque) 2131 { 2132 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm); 2133 2134 g_assert(rdmc->replay_populated); 2135 return rdmc->replay_populated(rdm, section, replay_fn, opaque); 2136 } 2137 2138 void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm, 2139 MemoryRegionSection *section, 2140 ReplayRamDiscard replay_fn, 2141 void *opaque) 2142 { 2143 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm); 2144 2145 g_assert(rdmc->replay_discarded); 2146 rdmc->replay_discarded(rdm, section, replay_fn, opaque); 2147 } 2148 2149 void ram_discard_manager_register_listener(RamDiscardManager *rdm, 2150 RamDiscardListener *rdl, 2151 MemoryRegionSection *section) 2152 { 2153 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm); 2154 2155 g_assert(rdmc->register_listener); 2156 rdmc->register_listener(rdm, rdl, section); 2157 } 2158 2159 void ram_discard_manager_unregister_listener(RamDiscardManager *rdm, 2160 RamDiscardListener *rdl) 2161 { 2162 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm); 2163 2164 g_assert(rdmc->unregister_listener); 2165 rdmc->unregister_listener(rdm, rdl); 2166 } 2167 2168 /* Called with rcu_read_lock held. */ 2169 bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr, 2170 ram_addr_t *ram_addr, bool *read_only, 2171 bool *mr_has_discard_manager) 2172 { 2173 MemoryRegion *mr; 2174 hwaddr xlat; 2175 hwaddr len = iotlb->addr_mask + 1; 2176 bool writable = iotlb->perm & IOMMU_WO; 2177 2178 if (mr_has_discard_manager) { 2179 *mr_has_discard_manager = false; 2180 } 2181 /* 2182 * The IOMMU TLB entry we have just covers translation through 2183 * this IOMMU to its immediate target. We need to translate 2184 * it the rest of the way through to memory. 2185 */ 2186 mr = address_space_translate(&address_space_memory, iotlb->translated_addr, 2187 &xlat, &len, writable, MEMTXATTRS_UNSPECIFIED); 2188 if (!memory_region_is_ram(mr)) { 2189 error_report("iommu map to non memory area %" HWADDR_PRIx "", xlat); 2190 return false; 2191 } else if (memory_region_has_ram_discard_manager(mr)) { 2192 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr); 2193 MemoryRegionSection tmp = { 2194 .mr = mr, 2195 .offset_within_region = xlat, 2196 .size = int128_make64(len), 2197 }; 2198 if (mr_has_discard_manager) { 2199 *mr_has_discard_manager = true; 2200 } 2201 /* 2202 * Malicious VMs can map memory into the IOMMU, which is expected 2203 * to remain discarded. vfio will pin all pages, populating memory. 2204 * Disallow that. vmstate priorities make sure any RamDiscardManager 2205 * were already restored before IOMMUs are restored. 
2206 */ 2207 if (!ram_discard_manager_is_populated(rdm, &tmp)) { 2208 error_report("iommu map to discarded memory (e.g., unplugged via" 2209 " virtio-mem): %" HWADDR_PRIx "", 2210 iotlb->translated_addr); 2211 return false; 2212 } 2213 } 2214 2215 /* 2216 * Translation truncates length to the IOMMU page size, 2217 * check that it did not truncate too much. 2218 */ 2219 if (len & iotlb->addr_mask) { 2220 error_report("iommu has granularity incompatible with target AS"); 2221 return false; 2222 } 2223 2224 if (vaddr) { 2225 *vaddr = memory_region_get_ram_ptr(mr) + xlat; 2226 } 2227 2228 if (ram_addr) { 2229 *ram_addr = memory_region_get_ram_addr(mr) + xlat; 2230 } 2231 2232 if (read_only) { 2233 *read_only = !writable || mr->readonly; 2234 } 2235 2236 return true; 2237 } 2238 2239 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client) 2240 { 2241 uint8_t mask = 1 << client; 2242 uint8_t old_logging; 2243 2244 assert(client == DIRTY_MEMORY_VGA); 2245 old_logging = mr->vga_logging_count; 2246 mr->vga_logging_count += log ? 1 : -1; 2247 if (!!old_logging == !!mr->vga_logging_count) { 2248 return; 2249 } 2250 2251 memory_region_transaction_begin(); 2252 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask); 2253 memory_region_update_pending |= mr->enabled; 2254 memory_region_transaction_commit(); 2255 } 2256 2257 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr, 2258 hwaddr size) 2259 { 2260 assert(mr->ram_block); 2261 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr, 2262 size, 2263 memory_region_get_dirty_log_mask(mr)); 2264 } 2265 2266 /* 2267 * If memory region `mr' is NULL, do global sync. Otherwise, sync 2268 * dirty bitmap for the specified memory region. 2269 */ 2270 static void memory_region_sync_dirty_bitmap(MemoryRegion *mr, bool last_stage) 2271 { 2272 MemoryListener *listener; 2273 AddressSpace *as; 2274 FlatView *view; 2275 FlatRange *fr; 2276 2277 /* If the same address space has multiple log_sync listeners, we 2278 * visit that address space's FlatView multiple times. But because 2279 * log_sync listeners are rare, it's still cheaper than walking each 2280 * address space once. 2281 */ 2282 QTAILQ_FOREACH(listener, &memory_listeners, link) { 2283 if (listener->log_sync) { 2284 as = listener->address_space; 2285 view = address_space_get_flatview(as); 2286 FOR_EACH_FLAT_RANGE(fr, view) { 2287 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) { 2288 MemoryRegionSection mrs = section_from_flat_range(fr, view); 2289 listener->log_sync(listener, &mrs); 2290 } 2291 } 2292 flatview_unref(view); 2293 trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 0); 2294 } else if (listener->log_sync_global) { 2295 /* 2296 * Whether or not an MR is specified, all we can do here is 2297 * a global sync, because this listener cannot sync at a 2298 * finer granularity. 2299 */ 2300 listener->log_sync_global(listener, last_stage); 2301 trace_memory_region_sync_dirty(mr ?
mr->name : "(all)", listener->name, 1); 2302 } 2303 } 2304 } 2305 2306 void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start, 2307 hwaddr len) 2308 { 2309 MemoryRegionSection mrs; 2310 MemoryListener *listener; 2311 AddressSpace *as; 2312 FlatView *view; 2313 FlatRange *fr; 2314 hwaddr sec_start, sec_end, sec_size; 2315 2316 QTAILQ_FOREACH(listener, &memory_listeners, link) { 2317 if (!listener->log_clear) { 2318 continue; 2319 } 2320 as = listener->address_space; 2321 view = address_space_get_flatview(as); 2322 FOR_EACH_FLAT_RANGE(fr, view) { 2323 if (!fr->dirty_log_mask || fr->mr != mr) { 2324 /* 2325 * Clear dirty bitmap operation only applies to those 2326 * regions whose dirty logging is at least enabled 2327 */ 2328 continue; 2329 } 2330 2331 mrs = section_from_flat_range(fr, view); 2332 2333 sec_start = MAX(mrs.offset_within_region, start); 2334 sec_end = mrs.offset_within_region + int128_get64(mrs.size); 2335 sec_end = MIN(sec_end, start + len); 2336 2337 if (sec_start >= sec_end) { 2338 /* 2339 * If this memory region section has no intersection 2340 * with the requested range, skip. 2341 */ 2342 continue; 2343 } 2344 2345 /* Valid case; shrink the section if needed */ 2346 mrs.offset_within_address_space += 2347 sec_start - mrs.offset_within_region; 2348 mrs.offset_within_region = sec_start; 2349 sec_size = sec_end - sec_start; 2350 mrs.size = int128_make64(sec_size); 2351 listener->log_clear(listener, &mrs); 2352 } 2353 flatview_unref(view); 2354 } 2355 } 2356 2357 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr, 2358 hwaddr addr, 2359 hwaddr size, 2360 unsigned client) 2361 { 2362 DirtyBitmapSnapshot *snapshot; 2363 assert(mr->ram_block); 2364 memory_region_sync_dirty_bitmap(mr, false); 2365 snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client); 2366 memory_global_after_dirty_log_sync(); 2367 return snapshot; 2368 } 2369 2370 bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap, 2371 hwaddr addr, hwaddr size) 2372 { 2373 assert(mr->ram_block); 2374 return cpu_physical_memory_snapshot_get_dirty(snap, 2375 memory_region_get_ram_addr(mr) + addr, size); 2376 } 2377 2378 void memory_region_set_readonly(MemoryRegion *mr, bool readonly) 2379 { 2380 if (mr->readonly != readonly) { 2381 memory_region_transaction_begin(); 2382 mr->readonly = readonly; 2383 memory_region_update_pending |= mr->enabled; 2384 memory_region_transaction_commit(); 2385 } 2386 } 2387 2388 void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile) 2389 { 2390 if (mr->nonvolatile != nonvolatile) { 2391 memory_region_transaction_begin(); 2392 mr->nonvolatile = nonvolatile; 2393 memory_region_update_pending |= mr->enabled; 2394 memory_region_transaction_commit(); 2395 } 2396 } 2397 2398 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode) 2399 { 2400 if (mr->romd_mode != romd_mode) { 2401 memory_region_transaction_begin(); 2402 mr->romd_mode = romd_mode; 2403 memory_region_update_pending |= mr->enabled; 2404 memory_region_transaction_commit(); 2405 } 2406 } 2407 2408 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr, 2409 hwaddr size, unsigned client) 2410 { 2411 assert(mr->ram_block); 2412 cpu_physical_memory_test_and_clear_dirty( 2413 memory_region_get_ram_addr(mr) + addr, size, client); 2414 } 2415 2416 int memory_region_get_fd(MemoryRegion *mr) 2417 { 2418 RCU_READ_LOCK_GUARD(); 2419 while (mr->alias) { 2420 mr = mr->alias; 2421 } 2422 return mr->ram_block->fd; 2423 } 2424 
2425 void *memory_region_get_ram_ptr(MemoryRegion *mr) 2426 { 2427 uint64_t offset = 0; 2428 2429 RCU_READ_LOCK_GUARD(); 2430 while (mr->alias) { 2431 offset += mr->alias_offset; 2432 mr = mr->alias; 2433 } 2434 assert(mr->ram_block); 2435 return qemu_map_ram_ptr(mr->ram_block, offset); 2436 } 2437 2438 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset) 2439 { 2440 RAMBlock *block; 2441 2442 block = qemu_ram_block_from_host(ptr, false, offset); 2443 if (!block) { 2444 return NULL; 2445 } 2446 2447 return block->mr; 2448 } 2449 2450 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr) 2451 { 2452 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID; 2453 } 2454 2455 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp) 2456 { 2457 assert(mr->ram_block); 2458 2459 qemu_ram_resize(mr->ram_block, newsize, errp); 2460 } 2461 2462 void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size) 2463 { 2464 if (mr->ram_block) { 2465 qemu_ram_msync(mr->ram_block, addr, size); 2466 } 2467 } 2468 2469 void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size) 2470 { 2471 /* 2472 * This might need to be extended to cover 2473 * other types of memory regions. 2474 */ 2475 if (mr->dirty_log_mask) { 2476 memory_region_msync(mr, addr, size); 2477 } 2478 } 2479 2480 /* 2481 * Notify the proper memory listeners about the newly 2482 * added/removed CoalescedMemoryRange. 2483 */ 2484 static void memory_region_update_coalesced_range(MemoryRegion *mr, 2485 CoalescedMemoryRange *cmr, 2486 bool add) 2487 { 2488 AddressSpace *as; 2489 FlatView *view; 2490 FlatRange *fr; 2491 2492 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { 2493 view = address_space_get_flatview(as); 2494 FOR_EACH_FLAT_RANGE(fr, view) { 2495 if (fr->mr == mr) { 2496 flat_range_coalesced_io_notify(fr, as, cmr, add); 2497 } 2498 } 2499 flatview_unref(view); 2500 } 2501 } 2502 2503 void memory_region_set_coalescing(MemoryRegion *mr) 2504 { 2505 memory_region_clear_coalescing(mr); 2506 memory_region_add_coalescing(mr, 0, int128_get64(mr->size)); 2507 } 2508 2509 void memory_region_add_coalescing(MemoryRegion *mr, 2510 hwaddr offset, 2511 uint64_t size) 2512 { 2513 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr)); 2514 2515 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size)); 2516 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link); 2517 memory_region_update_coalesced_range(mr, cmr, true); 2518 memory_region_set_flush_coalesced(mr); 2519 } 2520 2521 void memory_region_clear_coalescing(MemoryRegion *mr) 2522 { 2523 CoalescedMemoryRange *cmr; 2524 2525 if (QTAILQ_EMPTY(&mr->coalesced)) { 2526 return; 2527 } 2528 2529 qemu_flush_coalesced_mmio_buffer(); 2530 mr->flush_coalesced_mmio = false; 2531 2532 while (!QTAILQ_EMPTY(&mr->coalesced)) { 2533 cmr = QTAILQ_FIRST(&mr->coalesced); 2534 QTAILQ_REMOVE(&mr->coalesced, cmr, link); 2535 memory_region_update_coalesced_range(mr, cmr, false); 2536 g_free(cmr); 2537 } 2538 } 2539 2540 void memory_region_set_flush_coalesced(MemoryRegion *mr) 2541 { 2542 mr->flush_coalesced_mmio = true; 2543 } 2544 2545 void memory_region_clear_flush_coalesced(MemoryRegion *mr) 2546 { 2547 qemu_flush_coalesced_mmio_buffer(); 2548 if (QTAILQ_EMPTY(&mr->coalesced)) { 2549 mr->flush_coalesced_mmio = false; 2550 } 2551 } 2552 2553 static bool userspace_eventfd_warning; 2554 2555 void memory_region_add_eventfd(MemoryRegion *mr, 2556 hwaddr addr, 2557 unsigned size, 2558 bool match_data, 2559 uint64_t data, 2560
EventNotifier *e) 2561 { 2562 MemoryRegionIoeventfd mrfd = { 2563 .addr.start = int128_make64(addr), 2564 .addr.size = int128_make64(size), 2565 .match_data = match_data, 2566 .data = data, 2567 .e = e, 2568 }; 2569 unsigned i; 2570 2571 if (kvm_enabled() && (!(kvm_eventfds_enabled() || 2572 userspace_eventfd_warning))) { 2573 userspace_eventfd_warning = true; 2574 error_report("Using eventfd without MMIO binding in KVM. " 2575 "Suboptimal performance expected"); 2576 } 2577 2578 if (size) { 2579 adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE); 2580 } 2581 memory_region_transaction_begin(); 2582 for (i = 0; i < mr->ioeventfd_nb; ++i) { 2583 if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) { 2584 break; 2585 } 2586 } 2587 ++mr->ioeventfd_nb; 2588 mr->ioeventfds = g_realloc(mr->ioeventfds, 2589 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb); 2590 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i], 2591 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i)); 2592 mr->ioeventfds[i] = mrfd; 2593 ioeventfd_update_pending |= mr->enabled; 2594 memory_region_transaction_commit(); 2595 } 2596 2597 void memory_region_del_eventfd(MemoryRegion *mr, 2598 hwaddr addr, 2599 unsigned size, 2600 bool match_data, 2601 uint64_t data, 2602 EventNotifier *e) 2603 { 2604 MemoryRegionIoeventfd mrfd = { 2605 .addr.start = int128_make64(addr), 2606 .addr.size = int128_make64(size), 2607 .match_data = match_data, 2608 .data = data, 2609 .e = e, 2610 }; 2611 unsigned i; 2612 2613 if (size) { 2614 adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE); 2615 } 2616 memory_region_transaction_begin(); 2617 for (i = 0; i < mr->ioeventfd_nb; ++i) { 2618 if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) { 2619 break; 2620 } 2621 } 2622 assert(i != mr->ioeventfd_nb); 2623 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1], 2624 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1))); 2625 --mr->ioeventfd_nb; 2626 mr->ioeventfds = g_realloc(mr->ioeventfds, 2627 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1); 2628 ioeventfd_update_pending |= mr->enabled; 2629 memory_region_transaction_commit(); 2630 } 2631 2632 static void memory_region_update_container_subregions(MemoryRegion *subregion) 2633 { 2634 MemoryRegion *mr = subregion->container; 2635 MemoryRegion *other; 2636 2637 memory_region_transaction_begin(); 2638 2639 memory_region_ref(subregion); 2640 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { 2641 if (subregion->priority >= other->priority) { 2642 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link); 2643 goto done; 2644 } 2645 } 2646 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link); 2647 done: 2648 memory_region_update_pending |= mr->enabled && subregion->enabled; 2649 memory_region_transaction_commit(); 2650 } 2651 2652 static void memory_region_add_subregion_common(MemoryRegion *mr, 2653 hwaddr offset, 2654 MemoryRegion *subregion) 2655 { 2656 MemoryRegion *alias; 2657 2658 assert(!subregion->container); 2659 subregion->container = mr; 2660 for (alias = subregion->alias; alias; alias = alias->alias) { 2661 alias->mapped_via_alias++; 2662 } 2663 subregion->addr = offset; 2664 memory_region_update_container_subregions(subregion); 2665 } 2666 2667 void memory_region_add_subregion(MemoryRegion *mr, 2668 hwaddr offset, 2669 MemoryRegion *subregion) 2670 { 2671 subregion->priority = 0; 2672 memory_region_add_subregion_common(mr, offset, subregion); 2673 } 2674 2675 void memory_region_add_subregion_overlap(MemoryRegion *mr, 2676 hwaddr offset, 2677 MemoryRegion 
*subregion, 2678 int priority) 2679 { 2680 subregion->priority = priority; 2681 memory_region_add_subregion_common(mr, offset, subregion); 2682 } 2683 2684 void memory_region_del_subregion(MemoryRegion *mr, 2685 MemoryRegion *subregion) 2686 { 2687 MemoryRegion *alias; 2688 2689 memory_region_transaction_begin(); 2690 assert(subregion->container == mr); 2691 subregion->container = NULL; 2692 for (alias = subregion->alias; alias; alias = alias->alias) { 2693 alias->mapped_via_alias--; 2694 assert(alias->mapped_via_alias >= 0); 2695 } 2696 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link); 2697 memory_region_unref(subregion); 2698 memory_region_update_pending |= mr->enabled && subregion->enabled; 2699 memory_region_transaction_commit(); 2700 } 2701 2702 void memory_region_set_enabled(MemoryRegion *mr, bool enabled) 2703 { 2704 if (enabled == mr->enabled) { 2705 return; 2706 } 2707 memory_region_transaction_begin(); 2708 mr->enabled = enabled; 2709 memory_region_update_pending = true; 2710 memory_region_transaction_commit(); 2711 } 2712 2713 void memory_region_set_size(MemoryRegion *mr, uint64_t size) 2714 { 2715 Int128 s = int128_make64(size); 2716 2717 if (size == UINT64_MAX) { 2718 s = int128_2_64(); 2719 } 2720 if (int128_eq(s, mr->size)) { 2721 return; 2722 } 2723 memory_region_transaction_begin(); 2724 mr->size = s; 2725 memory_region_update_pending = true; 2726 memory_region_transaction_commit(); 2727 } 2728 2729 static void memory_region_readd_subregion(MemoryRegion *mr) 2730 { 2731 MemoryRegion *container = mr->container; 2732 2733 if (container) { 2734 memory_region_transaction_begin(); 2735 memory_region_ref(mr); 2736 memory_region_del_subregion(container, mr); 2737 memory_region_add_subregion_common(container, mr->addr, mr); 2738 memory_region_unref(mr); 2739 memory_region_transaction_commit(); 2740 } 2741 } 2742 2743 void memory_region_set_address(MemoryRegion *mr, hwaddr addr) 2744 { 2745 if (addr != mr->addr) { 2746 mr->addr = addr; 2747 memory_region_readd_subregion(mr); 2748 } 2749 } 2750 2751 void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset) 2752 { 2753 assert(mr->alias); 2754 2755 if (offset == mr->alias_offset) { 2756 return; 2757 } 2758 2759 memory_region_transaction_begin(); 2760 mr->alias_offset = offset; 2761 memory_region_update_pending |= mr->enabled; 2762 memory_region_transaction_commit(); 2763 } 2764 2765 void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable) 2766 { 2767 if (unmergeable == mr->unmergeable) { 2768 return; 2769 } 2770 2771 memory_region_transaction_begin(); 2772 mr->unmergeable = unmergeable; 2773 memory_region_update_pending |= mr->enabled; 2774 memory_region_transaction_commit(); 2775 } 2776 2777 uint64_t memory_region_get_alignment(const MemoryRegion *mr) 2778 { 2779 return mr->align; 2780 } 2781 2782 static int cmp_flatrange_addr(const void *addr_, const void *fr_) 2783 { 2784 const AddrRange *addr = addr_; 2785 const FlatRange *fr = fr_; 2786 2787 if (int128_le(addrrange_end(*addr), fr->addr.start)) { 2788 return -1; 2789 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) { 2790 return 1; 2791 } 2792 return 0; 2793 } 2794 2795 static FlatRange *flatview_lookup(FlatView *view, AddrRange addr) 2796 { 2797 return bsearch(&addr, view->ranges, view->nr, 2798 sizeof(FlatRange), cmp_flatrange_addr); 2799 } 2800 2801 bool memory_region_is_mapped(MemoryRegion *mr) 2802 { 2803 return !!mr->container || mr->mapped_via_alias; 2804 } 2805 2806 /* Same as memory_region_find, but it does not add a reference 
to the 2807 * returned region. It must be called from an RCU critical section. 2808 */ 2809 static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr, 2810 hwaddr addr, uint64_t size) 2811 { 2812 MemoryRegionSection ret = { .mr = NULL }; 2813 MemoryRegion *root; 2814 AddressSpace *as; 2815 AddrRange range; 2816 FlatView *view; 2817 FlatRange *fr; 2818 2819 addr += mr->addr; 2820 for (root = mr; root->container; ) { 2821 root = root->container; 2822 addr += root->addr; 2823 } 2824 2825 as = memory_region_to_address_space(root); 2826 if (!as) { 2827 return ret; 2828 } 2829 range = addrrange_make(int128_make64(addr), int128_make64(size)); 2830 2831 view = address_space_to_flatview(as); 2832 fr = flatview_lookup(view, range); 2833 if (!fr) { 2834 return ret; 2835 } 2836 2837 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) { 2838 --fr; 2839 } 2840 2841 ret.mr = fr->mr; 2842 ret.fv = view; 2843 range = addrrange_intersection(range, fr->addr); 2844 ret.offset_within_region = fr->offset_in_region; 2845 ret.offset_within_region += int128_get64(int128_sub(range.start, 2846 fr->addr.start)); 2847 ret.size = range.size; 2848 ret.offset_within_address_space = int128_get64(range.start); 2849 ret.readonly = fr->readonly; 2850 ret.nonvolatile = fr->nonvolatile; 2851 return ret; 2852 } 2853 2854 MemoryRegionSection memory_region_find(MemoryRegion *mr, 2855 hwaddr addr, uint64_t size) 2856 { 2857 MemoryRegionSection ret; 2858 RCU_READ_LOCK_GUARD(); 2859 ret = memory_region_find_rcu(mr, addr, size); 2860 if (ret.mr) { 2861 memory_region_ref(ret.mr); 2862 } 2863 return ret; 2864 } 2865 2866 MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s) 2867 { 2868 MemoryRegionSection *tmp = g_new(MemoryRegionSection, 1); 2869 2870 *tmp = *s; 2871 if (tmp->mr) { 2872 memory_region_ref(tmp->mr); 2873 } 2874 if (tmp->fv) { 2875 bool ret = flatview_ref(tmp->fv); 2876 2877 g_assert(ret); 2878 } 2879 return tmp; 2880 } 2881 2882 void memory_region_section_free_copy(MemoryRegionSection *s) 2883 { 2884 if (s->fv) { 2885 flatview_unref(s->fv); 2886 } 2887 if (s->mr) { 2888 memory_region_unref(s->mr); 2889 } 2890 g_free(s); 2891 } 2892 2893 bool memory_region_present(MemoryRegion *container, hwaddr addr) 2894 { 2895 MemoryRegion *mr; 2896 2897 RCU_READ_LOCK_GUARD(); 2898 mr = memory_region_find_rcu(container, addr, 1).mr; 2899 return mr && mr != container; 2900 } 2901 2902 void memory_global_dirty_log_sync(bool last_stage) 2903 { 2904 memory_region_sync_dirty_bitmap(NULL, last_stage); 2905 } 2906 2907 void memory_global_after_dirty_log_sync(void) 2908 { 2909 MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward); 2910 } 2911 2912 /* 2913 * Dirty track stop flags that are postponed due to VM being stopped. Should 2914 * only be used within vmstate_change hook. 
2915 */ 2916 static unsigned int postponed_stop_flags; 2917 static VMChangeStateEntry *vmstate_change; 2918 static void memory_global_dirty_log_stop_postponed_run(void); 2919 2920 void memory_global_dirty_log_start(unsigned int flags) 2921 { 2922 unsigned int old_flags; 2923 2924 assert(flags && !(flags & (~GLOBAL_DIRTY_MASK))); 2925 2926 if (vmstate_change) { 2927 /* If there is a postponed stop(), handle it first */ 2928 postponed_stop_flags &= ~flags; 2929 memory_global_dirty_log_stop_postponed_run(); 2930 } 2931 2932 flags &= ~global_dirty_tracking; 2933 if (!flags) { 2934 return; 2935 } 2936 2937 old_flags = global_dirty_tracking; 2938 global_dirty_tracking |= flags; 2939 trace_global_dirty_changed(global_dirty_tracking); 2940 2941 if (!old_flags) { 2942 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward); 2943 memory_region_transaction_begin(); 2944 memory_region_update_pending = true; 2945 memory_region_transaction_commit(); 2946 } 2947 } 2948 2949 static void memory_global_dirty_log_do_stop(unsigned int flags) 2950 { 2951 assert(flags && !(flags & (~GLOBAL_DIRTY_MASK))); 2952 assert((global_dirty_tracking & flags) == flags); 2953 global_dirty_tracking &= ~flags; 2954 2955 trace_global_dirty_changed(global_dirty_tracking); 2956 2957 if (!global_dirty_tracking) { 2958 memory_region_transaction_begin(); 2959 memory_region_update_pending = true; 2960 memory_region_transaction_commit(); 2961 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse); 2962 } 2963 } 2964 2965 /* 2966 * Execute the postponed dirty log stop operations, if any, then reset 2967 * everything (including the flags and the vmstate change hook). 2968 */ 2969 static void memory_global_dirty_log_stop_postponed_run(void) 2970 { 2971 /* This must be called with the vmstate handler registered */ 2972 assert(vmstate_change); 2973 2974 /* Note: postponed_stop_flags can be cleared in log start routine */ 2975 if (postponed_stop_flags) { 2976 memory_global_dirty_log_do_stop(postponed_stop_flags); 2977 postponed_stop_flags = 0; 2978 } 2979 2980 qemu_del_vm_change_state_handler(vmstate_change); 2981 vmstate_change = NULL; 2982 } 2983 2984 static void memory_vm_change_state_handler(void *opaque, bool running, 2985 RunState state) 2986 { 2987 if (running) { 2988 memory_global_dirty_log_stop_postponed_run(); 2989 } 2990 } 2991 2992 void memory_global_dirty_log_stop(unsigned int flags) 2993 { 2994 if (!runstate_is_running()) { 2995 /* Postpone the dirty log stop, e.g., to when VM starts again */ 2996 if (vmstate_change) { 2997 /* Batch with previous postponed flags */ 2998 postponed_stop_flags |= flags; 2999 } else { 3000 postponed_stop_flags = flags; 3001 vmstate_change = qemu_add_vm_change_state_handler( 3002 memory_vm_change_state_handler, NULL); 3003 } 3004 return; 3005 } 3006 3007 memory_global_dirty_log_do_stop(flags); 3008 } 3009 3010 static void listener_add_address_space(MemoryListener *listener, 3011 AddressSpace *as) 3012 { 3013 FlatView *view; 3014 FlatRange *fr; 3015 3016 if (listener->begin) { 3017 listener->begin(listener); 3018 } 3019 if (global_dirty_tracking) { 3020 if (listener->log_global_start) { 3021 listener->log_global_start(listener); 3022 } 3023 } 3024 3025 view = address_space_get_flatview(as); 3026 FOR_EACH_FLAT_RANGE(fr, view) { 3027 MemoryRegionSection section = section_from_flat_range(fr, view); 3028 3029 if (listener->region_add) { 3030 listener->region_add(listener, &section); 3031 } 3032 if (fr->dirty_log_mask && listener->log_start) { 3033 listener->log_start(listener, &section, 0,
fr->dirty_log_mask); 3034 } 3035 } 3036 if (listener->commit) { 3037 listener->commit(listener); 3038 } 3039 flatview_unref(view); 3040 } 3041 3042 static void listener_del_address_space(MemoryListener *listener, 3043 AddressSpace *as) 3044 { 3045 FlatView *view; 3046 FlatRange *fr; 3047 3048 if (listener->begin) { 3049 listener->begin(listener); 3050 } 3051 view = address_space_get_flatview(as); 3052 FOR_EACH_FLAT_RANGE(fr, view) { 3053 MemoryRegionSection section = section_from_flat_range(fr, view); 3054 3055 if (fr->dirty_log_mask && listener->log_stop) { 3056 listener->log_stop(listener, &section, fr->dirty_log_mask, 0); 3057 } 3058 if (listener->region_del) { 3059 listener->region_del(listener, &section); 3060 } 3061 } 3062 if (listener->commit) { 3063 listener->commit(listener); 3064 } 3065 flatview_unref(view); 3066 } 3067 3068 void memory_listener_register(MemoryListener *listener, AddressSpace *as) 3069 { 3070 MemoryListener *other = NULL; 3071 3072 /* Only one of log_sync and log_sync_global can be defined for a listener */ 3073 assert(!(listener->log_sync && listener->log_sync_global)); 3074 3075 listener->address_space = as; 3076 if (QTAILQ_EMPTY(&memory_listeners) 3077 || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) { 3078 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link); 3079 } else { 3080 QTAILQ_FOREACH(other, &memory_listeners, link) { 3081 if (listener->priority < other->priority) { 3082 break; 3083 } 3084 } 3085 QTAILQ_INSERT_BEFORE(other, listener, link); 3086 } 3087 3088 if (QTAILQ_EMPTY(&as->listeners) 3089 || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) { 3090 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as); 3091 } else { 3092 QTAILQ_FOREACH(other, &as->listeners, link_as) { 3093 if (listener->priority < other->priority) { 3094 break; 3095 } 3096 } 3097 QTAILQ_INSERT_BEFORE(other, listener, link_as); 3098 } 3099 3100 listener_add_address_space(listener, as); 3101 3102 if (listener->eventfd_add || listener->eventfd_del) { 3103 as->ioeventfd_notifiers++; 3104 } 3105 } 3106 3107 void memory_listener_unregister(MemoryListener *listener) 3108 { 3109 if (!listener->address_space) { 3110 return; 3111 } 3112 3113 if (listener->eventfd_add || listener->eventfd_del) { 3114 listener->address_space->ioeventfd_notifiers--; 3115 } 3116 3117 listener_del_address_space(listener, listener->address_space); 3118 QTAILQ_REMOVE(&memory_listeners, listener, link); 3119 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as); 3120 listener->address_space = NULL; 3121 } 3122 3123 void address_space_remove_listeners(AddressSpace *as) 3124 { 3125 while (!QTAILQ_EMPTY(&as->listeners)) { 3126 memory_listener_unregister(QTAILQ_FIRST(&as->listeners)); 3127 } 3128 } 3129 3130 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name) 3131 { 3132 memory_region_ref(root); 3133 as->root = root; 3134 as->current_map = NULL; 3135 as->ioeventfd_nb = 0; 3136 as->ioeventfds = NULL; 3137 QTAILQ_INIT(&as->listeners); 3138 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link); 3139 as->name = g_strdup(name ?
name : "anonymous"); 3140 address_space_update_topology(as); 3141 address_space_update_ioeventfds(as); 3142 } 3143 3144 static void do_address_space_destroy(AddressSpace *as) 3145 { 3146 assert(QTAILQ_EMPTY(&as->listeners)); 3147 3148 flatview_unref(as->current_map); 3149 g_free(as->name); 3150 g_free(as->ioeventfds); 3151 memory_region_unref(as->root); 3152 } 3153 3154 void address_space_destroy(AddressSpace *as) 3155 { 3156 MemoryRegion *root = as->root; 3157 3158 /* Flush out anything from MemoryListeners listening in on this */ 3159 memory_region_transaction_begin(); 3160 as->root = NULL; 3161 memory_region_transaction_commit(); 3162 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link); 3163 3164 /* At this point, as->dispatch and as->current_map are dummy 3165 * entries that the guest should never use. Wait for the old 3166 * values to expire before freeing the data. 3167 */ 3168 as->root = root; 3169 call_rcu(as, do_address_space_destroy, rcu); 3170 } 3171 3172 static const char *memory_region_type(MemoryRegion *mr) 3173 { 3174 if (mr->alias) { 3175 return memory_region_type(mr->alias); 3176 } 3177 if (memory_region_is_ram_device(mr)) { 3178 return "ramd"; 3179 } else if (memory_region_is_romd(mr)) { 3180 return "romd"; 3181 } else if (memory_region_is_rom(mr)) { 3182 return "rom"; 3183 } else if (memory_region_is_ram(mr)) { 3184 return "ram"; 3185 } else { 3186 return "i/o"; 3187 } 3188 } 3189 3190 typedef struct MemoryRegionList MemoryRegionList; 3191 3192 struct MemoryRegionList { 3193 const MemoryRegion *mr; 3194 QTAILQ_ENTRY(MemoryRegionList) mrqueue; 3195 }; 3196 3197 typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead; 3198 3199 #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \ 3200 int128_sub((size), int128_one())) : 0) 3201 #define MTREE_INDENT " " 3202 3203 static void mtree_expand_owner(const char *label, Object *obj) 3204 { 3205 DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE); 3206 3207 qemu_printf(" %s:{%s", label, dev ? "dev" : "obj"); 3208 if (dev && dev->id) { 3209 qemu_printf(" id=%s", dev->id); 3210 } else { 3211 char *canonical_path = object_get_canonical_path(obj); 3212 if (canonical_path) { 3213 qemu_printf(" path=%s", canonical_path); 3214 g_free(canonical_path); 3215 } else { 3216 qemu_printf(" type=%s", object_get_typename(obj)); 3217 } 3218 } 3219 qemu_printf("}"); 3220 } 3221 3222 static void mtree_print_mr_owner(const MemoryRegion *mr) 3223 { 3224 Object *owner = mr->owner; 3225 Object *parent = memory_region_owner((MemoryRegion *)mr); 3226 3227 if (!owner && !parent) { 3228 qemu_printf(" orphan"); 3229 return; 3230 } 3231 if (owner) { 3232 mtree_expand_owner("owner", owner); 3233 } 3234 if (parent && parent != owner) { 3235 mtree_expand_owner("parent", parent); 3236 } 3237 } 3238 3239 static void mtree_print_mr(const MemoryRegion *mr, unsigned int level, 3240 hwaddr base, 3241 MemoryRegionListHead *alias_print_queue, 3242 bool owner, bool display_disabled) 3243 { 3244 MemoryRegionList *new_ml, *ml, *next_ml; 3245 MemoryRegionListHead submr_print_queue; 3246 const MemoryRegion *submr; 3247 unsigned int i; 3248 hwaddr cur_start, cur_end; 3249 3250 if (!mr) { 3251 return; 3252 } 3253 3254 cur_start = base + mr->addr; 3255 cur_end = cur_start + MR_SIZE(mr->size); 3256 3257 /* 3258 * Try to detect overflow of memory region. This should never 3259 * happen normally. When it happens, we dump something to warn the 3260 * user who is observing this. 
3261 */ 3262 if (cur_start < base || cur_end < cur_start) { 3263 qemu_printf("[DETECTED OVERFLOW!] "); 3264 } 3265 3266 if (mr->alias) { 3267 bool found = false; 3268 3269 /* check if the alias is already in the queue */ 3270 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) { 3271 if (ml->mr == mr->alias) { 3272 found = true; 3273 } 3274 } 3275 3276 if (!found) { 3277 ml = g_new(MemoryRegionList, 1); 3278 ml->mr = mr->alias; 3279 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue); 3280 } 3281 if (mr->enabled || display_disabled) { 3282 for (i = 0; i < level; i++) { 3283 qemu_printf(MTREE_INDENT); 3284 } 3285 qemu_printf(HWADDR_FMT_plx "-" HWADDR_FMT_plx 3286 " (prio %d, %s%s): alias %s @%s " HWADDR_FMT_plx 3287 "-" HWADDR_FMT_plx "%s", 3288 cur_start, cur_end, 3289 mr->priority, 3290 mr->nonvolatile ? "nv-" : "", 3291 memory_region_type((MemoryRegion *)mr), 3292 memory_region_name(mr), 3293 memory_region_name(mr->alias), 3294 mr->alias_offset, 3295 mr->alias_offset + MR_SIZE(mr->size), 3296 mr->enabled ? "" : " [disabled]"); 3297 if (owner) { 3298 mtree_print_mr_owner(mr); 3299 } 3300 qemu_printf("\n"); 3301 } 3302 } else { 3303 if (mr->enabled || display_disabled) { 3304 for (i = 0; i < level; i++) { 3305 qemu_printf(MTREE_INDENT); 3306 } 3307 qemu_printf(HWADDR_FMT_plx "-" HWADDR_FMT_plx 3308 " (prio %d, %s%s): %s%s", 3309 cur_start, cur_end, 3310 mr->priority, 3311 mr->nonvolatile ? "nv-" : "", 3312 memory_region_type((MemoryRegion *)mr), 3313 memory_region_name(mr), 3314 mr->enabled ? "" : " [disabled]"); 3315 if (owner) { 3316 mtree_print_mr_owner(mr); 3317 } 3318 qemu_printf("\n"); 3319 } 3320 } 3321 3322 QTAILQ_INIT(&submr_print_queue); 3323 3324 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) { 3325 new_ml = g_new(MemoryRegionList, 1); 3326 new_ml->mr = submr; 3327 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) { 3328 if (new_ml->mr->addr < ml->mr->addr || 3329 (new_ml->mr->addr == ml->mr->addr && 3330 new_ml->mr->priority > ml->mr->priority)) { 3331 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue); 3332 new_ml = NULL; 3333 break; 3334 } 3335 } 3336 if (new_ml) { 3337 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue); 3338 } 3339 } 3340 3341 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) { 3342 mtree_print_mr(ml->mr, level + 1, cur_start, 3343 alias_print_queue, owner, display_disabled); 3344 } 3345 3346 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) { 3347 g_free(ml); 3348 } 3349 } 3350 3351 struct FlatViewInfo { 3352 int counter; 3353 bool dispatch_tree; 3354 bool owner; 3355 AccelClass *ac; 3356 }; 3357 3358 static void mtree_print_flatview(gpointer key, gpointer value, 3359 gpointer user_data) 3360 { 3361 FlatView *view = key; 3362 GArray *fv_address_spaces = value; 3363 struct FlatViewInfo *fvi = user_data; 3364 FlatRange *range = &view->ranges[0]; 3365 MemoryRegion *mr; 3366 int n = view->nr; 3367 int i; 3368 AddressSpace *as; 3369 3370 qemu_printf("FlatView #%d\n", fvi->counter); 3371 ++fvi->counter; 3372 3373 for (i = 0; i < fv_address_spaces->len; ++i) { 3374 as = g_array_index(fv_address_spaces, AddressSpace*, i); 3375 qemu_printf(" AS \"%s\", root: %s", 3376 as->name, memory_region_name(as->root)); 3377 if (as->root->alias) { 3378 qemu_printf(", alias %s", memory_region_name(as->root->alias)); 3379 } 3380 qemu_printf("\n"); 3381 } 3382 3383 qemu_printf(" Root memory region: %s\n", 3384 view->root ? 
memory_region_name(view->root) : "(none)"); 3385 3386 if (n <= 0) { 3387 qemu_printf(MTREE_INDENT "No rendered FlatView\n\n"); 3388 return; 3389 } 3390 3391 while (n--) { 3392 mr = range->mr; 3393 if (range->offset_in_region) { 3394 qemu_printf(MTREE_INDENT HWADDR_FMT_plx "-" HWADDR_FMT_plx 3395 " (prio %d, %s%s): %s @" HWADDR_FMT_plx, 3396 int128_get64(range->addr.start), 3397 int128_get64(range->addr.start) 3398 + MR_SIZE(range->addr.size), 3399 mr->priority, 3400 range->nonvolatile ? "nv-" : "", 3401 range->readonly ? "rom" : memory_region_type(mr), 3402 memory_region_name(mr), 3403 range->offset_in_region); 3404 } else { 3405 qemu_printf(MTREE_INDENT HWADDR_FMT_plx "-" HWADDR_FMT_plx 3406 " (prio %d, %s%s): %s", 3407 int128_get64(range->addr.start), 3408 int128_get64(range->addr.start) 3409 + MR_SIZE(range->addr.size), 3410 mr->priority, 3411 range->nonvolatile ? "nv-" : "", 3412 range->readonly ? "rom" : memory_region_type(mr), 3413 memory_region_name(mr)); 3414 } 3415 if (fvi->owner) { 3416 mtree_print_mr_owner(mr); 3417 } 3418 3419 if (fvi->ac) { 3420 for (i = 0; i < fv_address_spaces->len; ++i) { 3421 as = g_array_index(fv_address_spaces, AddressSpace*, i); 3422 if (fvi->ac->has_memory(current_machine, as, 3423 int128_get64(range->addr.start), 3424 MR_SIZE(range->addr.size) + 1)) { 3425 qemu_printf(" %s", fvi->ac->name); 3426 } 3427 } 3428 } 3429 qemu_printf("\n"); 3430 range++; 3431 } 3432 3433 #if !defined(CONFIG_USER_ONLY) 3434 if (fvi->dispatch_tree && view->root) { 3435 mtree_print_dispatch(view->dispatch, view->root); 3436 } 3437 #endif 3438 3439 qemu_printf("\n"); 3440 } 3441 3442 static gboolean mtree_info_flatview_free(gpointer key, gpointer value, 3443 gpointer user_data) 3444 { 3445 FlatView *view = key; 3446 GArray *fv_address_spaces = value; 3447 3448 g_array_unref(fv_address_spaces); 3449 flatview_unref(view); 3450 3451 return true; 3452 } 3453 3454 static void mtree_info_flatview(bool dispatch_tree, bool owner) 3455 { 3456 struct FlatViewInfo fvi = { 3457 .counter = 0, 3458 .dispatch_tree = dispatch_tree, 3459 .owner = owner, 3460 }; 3461 AddressSpace *as; 3462 FlatView *view; 3463 GArray *fv_address_spaces; 3464 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal); 3465 AccelClass *ac = ACCEL_GET_CLASS(current_accel()); 3466 3467 if (ac->has_memory) { 3468 fvi.ac = ac; 3469 } 3470 3471 /* Gather all FVs in one table */ 3472 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { 3473 view = address_space_get_flatview(as); 3474 3475 fv_address_spaces = g_hash_table_lookup(views, view); 3476 if (!fv_address_spaces) { 3477 fv_address_spaces = g_array_new(false, false, sizeof(as)); 3478 g_hash_table_insert(views, view, fv_address_spaces); 3479 } 3480 3481 g_array_append_val(fv_address_spaces, as); 3482 } 3483 3484 /* Print */ 3485 g_hash_table_foreach(views, mtree_print_flatview, &fvi); 3486 3487 /* Free */ 3488 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0); 3489 g_hash_table_unref(views); 3490 } 3491 3492 struct AddressSpaceInfo { 3493 MemoryRegionListHead *ml_head; 3494 bool owner; 3495 bool disabled; 3496 }; 3497 3498 /* Returns negative value if a < b; zero if a = b; positive value if a > b. 
*/ 3499 static gint address_space_compare_name(gconstpointer a, gconstpointer b) 3500 { 3501 const AddressSpace *as_a = a; 3502 const AddressSpace *as_b = b; 3503 3504 return g_strcmp0(as_a->name, as_b->name); 3505 } 3506 3507 static void mtree_print_as_name(gpointer data, gpointer user_data) 3508 { 3509 AddressSpace *as = data; 3510 3511 qemu_printf("address-space: %s\n", as->name); 3512 } 3513 3514 static void mtree_print_as(gpointer key, gpointer value, gpointer user_data) 3515 { 3516 MemoryRegion *mr = key; 3517 GSList *as_same_root_mr_list = value; 3518 struct AddressSpaceInfo *asi = user_data; 3519 3520 g_slist_foreach(as_same_root_mr_list, mtree_print_as_name, NULL); 3521 mtree_print_mr(mr, 1, 0, asi->ml_head, asi->owner, asi->disabled); 3522 qemu_printf("\n"); 3523 } 3524 3525 static gboolean mtree_info_as_free(gpointer key, gpointer value, 3526 gpointer user_data) 3527 { 3528 GSList *as_same_root_mr_list = value; 3529 3530 g_slist_free(as_same_root_mr_list); 3531 3532 return true; 3533 } 3534 3535 static void mtree_info_as(bool dispatch_tree, bool owner, bool disabled) 3536 { 3537 MemoryRegionListHead ml_head; 3538 MemoryRegionList *ml, *ml2; 3539 AddressSpace *as; 3540 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal); 3541 GSList *as_same_root_mr_list; 3542 struct AddressSpaceInfo asi = { 3543 .ml_head = &ml_head, 3544 .owner = owner, 3545 .disabled = disabled, 3546 }; 3547 3548 QTAILQ_INIT(&ml_head); 3549 3550 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { 3551 /* Create hashtable, key=AS root MR, value = list of AS */ 3552 as_same_root_mr_list = g_hash_table_lookup(views, as->root); 3553 as_same_root_mr_list = g_slist_insert_sorted(as_same_root_mr_list, as, 3554 address_space_compare_name); 3555 g_hash_table_insert(views, as->root, as_same_root_mr_list); 3556 } 3557 3558 /* print address spaces */ 3559 g_hash_table_foreach(views, mtree_print_as, &asi); 3560 g_hash_table_foreach_remove(views, mtree_info_as_free, 0); 3561 g_hash_table_unref(views); 3562 3563 /* print aliased regions */ 3564 QTAILQ_FOREACH(ml, &ml_head, mrqueue) { 3565 qemu_printf("memory-region: %s\n", memory_region_name(ml->mr)); 3566 mtree_print_mr(ml->mr, 1, 0, &ml_head, owner, disabled); 3567 qemu_printf("\n"); 3568 } 3569 3570 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) { 3571 g_free(ml); 3572 } 3573 } 3574 3575 void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled) 3576 { 3577 if (flatview) { 3578 mtree_info_flatview(dispatch_tree, owner); 3579 } else { 3580 mtree_info_as(dispatch_tree, owner, disabled); 3581 } 3582 } 3583 3584 void memory_region_init_ram(MemoryRegion *mr, 3585 Object *owner, 3586 const char *name, 3587 uint64_t size, 3588 Error **errp) 3589 { 3590 DeviceState *owner_dev; 3591 Error *err = NULL; 3592 3593 memory_region_init_ram_nomigrate(mr, owner, name, size, &err); 3594 if (err) { 3595 error_propagate(errp, err); 3596 return; 3597 } 3598 /* This will assert if owner is neither NULL nor a DeviceState. 3599 * We only want the owner here for the purposes of defining a 3600 * unique name for migration. TODO: Ideally we should implement 3601 * a naming scheme for Objects which are not DeviceStates, in 3602 * which case we can relax this restriction. 
3603 */ 3604 owner_dev = DEVICE(owner); 3605 vmstate_register_ram(mr, owner_dev); 3606 } 3607 3608 void memory_region_init_rom(MemoryRegion *mr, 3609 Object *owner, 3610 const char *name, 3611 uint64_t size, 3612 Error **errp) 3613 { 3614 DeviceState *owner_dev; 3615 Error *err = NULL; 3616 3617 memory_region_init_rom_nomigrate(mr, owner, name, size, &err); 3618 if (err) { 3619 error_propagate(errp, err); 3620 return; 3621 } 3622 /* This will assert if owner is neither NULL nor a DeviceState. 3623 * We only want the owner here for the purposes of defining a 3624 * unique name for migration. TODO: Ideally we should implement 3625 * a naming scheme for Objects which are not DeviceStates, in 3626 * which case we can relax this restriction. 3627 */ 3628 owner_dev = DEVICE(owner); 3629 vmstate_register_ram(mr, owner_dev); 3630 } 3631 3632 void memory_region_init_rom_device(MemoryRegion *mr, 3633 Object *owner, 3634 const MemoryRegionOps *ops, 3635 void *opaque, 3636 const char *name, 3637 uint64_t size, 3638 Error **errp) 3639 { 3640 DeviceState *owner_dev; 3641 Error *err = NULL; 3642 3643 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque, 3644 name, size, &err); 3645 if (err) { 3646 error_propagate(errp, err); 3647 return; 3648 } 3649 /* This will assert if owner is neither NULL nor a DeviceState. 3650 * We only want the owner here for the purposes of defining a 3651 * unique name for migration. TODO: Ideally we should implement 3652 * a naming scheme for Objects which are not DeviceStates, in 3653 * which case we can relax this restriction. 3654 */ 3655 owner_dev = DEVICE(owner); 3656 vmstate_register_ram(mr, owner_dev); 3657 } 3658 3659 /* 3660 * Support system builds with CONFIG_FUZZ using a weak symbol and a stub for 3661 * the fuzz_dma_read_cb callback 3662 */ 3663 #ifdef CONFIG_FUZZ 3664 void __attribute__((weak)) fuzz_dma_read_cb(size_t addr, 3665 size_t len, 3666 MemoryRegion *mr) 3667 { 3668 } 3669 #endif 3670 3671 static const TypeInfo memory_region_info = { 3672 .parent = TYPE_OBJECT, 3673 .name = TYPE_MEMORY_REGION, 3674 .class_size = sizeof(MemoryRegionClass), 3675 .instance_size = sizeof(MemoryRegion), 3676 .instance_init = memory_region_initfn, 3677 .instance_finalize = memory_region_finalize, 3678 }; 3679 3680 static const TypeInfo iommu_memory_region_info = { 3681 .parent = TYPE_MEMORY_REGION, 3682 .name = TYPE_IOMMU_MEMORY_REGION, 3683 .class_size = sizeof(IOMMUMemoryRegionClass), 3684 .instance_size = sizeof(IOMMUMemoryRegion), 3685 .instance_init = iommu_memory_region_initfn, 3686 .abstract = true, 3687 }; 3688 3689 static const TypeInfo ram_discard_manager_info = { 3690 .parent = TYPE_INTERFACE, 3691 .name = TYPE_RAM_DISCARD_MANAGER, 3692 .class_size = sizeof(RamDiscardManagerClass), 3693 }; 3694 3695 static void memory_register_types(void) 3696 { 3697 type_register_static(&memory_region_info); 3698 type_register_static(&iommu_memory_region_info); 3699 type_register_static(&ram_discard_manager_info); 3700 } 3701 3702 type_init(memory_register_types) 3703