/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/ramlist.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "qemu:memory-region"
#define MEMORY_REGION(obj) \
        OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};

/*
 * Bitmap for different IOMMUNotifier capabilities.  Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
}
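/*
 * Example: filling in an IOMMUNotifier before registering it with
 * memory_region_register_iommu_notifier() (declared further down in this
 * header).  Illustrative sketch only; the callback name and the watched
 * range are invented.
 *
 *   static void my_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 *   {
 *       // react to the new or invalidated translation described by *iotlb
 *   }
 *
 *   IOMMUNotifier n;
 *   iommu_notifier_init(&n, my_iommu_map_notify, IOMMU_NOTIFIER_ALL,
 *                       0, ~(hwaddr)0);   // watch the whole address range
 */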
/* New-style MMIO accessors can indicate that the transaction failed.
 * A zero (MEMTX_OK) response means success; anything else is a failure
 * of some kind.  The memory subsystem will bitwise-OR together results
 * if it is synthesizing an operation from multiple smaller accesses.
 */
#define MEMTX_OK 0
#define MEMTX_ERROR             (1U << 0) /* device returned an error */
#define MEMTX_DECODE_ERROR      (1U << 1) /* nothing at that address */
typedef uint32_t MemTxResult;

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region.  @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region.  @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration
     */
    const MemoryRegionMmio old_mmio;
};
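/*
 * Example: a minimal MemoryRegionOps for a hypothetical device with a
 * single 32-bit register.  Illustrative sketch only; MyDevState, its
 * "ctrl" field and the callback names are invented.
 *
 *   typedef struct MyDevState { uint32_t ctrl; } MyDevState;
 *
 *   static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       return addr == 0 ? s->ctrl : 0;
 *   }
 *
 *   static void mydev_write(void *opaque, hwaddr addr,
 *                           uint64_t data, unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       if (addr == 0) {
 *           s->ctrl = data;
 *       }
 *   }
 *
 *   static const MemoryRegionOps mydev_ops = {
 *       .read = mydev_read,
 *       .write = mydev_write,
 *       .endianness = DEVICE_NATIVE_ENDIAN,
 *       .valid.min_access_size = 4,
 *       .valid.max_access_size = 4,
 *   };
 */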
typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps;

struct MemoryRegionIOMMUOps {
    /*
     * Return a TLB entry that contains a given address.  @flag should
     * be the access permission of this translation operation.  Pass
     * IOMMU_NONE to mean that no read/write permission checks are
     * needed, for example during region replay.
     */
    IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag);
    /* Returns minimum supported page size */
    uint64_t (*get_min_page_size)(MemoryRegion *iommu);
    /* Called when IOMMU Notifier flag changed */
    void (*notify_flag_changed)(MemoryRegion *iommu,
                                IOMMUNotifierFlag old_flags,
                                IOMMUNotifierFlag new_flags);
    /* Set this up to provide customized IOMMU replay function */
    void (*replay)(MemoryRegion *iommu, IOMMUNotifier *notifier);
};
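/*
 * Example: the shape of a translate callback for a hypothetical IOMMU that
 * applies a fixed offset.  Illustrative sketch only; the callback name,
 * the offset and the 4k page mask are invented, and address_space_memory
 * is assumed as the target address space.
 *
 *   static IOMMUTLBEntry my_iommu_translate(MemoryRegion *iommu, hwaddr addr,
 *                                           IOMMUAccessFlags flag)
 *   {
 *       IOMMUTLBEntry entry = {
 *           .target_as = &address_space_memory,   // assumed target
 *           .iova = addr & ~(hwaddr)0xfff,
 *           .translated_addr = (addr + 0x10000000) & ~(hwaddr)0xfff,
 *           .addr_mask = 0xfff,                   // 4k translation
 *           .perm = IOMMU_RW,
 *       };
 *       return entry;
 *   }
 */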
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    Object parent_obj;

    /* All fields are private - violators will be prosecuted */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool rom_device;
    bool flush_coalesced_mmio;
    bool global_locking;
    uint8_t dirty_log_mask;
    RAMBlock *ram_block;
    Object *owner;
    const MemoryRegionIOMMUOps *iommu_ops;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_mmio_add)(MemoryListener *listener,
                               MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    void (*coalesced_mmio_del)(MemoryListener *listener,
                               MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};
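/*
 * Example: a listener that reacts to regions appearing in an address space.
 * Illustrative sketch only; the callback name and priority value are
 * invented.  Registration uses memory_listener_register(), declared further
 * down in this header, and address_space_memory is assumed as the filter.
 *
 *   static void my_region_add(MemoryListener *listener,
 *                             MemoryRegionSection *section)
 *   {
 *       // called for each section mapped into the observed address space
 *   }
 *
 *   static MemoryListener my_listener = {
 *       .region_add = my_region_add,
 *       .priority = 10,
 *   };
 *
 *   memory_listener_register(&my_listener, &address_space_memory);
 */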
/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;
    int ref_count;
    bool malloced;

    /* Accessed via RCU.  */
    struct FlatView *current_map;

    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    struct AddressSpaceDispatch *dispatch;
    struct AddressSpaceDispatch *next_dispatch;
    MemoryListener dispatch_listener;
    QTAILQ_HEAD(memory_listeners_as, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @address_space: the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 */
struct MemoryRegionSection {
    MemoryRegion *mr;
    AddressSpace *address_space;
    hwaddr offset_within_region;
    Int128 size;
    hwaddr offset_within_address_space;
    bool readonly;
};

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        struct Object *owner,
                        const char *name,
                        uint64_t size);

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           struct Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);

/**
 * memory_region_init_ram:  Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);
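/*
 * Example: initializing an MMIO region and a RAM region in a device realize
 * function.  Illustrative sketch only; mydev_ops is the hypothetical
 * MemoryRegionOps shown earlier, the names and sizes are invented, and
 * &error_fatal is assumed to come from "qapi/error.h".
 *
 *   memory_region_init_io(&s->mmio, OBJECT(s), &mydev_ops, s,
 *                         "mydev-mmio", 0x1000);
 *   memory_region_init_ram(&s->ram, OBJECT(s), "mydev-ram",
 *                          64 * 1024, &error_fatal);
 */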
/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizeable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     The used size can change across reboots.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef __linux__
/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @share: %true if memory must be mmapped with the MAP_SHARED flag
 * @path: the path in which to allocate the RAM.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
 *                                  mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @share: %true if memory must be mmapped with the MAP_SHARED flag
 * @fd: the fd to mmap.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp);
#endif
/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                struct Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
 *                                     a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly.  However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided.  Replaces
 * skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              struct Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);

/**
 * memory_region_init_rom: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.  Writes are
 *                                 handled via callbacks.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);

/**
 * memory_region_init_reservation: Initialize a memory region that reserves
 *                                 I/O space.
 *
 * A reservation region primarily serves debugging purposes.  It claims I/O
 * space that is not supposed to be handled by QEMU itself.  Any access via
 * the memory API will cause an abort().
 * This function is deprecated.  Use memory_region_init_io() with NULL
 * callbacks instead.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
static inline void memory_region_init_reservation(MemoryRegion *mr,
                                                  Object *owner,
                                                  const char *name,
                                                  uint64_t size)
{
    memory_region_init_io(mr, owner, NULL, mr, name, size);
}
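/*
 * Example: aliasing the low 1MB of a RAM region at a second guest-physical
 * address.  Illustrative sketch only; the region names and addresses are
 * invented, and sysmem stands for the container obtained from
 * get_system_memory().
 *
 *   memory_region_init_alias(&s->ram_alias, OBJECT(s), "ram-low-alias",
 *                            &s->ram, 0, 0x100000);
 *   memory_region_add_subregion(sysmem, 0xe0000000, &s->ram_alias);
 */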
/**
 * memory_region_init_iommu: Initialize a memory region that translates
 *                           addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks used to translate addresses within the region
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(MemoryRegion *mr,
                              struct Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
struct Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device-backed RAM region.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_is_iommu: check whether a memory region is an iommu
 *
 * Returns %true if a memory region is an iommu.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_iommu(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_is_iommu(mr->alias);
    }
    return mr->iommu_ops;
}

/**
 * memory_region_iommu_get_min_page_size: get minimum supported page size
 *                                        for an iommu
 *
 * Returns minimum supported page size for an iommu.
 *
 * @mr: the memory region being queried
 */
uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * The notification type will be decided by entry.perm bits:
 *
 * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
 * - For MAP (newly added entry) notifies: set entry.perm to the
 *   permission of the page (which is definitely !IOMMU_NONE).
 *
 * Note: for any IOMMU implementation, an in-place mapping change
 * should be notified with an UNMAP followed by a MAP.
 *
 * @mr: the memory region that was changed
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry);
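/*
 * Example: telling registered notifiers about a new 4k mapping, then about
 * its removal.  Illustrative sketch only; iommu_mr and the addresses are
 * invented, and address_space_memory is assumed as the target.
 *
 *   IOMMUTLBEntry entry = {
 *       .target_as = &address_space_memory,
 *       .iova = 0x10000,
 *       .translated_addr = 0x40000,
 *       .addr_mask = 0xfff,
 *       .perm = IOMMU_RW,                     // MAP notification
 *   };
 *   memory_region_notify_iommu(iommu_mr, entry);
 *
 *   entry.perm = IOMMU_NONE;                  // UNMAP notification
 *   memory_region_notify_iommu(iommu_mr, entry);
 */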
/**
 * memory_region_notify_one: notify a change in an IOMMU translation
 *                           entry to a single notifier
 *
 * This works just like memory_region_notify_iommu(), but it only
 * notifies a specific notifier, not all of them.
 *
 * @notifier: the notifier to be notified
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry);

/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * @mr: the memory region to observe
 * @n: the IOMMUNotifier to be added; the notify callback receives a
 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
 *     ceases to be valid on exit from the notifier.
 */
void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n);

/**
 * memory_region_iommu_replay: replay existing IOMMU translations to
 * a notifier with the minimum page granularity returned by
 * mr->iommu_ops->get_min_page_size().
 *
 * @mr: the memory region to observe
 * @n: the notifier to which to replay iommu mappings
 */
void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n);

/**
 * memory_region_iommu_replay_all: replay existing IOMMU translations
 * to all the notifiers registered.
 *
 * @mr: the memory region to observe
 */
void memory_region_iommu_replay_all(MemoryRegion *mr);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @mr: the memory region which was observed and for which notify_stopped()
 *      needs to be called
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n);
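/*
 * Example: registering the notifier initialized earlier and replaying the
 * current translations to it.  Illustrative sketch only; iommu_mr and n are
 * assumed to be set up as in the previous examples.
 *
 *   memory_region_register_iommu_notifier(iommu_mr, &n);
 *   memory_region_iommu_replay(iommu_mr, &n);
 *   ...
 *   memory_region_unregister_iommu_notifier(iommu_mr, &n);
 */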
/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes for the given client
 *
 * @mr: the memory region being queried
 * @client: the client being queried
 */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);

/**
 * memory_region_get_dirty_log_mask: return the clients for which a
 * memory region is logging writes.
 *
 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 * are the bit indices.
 *
 * @mr: the memory region being queried
 */
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_from_host: Convert a pointer into a RAM memory region
 * and an offset within it.
 *
 * Given a host pointer inside a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 * the MemoryRegion and the offset within it.
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @ptr: the host pointer to be converted
 * @offset: filled in with the offset within the region on return
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/* memory_region_ram_resize: Resize a RAM region.
 *
 * Only legal before the guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size of the region
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
                              Error **errp);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_get_dirty: Check whether a range of bytes is dirty
 *                          for a specified client.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);
/**
 * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
 *                                     for a specified client, and clear it.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client);

/**
 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
 *                                         bitmap and clear it.
 *
 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
 * returns the snapshot.  The snapshot can then be used to query dirty
 * status, using memory_region_snapshot_get_dirty.  Unlike
 * memory_region_test_and_clear_dirty this allows the same page to be
 * queried multiple times, which is especially useful for display updates
 * where the scanlines often are not page aligned.
 *
 * The dirty bitmap region which gets copied into the snapshot (and
 * cleared afterwards) can be larger than requested.  The boundaries
 * are rounded up/down so complete bitmap longs (covering 64 pages on
 * 64-bit hosts) can be copied over into the bitmap snapshot.  This
 * isn't a problem for display updates as the extra pages are outside
 * the visible area, and in case the visible area changes a full
 * display redraw is due anyway.  Should other use cases for this
 * function emerge we might have to revisit this implementation
 * detail.
 *
 * Use g_free to release DirtyBitmapSnapshot.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client);

/**
 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
 *                                   in the specified dirty bitmap snapshot.
 *
 * @mr: the memory region being queried.
 * @snap: the dirty bitmap snapshot
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 */
bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
                                      DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size);

/**
 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
 *                                  any external TLBs (e.g. kvm)
 *
 * Flushes dirty information from accelerators such as kvm and vhost-net
 * and makes it available to users of the memory API.
 *
 * @mr: the region being flushed.
 */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);
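/*
 * Example: a display-update style loop over a dirty bitmap snapshot.
 * Illustrative sketch only; the region, fb_size and page granularity are
 * invented.
 *
 *   DirtyBitmapSnapshot *snap;
 *   hwaddr ofs;
 *
 *   snap = memory_region_snapshot_and_clear_dirty(mr, 0, fb_size,
 *                                                 DIRTY_MEMORY_VGA);
 *   for (ofs = 0; ofs < fb_size; ofs += TARGET_PAGE_SIZE) {
 *       if (memory_region_snapshot_get_dirty(mr, snap, ofs,
 *                                            TARGET_PAGE_SIZE)) {
 *           // redraw the scanlines covered by this page
 *       }
 *   }
 *   g_free(snap);
 */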
/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing.  MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed.  This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced.  Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves.  For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);
/**
 * memory_region_set_global_locking: Declares the access processing requires
 *                                   QEMU's global lock.
 *
 * When this is invoked, accesses to the memory region will be processed while
 * holding the global lock of QEMU.  This is the default behavior of memory
 * regions.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_global_locking(MemoryRegion *mr);

/**
 * memory_region_clear_global_locking: Declares that access processing does
 *                                     not depend on the QEMU global lock.
 *
 * By clearing this property, accesses to the memory region will be processed
 * outside of QEMU's global lock (unless the lock is held when issuing the
 * access request).  In this case, the device model implementing the access
 * handlers is responsible for synchronization of concurrency.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_global_locking(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event.  The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 **/
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset.  The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);
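/*
 * Example: building a small board layout with a container region.
 * Illustrative sketch only; the sizes and offsets are invented, and sysmem
 * stands for the container obtained from get_system_memory().
 *
 *   MemoryRegion *sysmem = get_system_memory();
 *
 *   memory_region_init(&s->peripherals, OBJECT(s), "peripherals", 0x10000);
 *   memory_region_add_subregion(&s->peripherals, 0x0000, &s->mmio);
 *   memory_region_add_subregion(sysmem, 0x40000000, &s->peripherals);
 *   memory_region_add_subregion(sysmem, 0x80000000, &s->ram);
 */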
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset.  The subregion may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority.  Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);

/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

uint64_t memory_region_get_alignment(const MemoryRegion *mr);
/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region.  A disabled memory region
 * ignores all accesses to itself and its subregions.  It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to container region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

/*
 * memory_region_set_size: dynamically update the size of a region.
 *
 * Dynamically updates the size of a region.
 *
 * @mr: the region to be updated
 * @size: the new size of the region
 */
void memory_region_set_size(MemoryRegion *mr, uint64_t size);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() has changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);
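/*
 * Example: overlaying a higher-priority region on top of RAM, as a PCI
 * window might.  Illustrative sketch only; the priority value, names and
 * addresses are invented.  The window hides the RAM underneath it while
 * enabled; disabling it with memory_region_set_enabled() reveals the RAM
 * again.
 *
 *   memory_region_add_subregion(sysmem, 0x80000000, &s->ram);
 *   memory_region_add_subregion_overlap(sysmem, 0x80000000,
 *                                       &s->pci_window, 1);
 */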
/**
 * memory_region_present: checks if an address relative to a @container
 * translates into #MemoryRegion within @container
 *
 * Answer whether a #MemoryRegion within @container covers the address
 * @addr.
 *
 * @container: a #MemoryRegion within which @addr is a relative address
 * @addr: the area within @container to be searched
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr);

/**
 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
 * into any address space.
 *
 * @mr: a #MemoryRegion which should be checked if it's mapped
 */
bool memory_region_is_mapped(MemoryRegion *mr);

/**
 * memory_region_find: translate an address/size relative to a
 * MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 *    .@size = 0 iff no overlap was found
 *    .@mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one.  However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
 *    .@offset_within_address_space >= @addr
 *    .@offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @mr to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);

/**
 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for all address spaces.
 */
void memory_global_dirty_log_sync(void);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
 */
void memory_region_transaction_commit(void);
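/*
 * Example: batching two topology updates so that the guest observes them
 * atomically.  Illustrative sketch only; the regions and address are
 * invented.
 *
 *   memory_region_transaction_begin();
 *   memory_region_set_enabled(&s->rom_alias, false);
 *   memory_region_set_address(&s->ram, 0x80000000);
 *   memory_region_transaction_commit();
 */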
/**
 * memory_listener_register: register callbacks to be called when memory
 *                           sections are mapped or unmapped into an address
 *                           space
 *
 * @listener: an object containing the callbacks to be called
 * @filter: if non-%NULL, only regions in this address space will be observed
 */
void memory_listener_register(MemoryListener *listener, AddressSpace *filter);

/**
 * memory_listener_unregister: undo the effect of memory_listener_register()
 *
 * @listener: an object containing the callbacks to be removed
 */
void memory_listener_unregister(MemoryListener *listener);

/**
 * memory_global_dirty_log_start: begin dirty logging for all regions
 */
void memory_global_dirty_log_start(void);

/**
 * memory_global_dirty_log_stop: end dirty logging for all regions
 */
void memory_global_dirty_log_stop(void);

void mtree_info(fprintf_function mon_printf, void *f, bool flatview);

/**
 * memory_region_dispatch_read: perform a read directly to the specified
 * MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @pval: pointer to uint64_t which the data is written to
 * @size: size of the access in bytes
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs);
/**
 * memory_region_dispatch_write: perform a write directly to the specified
 * MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @data: data to write
 * @size: size of the access in bytes
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs);

/**
 * address_space_init: initializes an address space
 *
 * @as: an uninitialized #AddressSpace
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name.  The name is only used for debugging
 *        output.
 */
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);

/**
 * address_space_init_shareable: return an address space for a memory region,
 *                               creating it if it does not already exist
 *
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name.  The name is only used for debugging
 *        output.
 *
 * This function will return a pointer to an existing AddressSpace
 * which was initialized with the specified MemoryRegion, or it will
 * create and initialize one if it does not already exist.  The ASes
 * are reference-counted, so the memory will be freed automatically
 * when the AddressSpace is destroyed via address_space_destroy.
 */
AddressSpace *address_space_init_shareable(MemoryRegion *root,
                                           const char *name);

/**
 * address_space_destroy: destroy an address space
 *
 * Releases all resources associated with an address space.  After an address
 * space is destroyed, its root memory region (given by address_space_init())
 * may be destroyed as well.
 *
 * @as: address space to be destroyed
 */
void address_space_destroy(AddressSpace *as);
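/*
 * Example: giving a DMA-capable device its own address space rooted at a
 * region it owns.  Illustrative sketch only; the field and region names are
 * invented.
 *
 *   memory_region_init(&s->dma_root, OBJECT(s), "mydev-dma-root",
 *                      UINT64_MAX);
 *   address_space_init(&s->dma_as, &s->dma_root, "mydev-dma");
 *   ...
 *   address_space_destroy(&s->dma_as);
 */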
/**
 * address_space_rw: read from or write to an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 * @is_write: indicates the transfer direction
 */
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
                             MemTxAttrs attrs, uint8_t *buf,
                             int len, bool is_write);

/**
 * address_space_write: write to address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs,
                                const uint8_t *buf, int len);
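/*
 * Example: writing a buffer into guest memory and checking the result.
 * Illustrative sketch only; MEMTXATTRS_UNSPECIFIED is assumed to come from
 * "exec/memattrs.h", and the address and buffer are invented.
 *
 *   uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };
 *   MemTxResult r;
 *
 *   r = address_space_write(&address_space_memory, 0x1000,
 *                           MEMTXATTRS_UNSPECIFIED, buf, sizeof(buf));
 *   if (r != MEMTX_OK) {
 *       // handle unassigned memory, device error or IOMMU fault
 *   }
 */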
/* address_space_ld*: load from an address space
 * address_space_st*: store to an address space
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address within the AddressSpace.
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *          if NULL, this information is discarded
 */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result);
void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result);

uint32_t ldub_phys(AddressSpace *as, hwaddr addr);
uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr);
uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr);
uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);

struct MemoryRegionCache {
    hwaddr xlat;
    hwaddr len;
    AddressSpace *as;
};

#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .as = NULL })

struct MemoryRegionCache {
    hwaddr xlat;
    hwaddr len;
    AddressSpace *as;
};

#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .as = NULL })

/* address_space_cache_init: prepare for repeated access to a physical
 * memory region
 *
 * @cache: #MemoryRegionCache to be filled
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of buffer
 * @is_write: indicates the transfer direction
 *
 * Will only work with RAM, and may map a subset of the requested range by
 * returning a value that is less than @len. On failure, returns a negative
 * errno value.
 *
 * Because it only works with RAM, this function can be used for
 * read-modify-write operations. In this case, @is_write should be %true.
 *
 * Note that addresses passed to the address_space_*_cached functions
 * are relative to @addr.
 */
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write);

/**
 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache to operate on.
 * @addr: The first physical address that was written, relative to the
 *   address that was passed to address_space_cache_init().
 * @access_len: The number of bytes that were written starting at @addr.
 */
void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len);

/**
 * address_space_cache_destroy: free a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache whose memory should be released.
 */
void address_space_cache_destroy(MemoryRegionCache *cache);
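
/* Usage example (sketch; the address space, base address and length are
 * assumptions): the typical lifecycle of a #MemoryRegionCache. A negative
 * return from address_space_cache_init() is an errno value; a result
 * smaller than the requested length means only a prefix of the range was
 * mapped.
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     int64_t mapped;
 *
 *     mapped = address_space_cache_init(&cache, as, 0x2000, 0x100, true);
 *     if (mapped < 0) {
 *         ... fall back to the uncached address_space_* accessors ...
 *     }
 *     ... repeated accesses through the address_space_*_cached accessors,
 *         with addresses relative to 0x2000 ...
 *     address_space_cache_destroy(&cache);
 */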

/* address_space_ld*_cached: load from a cached #MemoryRegion
 * address_space_st*_cached: store into a cached #MemoryRegion
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address. The address is
 * relative to the base of the cached region, which must lie within
 * a #MemoryRegion that was mapped with address_space_cache_init().
 *
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @cache: previously initialized #MemoryRegionCache to be accessed
 * @addr: address within the cached range, relative to its base
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *   if NULL, this information is discarded
 */
uint32_t address_space_ldub_cached(MemoryRegionCache *cache, hwaddr addr,
                                   MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                      MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                      MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
void address_space_stb_cached(MemoryRegionCache *cache, hwaddr addr,
                              uint32_t val, MemTxAttrs attrs,
                              MemTxResult *result);
void address_space_stw_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                 uint32_t val, MemTxAttrs attrs,
                                 MemTxResult *result);
void address_space_stw_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                 uint32_t val, MemTxAttrs attrs,
                                 MemTxResult *result);
void address_space_stl_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                 uint32_t val, MemTxAttrs attrs,
                                 MemTxResult *result);
void address_space_stl_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                 uint32_t val, MemTxAttrs attrs,
                                 MemTxResult *result);
void address_space_stq_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                 uint64_t val, MemTxAttrs attrs,
                                 MemTxResult *result);
void address_space_stq_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                 uint64_t val, MemTxAttrs attrs,
                                 MemTxResult *result);

uint32_t ldub_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t lduw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t lduw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t ldl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t ldl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint64_t ldq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint64_t ldq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
void stb_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
void stq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
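
/* Usage example (sketch, reusing the "cache" initialized above with
 * @is_write = %true): a read-modify-write of a little-endian longword at
 * offset 8 into the cached range. Per the write-completion contract
 * documented above, the store is followed by
 * address_space_cache_invalidate().
 *
 *     uint32_t flags;
 *
 *     flags = address_space_ldl_le_cached(&cache, 8,
 *                                         MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_stl_le_cached(&cache, 8, flags | 1,
 *                                 MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_cache_invalidate(&cache, 8, sizeof(flags));
 *
 * Passing NULL for @result discards the success/failure information, as
 * described above.
 */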
/* address_space_get_iotlb_entry: translate an address into an IOTLB
 * entry. Should be called from an RCU critical section.
 */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write);

/* address_space_translate: translate an address range in an address space
 * into a #MemoryRegion and an address range within that region. Should be
 * called from an RCU critical section, to ensure that the returned region
 * is not freed before the caller has finished with it.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @xlat: pointer to the address within the returned #MemoryRegion
 * @len: pointer to length
 * @is_write: indicates the transfer direction
 */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *len,
                                      bool is_write);

/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, @addr and @len should be page aligned. This limitation
 * will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len,
                                bool is_write);

/* address_space_map: map a physical memory region into a host virtual
 * address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return
 * @is_write: indicates the transfer direction
 */
void *address_space_map(AddressSpace *as, hwaddr addr,
                        hwaddr *plen, bool is_write);

/* address_space_unmap: Unmaps a memory region previously mapped by
 * address_space_map()
 *
 * Will also mark the memory as dirty if @is_write == %true. @access_len
 * gives the amount of memory that was actually read or written by the
 * caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by address_space_map()
 * @len: buffer length as returned by address_space_map()
 * @is_write: indicates the transfer direction
 * @access_len: amount of data actually transferred
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len);
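
/* Usage example (sketch; the address space, guest address and length are
 * assumptions): direct access to guest memory via map/unmap. The mapped
 * length may come back smaller than requested, and %NULL means mapping
 * resources are currently exhausted (see cpu_register_map_client() above
 * for retrying).
 *
 *     hwaddr plen = 4096;
 *     void *p;
 *
 *     p = address_space_map(as, 0x3000, &plen, true);
 *     if (!p) {
 *         ... retry later, or fall back to address_space_rw() ...
 *     }
 *     ... fill in up to plen bytes at p ...
 *     address_space_unmap(as, p, plen, true, plen);
 *
 * Here the final argument reports that all plen bytes were written, so the
 * whole range is marked dirty on unmap.
 */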

/* Internal functions, part of the implementation of address_space_read.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr);
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len);
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);

/* True if an access of the given direction can be performed directly on
 * the region's RAM block (host memory), rather than through MMIO dispatch.
 */
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (is_write) {
        return memory_region_is_ram(mr) &&
               !mr->readonly && !memory_region_is_ram_device(mr);
    } else {
        return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
               memory_region_is_romd(mr);
    }
}

/**
 * address_space_read: read from an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline __attribute__((__always_inline__))
MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len)
{
    MemTxResult result = MEMTX_OK;
    hwaddr l, addr1;
    void *ptr;
    MemoryRegion *mr;

    /* If the length is known at compile time, inline a fast path that
     * copies straight out of RAM; otherwise take the generic path. */
    if (__builtin_constant_p(len)) {
        if (len) {
            rcu_read_lock();
            l = len;
            mr = address_space_translate(as, addr, &addr1, &l, false);
            if (len == l && memory_access_is_direct(mr, false)) {
                ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
                memcpy(buf, ptr, len);
            } else {
                result = address_space_read_continue(as, addr, attrs, buf, len,
                                                     addr1, l, mr);
            }
            rcu_read_unlock();
        }
    } else {
        result = address_space_read_full(as, addr, attrs, buf, len);
    }
    return result;
}

/**
 * address_space_read_cached: read from a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline void
address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
                          void *buf, int len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    address_space_read(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED,
                       buf, len);
}

/**
 * address_space_write_cached: write to a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline void
address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
                           void *buf, int len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    address_space_write(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED,
                        buf, len);
}

#endif /* !CONFIG_USER_ONLY */

#endif /* MEMORY_H */