/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/ramlist.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"
#include "hw/qdev-core.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "qemu:memory-region"
#define MEMORY_REGION(obj) \
        OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "qemu:iommu-memory-region"
#define IOMMU_MEMORY_REGION(obj) \
        OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_CLASS(klass) \
        OBJECT_CLASS_CHECK(IOMMUMemoryRegionClass, (klass), \
                           TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_GET_CLASS(obj) \
        OBJECT_GET_CLASS(IOMMUMemoryRegionClass, (obj), \
                         TYPE_IOMMU_MEMORY_REGION)

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};

/*
 * Bitmap for different IOMMUNotifier capabilities.  Each notifier can
 * register with one or more IOMMU notifier capability bits.
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
}

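/*
 * Illustrative sketch (not part of the API): how a caller might fill in
 * an IOMMUTLBEntry for a 4 KiB mapping and set up a notifier for it with
 * iommu_notifier_init().  The callback name "my_iommu_map_notify" is
 * hypothetical; address_space_memory and HWADDR_MAX come from other
 * headers (exec/address-spaces.h, exec/hwaddr.h).
 *
 *     static void my_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *e)
 *     {
 *         // e->iova .. e->iova + e->addr_mask has changed; react here
 *     }
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as       = &address_space_memory,
 *         .iova            = 0x1000,
 *         .translated_addr = 0x80001000,
 *         .addr_mask       = 0xfff,                          // 4 KiB page
 *         .perm            = IOMMU_ACCESS_FLAG(true, true),  // IOMMU_RW
 *     };
 *
 *     IOMMUNotifier n;
 *     iommu_notifier_init(&n, my_iommu_map_notify, IOMMU_NOTIFIER_ALL,
 *                         0, HWADDR_MAX);
 */
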
/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region.  @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region.  @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);
    /* Instruction execution pre-callback:
     * @addr is the address of the access relative to the @mr.
     * @size is the size of the area returned by the callback.
     * @offset is the location of the pointer inside @mr.
     *
     * Returns a pointer to a location which contains guest code.
     */
    void *(*request_ptr)(void *opaque, hwaddr addr, unsigned *size,
                         unsigned *offset);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specifies bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present and returning false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration.
     */
    const MemoryRegionMmio old_mmio;
};

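/*
 * Illustrative sketch (not part of this header): a MemoryRegionOps for a
 * single 32-bit MMIO register.  The "mydev_*" names and "MyDevState" type
 * are hypothetical.
 *
 *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         return s->reg;
 *     }
 *
 *     static void mydev_write(void *opaque, hwaddr addr, uint64_t data,
 *                             unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         s->reg = data;
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid.min_access_size = 4,   // reject byte/word guest accesses
 *         .valid.max_access_size = 4,
 *     };
 */
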
enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};

typedef struct IOMMUMemoryRegionClass {
    /* private */
    struct DeviceClass parent_class;

    /*
     * Return a TLB entry that contains a given address.  @flag should
     * be set to the access permission of this translation operation.
     * It can be IOMMU_NONE to mean that no read/write permission checks
     * are needed, for example during region replay.
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag);
    /* Returns minimum supported page size */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /* Called when IOMMU Notifier flag changed */
    void (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                                IOMMUNotifierFlag old_flags,
                                IOMMUNotifierFlag new_flags);
    /* Set this up to provide a customized IOMMU replay function */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /* Get IOMMU misc attributes */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr,
                    void *data);
} IOMMUMemoryRegionClass;

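/*
 * Illustrative sketch (assumptions: a flat, fixed-offset IOMMU; the
 * "my_iommu_translate" name is hypothetical): a minimal .translate
 * implementation for a subclass of TYPE_IOMMU_MEMORY_REGION.
 *
 *     static IOMMUTLBEntry my_iommu_translate(IOMMUMemoryRegion *iommu,
 *                                             hwaddr addr,
 *                                             IOMMUAccessFlags flag)
 *     {
 *         IOMMUTLBEntry entry = {
 *             .target_as       = &address_space_memory,
 *             .iova            = addr & ~(hwaddr)0xfff,
 *             .translated_addr = (addr + 0x10000000) & ~(hwaddr)0xfff,
 *             .addr_mask       = 0xfff,
 *             .perm            = IOMMU_RW,
 *         };
 *         return entry;
 *     }
 */
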
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    Object parent_obj;

    /* All fields are private - violators will be prosecuted */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool rom_device;
    bool flush_coalesced_mmio;
    bool global_locking;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_mmio_add)(MemoryListener *listener,
                               MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    void (*coalesced_mmio_del)(MemoryListener *listener,
                               MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};

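/*
 * Illustrative sketch (hypothetical "my_*" names): a MemoryListener that
 * logs regions as they appear in an address space.  Callbacks left NULL
 * are simply never invoked; address_space_memory comes from
 * exec/address-spaces.h.
 *
 *     static void my_region_add(MemoryListener *listener,
 *                               MemoryRegionSection *section)
 *     {
 *         printf("mapped %s at 0x%" PRIx64 "\n",
 *                memory_region_name(section->mr),
 *                (uint64_t)section->offset_within_address_space);
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */
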
/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;

    /* Accessed via RCU.  */
    struct FlatView *current_map;

    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(memory_listeners_as, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;
typedef struct FlatRange FlatRange;

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_rcu_read(&as->current_map);
}


/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 */
struct MemoryRegionSection {
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    Int128 size;
    hwaddr offset_within_address_space;
    bool readonly;
};

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        struct Object *owner,
                        const char *name,
                        uint64_t size);

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Subtract 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           struct Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);

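/*
 * Illustrative sketch (hypothetical "mydev_*"/"s" names, continuing the
 * MemoryRegionOps example above): creating an MMIO region during device
 * realize and mapping it into the system memory map.  get_system_memory()
 * is declared in exec/address-spaces.h.
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                           "mydev-regs", 0x1000);
 *     memory_region_add_subregion(get_system_memory(), 0xfeed0000,
 *                                 &s->iomem);
 */
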
/**
 * memory_region_init_ram_nomigrate:  Initialize RAM memory region.  Accesses
 *                                    into the region will modify memory
 *                                    directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_ram_shared_nomigrate:  Initialize RAM memory region.
 *                                           Accesses into the region will
 *                                           modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @share: allow remapping RAM to different addresses
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function is similar to memory_region_init_ram_nomigrate.
 * The only difference is that part of the RAM region can be remapped.
 */
void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp);

/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizeable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     The used size can change across reboots.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef __linux__
/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 * @path: the path in which to allocate the RAM.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      bool share,
                                      const char *path,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
 *                                  mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 * @fd: the fd to mmap.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp);
#endif

/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                struct Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
 *                                     a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly.  However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided.  Replaces
 * skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              struct Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);

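/*
 * Illustrative sketch (hypothetical variables; assumes "ram" is an
 * already-initialized RAM region): exposing the first 1 MiB of a larger
 * RAM region a second time at another guest-physical address, the way
 * legacy PC "low memory" windows are built.
 *
 *     MemoryRegion *ram_below_1m = g_new(MemoryRegion, 1);
 *
 *     memory_region_init_alias(ram_below_1m, NULL, "ram-below-1m",
 *                              ram, 0, 0x100000);
 *     memory_region_add_subregion(get_system_memory(), 0xe0000000,
 *                                 ram_below_1m);
 */
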
/**
 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram_nomigrate()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
 *                                           Writes are handled via callbacks.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp);

/**
 * memory_region_init_reservation: Initialize a memory region that reserves
 *                                 I/O space.
 *
 * A reservation region primarily serves debugging purposes.  It claims I/O
 * space that is not supposed to be handled by QEMU itself.  Any access via
 * the memory API will cause an abort().
 * This function is deprecated.  Use memory_region_init_io() with NULL
 * callbacks instead.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
static inline void memory_region_init_reservation(MemoryRegion *mr,
                                                  Object *owner,
                                                  const char *name,
                                                  uint64_t size)
{
    memory_region_init_io(mr, owner, NULL, mr, name, size);
}

/**
 * memory_region_init_iommu: Initialize a memory region of a custom type
 *                           that translates addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
 * @instance_size: the IOMMUMemoryRegion subclass instance size
 * @mrtypename: the type name of the #IOMMUMemoryRegion
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_init_ram - Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count (must be
 *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
 * @name: name of the memory region
 * @size: size of the region in bytes
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * This function allocates RAM for a board model or device, and
 * arranges for it to be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

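/*
 * Illustrative sketch (hypothetical board code): allocating migratable
 * RAM for a board model and mapping it at guest address 0.  error_fatal
 * comes from qapi/error.h.
 *
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *
 *     memory_region_init_ram(ram, NULL, "myboard.ram", 0x08000000,
 *                            &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */
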
/**
 * memory_region_init_rom: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().  This includes arranging for the
 * contents to be migrated.
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.
 *                                 Writes are handled via callbacks.
 *
 * This function initializes a memory region backed by RAM for reads
 * and callbacks for writes, and arranges for the RAM backing to
 * be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);


/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
struct Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device-backed ram region
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_get_iommu: check whether a memory region is an iommu
 *
 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
 * otherwise NULL.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_iommu(mr->alias);
    }
    if (mr->is_iommu) {
        return (IOMMUMemoryRegion *) mr;
    }
    return NULL;
}

/**
 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
 *   if an iommu or NULL if not
 *
 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
 * otherwise NULL.  This is a fast path avoiding QOM checking, use with
 * caution.
 *
 * @iommu_mr: the memory region being queried
 */
static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
    IOMMUMemoryRegion *iommu_mr)
{
    return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
}

#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)

/**
 * memory_region_iommu_get_min_page_size: get minimum supported page size
 *   for an iommu
 *
 * Returns minimum supported page size for an iommu.
 *
 * @iommu_mr: the memory region being queried
 */
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * The notification type will be decided by entry.perm bits:
 *
 * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
 * - For MAP (newly added entry) notifies: set entry.perm to the
 *   permission of the page (which is definitely !IOMMU_NONE).
 *
 * Note: for any IOMMU implementation, an in-place mapping change
 * should be notified with an UNMAP followed by a MAP.
 *
 * @iommu_mr: the memory region that was changed
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                IOMMUTLBEntry entry);

/**
 * memory_region_notify_one: notify a change in an IOMMU translation
 *                           entry to a single notifier
 *
 * This works just like memory_region_notify_iommu(), but it only
 * notifies a specific notifier, not all of them.
 *
 * @notifier: the notifier to be notified
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry);

/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * @mr: the memory region to observe
 * @n: the IOMMUNotifier to be added; the notify callback receives a
 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
 *     ceases to be valid on exit from the notifier.
 */
void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n);

/**
 * memory_region_iommu_replay: replay existing IOMMU translations to
 * a notifier with the minimum page granularity returned by
 * mr->iommu_ops->get_page_size().
 *
 * @iommu_mr: the memory region to observe
 * @n: the notifier to which to replay iommu mappings
 */
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);

/**
 * memory_region_iommu_replay_all: replay existing IOMMU translations
 * to all the notifiers registered.
 *
 * @iommu_mr: the memory region to observe
 */
void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @mr: the memory region which was observed and for which notify_stopped()
 *      needs to be called
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n);

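/*
 * Illustrative sketch (hypothetical variables, reusing the notifier "n"
 * from the iommu_notifier_init() example above): registering for
 * MAP/UNMAP events on an IOMMU region, replaying the current mappings,
 * and unregistering when done.
 *
 *     memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), &n);
 *     memory_region_iommu_replay(iommu_mr, &n);  // deliver existing maps
 *     ...
 *     memory_region_unregister_iommu_notifier(MEMORY_REGION(iommu_mr), &n);
 */
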
/**
 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
 * defined on the IOMMU.
 *
 * Returns 0 if it succeeded, an error code otherwise.
 *
 * @iommu_mr: the memory region
 * @attr: the requested attribute
 * @data: a pointer to the requested attribute data
 */
int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes for the given client
 *
 * @mr: the memory region being queried
 * @client: the client being queried
 */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);

/**
 * memory_region_get_dirty_log_mask: return the clients for which a
 * memory region is logging writes.
 *
 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 * are the bit indices.
 *
 * @mr: the memory region being queried
 */
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}


/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_from_host: Convert a pointer into a RAM memory region
 * and an offset within it.
 *
 * Given a host pointer inside a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 * the MemoryRegion and the offset within it.
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @ptr: the host pointer to be converted
 * @offset: the offset within the memory region
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/* memory_region_ram_resize: Resize a RAM region.
 *
 * Only legal before the guest might have detected the memory size:
 * e.g. on incoming migration, or right after reset.
 *
 * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size of the region
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
                              Error **errp);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_get_dirty: Check whether a range of bytes is dirty
 *                          for a specified client.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
 *                                         bitmap and clear it.
 *
 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
 * returns the snapshot.  The snapshot can then be used to query dirty
 * status, using memory_region_snapshot_get_dirty.  Snapshotting allows
 * querying the same page multiple times, which is especially useful for
 * display updates where the scanlines often are not page aligned.
 *
 * The dirty bitmap region which gets copied into the snapshot (and
 * cleared afterwards) can be larger than requested.  The boundaries
 * are rounded up/down so complete bitmap longs (covering 64 pages on
 * 64-bit hosts) can be copied over into the bitmap snapshot.  This
 * isn't a problem for display updates as the extra pages are outside
 * the visible area, and in case the visible area changes a full
 * display redraw is due anyway.  Should other use cases for this
 * function emerge we might have to revisit this implementation
 * detail.
 *
 * Use g_free to release DirtyBitmapSnapshot.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client);

/**
 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
 *                                   in the specified dirty bitmap snapshot.
 *
 * @mr: the memory region being queried.
 * @snap: the dirty bitmap snapshot
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 */
bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
                                      DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size);

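/*
 * Illustrative sketch (hypothetical display code; "fb_mr", "height",
 * "stride" and "update_scanline" are assumed): snapshot the VGA dirty
 * bitmap once per frame, then probe each scanline against the snapshot.
 *
 *     DirtyBitmapSnapshot *snap;
 *     int y;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(fb_mr, 0,
 *                                                   height * stride,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (y = 0; y < height; y++) {
 *         if (memory_region_snapshot_get_dirty(fb_mr, snap,
 *                                              y * stride, stride)) {
 *             update_scanline(y);
 *         }
 *     }
 *     g_free(snap);
 */
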
/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing.  MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued, coalescing disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed.  This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced.  Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves.  For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_global_locking: Declares that access processing does
 *                                     not depend on the QEMU global lock.
 *
 * By clearing this property, accesses to the memory region will be processed
 * outside of QEMU's global lock (unless the lock is held when issuing the
 * access request).  In this case, the device model implementing the access
 * handlers is responsible for synchronizing concurrent accesses.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_global_locking(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event.  The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 **/
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

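/*
 * Illustrative sketch (hypothetical doorbell layout): wiring a 2-byte
 * "queue notify" doorbell at offset 0x10 of an MMIO region to an
 * EventNotifier, virtio-style.  With match_data set, only guest writes
 * of the value 0 fire the notifier; event_notifier_init() comes from
 * qemu/event_notifier.h.
 *
 *     EventNotifier notify;
 *
 *     event_notifier_init(&notify, 0);
 *     memory_region_add_eventfd(&s->iomem, 0x10, 2, true, 0, &notify);
 *     ...
 *     memory_region_del_eventfd(&s->iomem, 0x10, 2, true, 0, &notify);
 */
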
/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset.  The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset.  The subregion may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority.  Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);

/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

uint64_t memory_region_get_alignment(const MemoryRegion *mr);
/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region.  A disabled memory region
 * ignores all accesses to itself and its subregions.  It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to the container region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

/*
 * memory_region_set_size: dynamically update the size of a region.
 *
 * Dynamically updates the size of a region.
 *
 * @mr: the region to be updated
 * @size: used size of the region.
 */
void memory_region_set_size(MemoryRegion *mr, uint64_t size);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() has changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);

/**
 * memory_region_present: checks if an address relative to a @container
 * translates into #MemoryRegion within @container
 *
 * Answer whether a #MemoryRegion within @container covers the address
 * @addr.
 *
 * @container: a #MemoryRegion within which @addr is a relative address
 * @addr: the area within @container to be searched
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr);

/**
 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
 * into any address space.
 *
 * @mr: a #MemoryRegion which should be checked if it's mapped
 */
bool memory_region_is_mapped(MemoryRegion *mr);

/**
 * memory_region_find: translate an address/size relative to a
 * MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 *    .@size = 0 iff no overlap was found
 *    .@mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one.  However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
 *    .@offset_within_address_space >= @addr
 *    .@offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @mr to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);

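/*
 * Illustrative sketch: probing what is mapped at a guest-physical
 * address.  memory_region_find() takes a reference on the returned
 * region's owner, so drop it once done with the section.
 *
 *     MemoryRegionSection section;
 *
 *     section = memory_region_find(get_system_memory(), 0xfeed0000, 4);
 *     if (section.mr) {
 *         printf("backed by %s\n", memory_region_name(section.mr));
 *         memory_region_unref(section.mr);
 *     }
 */
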
/**
 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for all address spaces.
 */
void memory_global_dirty_log_sync(void);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
 */
void memory_region_transaction_commit(void);

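/*
 * Illustrative sketch (hypothetical "bar_mr" region): batching several
 * topology changes so that the flattened view (and everything derived
 * from it via listeners) is rebuilt once, instead of after every call.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bar_mr, true);
 *     memory_region_set_address(bar_mr, 0xfe000000);
 *     memory_region_transaction_commit();
 */
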
/**
 * memory_region_request_mmio_ptr: request a pointer to an mmio
 * #MemoryRegion. If it is possible, map a RAM #MemoryRegion with this
 * pointer. When the device wants to invalidate the pointer it will call
 * memory_region_invalidate_mmio_ptr().
 *
 * @mr: #MemoryRegion to check
 * @addr: address within that region
 *
 * Returns true on success, false otherwise.
 */
bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr);

/**
 * memory_region_invalidate_mmio_ptr: invalidate a pointer to an mmio
 * region that was previously requested. As a consequence, anything that
 * wants to execute from this area will need to request the pointer again.
 *
 * @mr: #MemoryRegion associated with the pointer.
 * @offset: offset within the memory region
 * @size: size of that area.
 */
void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
                                       unsigned size);

/**
 * memory_region_dispatch_read: perform a read directly from the specified
 * MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @pval: pointer to uint64_t which the data is written to
 * @size: size of the access in bytes
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs);
/**
 * memory_region_dispatch_write: perform a write directly to the specified
 * MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @data: data to write
 * @size: size of the access in bytes
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs);

/**
 * address_space_init: initializes an address space
 *
 * @as: an uninitialized #AddressSpace
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name. The name is only used for debugging
 *        output.
 */
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);

/**
 * address_space_destroy: destroy an address space
 *
 * Releases all resources associated with an address space. After an
 * address space is destroyed, its root memory region (given by
 * address_space_init()) may be destroyed as well.
 *
 * @as: address space to be destroyed
 */
void address_space_destroy(AddressSpace *as);

/**
 * address_space_rw: read from or write to an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to read or write
 * @is_write: indicates the transfer direction
 */
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
                             MemTxAttrs attrs, uint8_t *buf,
                             int len, bool is_write);
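
/*
 * Example (editor's sketch): giving a device its own DMA address space
 * and performing a checked write through it. The "dma_as"/"dev-dma"
 * names are invented; MEMTXATTRS_UNSPECIFIED and MEMTX_OK come from
 * "exec/memattrs.h".
 *
 *   static AddressSpace dma_as;
 *
 *   static void dev_realize(MemoryRegion *dma_root)
 *   {
 *       address_space_init(&dma_as, dma_root, "dev-dma");
 *   }
 *
 *   static bool dev_dma_write(hwaddr addr, const void *buf, int len)
 *   {
 *       MemTxResult r = address_space_rw(&dma_as, addr,
 *                                        MEMTXATTRS_UNSPECIFIED,
 *                                        (uint8_t *)buf, len, true);
 *       return r == MEMTX_OK;
 *   }
 */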
/**
 * address_space_write: write to an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 */
MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs,
                                const uint8_t *buf, int len);

/* address_space_ld*: load from an address space
 * address_space_st*: store to an address space
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad at the specified address within the AddressSpace.
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *          if NULL, this information is discarded
 */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result);
void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result);

uint32_t ldub_phys(AddressSpace *as, hwaddr addr);
uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr);
uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr);
uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);
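
/*
 * Example (editor's sketch): reading and updating a little-endian
 * descriptor field with explicit failure checking. The field layout
 * (flags at offset 4) is invented for illustration.
 *
 *   static bool desc_set_flag(AddressSpace *as, hwaddr desc, uint32_t flag)
 *   {
 *       MemTxResult res;
 *       uint32_t flags;
 *
 *       flags = address_space_ldl_le(as, desc + 4,
 *                                    MEMTXATTRS_UNSPECIFIED, &res);
 *       if (res != MEMTX_OK) {
 *           return false;   // e.g. unassigned memory or IOMMU fault
 *       }
 *       address_space_stl_le(as, desc + 4, flags | flag,
 *                            MEMTXATTRS_UNSPECIFIED, &res);
 *       return res == MEMTX_OK;
 *   }
 */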
struct MemoryRegionCache {
    hwaddr xlat;
    hwaddr len;
    AddressSpace *as;
};

#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .as = NULL })

/* address_space_cache_init: prepare for repeated access to a physical
 * memory region
 *
 * @cache: #MemoryRegionCache to be filled
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of buffer
 * @is_write: indicates the transfer direction
 *
 * Will only work with RAM, and may map a subset of the requested range by
 * returning a value that is less than @len. On failure, returns a negative
 * errno value.
 *
 * Because it only works with RAM, this function can be used for
 * read-modify-write operations. In this case, @is_write should be %true.
 *
 * Note that addresses passed to the address_space_*_cached functions
 * are relative to @addr.
 */
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write);

/**
 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache to operate on.
 * @addr: The first physical address that was written, relative to the
 *        address that was passed to address_space_cache_init().
 * @access_len: The number of bytes that were written starting at @addr.
 */
void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len);

/**
 * address_space_cache_destroy: free a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache whose memory should be released.
 */
void address_space_cache_destroy(MemoryRegionCache *cache);
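
/*
 * Example (editor's sketch): a typical #MemoryRegionCache lifecycle. The
 * "vring" naming, the bail-out policy on a partial mapping, and -ENOMEM
 * (from <errno.h>) are invented; error handling is deliberately minimal.
 *
 *   static int vring_map(MemoryRegionCache *cache, AddressSpace *as,
 *                        hwaddr base, hwaddr size)
 *   {
 *       int64_t len;
 *
 *       // @is_write is true because the ring is both read and written
 *       // (read-modify-write), which only RAM-backed caches allow.
 *       len = address_space_cache_init(cache, as, base, size, true);
 *       if (len < 0) {
 *           return len;                          // negative errno
 *       }
 *       if (len < size) {
 *           address_space_cache_destroy(cache);  // only partially mapped
 *           return -ENOMEM;
 *       }
 *       return 0;
 *   }
 */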
/* address_space_ld*_cached: load from a cached #MemoryRegion
 * address_space_st*_cached: store into a cached #MemoryRegion
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad at the specified address. The address must lie
 * within a range that was mapped with address_space_cache_init().
 *
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @cache: previously initialized #MemoryRegionCache to be accessed
 * @addr: address relative to the base of the cached region
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *          if NULL, this information is discarded
 */
uint32_t address_space_ldub_cached(MemoryRegionCache *cache, hwaddr addr,
                                   MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                      MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                      MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
void address_space_stb_cached(MemoryRegionCache *cache, hwaddr addr,
                              uint32_t val, MemTxAttrs attrs,
                              MemTxResult *result);
void address_space_stw_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                 uint32_t val, MemTxAttrs attrs,
                                 MemTxResult *result);
void address_space_stw_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                 uint32_t val, MemTxAttrs attrs,
                                 MemTxResult *result);
void address_space_stl_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                 uint32_t val, MemTxAttrs attrs,
                                 MemTxResult *result);
void address_space_stl_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                 uint32_t val, MemTxAttrs attrs,
                                 MemTxResult *result);
void address_space_stq_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                 uint64_t val, MemTxAttrs attrs,
                                 MemTxResult *result);
void address_space_stq_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                 uint64_t val, MemTxAttrs attrs,
                                 MemTxResult *result);

uint32_t ldub_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t lduw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t lduw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t ldl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t ldl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint64_t ldq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint64_t ldq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
void stb_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
void stq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
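
/*
 * Example (editor's sketch): bumping a little-endian 16-bit index field
 * through an initialized cache, then completing the write with
 * address_space_cache_invalidate(). The offset 2 is invented; explicit
 * _le accessors keep the device CPU-agnostic, as recommended above.
 *
 *   static void ring_bump_idx(MemoryRegionCache *cache)
 *   {
 *       uint16_t idx = lduw_le_phys_cached(cache, 2);
 *
 *       stw_le_phys_cached(cache, 2, idx + 1);
 *       address_space_cache_invalidate(cache, 2, sizeof(idx));
 *   }
 */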
/* address_space_get_iotlb_entry: translate an address into an IOTLB
 * entry. Should be called from an RCU critical section.
 */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write);

/* flatview_translate: translate an address range in an address space
 * into a MemoryRegion and an address range into that region. Should be
 * called from an RCU critical section, to avoid that the last reference
 * to the returned region disappears after flatview_translate returns.
 *
 * @fv: #FlatView to be accessed
 * @addr: address within that address space
 * @xlat: pointer to address within the returned memory region section's
 *        #MemoryRegion.
 * @len: pointer to length
 * @is_write: indicates the transfer direction
 */
MemoryRegion *flatview_translate(FlatView *fv,
                                 hwaddr addr, hwaddr *xlat,
                                 hwaddr *len, bool is_write);

/* address_space_translate: convenience wrapper that translates through an
 * #AddressSpace's current #FlatView; same RCU requirements as
 * flatview_translate().
 */
static inline MemoryRegion *address_space_translate(AddressSpace *as,
                                                    hwaddr addr, hwaddr *xlat,
                                                    hwaddr *len, bool is_write)
{
    return flatview_translate(address_space_to_flatview(as),
                              addr, xlat, len, is_write);
}

/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, @addr and @len should be aligned to a page size. This
 * limitation will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len,
                                bool is_write);

/* address_space_map: map a physical memory region into a host virtual address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return
 * @is_write: indicates the transfer direction
 */
void *address_space_map(AddressSpace *as, hwaddr addr,
                        hwaddr *plen, bool is_write);

/* address_space_unmap: Unmaps a memory region previously mapped by
 * address_space_map()
 *
 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
 * the amount of memory that was actually read or written by the caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by address_space_map()
 * @len: buffer length as returned by address_space_map()
 * @access_len: amount of data actually transferred
 * @is_write: indicates the transfer direction
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len);
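
/*
 * Example (editor's sketch): zero-copy access to guest memory via the
 * map/unmap pair. Names are invented; a production caller would retry
 * with cpu_register_map_client() when address_space_map() returns NULL.
 *
 *   static bool dma_read_mapped(AddressSpace *as, hwaddr addr, hwaddr len,
 *                               void (*consume)(const void *, hwaddr))
 *   {
 *       hwaddr plen = len;
 *       void *p = address_space_map(as, addr, &plen, false);
 *
 *       if (!p) {
 *           return false;                 // mapping resources exhausted
 *       }
 *       consume(p, plen);                 // plen may be less than len
 *       // Not a write: is_write == 0, so no pages are dirtied.
 *       address_space_unmap(as, p, plen, false, plen);
 *       return plen == len;
 *   }
 */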
/* Internal functions, part of the implementation of address_space_read. */
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len);
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
                                   MemTxAttrs attrs, uint8_t *buf,
                                   int len, hwaddr addr1, hwaddr l,
                                   MemoryRegion *mr);
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (is_write) {
        /* Writes may go straight to host memory only for plain,
         * writable RAM; ROM devices and "RAM device" regions need
         * the slow path.
         */
        return memory_region_is_ram(mr) &&
               !mr->readonly && !memory_region_is_ram_device(mr);
    } else {
        return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
               memory_region_is_romd(mr);
    }
}

/**
 * address_space_read: read from an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault). Called within RCU critical section.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to read
 */
static inline __attribute__((__always_inline__))
MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, uint8_t *buf,
                               int len)
{
    MemTxResult result = MEMTX_OK;
    hwaddr l, addr1;
    void *ptr;
    MemoryRegion *mr;
    FlatView *fv;

    if (__builtin_constant_p(len)) {
        if (len) {
            rcu_read_lock();
            fv = address_space_to_flatview(as);
            l = len;
            mr = flatview_translate(fv, addr, &addr1, &l, false);
            if (len == l && memory_access_is_direct(mr, false)) {
                /* Fast path: the whole access hits directly
                 * accessible RAM, so a plain memcpy suffices.
                 */
                ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
                memcpy(buf, ptr, len);
            } else {
                result = flatview_read_continue(fv, addr, attrs, buf, len,
                                                addr1, l, mr);
            }
            rcu_read_unlock();
        }
    } else {
        result = address_space_read_full(as, addr, attrs, buf, len);
    }
    return result;
}

/**
 * address_space_read_cached: read from a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline void
address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
                          void *buf, int len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    address_space_read(cache->as, cache->xlat + addr,
                       MEMTXATTRS_UNSPECIFIED, buf, len);
}

/**
 * address_space_write_cached: write to a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline void
address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
                           void *buf, int len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    address_space_write(cache->as, cache->xlat + addr,
                        MEMTXATTRS_UNSPECIFIED, buf, len);
}

#endif

#endif