/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/ramlist.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"
#include "hw/qdev-core.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "qemu:memory-region"
#define MEMORY_REGION(obj) \
        OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "qemu:iommu-memory-region"
#define IOMMU_MEMORY_REGION(obj) \
        OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_CLASS(klass) \
        OBJECT_CLASS_CHECK(IOMMUMemoryRegionClass, (klass), \
                           TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_GET_CLASS(obj) \
        OBJECT_GET_CLASS(IOMMUMemoryRegionClass, (obj), \
                         TYPE_IOMMU_MEMORY_REGION)

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};

/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
}
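
/*
 * Illustrative sketch, not part of this header's API: a vIOMMU consumer
 * (for example a vhost or VFIO backend) would typically fill in an
 * IOMMUNotifier with iommu_notifier_init() and attach it to an IOMMU memory
 * region with memory_region_register_iommu_notifier(), declared further
 * below.  The callback and the "dev" container used here are hypothetical;
 * HWADDR_MAX comes from "exec/hwaddr.h".
 *
 *     static void my_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
 *     {
 *         ... entry->perm is IOMMU_NONE for invalidations (UNMAP events) ...
 *     }
 *
 *     iommu_notifier_init(&dev->n, my_unmap_notify, IOMMU_NOTIFIER_UNMAP,
 *                         0, HWADDR_MAX);
 *     memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), &dev->n);
 */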

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque, hwaddr addr, unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque, hwaddr addr, uint64_t data, unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque, hwaddr addr, uint64_t *data,
                                   unsigned size, MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque, hwaddr addr, uint64_t data,
                                    unsigned size, MemTxAttrs attrs);
    /* Instruction execution pre-callback:
     * @addr is the address of the access relative to the @mr.
     * @size is the size of the area returned by the callback.
     * @offset is the location of the pointer inside @mr.
     *
     * Returns a pointer to a location which contains guest code.
     */
    void *(*request_ptr)(void *opaque, hwaddr addr, unsigned *size,
                         unsigned *offset);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration
     */
    const MemoryRegionMmio old_mmio;
};
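
/*
 * Illustrative sketch (MyDeviceState and its reg field are hypothetical,
 * not part of this API): a minimal MemoryRegionOps table for a single
 * 32-bit MMIO register might look like this; it is then passed to
 * memory_region_init_io(), declared later in this file.
 *
 *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDeviceState *s = opaque;
 *         return s->reg;                ... addr is relative to the region ...
 *     }
 *
 *     static void mydev_write(void *opaque, hwaddr addr,
 *                             uint64_t data, unsigned size)
 *     {
 *         MyDeviceState *s = opaque;
 *         s->reg = data;
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid.min_access_size = 4,
 *         .valid.max_access_size = 4,
 *     };
 */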

enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};

typedef struct IOMMUMemoryRegionClass {
    /* private */
    struct DeviceClass parent_class;

    /*
     * Return a TLB entry that contains a given address.  @flag should be
     * the access permission of this translation operation.  It can be set
     * to IOMMU_NONE to mean that no read/write permission checks are
     * needed, for example during region replay.
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag);
    /* Returns minimum supported page size */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /* Called when IOMMU Notifier flag changed */
    void (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                                IOMMUNotifierFlag old_flags,
                                IOMMUNotifierFlag new_flags);
    /* Set this up to provide customized IOMMU replay function */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /* Get IOMMU misc attributes */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr,
                    void *data);
} IOMMUMemoryRegionClass;

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    Object parent_obj;

    /* All fields are private - violators will be prosecuted */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool rom_device;
    bool flush_coalesced_mmio;
    bool global_locking;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_mmio_add)(MemoryListener *listener,
                               MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    void (*coalesced_mmio_del)(MemoryListener *listener,
                               MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};

/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;

    /* Accessed via RCU.  */
    struct FlatView *current_map;

    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(memory_listeners_as, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

FlatView *address_space_to_flatview(AddressSpace *as);

/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 */
struct MemoryRegionSection {
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    Int128 size;
    hwaddr offset_within_address_space;
    bool readonly;
};

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        struct Object *owner,
                        const char *name,
                        uint64_t size);
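
/*
 * Illustrative sketch (the "mydev" names and sizes are hypothetical, not part
 * of this API): a device with a 64 KiB register window typically creates a
 * pure container with memory_region_init() and then places concrete regions
 * inside it with memory_region_init_io() and memory_region_add_subregion(),
 * both declared later in this file:
 *
 *     memory_region_init(&s->container, OBJECT(s), "mydev", 0x10000);
 *     memory_region_init_io(&s->regs, OBJECT(s), &mydev_ops, s,
 *                           "mydev-regs", 0x1000);
 *     memory_region_add_subregion(&s->container, 0x0, &s->regs);
 *
 * The container is in turn mapped by the board or bus code.
 */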

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Subtract 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr, struct Object *owner,
                           const MemoryRegionOps *ops, void *opaque,
                           const char *name, uint64_t size);

/**
 * memory_region_init_ram_nomigrate:  Initialize RAM memory region.  Accesses
 *                                    into the region will modify memory
 *                                    directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_nomigrate(MemoryRegion *mr, struct Object *owner,
                                      const char *name, uint64_t size,
                                      Error **errp);
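
/*
 * Illustrative sketch (mydev_realize, MYDEV(), mydev_ops and the sizes are
 * hypothetical): typical use from a device's realize function, handing
 * errors back through errp:
 *
 *     static void mydev_realize(DeviceState *dev, Error **errp)
 *     {
 *         MyDeviceState *s = MYDEV(dev);
 *
 *         memory_region_init_io(&s->mmio, OBJECT(s), &mydev_ops, s,
 *                               "mydev-mmio", 0x1000);
 *         memory_region_init_ram_nomigrate(&s->sram, OBJECT(s), "mydev-sram",
 *                                          64 * 1024, errp);
 *     }
 */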

/**
 * memory_region_init_ram_shared_nomigrate:  Initialize RAM memory region.
 *                                           Accesses into the region will
 *                                           modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @share: allow remapping RAM to different addresses
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function is similar to memory_region_init_ram_nomigrate.
 * The only difference is that part of the RAM region can be remapped.
 */
void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const char *name, uint64_t size,
                                             bool share, Error **errp);

/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizeable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     The used size can change across reboots.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr, struct Object *owner,
                                       const char *name, uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef __linux__
/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 * @path: the path in which to allocate the RAM.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr, struct Object *owner,
                                      const char *name, uint64_t size,
                                      uint64_t align, bool share,
                                      const char *path, Error **errp);

/**
 * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
 *                                  mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 * @fd: the fd to mmap.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_fd(MemoryRegion *mr, struct Object *owner,
                                    const char *name, uint64_t size,
                                    bool share, int fd, Error **errp);
#endif

/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr, struct Object *owner,
                                const char *name, uint64_t size, void *ptr);

/**
 * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
 *                                     a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly.  However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided.  Replaces
 * skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr, struct Object *owner,
                                       const char *name, uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr, struct Object *owner,
                              const char *name, MemoryRegion *orig,
                              hwaddr offset, uint64_t size);
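
/*
 * Illustrative sketch (the region names, container and offsets are
 * hypothetical): exposing the second megabyte of a RAM region at another
 * location of a container, without duplicating the backing storage:
 *
 *     memory_region_init_alias(&s->ram_hi_alias, OBJECT(s), "ram-hi-alias",
 *                              &s->ram, 0x100000, 0x100000);
 *     memory_region_add_subregion(sysmem_container, 0x80000000,
 *                                 &s->ram_hi_alias);
 */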

/**
 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram_nomigrate()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_nomigrate(MemoryRegion *mr, struct Object *owner,
                                      const char *name, uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
 *                                           Writes are handled via callbacks.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque, const char *name,
                                             uint64_t size, Error **errp);

/**
 * memory_region_init_reservation: Initialize a memory region that reserves
 *                                 I/O space.
 *
 * A reservation region primarily serves debugging purposes.  It claims I/O
 * space that is not supposed to be handled by QEMU itself.  Any access via
 * the memory API will cause an abort().
 * This function is deprecated.  Use memory_region_init_io() with NULL
 * callbacks instead.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
static inline void memory_region_init_reservation(MemoryRegion *mr,
                                                  Object *owner,
                                                  const char *name,
                                                  uint64_t size)
{
    memory_region_init_io(mr, owner, NULL, mr, name, size);
}

/**
 * memory_region_init_iommu: Initialize a memory region of a custom type
 *                           that translates addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
 * @instance_size: the IOMMUMemoryRegion subclass instance size
 * @mrtypename: the type name of the #IOMMUMemoryRegion
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(void *_iommu_mr, size_t instance_size,
                              const char *mrtypename, Object *owner,
                              const char *name, uint64_t size);

/**
 * memory_region_init_ram: Initialize RAM memory region.  Accesses into the
 *                         region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count (must be
 *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
 * @name: name of the memory region
 * @size: size of the region in bytes
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * This function allocates RAM for a board model or device, and
 * arranges for it to be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 */
void memory_region_init_ram(MemoryRegion *mr, struct Object *owner,
                            const char *name, uint64_t size, Error **errp);

/**
 * memory_region_init_rom: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().  This includes arranging for the
 * contents to be migrated.
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom(MemoryRegion *mr, struct Object *owner,
                            const char *name, uint64_t size, Error **errp);

/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.
 *                                 Writes are handled via callbacks.
 *
 * This function initializes a memory region backed by RAM for reads
 * and callbacks for writes, and arranges for the RAM backing to
 * be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr, struct Object *owner,
                                   const MemoryRegionOps *ops, void *opaque,
                                   const char *name, uint64_t size,
                                   Error **errp);


/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
struct Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device backed ram region
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_get_iommu: check whether a memory region is an iommu
 *
 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
 * otherwise NULL.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_iommu(mr->alias);
    }
    if (mr->is_iommu) {
        return (IOMMUMemoryRegion *) mr;
    }
    return NULL;
}

/**
 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
 *                                        if an iommu or NULL if not
 *
 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
 * otherwise NULL.  This is fast path avoiding QOM checking, use with caution.
 *
 * @iommu_mr: the memory region being queried
 */
static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
    IOMMUMemoryRegion *iommu_mr)
{
    return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
}

#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)

/**
 * memory_region_iommu_get_min_page_size: get minimum supported page size
 *                                        for an iommu
 *
 * Returns minimum supported page size for an iommu.
 *
 * @iommu_mr: the memory region being queried
 */
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * The notification type will be decided by entry.perm bits:
 *
 * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
 * - For MAP (newly added entry) notifies: set entry.perm to the
 *   permission of the page (which is definitely !IOMMU_NONE).
 *
 * Note: for any IOMMU implementation, an in-place mapping change
 * should be notified with an UNMAP followed by a MAP.
 *
 * @iommu_mr: the memory region that was changed
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                IOMMUTLBEntry entry);
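
/*
 * Illustrative sketch (iova, new_paddr and iommu_mr are hypothetical
 * variables of the IOMMU model): an in-place mapping change is reported as
 * an UNMAP notification for the old entry followed by a MAP for the new one:
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as = &address_space_memory,
 *         .iova = iova,
 *         .translated_addr = 0,
 *         .addr_mask = 0xfff,             ... 4 KiB page ...
 *         .perm = IOMMU_NONE,             ... UNMAP ...
 *     };
 *     memory_region_notify_iommu(iommu_mr, entry);
 *
 *     entry.translated_addr = new_paddr;
 *     entry.perm = IOMMU_RW;              ... MAP ...
 *     memory_region_notify_iommu(iommu_mr, entry);
 */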

/**
 * memory_region_notify_one: notify a change in an IOMMU translation
 *                           entry to a single notifier
 *
 * This works just like memory_region_notify_iommu(), but it only
 * notifies a specific notifier, not all of them.
 *
 * @notifier: the notifier to be notified
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry);

/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 *                                        IOMMU translation entries.
 *
 * @mr: the memory region to observe
 * @n: the IOMMUNotifier to be added; the notify callback receives a
 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
 *     ceases to be valid on exit from the notifier.
 */
void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n);

/**
 * memory_region_iommu_replay: replay existing IOMMU translations to
 * a notifier with the minimum page granularity returned by the IOMMU's
 * get_min_page_size() callback.
 *
 * @iommu_mr: the memory region to observe
 * @n: the notifier to which to replay iommu mappings
 */
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);

/**
 * memory_region_iommu_replay_all: replay existing IOMMU translations
 * to all the notifiers registered.
 *
 * @iommu_mr: the memory region to observe
 */
void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @mr: the memory region which was observed and for which notify_stopped()
 *      needs to be called
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n);

/**
 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
 * defined on the IOMMU.
 *
 * Returns 0 on success, an error code otherwise.
 *
 * @iommu_mr: the memory region
 * @attr: the requested attribute
 * @data: a pointer to the requested attribute data
 */
int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes for the given client
 *
 * @mr: the memory region being queried
 * @client: the client being queried
 */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);

/**
 * memory_region_get_dirty_log_mask: return the clients for which a
 * memory region is logging writes.
 *
 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 * are the bit indices.
 *
 * @mr: the memory region being queried
 */
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_from_host: Convert a pointer into a RAM memory region
 * and an offset within it.
 *
 * Given a host pointer inside a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 * the MemoryRegion and the offset within it.
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @ptr: the host pointer to be converted
 * @offset: the offset within the memory region
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/* memory_region_ram_resize: Resize a RAM region.
 *
 * Only legal before the guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size of the region
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
                              Error **errp);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_get_dirty: Check whether a range of bytes is dirty
 *                          for a specified client.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
 *                                         bitmap and clear it.
 *
 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
 * returns the snapshot.  The snapshot can then be used to query dirty
 * status, using memory_region_snapshot_get_dirty.  Snapshotting allows
 * querying the same page multiple times, which is especially useful for
 * display updates where the scanlines often are not page aligned.
 *
 * The dirty bitmap region which gets copied into the snapshot (and
 * cleared afterwards) can be larger than requested.  The boundaries
 * are rounded up/down so complete bitmap longs (covering 64 pages on
 * 64-bit hosts) can be copied over into the bitmap snapshot.  This
 * isn't a problem for display updates as the extra pages are outside
 * the visible area, and in case the visible area changes a full
 * display redraw is due anyway.  Should other use cases for this
 * function emerge we might have to revisit this implementation
 * detail.
 *
 * Use g_free to release DirtyBitmapSnapshot.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client);

/**
 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
 *                                   in the specified dirty bitmap snapshot.
 *
 * @mr: the memory region being queried.
 * @snap: the dirty bitmap snapshot
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 */
bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
                                      DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);
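
/*
 * Illustrative sketch (the vram region, its size and the redraw helper are
 * hypothetical): a display device scanning its framebuffer for changes once
 * per refresh could use the snapshot API like this:
 *
 *     DirtyBitmapSnapshot *snap;
 *     hwaddr page;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(&s->vram, 0, s->vram_size,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (page = 0; page < s->vram_size; page += TARGET_PAGE_SIZE) {
 *         if (memory_region_snapshot_get_dirty(&s->vram, snap, page,
 *                                              TARGET_PAGE_SIZE)) {
 *             redraw_scanlines_for(page);
 *         }
 *     }
 *     g_free(snap);
 */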

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing.  MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued, coalescing disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed.  This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced.  Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves.  For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_global_locking: Declares that access processing does
 *                                     not depend on the QEMU global lock.
 *
 * By clearing this property, accesses to the memory region will be processed
 * outside of QEMU's global lock (unless the lock is held when issuing the
 * access request).  In this case, the device model implementing the access
 * handlers is responsible for synchronizing concurrent accesses.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_global_locking(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event.  The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);
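
/*
 * Illustrative sketch (DOORBELL_OFFSET, the mmio region and the notifier
 * field are hypothetical): wiring a guest write of the value 1 to a doorbell
 * register directly to an EventNotifier, bypassing the MMIO callbacks:
 *
 *     event_notifier_init(&s->doorbell, 0);
 *     memory_region_add_eventfd(&s->mmio, DOORBELL_OFFSET, 4,
 *                               true, 1, &s->doorbell);
 *
 * A matching memory_region_del_eventfd() call must use the same @addr,
 * @size, @match_data, @data and @e values.
 */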

/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset.  The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset.  The subregion may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority.  Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);

/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

uint64_t memory_region_get_alignment(const MemoryRegion *mr);
/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region.  A disabled memory region
 * ignores all accesses to itself and its subregions.  It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to container region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

/*
 * memory_region_set_size: dynamically update the size of a region.
 *
 * Dynamically updates the size of a region.
 *
 * @mr: the region to be updated
 * @size: used size of the region.
 */
void memory_region_set_size(MemoryRegion *mr, uint64_t size);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() had changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);

/**
 * memory_region_present: checks if an address relative to a @container
 * translates into #MemoryRegion within @container
 *
 * Answer whether a #MemoryRegion within @container covers the address
 * @addr.
 *
 * @container: a #MemoryRegion within which @addr is a relative address
 * @addr: the area within @container to be searched
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr);

/**
 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
 * into any address space.
 *
 * @mr: a #MemoryRegion which should be checked if it's mapped
 */
bool memory_region_is_mapped(MemoryRegion *mr);

/**
 * memory_region_find: translate an address/size relative to a
 * MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 *    .@size = 0 iff no overlap was found
 *    .@mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one.  However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
 *    .@offset_within_address_space >= @addr
 *    .@offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @mr to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);

/**
 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for all address spaces.
 */
void memory_global_dirty_log_sync(void);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
 */
void memory_region_transaction_commit(void);
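
/*
 * Illustrative sketch (the bank regions and the new address are
 * hypothetical): batching several topology updates so that listeners, and
 * hence the guest-visible memory map, observe a single atomic change:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(&s->bank0, false);
 *     memory_region_set_address(&s->bank1, 0x20000000);
 *     memory_region_transaction_commit();
 */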
1544 * 1545 * @mr: #MemoryRegion associated to the pointer. 1546 * @offset: offset within the memory region 1547 * @size: size of that area. 1548 */ 1549 void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset, 1550 unsigned size); 1551 1552 /** 1553 * memory_region_dispatch_read: perform a read directly to the specified 1554 * MemoryRegion. 1555 * 1556 * @mr: #MemoryRegion to access 1557 * @addr: address within that region 1558 * @pval: pointer to uint64_t which the data is written to 1559 * @size: size of the access in bytes 1560 * @attrs: memory transaction attributes to use for the access 1561 */ 1562 MemTxResult memory_region_dispatch_read(MemoryRegion *mr, 1563 hwaddr addr, 1564 uint64_t *pval, 1565 unsigned size, 1566 MemTxAttrs attrs); 1567 /** 1568 * memory_region_dispatch_write: perform a write directly to the specified 1569 * MemoryRegion. 1570 * 1571 * @mr: #MemoryRegion to access 1572 * @addr: address within that region 1573 * @data: data to write 1574 * @size: size of the access in bytes 1575 * @attrs: memory transaction attributes to use for the access 1576 */ 1577 MemTxResult memory_region_dispatch_write(MemoryRegion *mr, 1578 hwaddr addr, 1579 uint64_t data, 1580 unsigned size, 1581 MemTxAttrs attrs); 1582 1583 /** 1584 * address_space_init: initializes an address space 1585 * 1586 * @as: an uninitialized #AddressSpace 1587 * @root: a #MemoryRegion that routes addresses for the address space 1588 * @name: an address space name. The name is only used for debugging 1589 * output. 1590 */ 1591 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name); 1592 1593 /** 1594 * address_space_destroy: destroy an address space 1595 * 1596 * Releases all resources associated with an address space. After an address space 1597 * is destroyed, its root memory region (given by address_space_init()) may be destroyed 1598 * as well. 1599 * 1600 * @as: address space to be destroyed 1601 */ 1602 void address_space_destroy(AddressSpace *as); 1603 1604 /** 1605 * address_space_rw: read from or write to an address space. 1606 * 1607 * Return a MemTxResult indicating whether the operation succeeded 1608 * or failed (eg unassigned memory, device rejected the transaction, 1609 * IOMMU fault). 1610 * 1611 * @as: #AddressSpace to be accessed 1612 * @addr: address within that address space 1613 * @attrs: memory transaction attributes 1614 * @buf: buffer with the data transferred 1615 * @len: the number of bytes to read or write 1616 * @is_write: indicates the transfer direction 1617 */ 1618 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, 1619 MemTxAttrs attrs, uint8_t *buf, 1620 int len, bool is_write); 1621 1622 /** 1623 * address_space_write: write to address space. 1624 * 1625 * Return a MemTxResult indicating whether the operation succeeded 1626 * or failed (eg unassigned memory, device rejected the transaction, 1627 * IOMMU fault). 1628 * 1629 * @as: #AddressSpace to be accessed 1630 * @addr: address within that address space 1631 * @attrs: memory transaction attributes 1632 * @buf: buffer with the data transferred 1633 * @len: the number of bytes to write 1634 */ 1635 MemTxResult address_space_write(AddressSpace *as, hwaddr addr, 1636 MemTxAttrs attrs, 1637 const uint8_t *buf, int len); 1638 1639 /* address_space_ld*: load from an address space 1640 * address_space_st*: store to an address space 1641 * 1642 * These functions perform a load or store of the byte, word, 1643 * longword or quad to the specified address within the AddressSpace. 
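 *
 * For example (an illustrative sketch; "desc_pa" and the surrounding
 * error handling are hypothetical), a device model could fetch a 32-bit
 * little-endian descriptor field and write back a status word:
 *
 *     MemTxResult res;
 *     uint32_t next = address_space_ldl_le(as, desc_pa + 8,
 *                                          MEMTXATTRS_UNSPECIFIED, &res);
 *     if (res == MEMTX_OK) {
 *         address_space_stl_le(as, desc_pa + 12, 0x1,
 *                              MEMTXATTRS_UNSPECIFIED, &res);
 *     }
 *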
1644 * The _le suffixed functions treat the data as little endian; 1645 * _be indicates big endian; no suffix indicates "same endianness 1646 * as guest CPU". 1647 * 1648 * The "guest CPU endianness" accessors are deprecated for use outside 1649 * target-* code; devices should be CPU-agnostic and use either the LE 1650 * or the BE accessors. 1651 * 1652 * @as #AddressSpace to be accessed 1653 * @addr: address within that address space 1654 * @val: data value, for stores 1655 * @attrs: memory transaction attributes 1656 * @result: location to write the success/failure of the transaction; 1657 * if NULL, this information is discarded 1658 */ 1659 uint32_t address_space_ldub(AddressSpace *as, hwaddr addr, 1660 MemTxAttrs attrs, MemTxResult *result); 1661 uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr, 1662 MemTxAttrs attrs, MemTxResult *result); 1663 uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr, 1664 MemTxAttrs attrs, MemTxResult *result); 1665 uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr, 1666 MemTxAttrs attrs, MemTxResult *result); 1667 uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr, 1668 MemTxAttrs attrs, MemTxResult *result); 1669 uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr, 1670 MemTxAttrs attrs, MemTxResult *result); 1671 uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr, 1672 MemTxAttrs attrs, MemTxResult *result); 1673 void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val, 1674 MemTxAttrs attrs, MemTxResult *result); 1675 void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val, 1676 MemTxAttrs attrs, MemTxResult *result); 1677 void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val, 1678 MemTxAttrs attrs, MemTxResult *result); 1679 void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val, 1680 MemTxAttrs attrs, MemTxResult *result); 1681 void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val, 1682 MemTxAttrs attrs, MemTxResult *result); 1683 void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val, 1684 MemTxAttrs attrs, MemTxResult *result); 1685 void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val, 1686 MemTxAttrs attrs, MemTxResult *result); 1687 1688 uint32_t ldub_phys(AddressSpace *as, hwaddr addr); 1689 uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr); 1690 uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr); 1691 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr); 1692 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr); 1693 uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr); 1694 uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr); 1695 void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val); 1696 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val); 1697 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val); 1698 void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val); 1699 void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val); 1700 void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val); 1701 void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val); 1702 1703 struct MemoryRegionCache { 1704 hwaddr xlat; 1705 hwaddr len; 1706 AddressSpace *as; 1707 }; 1708 1709 #define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .as = NULL }) 1710 1711 /* address_space_cache_init: prepare for repeated access to a physical 1712 * memory region 1713 * 1714 * @cache: #MemoryRegionCache to be filled 1715 * @as: #AddressSpace to be accessed 1716 * @addr: 
address within that address space 1717 * @len: length of buffer 1718 * @is_write: indicates the transfer direction 1719 * 1720 * Will only work with RAM, and may map a subset of the requested range by 1721 * returning a value that is less than @len. On failure, return a negative 1722 * errno value. 1723 * 1724 * Because it only works with RAM, this function can be used for 1725 * read-modify-write operations. In this case, is_write should be %true. 1726 * 1727 * Note that addresses passed to the address_space_*_cached functions 1728 * are relative to @addr. 1729 */ 1730 int64_t address_space_cache_init(MemoryRegionCache *cache, 1731 AddressSpace *as, 1732 hwaddr addr, 1733 hwaddr len, 1734 bool is_write); 1735 1736 /** 1737 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache 1738 * 1739 * @cache: The #MemoryRegionCache to operate on. 1740 * @addr: The first physical address that was written, relative to the 1741 * address that was passed to @address_space_cache_init. 1742 * @access_len: The number of bytes that were written starting at @addr. 1743 */ 1744 void address_space_cache_invalidate(MemoryRegionCache *cache, 1745 hwaddr addr, 1746 hwaddr access_len); 1747 1748 /** 1749 * address_space_cache_destroy: free a #MemoryRegionCache 1750 * 1751 * @cache: The #MemoryRegionCache whose memory should be released. 1752 */ 1753 void address_space_cache_destroy(MemoryRegionCache *cache); 1754 1755 /* address_space_ld*_cached: load from a cached #MemoryRegion 1756 * address_space_st*_cached: store into a cached #MemoryRegion 1757 * 1758 * These functions perform a load or store of the byte, word, 1759 * longword or quad to the specified address. The address is 1760 * a physical address in the AddressSpace, but it must lie within 1761 * a #MemoryRegion that was mapped with address_space_cache_init. 1762 * 1763 * The _le suffixed functions treat the data as little endian; 1764 * _be indicates big endian; no suffix indicates "same endianness 1765 * as guest CPU". 1766 * 1767 * The "guest CPU endianness" accessors are deprecated for use outside 1768 * target-* code; devices should be CPU-agnostic and use either the LE 1769 * or the BE accessors. 
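 *
 * For example (an illustrative sketch; the 16-byte descriptor at the
 * hypothetical guest address "desc_pa" is not part of this API), repeated
 * accesses to a small structure can go through a cache.  The init call
 * returns the number of bytes actually cached, or a negative errno:
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     int64_t cached = address_space_cache_init(&cache, as, desc_pa,
 *                                               16, false);
 *     if (cached == 16) {
 *         uint16_t flags = address_space_lduw_le_cached(&cache, 4,
 *                                                       MEMTXATTRS_UNSPECIFIED,
 *                                                       NULL);
 *         ...
 *         address_space_cache_destroy(&cache);
 *     }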
1770 * 1771 * @cache: previously initialized #MemoryRegionCache to be accessed 1772 * @addr: address within the address space 1773 * @val: data value, for stores 1774 * @attrs: memory transaction attributes 1775 * @result: location to write the success/failure of the transaction; 1776 * if NULL, this information is discarded 1777 */ 1778 uint32_t address_space_ldub_cached(MemoryRegionCache *cache, hwaddr addr, 1779 MemTxAttrs attrs, MemTxResult *result); 1780 uint32_t address_space_lduw_le_cached(MemoryRegionCache *cache, hwaddr addr, 1781 MemTxAttrs attrs, MemTxResult *result); 1782 uint32_t address_space_lduw_be_cached(MemoryRegionCache *cache, hwaddr addr, 1783 MemTxAttrs attrs, MemTxResult *result); 1784 uint32_t address_space_ldl_le_cached(MemoryRegionCache *cache, hwaddr addr, 1785 MemTxAttrs attrs, MemTxResult *result); 1786 uint32_t address_space_ldl_be_cached(MemoryRegionCache *cache, hwaddr addr, 1787 MemTxAttrs attrs, MemTxResult *result); 1788 uint64_t address_space_ldq_le_cached(MemoryRegionCache *cache, hwaddr addr, 1789 MemTxAttrs attrs, MemTxResult *result); 1790 uint64_t address_space_ldq_be_cached(MemoryRegionCache *cache, hwaddr addr, 1791 MemTxAttrs attrs, MemTxResult *result); 1792 void address_space_stb_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val, 1793 MemTxAttrs attrs, MemTxResult *result); 1794 void address_space_stw_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val, 1795 MemTxAttrs attrs, MemTxResult *result); 1796 void address_space_stw_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val, 1797 MemTxAttrs attrs, MemTxResult *result); 1798 void address_space_stl_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val, 1799 MemTxAttrs attrs, MemTxResult *result); 1800 void address_space_stl_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val, 1801 MemTxAttrs attrs, MemTxResult *result); 1802 void address_space_stq_le_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val, 1803 MemTxAttrs attrs, MemTxResult *result); 1804 void address_space_stq_be_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val, 1805 MemTxAttrs attrs, MemTxResult *result); 1806 1807 uint32_t ldub_phys_cached(MemoryRegionCache *cache, hwaddr addr); 1808 uint32_t lduw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr); 1809 uint32_t lduw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr); 1810 uint32_t ldl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr); 1811 uint32_t ldl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr); 1812 uint64_t ldq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr); 1813 uint64_t ldq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr); 1814 void stb_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val); 1815 void stw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val); 1816 void stw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val); 1817 void stl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val); 1818 void stl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val); 1819 void stq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val); 1820 void stq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val); 1821 /* address_space_get_iotlb_entry: translate an address into an IOTLB 1822 * entry. Should be called from an RCU critical section. 
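 *
 * For example (illustrative; "iova" is a hypothetical I/O virtual address
 * being looked up on behalf of a device, for a read access):
 *
 *     rcu_read_lock();
 *     IOMMUTLBEntry entry = address_space_get_iotlb_entry(as, iova, false);
 *     if (entry.target_as && (entry.perm & IOMMU_RO)) {
 *         hwaddr xlat = entry.translated_addr + (iova - entry.iova);
 *         ...
 *     }
 *     rcu_read_unlock();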
 */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write);

/* flatview_translate: translate an address range within a FlatView into
 * a MemoryRegion and a range within that region.  Should be called from
 * an RCU critical section, so that the last reference to the returned
 * region does not disappear before the caller has finished with it.
 * address_space_translate() below is a wrapper that operates on an
 * address space's current FlatView.
 *
 * @fv: #FlatView to be accessed
 * @addr: address within that flat view
 * @xlat: pointer to address within the returned memory region section's
 *        #MemoryRegion.
 * @len: pointer to length
 * @is_write: indicates the transfer direction
 */
MemoryRegion *flatview_translate(FlatView *fv,
                                 hwaddr addr, hwaddr *xlat,
                                 hwaddr *len, bool is_write);

static inline MemoryRegion *address_space_translate(AddressSpace *as,
                                                    hwaddr addr, hwaddr *xlat,
                                                    hwaddr *len, bool is_write)
{
    return flatview_translate(address_space_to_flatview(as),
                              addr, xlat, len, is_write);
}

/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, addr and len should be aligned to a page size.  This limitation
 * will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len,
                                bool is_write);

/* address_space_map: map a physical memory region into a host virtual address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return
 * @is_write: indicates the transfer direction
 */
void *address_space_map(AddressSpace *as, hwaddr addr,
                        hwaddr *plen, bool is_write);

/* address_space_unmap: Unmaps a memory region previously mapped by
 * address_space_map()
 *
 * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
 * the amount of memory that was actually read or written by the caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by address_space_map()
 * @len: buffer length as returned by address_space_map()
 * @access_len: amount of data actually transferred
 * @is_write: indicates the transfer direction
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len);

/* Internal functions, part of the implementation of address_space_read.
*/ 1900 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, 1901 MemTxAttrs attrs, uint8_t *buf, 1902 int len, hwaddr addr1, hwaddr l, 1903 MemoryRegion *mr); 1904 1905 MemTxResult flatview_read_full(FlatView *fv, hwaddr addr, 1906 MemTxAttrs attrs, uint8_t *buf, int len); 1907 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr); 1908 1909 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write) 1910 { 1911 if (is_write) { 1912 return memory_region_is_ram(mr) && 1913 !mr->readonly && !memory_region_is_ram_device(mr); 1914 } else { 1915 return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) || 1916 memory_region_is_romd(mr); 1917 } 1918 } 1919 1920 /** 1921 * address_space_read: read from an address space. 1922 * 1923 * Return a MemTxResult indicating whether the operation succeeded 1924 * or failed (eg unassigned memory, device rejected the transaction, 1925 * IOMMU fault). 1926 * 1927 * @fv: #FlatView to be accessed 1928 * @addr: address within that address space 1929 * @attrs: memory transaction attributes 1930 * @buf: buffer with the data transferred 1931 */ 1932 static inline __attribute__((__always_inline__)) 1933 MemTxResult flatview_read(FlatView *fv, hwaddr addr, MemTxAttrs attrs, 1934 uint8_t *buf, int len) 1935 { 1936 MemTxResult result = MEMTX_OK; 1937 hwaddr l, addr1; 1938 void *ptr; 1939 MemoryRegion *mr; 1940 1941 if (__builtin_constant_p(len)) { 1942 if (len) { 1943 rcu_read_lock(); 1944 l = len; 1945 mr = flatview_translate(fv, addr, &addr1, &l, false); 1946 if (len == l && memory_access_is_direct(mr, false)) { 1947 ptr = qemu_map_ram_ptr(mr->ram_block, addr1); 1948 memcpy(buf, ptr, len); 1949 } else { 1950 result = flatview_read_continue(fv, addr, attrs, buf, len, 1951 addr1, l, mr); 1952 } 1953 rcu_read_unlock(); 1954 } 1955 } else { 1956 result = flatview_read_full(fv, addr, attrs, buf, len); 1957 } 1958 return result; 1959 } 1960 1961 static inline MemTxResult address_space_read(AddressSpace *as, hwaddr addr, 1962 MemTxAttrs attrs, uint8_t *buf, 1963 int len) 1964 { 1965 return flatview_read(address_space_to_flatview(as), addr, attrs, buf, len); 1966 } 1967 1968 /** 1969 * address_space_read_cached: read from a cached RAM region 1970 * 1971 * @cache: Cached region to be addressed 1972 * @addr: address relative to the base of the RAM region 1973 * @buf: buffer with the data transferred 1974 * @len: length of the data transferred 1975 */ 1976 static inline void 1977 address_space_read_cached(MemoryRegionCache *cache, hwaddr addr, 1978 void *buf, int len) 1979 { 1980 assert(addr < cache->len && len <= cache->len - addr); 1981 address_space_read(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len); 1982 } 1983 1984 /** 1985 * address_space_write_cached: write to a cached RAM region 1986 * 1987 * @cache: Cached region to be addressed 1988 * @addr: address relative to the base of the RAM region 1989 * @buf: buffer with the data transferred 1990 * @len: length of the data transferred 1991 */ 1992 static inline void 1993 address_space_write_cached(MemoryRegionCache *cache, hwaddr addr, 1994 void *buf, int len) 1995 { 1996 assert(addr < cache->len && len <= cache->len - addr); 1997 address_space_write(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len); 1998 } 1999 2000 #endif 2001 2002 #endif 2003