/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/ramlist.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"
#include "hw/qdev-core.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "qemu:memory-region"
#define MEMORY_REGION(obj) \
        OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "qemu:iommu-memory-region"
#define IOMMU_MEMORY_REGION(obj) \
        OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_CLASS(klass) \
        OBJECT_CLASS_CHECK(IOMMUMemoryRegionClass, (klass), \
                           TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_GET_CLASS(obj) \
        OBJECT_GET_CLASS(IOMMUMemoryRegionClass, (obj), \
                         TYPE_IOMMU_MEMORY_REGION)

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};

/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
}

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes.
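     *
     * As an illustrative sketch (the device state and register below are
     * hypothetical, not part of this API), a minimal pair of callbacks and
     * their MemoryRegionOps might look like:
     *
     *   static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
     *   {
     *       MyDevState *s = opaque;
     *       return s->reg;
     *   }
     *
     *   static void mydev_write(void *opaque, hwaddr addr,
     *                           uint64_t data, unsigned size)
     *   {
     *       MyDevState *s = opaque;
     *       s->reg = data;
     *   }
     *
     *   static const MemoryRegionOps mydev_ops = {
     *       .read = mydev_read,
     *       .write = mydev_write,
     *       .endianness = DEVICE_NATIVE_ENDIAN,
     *   };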
     */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);
    /* Instruction execution pre-callback:
     * @addr is the address of the access relative to the @mr.
     * @size is the size of the area returned by the callback.
     * @offset is the location of the pointer inside @mr.
     *
     * Returns a pointer to a location which contains guest code.
     */
    void *(*request_ptr)(void *opaque, hwaddr addr, unsigned *size,
                         unsigned *offset);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration
     */
    const MemoryRegionMmio old_mmio;
};

enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};

/**
 * IOMMUMemoryRegionClass:
 *
 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 * and provide an implementation of at least the @translate method here
 * to handle requests to the memory region. Other methods are optional.
 *
 * The IOMMU implementation must use the IOMMU notifier infrastructure
 * to report whenever mappings are changed, by calling
 * memory_region_notify_iommu() (or, if necessary, by calling
 * memory_region_notify_one() for each registered notifier).
 */
typedef struct IOMMUMemoryRegionClass {
    /* private */
    struct DeviceClass parent_class;

    /*
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed.
     * If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     * @addr: address to be translated within the memory region
     * @flag: requested access permissions
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag);
    /* Returns minimum supported page size in bytes.
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /* Called when IOMMU Notifier flag changes (i.e. when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     * @old_flags: events which previously needed to be notified
     * @new_flags: events which now need to be notified
     */
    void (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                                IOMMUNotifierFlag old_flags,
                                IOMMUNotifierFlag new_flags);
    /* Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /* Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attr: attribute being queried
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
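     *
     * For instance (an illustrative sketch; it assumes the attribute
     * fills in an int-sized file descriptor), an IOMMU user might query
     * the sPAPR TCE fd with:
     *
     *   int fd;
     *   if (memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
     *                                    &fd) == 0) {
     *       // use fd
     *   }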
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);
} IOMMUMemoryRegionClass;

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    Object parent_obj;

    /* All fields are private - violators will be prosecuted */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool rom_device;
    bool flush_coalesced_mmio;
    bool global_locking;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_mmio_add)(MemoryListener *listener,
                               MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    void (*coalesced_mmio_del)(MemoryListener *listener,
                               MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};

/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private.
     */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;

    /* Accessed via RCU.  */
    struct FlatView *current_map;

    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(memory_listeners_as, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;
typedef struct FlatRange FlatRange;

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_rcu_read(&as->current_map);
}


/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 */
struct MemoryRegionSection {
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    Int128 size;
    hwaddr offset_within_address_space;
    bool readonly;
};

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        struct Object *owner,
                        const char *name,
                        uint64_t size);

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           struct Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);

/**
 * memory_region_init_ram_nomigrate: Initialize RAM memory region.  Accesses
 *                                   into the region will modify memory
 *                                   directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_ram_shared_nomigrate: Initialize RAM memory region.
 *                                          Accesses into the region will
 *                                          modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @share: allow remapping RAM to different addresses
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function is similar to memory_region_init_ram_nomigrate.
 * The only difference is that part of the RAM region can be remapped.
 */
void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp);

/**
 * memory_region_init_resizeable_ram: Initialize memory region with resizeable
 *                                    RAM.  Accesses into the region will
 *                                    modify memory directly.  Only an initial
 *                                    portion of this RAM is actually used.
 *                                    The used size can change across reboots.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
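 *
 * An illustrative call might look like this (a sketch only; the device
 * state, callback and sizes below are hypothetical):
 *
 *   static void mydev_ram_resized(const char *id, uint64_t length, void *host)
 *   {
 *       // react to the new usable length
 *   }
 *
 *   memory_region_init_resizeable_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                                     16 * 1024 * 1024, 64 * 1024 * 1024,
 *                                     mydev_ram_resized, &err);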
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef __linux__
/**
 * memory_region_init_ram_from_file: Initialize RAM memory region with a
 *                                   mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 * @path: the path in which to allocate the RAM.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      bool share,
                                      const char *path,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd: Initialize RAM memory region with a
 *                                 mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 * @fd: the fd to mmap.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp);
#endif

/**
 * memory_region_init_ram_ptr: Initialize RAM memory region from a
 *                             user-provided pointer.  Accesses into the
 *                             region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                struct Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram_device_ptr: Initialize RAM device memory region from
 *                                    a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly.  However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided.
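 *
 * As an illustrative sketch (the file descriptor, size and device state
 * below are hypothetical), a VFIO-style device might expose a host-mmapped
 * BAR like this:
 *
 *   void *bar = mmap(NULL, bar_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                    region_fd, 0);
 *   memory_region_init_ram_device_ptr(&s->bar_mr, OBJECT(dev),
 *                                     "mydev-bar0", bar_size, bar);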
 * The ram_device attribute replaces the former skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              struct Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);

/**
 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram_nomigrate()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_rom_device_nomigrate: Initialize a ROM memory region.
 *                                          Writes are handled via callbacks.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp);

/**
 * memory_region_init_iommu: Initialize a memory region of a custom type
 *                           that translates addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
 * @_iommu_mr should be a pointer to enough memory for an instance of
 * that subclass, @instance_size is the size of that subclass, and
 * @mrtypename is its name.  This function will initialize @_iommu_mr as an
 * instance of the subclass, and its methods will then be called to handle
 * accesses to the memory region.  See the documentation of
 * #IOMMUMemoryRegionClass for further details.
 *
 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
 * @instance_size: the IOMMUMemoryRegion subclass instance size
 * @mrtypename: the type name of the #IOMMUMemoryRegion
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_init_ram: Initialize RAM memory region.  Accesses into the
 *                         region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count (must be
 *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
 * @name: name of the memory region
 * @size: size of the region in bytes
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * This function allocates RAM for a board model or device, and
 * arranges for it to be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_rom: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().  This includes arranging for the
 * contents to be migrated.
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_rom_device: Initialize a ROM memory region.
 *                                Writes are handled via callbacks.
 *
 * This function initializes a memory region backed by RAM for reads
 * and callbacks for writes, and arranges for the RAM backing to
 * be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);


/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
struct Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device-backed RAM region.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_get_iommu: check whether a memory region is an iommu
 *
 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
 * otherwise NULL.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_iommu(mr->alias);
    }
    if (mr->is_iommu) {
        return (IOMMUMemoryRegion *) mr;
    }
    return NULL;
}

/**
 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
 *                                        if an iommu or NULL if not
 *
 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
 * otherwise NULL.  This is a fast path that avoids QOM checking; use with
 * caution.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
    IOMMUMemoryRegion *iommu_mr)
{
    return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
}

#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)

/**
 * memory_region_iommu_get_min_page_size: get minimum supported page size
 *                                        for an iommu
 *
 * Returns minimum supported page size for an iommu.
 *
 * @iommu_mr: the memory region being queried
 */
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * The notification type will be decided by entry.perm bits:
 *
 * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
 * - For MAP (newly added entry) notifies: set entry.perm to the
 *   permission of the page (which is definitely !IOMMU_NONE).
 *
 * Note: for any IOMMU implementation, an in-place mapping change
 * should be notified with an UNMAP followed by a MAP.
 *
 * @iommu_mr: the memory region that was changed
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                IOMMUTLBEntry entry);

/**
 * memory_region_notify_one: notify a change in an IOMMU translation
 *                           entry to a single notifier
 *
 * This works just like memory_region_notify_iommu(), but it only
 * notifies a specific notifier, not all of them.
 *
 * @notifier: the notifier to be notified
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry);

/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * @mr: the memory region to observe
 * @n: the IOMMUNotifier to be added; the notify callback receives a
 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
 *     ceases to be valid on exit from the notifier.
 */
void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n);

/**
 * memory_region_iommu_replay: replay existing IOMMU translations to
 * a notifier with the minimum page granularity returned by the IOMMU's
 * get_min_page_size() method.
 *
 * Note: this is not related to record-and-replay functionality.
 *
 * @iommu_mr: the memory region to observe
 * @n: the notifier to which to replay iommu mappings
 */
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);

/**
 * memory_region_iommu_replay_all: replay existing IOMMU translations
 * to all the notifiers registered.
 *
 * Note: this is not related to record-and-replay functionality.
 *
 * @iommu_mr: the memory region to observe
 */
void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @mr: the memory region which was observed and for which notify_stopped()
 *      needs to be called
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n);

/**
 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
 * defined on the IOMMU.
 *
 * Returns 0 on success, or a negative errno otherwise.  In particular,
 * -EINVAL indicates that the IOMMU does not support the requested
 * attribute.
 *
 * @iommu_mr: the memory region
 * @attr: the requested attribute
 * @data: a pointer to the requested attribute data
 */
int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes for the given client.
 *
 * @mr: the memory region being queried
 * @client: the client being queried
 */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);

/**
 * memory_region_get_dirty_log_mask: return the clients for which a
 * memory region is logging writes.
 *
 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 * are the bit indices.
 *
 * @mr: the memory region being queried
 */
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}


/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_from_host: Convert a pointer into a RAM memory region
 * and an offset within it.
 *
 * Given a host pointer inside a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 * the MemoryRegion and the offset within it.
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @ptr: the host pointer to be converted
 * @offset: the offset within the memory region
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/* memory_region_ram_resize: Resize a RAM region.
 *
 * Only legal before the guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size of the region
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
                              Error **errp);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_get_dirty: Check whether a range of bytes is dirty
 *                          for a specified client.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
 *                                         bitmap and clear it.
 *
 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
 * returns the snapshot.  The snapshot can then be used to query dirty
 * status, using memory_region_snapshot_get_dirty.  Snapshotting allows
 * querying the same page multiple times, which is especially useful for
 * display updates where the scanlines often are not page aligned.
 *
 * The dirty bitmap region which gets copied into the snapshot (and
 * cleared afterwards) can be larger than requested.  The boundaries
 * are rounded up/down so complete bitmap longs (covering 64 pages on
 * 64bit hosts) can be copied over into the bitmap snapshot.
 * This is not a problem for display updates as the extra pages are outside
 * the visible area, and in case the visible area changes a full
 * display redraw is due anyway.  Should other use cases for this
 * function emerge we might have to revisit this implementation
 * detail.
 *
 * Use g_free to release DirtyBitmapSnapshot.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client);

/**
 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
 *                                   in the specified dirty bitmap snapshot.
 *
 * @mr: the memory region being queried.
 * @snap: the dirty bitmap snapshot
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 */
bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
                                      DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing.  MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed.  This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced.  Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves.  For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_global_locking: Declares that access processing does
 *                                     not depend on the QEMU global lock.
 *
 * By clearing this property, accesses to the memory region will be processed
 * outside of QEMU's global lock (unless the lock is held when issuing the
 * access request).  In this case, the device model implementing the access
 * handlers is responsible for synchronizing concurrent accesses.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_global_locking(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event.  The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 **/
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset.  The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset.  The subregion may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority.  Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);

/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

uint64_t memory_region_get_alignment(const MemoryRegion *mr);
/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region.  A disabled memory region
 * ignores all accesses to itself and its subregions.  It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
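 *
 * As an illustrative sketch (the container, window and device names are
 * hypothetical), a device might build its layout with the subregion calls
 * above and later hide one window:
 *
 *   memory_region_init(&s->container, OBJECT(dev), "mydev", 0x1000);
 *   memory_region_add_subregion(&s->container, 0x0, &s->regs_mr);
 *   ...
 *   memory_region_set_enabled(&s->regs_mr, false);   // temporarily unmap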
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to container region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

/*
 * memory_region_set_size: dynamically update the size of a region.
 *
 * Dynamically updates the size of a region.
 *
 * @mr: the region to be updated
 * @size: used size of the region.
 */
void memory_region_set_size(MemoryRegion *mr, uint64_t size);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() had changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);

/**
 * memory_region_present: checks if an address relative to a @container
 * translates into #MemoryRegion within @container
 *
 * Answer whether a #MemoryRegion within @container covers the address
 * @addr.
 *
 * @container: a #MemoryRegion within which @addr is a relative address
 * @addr: the area within @container to be searched
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr);

/**
 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
 * into any address space.
 *
 * @mr: a #MemoryRegion which should be checked if it's mapped
 */
bool memory_region_is_mapped(MemoryRegion *mr);

/**
 * memory_region_find: translate an address/size relative to a
 * MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 * .@size = 0 iff no overlap was found
 * .@mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one.
However, in the special case where the @mr argument 1546 * has no container (and thus is the root of the address space), the 1547 * following will hold: 1548 * .@offset_within_address_space >= @addr 1549 * .@offset_within_address_space + .@size <= @addr + @size 1550 * 1551 * @mr: a MemoryRegion within which @addr is a relative address 1552 * @addr: start of the area within @mr to be searched 1553 * @size: size of the area to be searched 1554 */ 1555 MemoryRegionSection memory_region_find(MemoryRegion *mr, 1556 hwaddr addr, uint64_t size); 1557 1558 /** 1559 * memory_global_dirty_log_sync: synchronize the dirty log for all memory 1560 * 1561 * Synchronizes the dirty page log for all address spaces. 1562 */ 1563 void memory_global_dirty_log_sync(void); 1564 1565 /** 1566 * memory_region_transaction_begin: Start a transaction. 1567 * 1568 * During a transaction, changes will be accumulated and made visible 1569 * only when the transaction ends (is committed). 1570 */ 1571 void memory_region_transaction_begin(void); 1572 1573 /** 1574 * memory_region_transaction_commit: Commit a transaction and make changes 1575 * visible to the guest. 1576 */ 1577 void memory_region_transaction_commit(void); 1578 1579 /** 1580 * memory_listener_register: register callbacks to be called when memory 1581 * sections are mapped or unmapped into an address 1582 * space 1583 * 1584 * @listener: an object containing the callbacks to be called 1585 * @filter: if non-%NULL, only regions in this address space will be observed 1586 */ 1587 void memory_listener_register(MemoryListener *listener, AddressSpace *filter); 1588 1589 /** 1590 * memory_listener_unregister: undo the effect of memory_listener_register() 1591 * 1592 * @listener: an object containing the callbacks to be removed 1593 */ 1594 void memory_listener_unregister(MemoryListener *listener); 1595 1596 /** 1597 * memory_global_dirty_log_start: begin dirty logging for all regions 1598 */ 1599 void memory_global_dirty_log_start(void); 1600 1601 /** 1602 * memory_global_dirty_log_stop: end dirty logging for all regions 1603 */ 1604 void memory_global_dirty_log_stop(void); 1605 1606 void mtree_info(fprintf_function mon_printf, void *f, bool flatview, 1607 bool dispatch_tree); 1608 1609 /** 1610 * memory_region_request_mmio_ptr: request a pointer to an mmio 1611 * MemoryRegion. If it is possible, map a RAM MemoryRegion with this pointer. 1612 * When the device wants to invalidate the pointer it will call 1613 * memory_region_invalidate_mmio_ptr. 1614 * 1615 * @mr: #MemoryRegion to check 1616 * @addr: address within that region 1617 * 1618 * Returns true on success, false otherwise. 1619 */ 1620 bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr); 1621 1622 /** 1623 * memory_region_invalidate_mmio_ptr: invalidate the pointer to an mmio 1624 * previously requested. 1625 * This means that anything which wants to execute from this area will 1626 * need to request the pointer again. 1627 * 1628 * @mr: #MemoryRegion associated with the pointer. 1629 * @offset: offset within the memory region 1630 * @size: size of that area. 1631 */ 1632 void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset, 1633 unsigned size); 1634 1635 /** 1636 * memory_region_dispatch_read: perform a read directly from the specified 1637 * MemoryRegion.
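 *
 * A hedged call sketch (illustrative only; "mmio_mr" is a hypothetical MMIO
 * region and the access is a 4-byte read at offset 0):
 *
 *     uint64_t val;
 *     MemTxResult r = memory_region_dispatch_read(mmio_mr, 0, &val, 4,
 *                                                 MEMTXATTRS_UNSPECIFIED);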
1638 * 1639 * @mr: #MemoryRegion to access 1640 * @addr: address within that region 1641 * @pval: pointer to uint64_t which the data is written to 1642 * @size: size of the access in bytes 1643 * @attrs: memory transaction attributes to use for the access 1644 */ 1645 MemTxResult memory_region_dispatch_read(MemoryRegion *mr, 1646 hwaddr addr, 1647 uint64_t *pval, 1648 unsigned size, 1649 MemTxAttrs attrs); 1650 /** 1651 * memory_region_dispatch_write: perform a write directly to the specified 1652 * MemoryRegion. 1653 * 1654 * @mr: #MemoryRegion to access 1655 * @addr: address within that region 1656 * @data: data to write 1657 * @size: size of the access in bytes 1658 * @attrs: memory transaction attributes to use for the access 1659 */ 1660 MemTxResult memory_region_dispatch_write(MemoryRegion *mr, 1661 hwaddr addr, 1662 uint64_t data, 1663 unsigned size, 1664 MemTxAttrs attrs); 1665 1666 /** 1667 * address_space_init: initializes an address space 1668 * 1669 * @as: an uninitialized #AddressSpace 1670 * @root: a #MemoryRegion that routes addresses for the address space 1671 * @name: an address space name. The name is only used for debugging 1672 * output. 1673 */ 1674 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name); 1675 1676 /** 1677 * address_space_destroy: destroy an address space 1678 * 1679 * Releases all resources associated with an address space. After an address space 1680 * is destroyed, its root memory region (given by address_space_init()) may be destroyed 1681 * as well. 1682 * 1683 * @as: address space to be destroyed 1684 */ 1685 void address_space_destroy(AddressSpace *as); 1686 1687 /** 1688 * address_space_rw: read from or write to an address space. 1689 * 1690 * Return a MemTxResult indicating whether the operation succeeded 1691 * or failed (eg unassigned memory, device rejected the transaction, 1692 * IOMMU fault). 1693 * 1694 * @as: #AddressSpace to be accessed 1695 * @addr: address within that address space 1696 * @attrs: memory transaction attributes 1697 * @buf: buffer with the data transferred 1698 * @len: the number of bytes to read or write 1699 * @is_write: indicates the transfer direction 1700 */ 1701 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, 1702 MemTxAttrs attrs, uint8_t *buf, 1703 int len, bool is_write); 1704 1705 /** 1706 * address_space_write: write to address space. 1707 * 1708 * Return a MemTxResult indicating whether the operation succeeded 1709 * or failed (eg unassigned memory, device rejected the transaction, 1710 * IOMMU fault). 1711 * 1712 * @as: #AddressSpace to be accessed 1713 * @addr: address within that address space 1714 * @attrs: memory transaction attributes 1715 * @buf: buffer with the data transferred 1716 * @len: the number of bytes to write 1717 */ 1718 MemTxResult address_space_write(AddressSpace *as, hwaddr addr, 1719 MemTxAttrs attrs, 1720 const uint8_t *buf, int len); 1721 1722 /* address_space_ld*: load from an address space 1723 * address_space_st*: store to an address space 1724 * 1725 * These functions perform a load or store of the byte, word, 1726 * longword or quad to the specified address within the AddressSpace. 1727 * The _le suffixed functions treat the data as little endian; 1728 * _be indicates big endian; no suffix indicates "same endianness 1729 * as guest CPU". 1730 * 1731 * The "guest CPU endianness" accessors are deprecated for use outside 1732 * target-* code; devices should be CPU-agnostic and use either the LE 1733 * or the BE accessors. 
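 *
 * As an illustrative sketch (the accessor names are generated by the
 * includes below; "as" and "addr" stand for any valid address space and
 * address within it), a little-endian 32-bit load looks like:
 *
 *     MemTxResult res;
 *     uint32_t v = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, &res);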
1734 * 1735 * @as: #AddressSpace to be accessed 1736 * @addr: address within that address space 1737 * @val: data value, for stores 1738 * @attrs: memory transaction attributes 1739 * @result: location to write the success/failure of the transaction; 1740 * if NULL, this information is discarded 1741 */ 1742 1743 #define SUFFIX 1744 #define ARG1 as 1745 #define ARG1_DECL AddressSpace *as 1746 #include "exec/memory_ldst.inc.h" 1747 1748 #define SUFFIX 1749 #define ARG1 as 1750 #define ARG1_DECL AddressSpace *as 1751 #include "exec/memory_ldst_phys.inc.h" 1752 1753 struct MemoryRegionCache { 1754 void *ptr; 1755 hwaddr xlat; 1756 hwaddr len; 1757 FlatView *fv; 1758 MemoryRegionSection mrs; 1759 bool is_write; 1760 }; 1761 1762 #define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL }) 1763 1764 1765 /* address_space_ld*_cached: load from a cached #MemoryRegion 1766 * address_space_st*_cached: store into a cached #MemoryRegion 1767 * 1768 * These functions perform a load or store of the byte, word, 1769 * longword or quad to the specified address. The address is 1770 * a physical address in the AddressSpace, but it must lie within 1771 * a #MemoryRegion that was mapped with address_space_cache_init. 1772 * 1773 * The _le suffixed functions treat the data as little endian; 1774 * _be indicates big endian; no suffix indicates "same endianness 1775 * as guest CPU". 1776 * 1777 * The "guest CPU endianness" accessors are deprecated for use outside 1778 * target-* code; devices should be CPU-agnostic and use either the LE 1779 * or the BE accessors. 1780 * 1781 * @cache: previously initialized #MemoryRegionCache to be accessed 1782 * @addr: address within the address space 1783 * @val: data value, for stores 1784 * @attrs: memory transaction attributes 1785 * @result: location to write the success/failure of the transaction; 1786 * if NULL, this information is discarded 1787 */ 1788 1789 #define SUFFIX _cached_slow 1790 #define ARG1 cache 1791 #define ARG1_DECL MemoryRegionCache *cache 1792 #include "exec/memory_ldst.inc.h" 1793 1794 /* Inline fast path for direct RAM access.
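 *
 * The accessors below check @cache->ptr: when the cache maps RAM directly,
 * the access is a plain host load/store; otherwise it falls back to the
 * _cached_slow variants generated above.
 *
 * A hedged usage sketch (assuming "cache" was successfully initialized
 * over guest RAM with address_space_cache_init(), declared further down):
 *
 *     uint8_t b = address_space_ldub_cached(&cache, 0,
 *                                           MEMTXATTRS_UNSPECIFIED, NULL);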
*/ 1795 static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache, 1796 hwaddr addr, MemTxAttrs attrs, MemTxResult *result) 1797 { 1798 assert(addr < cache->len); 1799 if (likely(cache->ptr)) { 1800 return ldub_p(cache->ptr + addr); 1801 } else { 1802 return address_space_ldub_cached_slow(cache, addr, attrs, result); 1803 } 1804 } 1805 1806 static inline void address_space_stb_cached(MemoryRegionCache *cache, 1807 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result) 1808 { 1809 assert(addr < cache->len); 1810 if (likely(cache->ptr)) { 1811 stb_p(cache->ptr + addr, val); 1812 } else { 1813 address_space_stb_cached_slow(cache, addr, val, attrs, result); 1814 } 1815 } 1816 1817 #define ENDIANNESS _le 1818 #include "exec/memory_ldst_cached.inc.h" 1819 1820 #define ENDIANNESS _be 1821 #include "exec/memory_ldst_cached.inc.h" 1822 1823 #define SUFFIX _cached 1824 #define ARG1 cache 1825 #define ARG1_DECL MemoryRegionCache *cache 1826 #include "exec/memory_ldst_phys.inc.h" 1827 1828 /* address_space_cache_init: prepare for repeated access to a physical 1829 * memory region 1830 * 1831 * @cache: #MemoryRegionCache to be filled 1832 * @as: #AddressSpace to be accessed 1833 * @addr: address within that address space 1834 * @len: length of buffer 1835 * @is_write: indicates the transfer direction 1836 * 1837 * Will only work with RAM, and may map a subset of the requested range by 1838 * returning a value that is less than @len. On failure, returns a negative 1839 * errno value. 1840 * 1841 * Because it only works with RAM, this function can be used for 1842 * read-modify-write operations. In this case, is_write should be %true. 1843 * 1844 * Note that addresses passed to the address_space_*_cached functions 1845 * are relative to @addr. 1846 */ 1847 int64_t address_space_cache_init(MemoryRegionCache *cache, 1848 AddressSpace *as, 1849 hwaddr addr, 1850 hwaddr len, 1851 bool is_write); 1852 1853 /** 1854 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache 1855 * 1856 * @cache: The #MemoryRegionCache to operate on. 1857 * @addr: The first physical address that was written, relative to the 1858 * address that was passed to @address_space_cache_init. 1859 * @access_len: The number of bytes that were written starting at @addr. 1860 */ 1861 void address_space_cache_invalidate(MemoryRegionCache *cache, 1862 hwaddr addr, 1863 hwaddr access_len); 1864 1865 /** 1866 * address_space_cache_destroy: free a #MemoryRegionCache 1867 * 1868 * @cache: The #MemoryRegionCache whose memory should be released. 1869 */ 1870 void address_space_cache_destroy(MemoryRegionCache *cache); 1871 1872 /* address_space_get_iotlb_entry: translate an address into an IOTLB 1873 * entry. Should be called from an RCU critical section. 1874 */ 1875 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr, 1876 bool is_write, MemTxAttrs attrs); 1877 1878 /* address_space_translate: translate an address range in an address space 1879 * into a MemoryRegion and an address range within that region. Should be 1880 * called from an RCU critical section, so that the last reference 1881 * to the returned region does not disappear after address_space_translate returns. 1882 * 1883 * @fv: #FlatView to be accessed 1884 * @addr: address within that address space 1885 * @xlat: pointer to address within the returned memory region section's 1886 * #MemoryRegion.
1887 * @len: pointer to length 1888 * @is_write: indicates the transfer direction 1889 * @attrs: memory attributes 1890 */ 1891 MemoryRegion *flatview_translate(FlatView *fv, 1892 hwaddr addr, hwaddr *xlat, 1893 hwaddr *len, bool is_write, 1894 MemTxAttrs attrs); 1895 1896 static inline MemoryRegion *address_space_translate(AddressSpace *as, 1897 hwaddr addr, hwaddr *xlat, 1898 hwaddr *len, bool is_write, 1899 MemTxAttrs attrs) 1900 { 1901 return flatview_translate(address_space_to_flatview(as), 1902 addr, xlat, len, is_write, attrs); 1903 } 1904 1905 /* address_space_access_valid: check for validity of accessing an address 1906 * space range 1907 * 1908 * Check whether memory is assigned to the given address space range, and 1909 * access is permitted by any IOMMU regions that are active for the address 1910 * space. 1911 * 1912 * For now, addr and len should be aligned to a page size. This limitation 1913 * will be lifted in the future. 1914 * 1915 * @as: #AddressSpace to be accessed 1916 * @addr: address within that address space 1917 * @len: length of the area to be checked 1918 * @is_write: indicates the transfer direction 1919 * @attrs: memory attributes 1920 */ 1921 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, 1922 bool is_write, MemTxAttrs attrs); 1923 1924 /* address_space_map: map a physical memory region into a host virtual address 1925 * 1926 * May map a subset of the requested range, given by and returned in @plen. 1927 * May return %NULL if resources needed to perform the mapping are exhausted. 1928 * Use only for reads OR writes - not for read-modify-write operations. 1929 * Use cpu_register_map_client() to know when retrying the map operation is 1930 * likely to succeed. 1931 * 1932 * @as: #AddressSpace to be accessed 1933 * @addr: address within that address space 1934 * @plen: pointer to length of buffer; updated on return 1935 * @is_write: indicates the transfer direction 1936 * @attrs: memory attributes 1937 */ 1938 void *address_space_map(AddressSpace *as, hwaddr addr, 1939 hwaddr *plen, bool is_write, MemTxAttrs attrs); 1940 1941 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map() 1942 * 1943 * Will also mark the memory as dirty if @is_write == %true. @access_len gives 1944 * the amount of memory that was actually read or written by the caller. 1945 * 1946 * @as: #AddressSpace used 1947 * @buffer: host pointer as returned by address_space_map() 1948 * @len: buffer length as returned by address_space_map() 1949 * @access_len: amount of data actually transferred 1950 * @is_write: indicates the transfer direction 1951 */ 1952 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, 1953 int is_write, hwaddr access_len); 1954 1955 1956 /* Internal functions, part of the implementation of address_space_read. */ 1957 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, 1958 MemTxAttrs attrs, uint8_t *buf, int len); 1959 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, 1960 MemTxAttrs attrs, uint8_t *buf, 1961 int len, hwaddr addr1, hwaddr l, 1962 MemoryRegion *mr); 1963 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr); 1964 1965 /* Internal functions, part of the implementation of address_space_read_cached 1966 * and address_space_write_cached. 
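 *
 * They form the out-of-line slow path taken by the inline wrappers below
 * when the cache does not map RAM directly (i.e. cache->ptr is NULL).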
*/ 1967 void address_space_read_cached_slow(MemoryRegionCache *cache, 1968 hwaddr addr, void *buf, int len); 1969 void address_space_write_cached_slow(MemoryRegionCache *cache, 1970 hwaddr addr, const void *buf, int len); 1971 1972 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write) 1973 { 1974 if (is_write) { 1975 return memory_region_is_ram(mr) && 1976 !mr->readonly && !memory_region_is_ram_device(mr); 1977 } else { 1978 return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) || 1979 memory_region_is_romd(mr); 1980 } 1981 } 1982 1983 /** 1984 * address_space_read: read from an address space. 1985 * 1986 * Return a MemTxResult indicating whether the operation succeeded 1987 * or failed (eg unassigned memory, device rejected the transaction, 1988 * IOMMU fault). Called within RCU critical section. 1989 * 1990 * @as: #AddressSpace to be accessed 1991 * @addr: address within that address space 1992 * @attrs: memory transaction attributes 1993 * @buf: buffer with the data transferred 1994 */ 1995 static inline __attribute__((__always_inline__)) 1996 MemTxResult address_space_read(AddressSpace *as, hwaddr addr, 1997 MemTxAttrs attrs, uint8_t *buf, 1998 int len) 1999 { 2000 MemTxResult result = MEMTX_OK; 2001 hwaddr l, addr1; 2002 void *ptr; 2003 MemoryRegion *mr; 2004 FlatView *fv; 2005 2006 if (__builtin_constant_p(len)) { 2007 if (len) { 2008 rcu_read_lock(); 2009 fv = address_space_to_flatview(as); 2010 l = len; 2011 mr = flatview_translate(fv, addr, &addr1, &l, false, attrs); 2012 if (len == l && memory_access_is_direct(mr, false)) { 2013 ptr = qemu_map_ram_ptr(mr->ram_block, addr1); 2014 memcpy(buf, ptr, len); 2015 } else { 2016 result = flatview_read_continue(fv, addr, attrs, buf, len, 2017 addr1, l, mr); 2018 } 2019 rcu_read_unlock(); 2020 } 2021 } else { 2022 result = address_space_read_full(as, addr, attrs, buf, len); 2023 } 2024 return result; 2025 } 2026 2027 /** 2028 * address_space_read_cached: read from a cached RAM region 2029 * 2030 * @cache: Cached region to be addressed 2031 * @addr: address relative to the base of the RAM region 2032 * @buf: buffer with the data transferred 2033 * @len: length of the data transferred 2034 */ 2035 static inline void 2036 address_space_read_cached(MemoryRegionCache *cache, hwaddr addr, 2037 void *buf, int len) 2038 { 2039 assert(addr < cache->len && len <= cache->len - addr); 2040 if (likely(cache->ptr)) { 2041 memcpy(buf, cache->ptr + addr, len); 2042 } else { 2043 address_space_read_cached_slow(cache, addr, buf, len); 2044 } 2045 } 2046 2047 /** 2048 * address_space_write_cached: write to a cached RAM region 2049 * 2050 * @cache: Cached region to be addressed 2051 * @addr: address relative to the base of the RAM region 2052 * @buf: buffer with the data transferred 2053 * @len: length of the data transferred 2054 */ 2055 static inline void 2056 address_space_write_cached(MemoryRegionCache *cache, hwaddr addr, 2057 void *buf, int len) 2058 { 2059 assert(addr < cache->len && len <= cache->len - addr); 2060 if (likely(cache->ptr)) { 2061 memcpy(cache->ptr + addr, buf, len); 2062 } else { 2063 address_space_write_cached_slow(cache, addr, buf, len); 2064 } 2065 } 2066 2067 #endif 2068 2069 #endif 2070