1 /* 2 * Physical memory management API 3 * 4 * Copyright 2011 Red Hat, Inc. and/or its affiliates 5 * 6 * Authors: 7 * Avi Kivity <avi@redhat.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 */ 13 14 #ifndef MEMORY_H 15 #define MEMORY_H 16 17 #ifndef CONFIG_USER_ONLY 18 19 #include "exec/cpu-common.h" 20 #include "exec/hwaddr.h" 21 #include "exec/memattrs.h" 22 #include "exec/ramlist.h" 23 #include "qemu/bswap.h" 24 #include "qemu/queue.h" 25 #include "qemu/int128.h" 26 #include "qemu/notify.h" 27 #include "qom/object.h" 28 #include "qemu/rcu.h" 29 30 #define RAM_ADDR_INVALID (~(ram_addr_t)0) 31 32 #define MAX_PHYS_ADDR_SPACE_BITS 62 33 #define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1) 34 35 #define TYPE_MEMORY_REGION "qemu:memory-region" 36 #define MEMORY_REGION(obj) \ 37 OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION) 38 39 #define TYPE_IOMMU_MEMORY_REGION "qemu:iommu-memory-region" 40 #define IOMMU_MEMORY_REGION(obj) \ 41 OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_IOMMU_MEMORY_REGION) 42 #define IOMMU_MEMORY_REGION_CLASS(klass) \ 43 OBJECT_CLASS_CHECK(IOMMUMemoryRegionClass, (klass), \ 44 TYPE_IOMMU_MEMORY_REGION) 45 #define IOMMU_MEMORY_REGION_GET_CLASS(obj) \ 46 OBJECT_GET_CLASS(IOMMUMemoryRegionClass, (obj), \ 47 TYPE_IOMMU_MEMORY_REGION) 48 49 extern bool global_dirty_log; 50 51 typedef struct MemoryRegionOps MemoryRegionOps; 52 typedef struct MemoryRegionMmio MemoryRegionMmio; 53 54 struct MemoryRegionMmio { 55 CPUReadMemoryFunc *read[3]; 56 CPUWriteMemoryFunc *write[3]; 57 }; 58 59 typedef struct IOMMUTLBEntry IOMMUTLBEntry; 60 61 /* See address_space_translate: bit 0 is read, bit 1 is write. */ 62 typedef enum { 63 IOMMU_NONE = 0, 64 IOMMU_RO = 1, 65 IOMMU_WO = 2, 66 IOMMU_RW = 3, 67 } IOMMUAccessFlags; 68 69 #define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0)) 70 71 struct IOMMUTLBEntry { 72 AddressSpace *target_as; 73 hwaddr iova; 74 hwaddr translated_addr; 75 hwaddr addr_mask; /* 0xfff = 4k translation */ 76 IOMMUAccessFlags perm; 77 }; 78 79 /* 80 * Bitmap for different IOMMUNotifier capabilities. Each notifier can 81 * register with one or multiple IOMMU Notifier capability bit(s). 82 */ 83 typedef enum { 84 IOMMU_NOTIFIER_NONE = 0, 85 /* Notify cache invalidations */ 86 IOMMU_NOTIFIER_UNMAP = 0x1, 87 /* Notify entry changes (newly created entries) */ 88 IOMMU_NOTIFIER_MAP = 0x2, 89 } IOMMUNotifierFlag; 90 91 #define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP) 92 93 struct IOMMUNotifier; 94 typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier, 95 IOMMUTLBEntry *data); 96 97 struct IOMMUNotifier { 98 IOMMUNotify notify; 99 IOMMUNotifierFlag notifier_flags; 100 /* Notify for address space range start <= addr <= end */ 101 hwaddr start; 102 hwaddr end; 103 int iommu_idx; 104 QLIST_ENTRY(IOMMUNotifier) node; 105 }; 106 typedef struct IOMMUNotifier IOMMUNotifier; 107 108 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */ 109 #define RAM_PREALLOC (1 << 0) 110 111 /* RAM is mmap-ed with MAP_SHARED */ 112 #define RAM_SHARED (1 << 1) 113 114 /* Only a portion of RAM (used_length) is actually used, and migrated. 115 * This used_length size can change across reboots. 116 */ 117 #define RAM_RESIZEABLE (1 << 2) 118 119 /* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically 120 * zero the page and wake waiting processes. 121 * (Set during postcopy) 122 */ 123 #define RAM_UF_ZEROPAGE (1 << 3) 124 125 /* RAM can be migrated */ 126 #define RAM_MIGRATABLE (1 << 4) 127 128 /* RAM is a persistent kind memory */ 129 #define RAM_PMEM (1 << 5) 130 131 static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn, 132 IOMMUNotifierFlag flags, 133 hwaddr start, hwaddr end, 134 int iommu_idx) 135 { 136 n->notify = fn; 137 n->notifier_flags = flags; 138 n->start = start; 139 n->end = end; 140 n->iommu_idx = iommu_idx; 141 } 142 143 /* 144 * Memory region callbacks 145 */ 146 struct MemoryRegionOps { 147 /* Read from the memory region. @addr is relative to @mr; @size is 148 * in bytes. */ 149 uint64_t (*read)(void *opaque, 150 hwaddr addr, 151 unsigned size); 152 /* Write to the memory region. @addr is relative to @mr; @size is 153 * in bytes. */ 154 void (*write)(void *opaque, 155 hwaddr addr, 156 uint64_t data, 157 unsigned size); 158 159 MemTxResult (*read_with_attrs)(void *opaque, 160 hwaddr addr, 161 uint64_t *data, 162 unsigned size, 163 MemTxAttrs attrs); 164 MemTxResult (*write_with_attrs)(void *opaque, 165 hwaddr addr, 166 uint64_t data, 167 unsigned size, 168 MemTxAttrs attrs); 169 170 enum device_endian endianness; 171 /* Guest-visible constraints: */ 172 struct { 173 /* If nonzero, specify bounds on access sizes beyond which a machine 174 * check is thrown. 175 */ 176 unsigned min_access_size; 177 unsigned max_access_size; 178 /* If true, unaligned accesses are supported. Otherwise unaligned 179 * accesses throw machine checks. 180 */ 181 bool unaligned; 182 /* 183 * If present, and returns #false, the transaction is not accepted 184 * by the device (and results in machine dependent behaviour such 185 * as a machine check exception). 186 */ 187 bool (*accepts)(void *opaque, hwaddr addr, 188 unsigned size, bool is_write, 189 MemTxAttrs attrs); 190 } valid; 191 /* Internal implementation constraints: */ 192 struct { 193 /* If nonzero, specifies the minimum size implemented. Smaller sizes 194 * will be rounded upwards and a partial result will be returned. 195 */ 196 unsigned min_access_size; 197 /* If nonzero, specifies the maximum size implemented. Larger sizes 198 * will be done as a series of accesses with smaller sizes. 199 */ 200 unsigned max_access_size; 201 /* If true, unaligned accesses are supported. Otherwise all accesses 202 * are converted to (possibly multiple) naturally aligned accesses. 203 */ 204 bool unaligned; 205 } impl; 206 }; 207 208 typedef struct MemoryRegionClass { 209 /* private */ 210 ObjectClass parent_class; 211 } MemoryRegionClass; 212 213 214 enum IOMMUMemoryRegionAttr { 215 IOMMU_ATTR_SPAPR_TCE_FD 216 }; 217 218 /** 219 * IOMMUMemoryRegionClass: 220 * 221 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION 222 * and provide an implementation of at least the @translate method here 223 * to handle requests to the memory region. Other methods are optional. 224 * 225 * The IOMMU implementation must use the IOMMU notifier infrastructure 226 * to report whenever mappings are changed, by calling 227 * memory_region_notify_iommu() (or, if necessary, by calling 228 * memory_region_notify_one() for each registered notifier). 229 * 230 * Conceptually an IOMMU provides a mapping from input address 231 * to an output TLB entry. If the IOMMU is aware of memory transaction 232 * attributes and the output TLB entry depends on the transaction 233 * attributes, we represent this using IOMMU indexes. Each index 234 * selects a particular translation table that the IOMMU has: 235 * @attrs_to_index returns the IOMMU index for a set of transaction attributes 236 * @translate takes an input address and an IOMMU index 237 * and the mapping returned can only depend on the input address and the 238 * IOMMU index. 239 * 240 * Most IOMMUs don't care about the transaction attributes and support 241 * only a single IOMMU index. A more complex IOMMU might have one index 242 * for secure transactions and one for non-secure transactions. 243 */ 244 typedef struct IOMMUMemoryRegionClass { 245 /* private */ 246 MemoryRegionClass parent_class; 247 248 /* 249 * Return a TLB entry that contains a given address. 250 * 251 * The IOMMUAccessFlags indicated via @flag are optional and may 252 * be specified as IOMMU_NONE to indicate that the caller needs 253 * the full translation information for both reads and writes. If 254 * the access flags are specified then the IOMMU implementation 255 * may use this as an optimization, to stop doing a page table 256 * walk as soon as it knows that the requested permissions are not 257 * allowed. If IOMMU_NONE is passed then the IOMMU must do the 258 * full page table walk and report the permissions in the returned 259 * IOMMUTLBEntry. (Note that this implies that an IOMMU may not 260 * return different mappings for reads and writes.) 261 * 262 * The returned information remains valid while the caller is 263 * holding the big QEMU lock or is inside an RCU critical section; 264 * if the caller wishes to cache the mapping beyond that it must 265 * register an IOMMU notifier so it can invalidate its cached 266 * information when the IOMMU mapping changes. 267 * 268 * @iommu: the IOMMUMemoryRegion 269 * @hwaddr: address to be translated within the memory region 270 * @flag: requested access permissions 271 * @iommu_idx: IOMMU index for the translation 272 */ 273 IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr, 274 IOMMUAccessFlags flag, int iommu_idx); 275 /* Returns minimum supported page size in bytes. 276 * If this method is not provided then the minimum is assumed to 277 * be TARGET_PAGE_SIZE. 278 * 279 * @iommu: the IOMMUMemoryRegion 280 */ 281 uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu); 282 /* Called when IOMMU Notifier flag changes (ie when the set of 283 * events which IOMMU users are requesting notification for changes). 284 * Optional method -- need not be provided if the IOMMU does not 285 * need to know exactly which events must be notified. 286 * 287 * @iommu: the IOMMUMemoryRegion 288 * @old_flags: events which previously needed to be notified 289 * @new_flags: events which now need to be notified 290 */ 291 void (*notify_flag_changed)(IOMMUMemoryRegion *iommu, 292 IOMMUNotifierFlag old_flags, 293 IOMMUNotifierFlag new_flags); 294 /* Called to handle memory_region_iommu_replay(). 295 * 296 * The default implementation of memory_region_iommu_replay() is to 297 * call the IOMMU translate method for every page in the address space 298 * with flag == IOMMU_NONE and then call the notifier if translate 299 * returns a valid mapping. If this method is implemented then it 300 * overrides the default behaviour, and must provide the full semantics 301 * of memory_region_iommu_replay(), by calling @notifier for every 302 * translation present in the IOMMU. 303 * 304 * Optional method -- an IOMMU only needs to provide this method 305 * if the default is inefficient or produces undesirable side effects. 306 * 307 * Note: this is not related to record-and-replay functionality. 308 */ 309 void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier); 310 311 /* Get IOMMU misc attributes. This is an optional method that 312 * can be used to allow users of the IOMMU to get implementation-specific 313 * information. The IOMMU implements this method to handle calls 314 * by IOMMU users to memory_region_iommu_get_attr() by filling in 315 * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that 316 * the IOMMU supports. If the method is unimplemented then 317 * memory_region_iommu_get_attr() will always return -EINVAL. 318 * 319 * @iommu: the IOMMUMemoryRegion 320 * @attr: attribute being queried 321 * @data: memory to fill in with the attribute data 322 * 323 * Returns 0 on success, or a negative errno; in particular 324 * returns -EINVAL for unrecognized or unimplemented attribute types. 325 */ 326 int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr, 327 void *data); 328 329 /* Return the IOMMU index to use for a given set of transaction attributes. 330 * 331 * Optional method: if an IOMMU only supports a single IOMMU index then 332 * the default implementation of memory_region_iommu_attrs_to_index() 333 * will return 0. 334 * 335 * The indexes supported by an IOMMU must be contiguous, starting at 0. 336 * 337 * @iommu: the IOMMUMemoryRegion 338 * @attrs: memory transaction attributes 339 */ 340 int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs); 341 342 /* Return the number of IOMMU indexes this IOMMU supports. 343 * 344 * Optional method: if this method is not provided, then 345 * memory_region_iommu_num_indexes() will return 1, indicating that 346 * only a single IOMMU index is supported. 347 * 348 * @iommu: the IOMMUMemoryRegion 349 */ 350 int (*num_indexes)(IOMMUMemoryRegion *iommu); 351 } IOMMUMemoryRegionClass; 352 353 typedef struct CoalescedMemoryRange CoalescedMemoryRange; 354 typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd; 355 356 struct MemoryRegion { 357 Object parent_obj; 358 359 /* All fields are private - violators will be prosecuted */ 360 361 /* The following fields should fit in a cache line */ 362 bool romd_mode; 363 bool ram; 364 bool subpage; 365 bool readonly; /* For RAM regions */ 366 bool nonvolatile; 367 bool rom_device; 368 bool flush_coalesced_mmio; 369 bool global_locking; 370 uint8_t dirty_log_mask; 371 bool is_iommu; 372 RAMBlock *ram_block; 373 Object *owner; 374 375 const MemoryRegionOps *ops; 376 void *opaque; 377 MemoryRegion *container; 378 Int128 size; 379 hwaddr addr; 380 void (*destructor)(MemoryRegion *mr); 381 uint64_t align; 382 bool terminates; 383 bool ram_device; 384 bool enabled; 385 bool warning_printed; /* For reservations */ 386 uint8_t vga_logging_count; 387 MemoryRegion *alias; 388 hwaddr alias_offset; 389 int32_t priority; 390 QTAILQ_HEAD(, MemoryRegion) subregions; 391 QTAILQ_ENTRY(MemoryRegion) subregions_link; 392 QTAILQ_HEAD(, CoalescedMemoryRange) coalesced; 393 const char *name; 394 unsigned ioeventfd_nb; 395 MemoryRegionIoeventfd *ioeventfds; 396 }; 397 398 struct IOMMUMemoryRegion { 399 MemoryRegion parent_obj; 400 401 QLIST_HEAD(, IOMMUNotifier) iommu_notify; 402 IOMMUNotifierFlag iommu_notify_flags; 403 }; 404 405 #define IOMMU_NOTIFIER_FOREACH(n, mr) \ 406 QLIST_FOREACH((n), &(mr)->iommu_notify, node) 407 408 /** 409 * MemoryListener: callbacks structure for updates to the physical memory map 410 * 411 * Allows a component to adjust to changes in the guest-visible memory map. 412 * Use with memory_listener_register() and memory_listener_unregister(). 413 */ 414 struct MemoryListener { 415 void (*begin)(MemoryListener *listener); 416 void (*commit)(MemoryListener *listener); 417 void (*region_add)(MemoryListener *listener, MemoryRegionSection *section); 418 void (*region_del)(MemoryListener *listener, MemoryRegionSection *section); 419 void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section); 420 void (*log_start)(MemoryListener *listener, MemoryRegionSection *section, 421 int old, int new); 422 void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section, 423 int old, int new); 424 void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section); 425 void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section); 426 void (*log_global_start)(MemoryListener *listener); 427 void (*log_global_stop)(MemoryListener *listener); 428 void (*log_global_after_sync)(MemoryListener *listener); 429 void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section, 430 bool match_data, uint64_t data, EventNotifier *e); 431 void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section, 432 bool match_data, uint64_t data, EventNotifier *e); 433 void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section, 434 hwaddr addr, hwaddr len); 435 void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section, 436 hwaddr addr, hwaddr len); 437 /* Lower = earlier (during add), later (during del) */ 438 unsigned priority; 439 AddressSpace *address_space; 440 QTAILQ_ENTRY(MemoryListener) link; 441 QTAILQ_ENTRY(MemoryListener) link_as; 442 }; 443 444 /** 445 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects 446 */ 447 struct AddressSpace { 448 /* All fields are private. */ 449 struct rcu_head rcu; 450 char *name; 451 MemoryRegion *root; 452 453 /* Accessed via RCU. */ 454 struct FlatView *current_map; 455 456 int ioeventfd_nb; 457 struct MemoryRegionIoeventfd *ioeventfds; 458 QTAILQ_HEAD(, MemoryListener) listeners; 459 QTAILQ_ENTRY(AddressSpace) address_spaces_link; 460 }; 461 462 typedef struct AddressSpaceDispatch AddressSpaceDispatch; 463 typedef struct FlatRange FlatRange; 464 465 /* Flattened global view of current active memory hierarchy. Kept in sorted 466 * order. 467 */ 468 struct FlatView { 469 struct rcu_head rcu; 470 unsigned ref; 471 FlatRange *ranges; 472 unsigned nr; 473 unsigned nr_allocated; 474 struct AddressSpaceDispatch *dispatch; 475 MemoryRegion *root; 476 }; 477 478 static inline FlatView *address_space_to_flatview(AddressSpace *as) 479 { 480 return atomic_rcu_read(&as->current_map); 481 } 482 483 484 /** 485 * MemoryRegionSection: describes a fragment of a #MemoryRegion 486 * 487 * @mr: the region, or %NULL if empty 488 * @fv: the flat view of the address space the region is mapped in 489 * @offset_within_region: the beginning of the section, relative to @mr's start 490 * @size: the size of the section; will not exceed @mr's boundaries 491 * @offset_within_address_space: the address of the first byte of the section 492 * relative to the region's address space 493 * @readonly: writes to this section are ignored 494 * @nonvolatile: this section is non-volatile 495 */ 496 struct MemoryRegionSection { 497 MemoryRegion *mr; 498 FlatView *fv; 499 hwaddr offset_within_region; 500 Int128 size; 501 hwaddr offset_within_address_space; 502 bool readonly; 503 bool nonvolatile; 504 }; 505 506 /** 507 * memory_region_init: Initialize a memory region 508 * 509 * The region typically acts as a container for other memory regions. Use 510 * memory_region_add_subregion() to add subregions. 511 * 512 * @mr: the #MemoryRegion to be initialized 513 * @owner: the object that tracks the region's reference count 514 * @name: used for debugging; not visible to the user or ABI 515 * @size: size of the region; any subregions beyond this size will be clipped 516 */ 517 void memory_region_init(MemoryRegion *mr, 518 struct Object *owner, 519 const char *name, 520 uint64_t size); 521 522 /** 523 * memory_region_ref: Add 1 to a memory region's reference count 524 * 525 * Whenever memory regions are accessed outside the BQL, they need to be 526 * preserved against hot-unplug. MemoryRegions actually do not have their 527 * own reference count; they piggyback on a QOM object, their "owner". 528 * This function adds a reference to the owner. 529 * 530 * All MemoryRegions must have an owner if they can disappear, even if the 531 * device they belong to operates exclusively under the BQL. This is because 532 * the region could be returned at any time by memory_region_find, and this 533 * is usually under guest control. 534 * 535 * @mr: the #MemoryRegion 536 */ 537 void memory_region_ref(MemoryRegion *mr); 538 539 /** 540 * memory_region_unref: Remove 1 to a memory region's reference count 541 * 542 * Whenever memory regions are accessed outside the BQL, they need to be 543 * preserved against hot-unplug. MemoryRegions actually do not have their 544 * own reference count; they piggyback on a QOM object, their "owner". 545 * This function removes a reference to the owner and possibly destroys it. 546 * 547 * @mr: the #MemoryRegion 548 */ 549 void memory_region_unref(MemoryRegion *mr); 550 551 /** 552 * memory_region_init_io: Initialize an I/O memory region. 553 * 554 * Accesses into the region will cause the callbacks in @ops to be called. 555 * if @size is nonzero, subregions will be clipped to @size. 556 * 557 * @mr: the #MemoryRegion to be initialized. 558 * @owner: the object that tracks the region's reference count 559 * @ops: a structure containing read and write callbacks to be used when 560 * I/O is performed on the region. 561 * @opaque: passed to the read and write callbacks of the @ops structure. 562 * @name: used for debugging; not visible to the user or ABI 563 * @size: size of the region. 564 */ 565 void memory_region_init_io(MemoryRegion *mr, 566 struct Object *owner, 567 const MemoryRegionOps *ops, 568 void *opaque, 569 const char *name, 570 uint64_t size); 571 572 /** 573 * memory_region_init_ram_nomigrate: Initialize RAM memory region. Accesses 574 * into the region will modify memory 575 * directly. 576 * 577 * @mr: the #MemoryRegion to be initialized. 578 * @owner: the object that tracks the region's reference count 579 * @name: Region name, becomes part of RAMBlock name used in migration stream 580 * must be unique within any device 581 * @size: size of the region. 582 * @errp: pointer to Error*, to store an error if it happens. 583 * 584 * Note that this function does not do anything to cause the data in the 585 * RAM memory region to be migrated; that is the responsibility of the caller. 586 */ 587 void memory_region_init_ram_nomigrate(MemoryRegion *mr, 588 struct Object *owner, 589 const char *name, 590 uint64_t size, 591 Error **errp); 592 593 /** 594 * memory_region_init_ram_shared_nomigrate: Initialize RAM memory region. 595 * Accesses into the region will 596 * modify memory directly. 597 * 598 * @mr: the #MemoryRegion to be initialized. 599 * @owner: the object that tracks the region's reference count 600 * @name: Region name, becomes part of RAMBlock name used in migration stream 601 * must be unique within any device 602 * @size: size of the region. 603 * @share: allow remapping RAM to different addresses 604 * @errp: pointer to Error*, to store an error if it happens. 605 * 606 * Note that this function is similar to memory_region_init_ram_nomigrate. 607 * The only difference is part of the RAM region can be remapped. 608 */ 609 void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr, 610 struct Object *owner, 611 const char *name, 612 uint64_t size, 613 bool share, 614 Error **errp); 615 616 /** 617 * memory_region_init_resizeable_ram: Initialize memory region with resizeable 618 * RAM. Accesses into the region will 619 * modify memory directly. Only an initial 620 * portion of this RAM is actually used. 621 * The used size can change across reboots. 622 * 623 * @mr: the #MemoryRegion to be initialized. 624 * @owner: the object that tracks the region's reference count 625 * @name: Region name, becomes part of RAMBlock name used in migration stream 626 * must be unique within any device 627 * @size: used size of the region. 628 * @max_size: max size of the region. 629 * @resized: callback to notify owner about used size change. 630 * @errp: pointer to Error*, to store an error if it happens. 631 * 632 * Note that this function does not do anything to cause the data in the 633 * RAM memory region to be migrated; that is the responsibility of the caller. 634 */ 635 void memory_region_init_resizeable_ram(MemoryRegion *mr, 636 struct Object *owner, 637 const char *name, 638 uint64_t size, 639 uint64_t max_size, 640 void (*resized)(const char*, 641 uint64_t length, 642 void *host), 643 Error **errp); 644 #ifdef CONFIG_POSIX 645 646 /** 647 * memory_region_init_ram_from_file: Initialize RAM memory region with a 648 * mmap-ed backend. 649 * 650 * @mr: the #MemoryRegion to be initialized. 651 * @owner: the object that tracks the region's reference count 652 * @name: Region name, becomes part of RAMBlock name used in migration stream 653 * must be unique within any device 654 * @size: size of the region. 655 * @align: alignment of the region base address; if 0, the default alignment 656 * (getpagesize()) will be used. 657 * @ram_flags: Memory region features: 658 * - RAM_SHARED: memory must be mmaped with the MAP_SHARED flag 659 * - RAM_PMEM: the memory is persistent memory 660 * Other bits are ignored now. 661 * @path: the path in which to allocate the RAM. 662 * @errp: pointer to Error*, to store an error if it happens. 663 * 664 * Note that this function does not do anything to cause the data in the 665 * RAM memory region to be migrated; that is the responsibility of the caller. 666 */ 667 void memory_region_init_ram_from_file(MemoryRegion *mr, 668 struct Object *owner, 669 const char *name, 670 uint64_t size, 671 uint64_t align, 672 uint32_t ram_flags, 673 const char *path, 674 Error **errp); 675 676 /** 677 * memory_region_init_ram_from_fd: Initialize RAM memory region with a 678 * mmap-ed backend. 679 * 680 * @mr: the #MemoryRegion to be initialized. 681 * @owner: the object that tracks the region's reference count 682 * @name: the name of the region. 683 * @size: size of the region. 684 * @share: %true if memory must be mmaped with the MAP_SHARED flag 685 * @fd: the fd to mmap. 686 * @errp: pointer to Error*, to store an error if it happens. 687 * 688 * Note that this function does not do anything to cause the data in the 689 * RAM memory region to be migrated; that is the responsibility of the caller. 690 */ 691 void memory_region_init_ram_from_fd(MemoryRegion *mr, 692 struct Object *owner, 693 const char *name, 694 uint64_t size, 695 bool share, 696 int fd, 697 Error **errp); 698 #endif 699 700 /** 701 * memory_region_init_ram_ptr: Initialize RAM memory region from a 702 * user-provided pointer. Accesses into the 703 * region will modify memory directly. 704 * 705 * @mr: the #MemoryRegion to be initialized. 706 * @owner: the object that tracks the region's reference count 707 * @name: Region name, becomes part of RAMBlock name used in migration stream 708 * must be unique within any device 709 * @size: size of the region. 710 * @ptr: memory to be mapped; must contain at least @size bytes. 711 * 712 * Note that this function does not do anything to cause the data in the 713 * RAM memory region to be migrated; that is the responsibility of the caller. 714 */ 715 void memory_region_init_ram_ptr(MemoryRegion *mr, 716 struct Object *owner, 717 const char *name, 718 uint64_t size, 719 void *ptr); 720 721 /** 722 * memory_region_init_ram_device_ptr: Initialize RAM device memory region from 723 * a user-provided pointer. 724 * 725 * A RAM device represents a mapping to a physical device, such as to a PCI 726 * MMIO BAR of an vfio-pci assigned device. The memory region may be mapped 727 * into the VM address space and access to the region will modify memory 728 * directly. However, the memory region should not be included in a memory 729 * dump (device may not be enabled/mapped at the time of the dump), and 730 * operations incompatible with manipulating MMIO should be avoided. Replaces 731 * skip_dump flag. 732 * 733 * @mr: the #MemoryRegion to be initialized. 734 * @owner: the object that tracks the region's reference count 735 * @name: the name of the region. 736 * @size: size of the region. 737 * @ptr: memory to be mapped; must contain at least @size bytes. 738 * 739 * Note that this function does not do anything to cause the data in the 740 * RAM memory region to be migrated; that is the responsibility of the caller. 741 * (For RAM device memory regions, migrating the contents rarely makes sense.) 742 */ 743 void memory_region_init_ram_device_ptr(MemoryRegion *mr, 744 struct Object *owner, 745 const char *name, 746 uint64_t size, 747 void *ptr); 748 749 /** 750 * memory_region_init_alias: Initialize a memory region that aliases all or a 751 * part of another memory region. 752 * 753 * @mr: the #MemoryRegion to be initialized. 754 * @owner: the object that tracks the region's reference count 755 * @name: used for debugging; not visible to the user or ABI 756 * @orig: the region to be referenced; @mr will be equivalent to 757 * @orig between @offset and @offset + @size - 1. 758 * @offset: start of the section in @orig to be referenced. 759 * @size: size of the region. 760 */ 761 void memory_region_init_alias(MemoryRegion *mr, 762 struct Object *owner, 763 const char *name, 764 MemoryRegion *orig, 765 hwaddr offset, 766 uint64_t size); 767 768 /** 769 * memory_region_init_rom_nomigrate: Initialize a ROM memory region. 770 * 771 * This has the same effect as calling memory_region_init_ram_nomigrate() 772 * and then marking the resulting region read-only with 773 * memory_region_set_readonly(). 774 * 775 * Note that this function does not do anything to cause the data in the 776 * RAM side of the memory region to be migrated; that is the responsibility 777 * of the caller. 778 * 779 * @mr: the #MemoryRegion to be initialized. 780 * @owner: the object that tracks the region's reference count 781 * @name: Region name, becomes part of RAMBlock name used in migration stream 782 * must be unique within any device 783 * @size: size of the region. 784 * @errp: pointer to Error*, to store an error if it happens. 785 */ 786 void memory_region_init_rom_nomigrate(MemoryRegion *mr, 787 struct Object *owner, 788 const char *name, 789 uint64_t size, 790 Error **errp); 791 792 /** 793 * memory_region_init_rom_device_nomigrate: Initialize a ROM memory region. 794 * Writes are handled via callbacks. 795 * 796 * Note that this function does not do anything to cause the data in the 797 * RAM side of the memory region to be migrated; that is the responsibility 798 * of the caller. 799 * 800 * @mr: the #MemoryRegion to be initialized. 801 * @owner: the object that tracks the region's reference count 802 * @ops: callbacks for write access handling (must not be NULL). 803 * @opaque: passed to the read and write callbacks of the @ops structure. 804 * @name: Region name, becomes part of RAMBlock name used in migration stream 805 * must be unique within any device 806 * @size: size of the region. 807 * @errp: pointer to Error*, to store an error if it happens. 808 */ 809 void memory_region_init_rom_device_nomigrate(MemoryRegion *mr, 810 struct Object *owner, 811 const MemoryRegionOps *ops, 812 void *opaque, 813 const char *name, 814 uint64_t size, 815 Error **errp); 816 817 /** 818 * memory_region_init_iommu: Initialize a memory region of a custom type 819 * that translates addresses 820 * 821 * An IOMMU region translates addresses and forwards accesses to a target 822 * memory region. 823 * 824 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION. 825 * @_iommu_mr should be a pointer to enough memory for an instance of 826 * that subclass, @instance_size is the size of that subclass, and 827 * @mrtypename is its name. This function will initialize @_iommu_mr as an 828 * instance of the subclass, and its methods will then be called to handle 829 * accesses to the memory region. See the documentation of 830 * #IOMMUMemoryRegionClass for further details. 831 * 832 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized 833 * @instance_size: the IOMMUMemoryRegion subclass instance size 834 * @mrtypename: the type name of the #IOMMUMemoryRegion 835 * @owner: the object that tracks the region's reference count 836 * @name: used for debugging; not visible to the user or ABI 837 * @size: size of the region. 838 */ 839 void memory_region_init_iommu(void *_iommu_mr, 840 size_t instance_size, 841 const char *mrtypename, 842 Object *owner, 843 const char *name, 844 uint64_t size); 845 846 /** 847 * memory_region_init_ram - Initialize RAM memory region. Accesses into the 848 * region will modify memory directly. 849 * 850 * @mr: the #MemoryRegion to be initialized 851 * @owner: the object that tracks the region's reference count (must be 852 * TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL) 853 * @name: name of the memory region 854 * @size: size of the region in bytes 855 * @errp: pointer to Error*, to store an error if it happens. 856 * 857 * This function allocates RAM for a board model or device, and 858 * arranges for it to be migrated (by calling vmstate_register_ram() 859 * if @owner is a DeviceState, or vmstate_register_ram_global() if 860 * @owner is NULL). 861 * 862 * TODO: Currently we restrict @owner to being either NULL (for 863 * global RAM regions with no owner) or devices, so that we can 864 * give the RAM block a unique name for migration purposes. 865 * We should lift this restriction and allow arbitrary Objects. 866 * If you pass a non-NULL non-device @owner then we will assert. 867 */ 868 void memory_region_init_ram(MemoryRegion *mr, 869 struct Object *owner, 870 const char *name, 871 uint64_t size, 872 Error **errp); 873 874 /** 875 * memory_region_init_rom: Initialize a ROM memory region. 876 * 877 * This has the same effect as calling memory_region_init_ram() 878 * and then marking the resulting region read-only with 879 * memory_region_set_readonly(). This includes arranging for the 880 * contents to be migrated. 881 * 882 * TODO: Currently we restrict @owner to being either NULL (for 883 * global RAM regions with no owner) or devices, so that we can 884 * give the RAM block a unique name for migration purposes. 885 * We should lift this restriction and allow arbitrary Objects. 886 * If you pass a non-NULL non-device @owner then we will assert. 887 * 888 * @mr: the #MemoryRegion to be initialized. 889 * @owner: the object that tracks the region's reference count 890 * @name: Region name, becomes part of RAMBlock name used in migration stream 891 * must be unique within any device 892 * @size: size of the region. 893 * @errp: pointer to Error*, to store an error if it happens. 894 */ 895 void memory_region_init_rom(MemoryRegion *mr, 896 struct Object *owner, 897 const char *name, 898 uint64_t size, 899 Error **errp); 900 901 /** 902 * memory_region_init_rom_device: Initialize a ROM memory region. 903 * Writes are handled via callbacks. 904 * 905 * This function initializes a memory region backed by RAM for reads 906 * and callbacks for writes, and arranges for the RAM backing to 907 * be migrated (by calling vmstate_register_ram() 908 * if @owner is a DeviceState, or vmstate_register_ram_global() if 909 * @owner is NULL). 910 * 911 * TODO: Currently we restrict @owner to being either NULL (for 912 * global RAM regions with no owner) or devices, so that we can 913 * give the RAM block a unique name for migration purposes. 914 * We should lift this restriction and allow arbitrary Objects. 915 * If you pass a non-NULL non-device @owner then we will assert. 916 * 917 * @mr: the #MemoryRegion to be initialized. 918 * @owner: the object that tracks the region's reference count 919 * @ops: callbacks for write access handling (must not be NULL). 920 * @name: Region name, becomes part of RAMBlock name used in migration stream 921 * must be unique within any device 922 * @size: size of the region. 923 * @errp: pointer to Error*, to store an error if it happens. 924 */ 925 void memory_region_init_rom_device(MemoryRegion *mr, 926 struct Object *owner, 927 const MemoryRegionOps *ops, 928 void *opaque, 929 const char *name, 930 uint64_t size, 931 Error **errp); 932 933 934 /** 935 * memory_region_owner: get a memory region's owner. 936 * 937 * @mr: the memory region being queried. 938 */ 939 struct Object *memory_region_owner(MemoryRegion *mr); 940 941 /** 942 * memory_region_size: get a memory region's size. 943 * 944 * @mr: the memory region being queried. 945 */ 946 uint64_t memory_region_size(MemoryRegion *mr); 947 948 /** 949 * memory_region_is_ram: check whether a memory region is random access 950 * 951 * Returns %true if a memory region is random access. 952 * 953 * @mr: the memory region being queried 954 */ 955 static inline bool memory_region_is_ram(MemoryRegion *mr) 956 { 957 return mr->ram; 958 } 959 960 /** 961 * memory_region_is_ram_device: check whether a memory region is a ram device 962 * 963 * Returns %true if a memory region is a device backed ram region 964 * 965 * @mr: the memory region being queried 966 */ 967 bool memory_region_is_ram_device(MemoryRegion *mr); 968 969 /** 970 * memory_region_is_romd: check whether a memory region is in ROMD mode 971 * 972 * Returns %true if a memory region is a ROM device and currently set to allow 973 * direct reads. 974 * 975 * @mr: the memory region being queried 976 */ 977 static inline bool memory_region_is_romd(MemoryRegion *mr) 978 { 979 return mr->rom_device && mr->romd_mode; 980 } 981 982 /** 983 * memory_region_get_iommu: check whether a memory region is an iommu 984 * 985 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu, 986 * otherwise NULL. 987 * 988 * @mr: the memory region being queried 989 */ 990 static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr) 991 { 992 if (mr->alias) { 993 return memory_region_get_iommu(mr->alias); 994 } 995 if (mr->is_iommu) { 996 return (IOMMUMemoryRegion *) mr; 997 } 998 return NULL; 999 } 1000 1001 /** 1002 * memory_region_get_iommu_class_nocheck: returns iommu memory region class 1003 * if an iommu or NULL if not 1004 * 1005 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu, 1006 * otherwise NULL. This is fast path avoiding QOM checking, use with caution. 1007 * 1008 * @mr: the memory region being queried 1009 */ 1010 static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck( 1011 IOMMUMemoryRegion *iommu_mr) 1012 { 1013 return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class); 1014 } 1015 1016 #define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL) 1017 1018 /** 1019 * memory_region_iommu_get_min_page_size: get minimum supported page size 1020 * for an iommu 1021 * 1022 * Returns minimum supported page size for an iommu. 1023 * 1024 * @iommu_mr: the memory region being queried 1025 */ 1026 uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr); 1027 1028 /** 1029 * memory_region_notify_iommu: notify a change in an IOMMU translation entry. 1030 * 1031 * The notification type will be decided by entry.perm bits: 1032 * 1033 * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE. 1034 * - For MAP (newly added entry) notifies: set entry.perm to the 1035 * permission of the page (which is definitely !IOMMU_NONE). 1036 * 1037 * Note: for any IOMMU implementation, an in-place mapping change 1038 * should be notified with an UNMAP followed by a MAP. 1039 * 1040 * @iommu_mr: the memory region that was changed 1041 * @iommu_idx: the IOMMU index for the translation table which has changed 1042 * @entry: the new entry in the IOMMU translation table. The entry 1043 * replaces all old entries for the same virtual I/O address range. 1044 * Deleted entries have .@perm == 0. 1045 */ 1046 void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr, 1047 int iommu_idx, 1048 IOMMUTLBEntry entry); 1049 1050 /** 1051 * memory_region_notify_one: notify a change in an IOMMU translation 1052 * entry to a single notifier 1053 * 1054 * This works just like memory_region_notify_iommu(), but it only 1055 * notifies a specific notifier, not all of them. 1056 * 1057 * @notifier: the notifier to be notified 1058 * @entry: the new entry in the IOMMU translation table. The entry 1059 * replaces all old entries for the same virtual I/O address range. 1060 * Deleted entries have .@perm == 0. 1061 */ 1062 void memory_region_notify_one(IOMMUNotifier *notifier, 1063 IOMMUTLBEntry *entry); 1064 1065 /** 1066 * memory_region_register_iommu_notifier: register a notifier for changes to 1067 * IOMMU translation entries. 1068 * 1069 * @mr: the memory region to observe 1070 * @n: the IOMMUNotifier to be added; the notify callback receives a 1071 * pointer to an #IOMMUTLBEntry as the opaque value; the pointer 1072 * ceases to be valid on exit from the notifier. 1073 */ 1074 void memory_region_register_iommu_notifier(MemoryRegion *mr, 1075 IOMMUNotifier *n); 1076 1077 /** 1078 * memory_region_iommu_replay: replay existing IOMMU translations to 1079 * a notifier with the minimum page granularity returned by 1080 * mr->iommu_ops->get_page_size(). 1081 * 1082 * Note: this is not related to record-and-replay functionality. 1083 * 1084 * @iommu_mr: the memory region to observe 1085 * @n: the notifier to which to replay iommu mappings 1086 */ 1087 void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n); 1088 1089 /** 1090 * memory_region_iommu_replay_all: replay existing IOMMU translations 1091 * to all the notifiers registered. 1092 * 1093 * Note: this is not related to record-and-replay functionality. 1094 * 1095 * @iommu_mr: the memory region to observe 1096 */ 1097 void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr); 1098 1099 /** 1100 * memory_region_unregister_iommu_notifier: unregister a notifier for 1101 * changes to IOMMU translation entries. 1102 * 1103 * @mr: the memory region which was observed and for which notity_stopped() 1104 * needs to be called 1105 * @n: the notifier to be removed. 1106 */ 1107 void memory_region_unregister_iommu_notifier(MemoryRegion *mr, 1108 IOMMUNotifier *n); 1109 1110 /** 1111 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is 1112 * defined on the IOMMU. 1113 * 1114 * Returns 0 on success, or a negative errno otherwise. In particular, 1115 * -EINVAL indicates that the IOMMU does not support the requested 1116 * attribute. 1117 * 1118 * @iommu_mr: the memory region 1119 * @attr: the requested attribute 1120 * @data: a pointer to the requested attribute data 1121 */ 1122 int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr, 1123 enum IOMMUMemoryRegionAttr attr, 1124 void *data); 1125 1126 /** 1127 * memory_region_iommu_attrs_to_index: return the IOMMU index to 1128 * use for translations with the given memory transaction attributes. 1129 * 1130 * @iommu_mr: the memory region 1131 * @attrs: the memory transaction attributes 1132 */ 1133 int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr, 1134 MemTxAttrs attrs); 1135 1136 /** 1137 * memory_region_iommu_num_indexes: return the total number of IOMMU 1138 * indexes that this IOMMU supports. 1139 * 1140 * @iommu_mr: the memory region 1141 */ 1142 int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr); 1143 1144 /** 1145 * memory_region_name: get a memory region's name 1146 * 1147 * Returns the string that was used to initialize the memory region. 1148 * 1149 * @mr: the memory region being queried 1150 */ 1151 const char *memory_region_name(const MemoryRegion *mr); 1152 1153 /** 1154 * memory_region_is_logging: return whether a memory region is logging writes 1155 * 1156 * Returns %true if the memory region is logging writes for the given client 1157 * 1158 * @mr: the memory region being queried 1159 * @client: the client being queried 1160 */ 1161 bool memory_region_is_logging(MemoryRegion *mr, uint8_t client); 1162 1163 /** 1164 * memory_region_get_dirty_log_mask: return the clients for which a 1165 * memory region is logging writes. 1166 * 1167 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants 1168 * are the bit indices. 1169 * 1170 * @mr: the memory region being queried 1171 */ 1172 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr); 1173 1174 /** 1175 * memory_region_is_rom: check whether a memory region is ROM 1176 * 1177 * Returns %true if a memory region is read-only memory. 1178 * 1179 * @mr: the memory region being queried 1180 */ 1181 static inline bool memory_region_is_rom(MemoryRegion *mr) 1182 { 1183 return mr->ram && mr->readonly; 1184 } 1185 1186 /** 1187 * memory_region_is_nonvolatile: check whether a memory region is non-volatile 1188 * 1189 * Returns %true is a memory region is non-volatile memory. 1190 * 1191 * @mr: the memory region being queried 1192 */ 1193 static inline bool memory_region_is_nonvolatile(MemoryRegion *mr) 1194 { 1195 return mr->nonvolatile; 1196 } 1197 1198 /** 1199 * memory_region_get_fd: Get a file descriptor backing a RAM memory region. 1200 * 1201 * Returns a file descriptor backing a file-based RAM memory region, 1202 * or -1 if the region is not a file-based RAM memory region. 1203 * 1204 * @mr: the RAM or alias memory region being queried. 1205 */ 1206 int memory_region_get_fd(MemoryRegion *mr); 1207 1208 /** 1209 * memory_region_from_host: Convert a pointer into a RAM memory region 1210 * and an offset within it. 1211 * 1212 * Given a host pointer inside a RAM memory region (created with 1213 * memory_region_init_ram() or memory_region_init_ram_ptr()), return 1214 * the MemoryRegion and the offset within it. 1215 * 1216 * Use with care; by the time this function returns, the returned pointer is 1217 * not protected by RCU anymore. If the caller is not within an RCU critical 1218 * section and does not hold the iothread lock, it must have other means of 1219 * protecting the pointer, such as a reference to the region that includes 1220 * the incoming ram_addr_t. 1221 * 1222 * @ptr: the host pointer to be converted 1223 * @offset: the offset within memory region 1224 */ 1225 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset); 1226 1227 /** 1228 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region. 1229 * 1230 * Returns a host pointer to a RAM memory region (created with 1231 * memory_region_init_ram() or memory_region_init_ram_ptr()). 1232 * 1233 * Use with care; by the time this function returns, the returned pointer is 1234 * not protected by RCU anymore. If the caller is not within an RCU critical 1235 * section and does not hold the iothread lock, it must have other means of 1236 * protecting the pointer, such as a reference to the region that includes 1237 * the incoming ram_addr_t. 1238 * 1239 * @mr: the memory region being queried. 1240 */ 1241 void *memory_region_get_ram_ptr(MemoryRegion *mr); 1242 1243 /* memory_region_ram_resize: Resize a RAM region. 1244 * 1245 * Only legal before guest might have detected the memory size: e.g. on 1246 * incoming migration, or right after reset. 1247 * 1248 * @mr: a memory region created with @memory_region_init_resizeable_ram. 1249 * @newsize: the new size the region 1250 * @errp: pointer to Error*, to store an error if it happens. 1251 */ 1252 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, 1253 Error **errp); 1254 1255 /** 1256 * memory_region_set_log: Turn dirty logging on or off for a region. 1257 * 1258 * Turns dirty logging on or off for a specified client (display, migration). 1259 * Only meaningful for RAM regions. 1260 * 1261 * @mr: the memory region being updated. 1262 * @log: whether dirty logging is to be enabled or disabled. 1263 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only. 1264 */ 1265 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client); 1266 1267 /** 1268 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region. 1269 * 1270 * Marks a range of bytes as dirty, after it has been dirtied outside 1271 * guest code. 1272 * 1273 * @mr: the memory region being dirtied. 1274 * @addr: the address (relative to the start of the region) being dirtied. 1275 * @size: size of the range being dirtied. 1276 */ 1277 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr, 1278 hwaddr size); 1279 1280 /** 1281 * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range 1282 * 1283 * This function is called when the caller wants to clear the remote 1284 * dirty bitmap of a memory range within the memory region. This can 1285 * be used by e.g. KVM to manually clear dirty log when 1286 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is declared support by the host 1287 * kernel. 1288 * 1289 * @mr: the memory region to clear the dirty log upon 1290 * @start: start address offset within the memory region 1291 * @len: length of the memory region to clear dirty bitmap 1292 */ 1293 void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start, 1294 hwaddr len); 1295 1296 /** 1297 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty 1298 * bitmap and clear it. 1299 * 1300 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and 1301 * returns the snapshot. The snapshot can then be used to query dirty 1302 * status, using memory_region_snapshot_get_dirty. Snapshotting allows 1303 * querying the same page multiple times, which is especially useful for 1304 * display updates where the scanlines often are not page aligned. 1305 * 1306 * The dirty bitmap region which gets copyed into the snapshot (and 1307 * cleared afterwards) can be larger than requested. The boundaries 1308 * are rounded up/down so complete bitmap longs (covering 64 pages on 1309 * 64bit hosts) can be copied over into the bitmap snapshot. Which 1310 * isn't a problem for display updates as the extra pages are outside 1311 * the visible area, and in case the visible area changes a full 1312 * display redraw is due anyway. Should other use cases for this 1313 * function emerge we might have to revisit this implementation 1314 * detail. 1315 * 1316 * Use g_free to release DirtyBitmapSnapshot. 1317 * 1318 * @mr: the memory region being queried. 1319 * @addr: the address (relative to the start of the region) being queried. 1320 * @size: the size of the range being queried. 1321 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA. 1322 */ 1323 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr, 1324 hwaddr addr, 1325 hwaddr size, 1326 unsigned client); 1327 1328 /** 1329 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty 1330 * in the specified dirty bitmap snapshot. 1331 * 1332 * @mr: the memory region being queried. 1333 * @snap: the dirty bitmap snapshot 1334 * @addr: the address (relative to the start of the region) being queried. 1335 * @size: the size of the range being queried. 1336 */ 1337 bool memory_region_snapshot_get_dirty(MemoryRegion *mr, 1338 DirtyBitmapSnapshot *snap, 1339 hwaddr addr, hwaddr size); 1340 1341 /** 1342 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified 1343 * client. 1344 * 1345 * Marks a range of pages as no longer dirty. 1346 * 1347 * @mr: the region being updated. 1348 * @addr: the start of the subrange being cleaned. 1349 * @size: the size of the subrange being cleaned. 1350 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or 1351 * %DIRTY_MEMORY_VGA. 1352 */ 1353 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr, 1354 hwaddr size, unsigned client); 1355 1356 /** 1357 * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate 1358 * TBs (for self-modifying code). 1359 * 1360 * The MemoryRegionOps->write() callback of a ROM device must use this function 1361 * to mark byte ranges that have been modified internally, such as by directly 1362 * accessing the memory returned by memory_region_get_ram_ptr(). 1363 * 1364 * This function marks the range dirty and invalidates TBs so that TCG can 1365 * detect self-modifying code. 1366 * 1367 * @mr: the region being flushed. 1368 * @addr: the start, relative to the start of the region, of the range being 1369 * flushed. 1370 * @size: the size, in bytes, of the range being flushed. 1371 */ 1372 void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size); 1373 1374 /** 1375 * memory_region_set_readonly: Turn a memory region read-only (or read-write) 1376 * 1377 * Allows a memory region to be marked as read-only (turning it into a ROM). 1378 * only useful on RAM regions. 1379 * 1380 * @mr: the region being updated. 1381 * @readonly: whether rhe region is to be ROM or RAM. 1382 */ 1383 void memory_region_set_readonly(MemoryRegion *mr, bool readonly); 1384 1385 /** 1386 * memory_region_set_nonvolatile: Turn a memory region non-volatile 1387 * 1388 * Allows a memory region to be marked as non-volatile. 1389 * only useful on RAM regions. 1390 * 1391 * @mr: the region being updated. 1392 * @nonvolatile: whether rhe region is to be non-volatile. 1393 */ 1394 void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile); 1395 1396 /** 1397 * memory_region_rom_device_set_romd: enable/disable ROMD mode 1398 * 1399 * Allows a ROM device (initialized with memory_region_init_rom_device() to 1400 * set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the 1401 * device is mapped to guest memory and satisfies read access directly. 1402 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function. 1403 * Writes are always handled by the #MemoryRegion.write function. 1404 * 1405 * @mr: the memory region to be updated 1406 * @romd_mode: %true to put the region into ROMD mode 1407 */ 1408 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode); 1409 1410 /** 1411 * memory_region_set_coalescing: Enable memory coalescing for the region. 1412 * 1413 * Enabled writes to a region to be queued for later processing. MMIO ->write 1414 * callbacks may be delayed until a non-coalesced MMIO is issued. 1415 * Only useful for IO regions. Roughly similar to write-combining hardware. 1416 * 1417 * @mr: the memory region to be write coalesced 1418 */ 1419 void memory_region_set_coalescing(MemoryRegion *mr); 1420 1421 /** 1422 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of 1423 * a region. 1424 * 1425 * Like memory_region_set_coalescing(), but works on a sub-range of a region. 1426 * Multiple calls can be issued coalesced disjoint ranges. 1427 * 1428 * @mr: the memory region to be updated. 1429 * @offset: the start of the range within the region to be coalesced. 1430 * @size: the size of the subrange to be coalesced. 1431 */ 1432 void memory_region_add_coalescing(MemoryRegion *mr, 1433 hwaddr offset, 1434 uint64_t size); 1435 1436 /** 1437 * memory_region_clear_coalescing: Disable MMIO coalescing for the region. 1438 * 1439 * Disables any coalescing caused by memory_region_set_coalescing() or 1440 * memory_region_add_coalescing(). Roughly equivalent to uncacheble memory 1441 * hardware. 1442 * 1443 * @mr: the memory region to be updated. 1444 */ 1445 void memory_region_clear_coalescing(MemoryRegion *mr); 1446 1447 /** 1448 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before 1449 * accesses. 1450 * 1451 * Ensure that pending coalesced MMIO request are flushed before the memory 1452 * region is accessed. This property is automatically enabled for all regions 1453 * passed to memory_region_set_coalescing() and memory_region_add_coalescing(). 1454 * 1455 * @mr: the memory region to be updated. 1456 */ 1457 void memory_region_set_flush_coalesced(MemoryRegion *mr); 1458 1459 /** 1460 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before 1461 * accesses. 1462 * 1463 * Clear the automatic coalesced MMIO flushing enabled via 1464 * memory_region_set_flush_coalesced. Note that this service has no effect on 1465 * memory regions that have MMIO coalescing enabled for themselves. For them, 1466 * automatic flushing will stop once coalescing is disabled. 1467 * 1468 * @mr: the memory region to be updated. 1469 */ 1470 void memory_region_clear_flush_coalesced(MemoryRegion *mr); 1471 1472 /** 1473 * memory_region_clear_global_locking: Declares that access processing does 1474 * not depend on the QEMU global lock. 1475 * 1476 * By clearing this property, accesses to the memory region will be processed 1477 * outside of QEMU's global lock (unless the lock is held on when issuing the 1478 * access request). In this case, the device model implementing the access 1479 * handlers is responsible for synchronization of concurrency. 1480 * 1481 * @mr: the memory region to be updated. 1482 */ 1483 void memory_region_clear_global_locking(MemoryRegion *mr); 1484 1485 /** 1486 * memory_region_add_eventfd: Request an eventfd to be triggered when a word 1487 * is written to a location. 1488 * 1489 * Marks a word in an IO region (initialized with memory_region_init_io()) 1490 * as a trigger for an eventfd event. The I/O callback will not be called. 1491 * The caller must be prepared to handle failure (that is, take the required 1492 * action if the callback _is_ called). 1493 * 1494 * @mr: the memory region being updated. 1495 * @addr: the address within @mr that is to be monitored 1496 * @size: the size of the access to trigger the eventfd 1497 * @match_data: whether to match against @data, instead of just @addr 1498 * @data: the data to match against the guest write 1499 * @e: event notifier to be triggered when @addr, @size, and @data all match. 1500 **/ 1501 void memory_region_add_eventfd(MemoryRegion *mr, 1502 hwaddr addr, 1503 unsigned size, 1504 bool match_data, 1505 uint64_t data, 1506 EventNotifier *e); 1507 1508 /** 1509 * memory_region_del_eventfd: Cancel an eventfd. 1510 * 1511 * Cancels an eventfd trigger requested by a previous 1512 * memory_region_add_eventfd() call. 1513 * 1514 * @mr: the memory region being updated. 1515 * @addr: the address within @mr that is to be monitored 1516 * @size: the size of the access to trigger the eventfd 1517 * @match_data: whether to match against @data, instead of just @addr 1518 * @data: the data to match against the guest write 1519 * @e: event notifier to be triggered when @addr, @size, and @data all match. 1520 */ 1521 void memory_region_del_eventfd(MemoryRegion *mr, 1522 hwaddr addr, 1523 unsigned size, 1524 bool match_data, 1525 uint64_t data, 1526 EventNotifier *e); 1527 1528 /** 1529 * memory_region_add_subregion: Add a subregion to a container. 1530 * 1531 * Adds a subregion at @offset. The subregion may not overlap with other 1532 * subregions (except for those explicitly marked as overlapping). A region 1533 * may only be added once as a subregion (unless removed with 1534 * memory_region_del_subregion()); use memory_region_init_alias() if you 1535 * want a region to be a subregion in multiple locations. 1536 * 1537 * @mr: the region to contain the new subregion; must be a container 1538 * initialized with memory_region_init(). 1539 * @offset: the offset relative to @mr where @subregion is added. 1540 * @subregion: the subregion to be added. 1541 */ 1542 void memory_region_add_subregion(MemoryRegion *mr, 1543 hwaddr offset, 1544 MemoryRegion *subregion); 1545 /** 1546 * memory_region_add_subregion_overlap: Add a subregion to a container 1547 * with overlap. 1548 * 1549 * Adds a subregion at @offset. The subregion may overlap with other 1550 * subregions. Conflicts are resolved by having a higher @priority hide a 1551 * lower @priority. Subregions without priority are taken as @priority 0. 1552 * A region may only be added once as a subregion (unless removed with 1553 * memory_region_del_subregion()); use memory_region_init_alias() if you 1554 * want a region to be a subregion in multiple locations. 1555 * 1556 * @mr: the region to contain the new subregion; must be a container 1557 * initialized with memory_region_init(). 1558 * @offset: the offset relative to @mr where @subregion is added. 1559 * @subregion: the subregion to be added. 1560 * @priority: used for resolving overlaps; highest priority wins. 1561 */ 1562 void memory_region_add_subregion_overlap(MemoryRegion *mr, 1563 hwaddr offset, 1564 MemoryRegion *subregion, 1565 int priority); 1566 1567 /** 1568 * memory_region_get_ram_addr: Get the ram address associated with a memory 1569 * region 1570 */ 1571 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr); 1572 1573 uint64_t memory_region_get_alignment(const MemoryRegion *mr); 1574 /** 1575 * memory_region_del_subregion: Remove a subregion. 1576 * 1577 * Removes a subregion from its container. 1578 * 1579 * @mr: the container to be updated. 1580 * @subregion: the region being removed; must be a current subregion of @mr. 1581 */ 1582 void memory_region_del_subregion(MemoryRegion *mr, 1583 MemoryRegion *subregion); 1584 1585 /* 1586 * memory_region_set_enabled: dynamically enable or disable a region 1587 * 1588 * Enables or disables a memory region. A disabled memory region 1589 * ignores all accesses to itself and its subregions. It does not 1590 * obscure sibling subregions with lower priority - it simply behaves as 1591 * if it was removed from the hierarchy. 1592 * 1593 * Regions default to being enabled. 1594 * 1595 * @mr: the region to be updated 1596 * @enabled: whether to enable or disable the region 1597 */ 1598 void memory_region_set_enabled(MemoryRegion *mr, bool enabled); 1599 1600 /* 1601 * memory_region_set_address: dynamically update the address of a region 1602 * 1603 * Dynamically updates the address of a region, relative to its container. 1604 * May be used on regions are currently part of a memory hierarchy. 1605 * 1606 * @mr: the region to be updated 1607 * @addr: new address, relative to container region 1608 */ 1609 void memory_region_set_address(MemoryRegion *mr, hwaddr addr); 1610 1611 /* 1612 * memory_region_set_size: dynamically update the size of a region. 1613 * 1614 * Dynamically updates the size of a region. 1615 * 1616 * @mr: the region to be updated 1617 * @size: used size of the region. 1618 */ 1619 void memory_region_set_size(MemoryRegion *mr, uint64_t size); 1620 1621 /* 1622 * memory_region_set_alias_offset: dynamically update a memory alias's offset 1623 * 1624 * Dynamically updates the offset into the target region that an alias points 1625 * to, as if the fourth argument to memory_region_init_alias() has changed. 1626 * 1627 * @mr: the #MemoryRegion to be updated; should be an alias. 1628 * @offset: the new offset into the target memory region 1629 */ 1630 void memory_region_set_alias_offset(MemoryRegion *mr, 1631 hwaddr offset); 1632 1633 /** 1634 * memory_region_present: checks if an address relative to a @container 1635 * translates into #MemoryRegion within @container 1636 * 1637 * Answer whether a #MemoryRegion within @container covers the address 1638 * @addr. 1639 * 1640 * @container: a #MemoryRegion within which @addr is a relative address 1641 * @addr: the area within @container to be searched 1642 */ 1643 bool memory_region_present(MemoryRegion *container, hwaddr addr); 1644 1645 /** 1646 * memory_region_is_mapped: returns true if #MemoryRegion is mapped 1647 * into any address space. 1648 * 1649 * @mr: a #MemoryRegion which should be checked if it's mapped 1650 */ 1651 bool memory_region_is_mapped(MemoryRegion *mr); 1652 1653 /** 1654 * memory_region_find: translate an address/size relative to a 1655 * MemoryRegion into a #MemoryRegionSection. 1656 * 1657 * Locates the first #MemoryRegion within @mr that overlaps the range 1658 * given by @addr and @size. 1659 * 1660 * Returns a #MemoryRegionSection that describes a contiguous overlap. 1661 * It will have the following characteristics: 1662 * .@size = 0 iff no overlap was found 1663 * .@mr is non-%NULL iff an overlap was found 1664 * 1665 * Remember that in the return value the @offset_within_region is 1666 * relative to the returned region (in the .@mr field), not to the 1667 * @mr argument. 1668 * 1669 * Similarly, the .@offset_within_address_space is relative to the 1670 * address space that contains both regions, the passed and the 1671 * returned one. However, in the special case where the @mr argument 1672 * has no container (and thus is the root of the address space), the 1673 * following will hold: 1674 * .@offset_within_address_space >= @addr 1675 * .@offset_within_address_space + .@size <= @addr + @size 1676 * 1677 * @mr: a MemoryRegion within which @addr is a relative address 1678 * @addr: start of the area within @as to be searched 1679 * @size: size of the area to be searched 1680 */ 1681 MemoryRegionSection memory_region_find(MemoryRegion *mr, 1682 hwaddr addr, uint64_t size); 1683 1684 /** 1685 * memory_global_dirty_log_sync: synchronize the dirty log for all memory 1686 * 1687 * Synchronizes the dirty page log for all address spaces. 1688 */ 1689 void memory_global_dirty_log_sync(void); 1690 1691 /** 1692 * memory_global_dirty_log_sync: synchronize the dirty log for all memory 1693 * 1694 * Synchronizes the vCPUs with a thread that is reading the dirty bitmap. 1695 * This function must be called after the dirty log bitmap is cleared, and 1696 * before dirty guest memory pages are read. If you are using 1697 * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes 1698 * care of doing this. 1699 */ 1700 void memory_global_after_dirty_log_sync(void); 1701 1702 /** 1703 * memory_region_transaction_begin: Start a transaction. 1704 * 1705 * During a transaction, changes will be accumulated and made visible 1706 * only when the transaction ends (is committed). 1707 */ 1708 void memory_region_transaction_begin(void); 1709 1710 /** 1711 * memory_region_transaction_commit: Commit a transaction and make changes 1712 * visible to the guest. 1713 */ 1714 void memory_region_transaction_commit(void); 1715 1716 /** 1717 * memory_listener_register: register callbacks to be called when memory 1718 * sections are mapped or unmapped into an address 1719 * space 1720 * 1721 * @listener: an object containing the callbacks to be called 1722 * @filter: if non-%NULL, only regions in this address space will be observed 1723 */ 1724 void memory_listener_register(MemoryListener *listener, AddressSpace *filter); 1725 1726 /** 1727 * memory_listener_unregister: undo the effect of memory_listener_register() 1728 * 1729 * @listener: an object containing the callbacks to be removed 1730 */ 1731 void memory_listener_unregister(MemoryListener *listener); 1732 1733 /** 1734 * memory_global_dirty_log_start: begin dirty logging for all regions 1735 */ 1736 void memory_global_dirty_log_start(void); 1737 1738 /** 1739 * memory_global_dirty_log_stop: end dirty logging for all regions 1740 */ 1741 void memory_global_dirty_log_stop(void); 1742 1743 void mtree_info(bool flatview, bool dispatch_tree, bool owner); 1744 1745 /** 1746 * memory_region_dispatch_read: perform a read directly to the specified 1747 * MemoryRegion. 1748 * 1749 * @mr: #MemoryRegion to access 1750 * @addr: address within that region 1751 * @pval: pointer to uint64_t which the data is written to 1752 * @size: size of the access in bytes 1753 * @attrs: memory transaction attributes to use for the access 1754 */ 1755 MemTxResult memory_region_dispatch_read(MemoryRegion *mr, 1756 hwaddr addr, 1757 uint64_t *pval, 1758 unsigned size, 1759 MemTxAttrs attrs); 1760 /** 1761 * memory_region_dispatch_write: perform a write directly to the specified 1762 * MemoryRegion. 1763 * 1764 * @mr: #MemoryRegion to access 1765 * @addr: address within that region 1766 * @data: data to write 1767 * @size: size of the access in bytes 1768 * @attrs: memory transaction attributes to use for the access 1769 */ 1770 MemTxResult memory_region_dispatch_write(MemoryRegion *mr, 1771 hwaddr addr, 1772 uint64_t data, 1773 unsigned size, 1774 MemTxAttrs attrs); 1775 1776 /** 1777 * address_space_init: initializes an address space 1778 * 1779 * @as: an uninitialized #AddressSpace 1780 * @root: a #MemoryRegion that routes addresses for the address space 1781 * @name: an address space name. The name is only used for debugging 1782 * output. 1783 */ 1784 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name); 1785 1786 /** 1787 * address_space_destroy: destroy an address space 1788 * 1789 * Releases all resources associated with an address space. After an address space 1790 * is destroyed, its root memory region (given by address_space_init()) may be destroyed 1791 * as well. 1792 * 1793 * @as: address space to be destroyed 1794 */ 1795 void address_space_destroy(AddressSpace *as); 1796 1797 /** 1798 * address_space_remove_listeners: unregister all listeners of an address space 1799 * 1800 * Removes all callbacks previously registered with memory_listener_register() 1801 * for @as. 1802 * 1803 * @as: an initialized #AddressSpace 1804 */ 1805 void address_space_remove_listeners(AddressSpace *as); 1806 1807 /** 1808 * address_space_rw: read from or write to an address space. 1809 * 1810 * Return a MemTxResult indicating whether the operation succeeded 1811 * or failed (eg unassigned memory, device rejected the transaction, 1812 * IOMMU fault). 1813 * 1814 * @as: #AddressSpace to be accessed 1815 * @addr: address within that address space 1816 * @attrs: memory transaction attributes 1817 * @buf: buffer with the data transferred 1818 * @len: the number of bytes to read or write 1819 * @is_write: indicates the transfer direction 1820 */ 1821 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, 1822 MemTxAttrs attrs, uint8_t *buf, 1823 hwaddr len, bool is_write); 1824 1825 /** 1826 * address_space_write: write to address space. 1827 * 1828 * Return a MemTxResult indicating whether the operation succeeded 1829 * or failed (eg unassigned memory, device rejected the transaction, 1830 * IOMMU fault). 1831 * 1832 * @as: #AddressSpace to be accessed 1833 * @addr: address within that address space 1834 * @attrs: memory transaction attributes 1835 * @buf: buffer with the data transferred 1836 * @len: the number of bytes to write 1837 */ 1838 MemTxResult address_space_write(AddressSpace *as, hwaddr addr, 1839 MemTxAttrs attrs, 1840 const uint8_t *buf, hwaddr len); 1841 1842 /** 1843 * address_space_write_rom: write to address space, including ROM. 1844 * 1845 * This function writes to the specified address space, but will 1846 * write data to both ROM and RAM. This is used for non-guest 1847 * writes like writes from the gdb debug stub or initial loading 1848 * of ROM contents. 1849 * 1850 * Note that portions of the write which attempt to write data to 1851 * a device will be silently ignored -- only real RAM and ROM will 1852 * be written to. 1853 * 1854 * Return a MemTxResult indicating whether the operation succeeded 1855 * or failed (eg unassigned memory, device rejected the transaction, 1856 * IOMMU fault). 1857 * 1858 * @as: #AddressSpace to be accessed 1859 * @addr: address within that address space 1860 * @attrs: memory transaction attributes 1861 * @buf: buffer with the data transferred 1862 * @len: the number of bytes to write 1863 */ 1864 MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr, 1865 MemTxAttrs attrs, 1866 const uint8_t *buf, hwaddr len); 1867 1868 /* address_space_ld*: load from an address space 1869 * address_space_st*: store to an address space 1870 * 1871 * These functions perform a load or store of the byte, word, 1872 * longword or quad to the specified address within the AddressSpace. 1873 * The _le suffixed functions treat the data as little endian; 1874 * _be indicates big endian; no suffix indicates "same endianness 1875 * as guest CPU". 1876 * 1877 * The "guest CPU endianness" accessors are deprecated for use outside 1878 * target-* code; devices should be CPU-agnostic and use either the LE 1879 * or the BE accessors. 1880 * 1881 * @as #AddressSpace to be accessed 1882 * @addr: address within that address space 1883 * @val: data value, for stores 1884 * @attrs: memory transaction attributes 1885 * @result: location to write the success/failure of the transaction; 1886 * if NULL, this information is discarded 1887 */ 1888 1889 #define SUFFIX 1890 #define ARG1 as 1891 #define ARG1_DECL AddressSpace *as 1892 #include "exec/memory_ldst.inc.h" 1893 1894 #define SUFFIX 1895 #define ARG1 as 1896 #define ARG1_DECL AddressSpace *as 1897 #include "exec/memory_ldst_phys.inc.h" 1898 1899 struct MemoryRegionCache { 1900 void *ptr; 1901 hwaddr xlat; 1902 hwaddr len; 1903 FlatView *fv; 1904 MemoryRegionSection mrs; 1905 bool is_write; 1906 }; 1907 1908 #define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL }) 1909 1910 1911 /* address_space_ld*_cached: load from a cached #MemoryRegion 1912 * address_space_st*_cached: store into a cached #MemoryRegion 1913 * 1914 * These functions perform a load or store of the byte, word, 1915 * longword or quad to the specified address. The address is 1916 * a physical address in the AddressSpace, but it must lie within 1917 * a #MemoryRegion that was mapped with address_space_cache_init. 1918 * 1919 * The _le suffixed functions treat the data as little endian; 1920 * _be indicates big endian; no suffix indicates "same endianness 1921 * as guest CPU". 1922 * 1923 * The "guest CPU endianness" accessors are deprecated for use outside 1924 * target-* code; devices should be CPU-agnostic and use either the LE 1925 * or the BE accessors. 1926 * 1927 * @cache: previously initialized #MemoryRegionCache to be accessed 1928 * @addr: address within the address space 1929 * @val: data value, for stores 1930 * @attrs: memory transaction attributes 1931 * @result: location to write the success/failure of the transaction; 1932 * if NULL, this information is discarded 1933 */ 1934 1935 #define SUFFIX _cached_slow 1936 #define ARG1 cache 1937 #define ARG1_DECL MemoryRegionCache *cache 1938 #include "exec/memory_ldst.inc.h" 1939 1940 /* Inline fast path for direct RAM access. */ 1941 static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache, 1942 hwaddr addr, MemTxAttrs attrs, MemTxResult *result) 1943 { 1944 assert(addr < cache->len); 1945 if (likely(cache->ptr)) { 1946 return ldub_p(cache->ptr + addr); 1947 } else { 1948 return address_space_ldub_cached_slow(cache, addr, attrs, result); 1949 } 1950 } 1951 1952 static inline void address_space_stb_cached(MemoryRegionCache *cache, 1953 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result) 1954 { 1955 assert(addr < cache->len); 1956 if (likely(cache->ptr)) { 1957 stb_p(cache->ptr + addr, val); 1958 } else { 1959 address_space_stb_cached_slow(cache, addr, val, attrs, result); 1960 } 1961 } 1962 1963 #define ENDIANNESS _le 1964 #include "exec/memory_ldst_cached.inc.h" 1965 1966 #define ENDIANNESS _be 1967 #include "exec/memory_ldst_cached.inc.h" 1968 1969 #define SUFFIX _cached 1970 #define ARG1 cache 1971 #define ARG1_DECL MemoryRegionCache *cache 1972 #include "exec/memory_ldst_phys.inc.h" 1973 1974 /* address_space_cache_init: prepare for repeated access to a physical 1975 * memory region 1976 * 1977 * @cache: #MemoryRegionCache to be filled 1978 * @as: #AddressSpace to be accessed 1979 * @addr: address within that address space 1980 * @len: length of buffer 1981 * @is_write: indicates the transfer direction 1982 * 1983 * Will only work with RAM, and may map a subset of the requested range by 1984 * returning a value that is less than @len. On failure, return a negative 1985 * errno value. 1986 * 1987 * Because it only works with RAM, this function can be used for 1988 * read-modify-write operations. In this case, is_write should be %true. 1989 * 1990 * Note that addresses passed to the address_space_*_cached functions 1991 * are relative to @addr. 1992 */ 1993 int64_t address_space_cache_init(MemoryRegionCache *cache, 1994 AddressSpace *as, 1995 hwaddr addr, 1996 hwaddr len, 1997 bool is_write); 1998 1999 /** 2000 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache 2001 * 2002 * @cache: The #MemoryRegionCache to operate on. 2003 * @addr: The first physical address that was written, relative to the 2004 * address that was passed to @address_space_cache_init. 2005 * @access_len: The number of bytes that were written starting at @addr. 2006 */ 2007 void address_space_cache_invalidate(MemoryRegionCache *cache, 2008 hwaddr addr, 2009 hwaddr access_len); 2010 2011 /** 2012 * address_space_cache_destroy: free a #MemoryRegionCache 2013 * 2014 * @cache: The #MemoryRegionCache whose memory should be released. 2015 */ 2016 void address_space_cache_destroy(MemoryRegionCache *cache); 2017 2018 /* address_space_get_iotlb_entry: translate an address into an IOTLB 2019 * entry. Should be called from an RCU critical section. 2020 */ 2021 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr, 2022 bool is_write, MemTxAttrs attrs); 2023 2024 /* address_space_translate: translate an address range into an address space 2025 * into a MemoryRegion and an address range into that section. Should be 2026 * called from an RCU critical section, to avoid that the last reference 2027 * to the returned region disappears after address_space_translate returns. 2028 * 2029 * @fv: #FlatView to be accessed 2030 * @addr: address within that address space 2031 * @xlat: pointer to address within the returned memory region section's 2032 * #MemoryRegion. 2033 * @len: pointer to length 2034 * @is_write: indicates the transfer direction 2035 * @attrs: memory attributes 2036 */ 2037 MemoryRegion *flatview_translate(FlatView *fv, 2038 hwaddr addr, hwaddr *xlat, 2039 hwaddr *len, bool is_write, 2040 MemTxAttrs attrs); 2041 2042 static inline MemoryRegion *address_space_translate(AddressSpace *as, 2043 hwaddr addr, hwaddr *xlat, 2044 hwaddr *len, bool is_write, 2045 MemTxAttrs attrs) 2046 { 2047 return flatview_translate(address_space_to_flatview(as), 2048 addr, xlat, len, is_write, attrs); 2049 } 2050 2051 /* address_space_access_valid: check for validity of accessing an address 2052 * space range 2053 * 2054 * Check whether memory is assigned to the given address space range, and 2055 * access is permitted by any IOMMU regions that are active for the address 2056 * space. 2057 * 2058 * For now, addr and len should be aligned to a page size. This limitation 2059 * will be lifted in the future. 2060 * 2061 * @as: #AddressSpace to be accessed 2062 * @addr: address within that address space 2063 * @len: length of the area to be checked 2064 * @is_write: indicates the transfer direction 2065 * @attrs: memory attributes 2066 */ 2067 bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len, 2068 bool is_write, MemTxAttrs attrs); 2069 2070 /* address_space_map: map a physical memory region into a host virtual address 2071 * 2072 * May map a subset of the requested range, given by and returned in @plen. 2073 * May return %NULL if resources needed to perform the mapping are exhausted. 2074 * Use only for reads OR writes - not for read-modify-write operations. 2075 * Use cpu_register_map_client() to know when retrying the map operation is 2076 * likely to succeed. 2077 * 2078 * @as: #AddressSpace to be accessed 2079 * @addr: address within that address space 2080 * @plen: pointer to length of buffer; updated on return 2081 * @is_write: indicates the transfer direction 2082 * @attrs: memory attributes 2083 */ 2084 void *address_space_map(AddressSpace *as, hwaddr addr, 2085 hwaddr *plen, bool is_write, MemTxAttrs attrs); 2086 2087 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map() 2088 * 2089 * Will also mark the memory as dirty if @is_write == %true. @access_len gives 2090 * the amount of memory that was actually read or written by the caller. 2091 * 2092 * @as: #AddressSpace used 2093 * @buffer: host pointer as returned by address_space_map() 2094 * @len: buffer length as returned by address_space_map() 2095 * @access_len: amount of data actually transferred 2096 * @is_write: indicates the transfer direction 2097 */ 2098 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, 2099 int is_write, hwaddr access_len); 2100 2101 2102 /* Internal functions, part of the implementation of address_space_read. */ 2103 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, 2104 MemTxAttrs attrs, uint8_t *buf, hwaddr len); 2105 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, 2106 MemTxAttrs attrs, uint8_t *buf, 2107 hwaddr len, hwaddr addr1, hwaddr l, 2108 MemoryRegion *mr); 2109 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr); 2110 2111 /* Internal functions, part of the implementation of address_space_read_cached 2112 * and address_space_write_cached. */ 2113 void address_space_read_cached_slow(MemoryRegionCache *cache, 2114 hwaddr addr, void *buf, hwaddr len); 2115 void address_space_write_cached_slow(MemoryRegionCache *cache, 2116 hwaddr addr, const void *buf, hwaddr len); 2117 2118 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write) 2119 { 2120 if (is_write) { 2121 return memory_region_is_ram(mr) && 2122 !mr->readonly && !memory_region_is_ram_device(mr); 2123 } else { 2124 return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) || 2125 memory_region_is_romd(mr); 2126 } 2127 } 2128 2129 /** 2130 * address_space_read: read from an address space. 2131 * 2132 * Return a MemTxResult indicating whether the operation succeeded 2133 * or failed (eg unassigned memory, device rejected the transaction, 2134 * IOMMU fault). Called within RCU critical section. 2135 * 2136 * @as: #AddressSpace to be accessed 2137 * @addr: address within that address space 2138 * @attrs: memory transaction attributes 2139 * @buf: buffer with the data transferred 2140 */ 2141 static inline __attribute__((__always_inline__)) 2142 MemTxResult address_space_read(AddressSpace *as, hwaddr addr, 2143 MemTxAttrs attrs, uint8_t *buf, 2144 hwaddr len) 2145 { 2146 MemTxResult result = MEMTX_OK; 2147 hwaddr l, addr1; 2148 void *ptr; 2149 MemoryRegion *mr; 2150 FlatView *fv; 2151 2152 if (__builtin_constant_p(len)) { 2153 if (len) { 2154 rcu_read_lock(); 2155 fv = address_space_to_flatview(as); 2156 l = len; 2157 mr = flatview_translate(fv, addr, &addr1, &l, false, attrs); 2158 if (len == l && memory_access_is_direct(mr, false)) { 2159 ptr = qemu_map_ram_ptr(mr->ram_block, addr1); 2160 memcpy(buf, ptr, len); 2161 } else { 2162 result = flatview_read_continue(fv, addr, attrs, buf, len, 2163 addr1, l, mr); 2164 } 2165 rcu_read_unlock(); 2166 } 2167 } else { 2168 result = address_space_read_full(as, addr, attrs, buf, len); 2169 } 2170 return result; 2171 } 2172 2173 /** 2174 * address_space_read_cached: read from a cached RAM region 2175 * 2176 * @cache: Cached region to be addressed 2177 * @addr: address relative to the base of the RAM region 2178 * @buf: buffer with the data transferred 2179 * @len: length of the data transferred 2180 */ 2181 static inline void 2182 address_space_read_cached(MemoryRegionCache *cache, hwaddr addr, 2183 void *buf, hwaddr len) 2184 { 2185 assert(addr < cache->len && len <= cache->len - addr); 2186 if (likely(cache->ptr)) { 2187 memcpy(buf, cache->ptr + addr, len); 2188 } else { 2189 address_space_read_cached_slow(cache, addr, buf, len); 2190 } 2191 } 2192 2193 /** 2194 * address_space_write_cached: write to a cached RAM region 2195 * 2196 * @cache: Cached region to be addressed 2197 * @addr: address relative to the base of the RAM region 2198 * @buf: buffer with the data transferred 2199 * @len: length of the data transferred 2200 */ 2201 static inline void 2202 address_space_write_cached(MemoryRegionCache *cache, hwaddr addr, 2203 void *buf, hwaddr len) 2204 { 2205 assert(addr < cache->len && len <= cache->len - addr); 2206 if (likely(cache->ptr)) { 2207 memcpy(cache->ptr + addr, buf, len); 2208 } else { 2209 address_space_write_cached_slow(cache, addr, buf, len); 2210 } 2211 } 2212 2213 #endif 2214 2215 #endif 2216