/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/memop.h"
#include "exec/ramlist.h"
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/range.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "memory-region"
DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
                         TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
                     IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)

#define TYPE_RAM_DISCARD_MANAGER "ram-discard-manager"
typedef struct RamDiscardManagerClass RamDiscardManagerClass;
typedef struct RamDiscardManager RamDiscardManager;
DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
                     RAM_DISCARD_MANAGER, TYPE_RAM_DISCARD_MANAGER);

#ifdef CONFIG_FUZZ
void fuzz_dma_read_cb(size_t addr,
                      size_t len,
                      MemoryRegion *mr);
#else
static inline void fuzz_dma_read_cb(size_t addr,
                                    size_t len,
                                    MemoryRegion *mr)
{
    /* Do Nothing */
}
#endif

/* Possible bits for global_dirty_log_{start|stop} */

/* Dirty tracking enabled because migration is running */
#define GLOBAL_DIRTY_MIGRATION  (1U << 0)

/* Dirty tracking enabled because measuring dirty rate */
#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)

/* Dirty tracking enabled because dirty limit */
#define GLOBAL_DIRTY_LIMIT      (1U << 2)

#define GLOBAL_DIRTY_MASK  (0x7)

extern unsigned int global_dirty_tracking;

typedef struct MemoryRegionOps MemoryRegionOps;

struct ReservedRegion {
    Range range;
    unsigned type;
};

/**
 * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 * @nonvolatile: this section is non-volatile
 * @unmergeable: this section should not get merged with adjacent sections
 */
struct MemoryRegionSection {
    Int128 size;
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    hwaddr offset_within_address_space;
    bool readonly;
    bool nonvolatile;
    bool unmergeable;
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};
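/*
 * Example (illustrative sketch, not part of the API above): building an
 * IOMMUTLBEntry describing a single 4 KiB translation.  The helper name
 * and the #if 0 guard are purely for documentation purposes.
 */
#if 0
static IOMMUTLBEntry example_make_4k_entry(AddressSpace *as, hwaddr iova,
                                           hwaddr pa, bool read, bool write)
{
    IOMMUTLBEntry entry = {
        .target_as = as,
        .iova = iova & ~(hwaddr)0xfff,            /* page-aligned input */
        .translated_addr = pa & ~(hwaddr)0xfff,   /* page-aligned output */
        .addr_mask = 0xfff,                       /* 4k translation */
        .perm = IOMMU_ACCESS_FLAG(read, write),
    };
    return entry;
}
#endif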
/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
 *
 * Normally there are two use cases for the notifiers:
 *
 *   (1) When the device needs accurate synchronization of the vIOMMU page
 *       tables, it needs to register with both MAP|UNMAP notifications
 *       (which is defined as IOMMU_NOTIFIER_IOTLB_EVENTS below).
 *
 *       Accurate synchronization means that the notified device maintains
 *       a shadow page table and must be notified on each guest MAP (page
 *       table entry creation) and UNMAP (invalidation) event (e.g. VFIO).
 *       Both notifications must be accurate so that the shadow page table
 *       is fully in sync with the guest view.
 *
 *   (2) When the device doesn't need accurate synchronization of the
 *       vIOMMU page tables, it needs to register only with UNMAP or
 *       DEVIOTLB_UNMAP notifications.
 *
 *       This is the case when the device maintains a cache of IOMMU
 *       translations (IOTLB) and is able to fill that cache by requesting
 *       translations from the vIOMMU through a protocol similar to ATS
 *       (Address Translation Service).
 *
 *       Note that in this mode the vIOMMU will not maintain a shadow
 *       page table for the address space, and the UNMAP messages can cover
 *       more than the pages that used to get mapped.  The IOMMU notifiee
 *       should be able to take care of over-sized invalidations.
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
    /* Notify changes on device IOTLB entries */
    IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
#define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
                            IOMMU_NOTIFIER_DEVIOTLB_EVENTS)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

typedef struct IOMMUTLBEvent {
    IOMMUNotifierFlag type;
    IOMMUTLBEntry entry;
} IOMMUTLBEvent;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * Resizing RAM while migrating can result in the migration being canceled.
 */
#define RAM_RESIZEABLE (1 << 2)

/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
 * zero the page and wake waiting processes.
 * (Set during postcopy)
 */
#define RAM_UF_ZEROPAGE (1 << 3)

/* RAM can be migrated */
#define RAM_MIGRATABLE (1 << 4)

/* RAM is a persistent kind of memory */
#define RAM_PMEM (1 << 5)


/*
 * UFFDIO_WRITEPROTECT is used on this RAMBlock to
 * support 'write-tracking' migration type.
 * Implies ram_state->ram_wt_enabled.
 */
#define RAM_UF_WRITEPROTECT (1 << 6)

/*
 * RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or huge
 * pages if applicable) is skipped: will bail out if not supported. When not
 * set, the OS will do the reservation, if supported for the memory type.
 */
#define RAM_NORESERVE (1 << 7)

/* RAM that isn't accessible through normal means. */
#define RAM_PROTECTED (1 << 8)

/* RAM is an mmap-ed named file */
#define RAM_NAMED_FILE (1 << 9)

/* RAM is mmap-ed read-only */
#define RAM_READONLY (1 << 10)

/* RAM FD is opened read-only */
#define RAM_READONLY_FD (1 << 11)

static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end,
                                       int iommu_idx)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
    n->iommu_idx = iommu_idx;
}
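/*
 * Example (illustrative sketch): initializing a notifier that wants both
 * MAP and UNMAP events for the whole address range of IOMMU index 0.
 * example_iotlb_notify is a hypothetical callback; a real user would
 * afterwards attach the notifier with
 * memory_region_register_iommu_notifier().
 */
#if 0
static void example_iotlb_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
{
    /* React to the MAP or UNMAP event described by @entry. */
}

static void example_setup(IOMMUNotifier *n)
{
    iommu_notifier_init(n, example_iotlb_notify,
                        IOMMU_NOTIFIER_IOTLB_EVENTS,
                        0, HWADDR_MAX, 0 /* iommu_idx */);
}
#endif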
/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};
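/*
 * Example (illustrative sketch): a minimal ops table for a hypothetical
 * device with a single 32-bit register at offset 0.  ExampleDev and the
 * callbacks are invented for documentation; a real device would pass such
 * a table to memory_region_init_io(), declared further below.
 */
#if 0
typedef struct ExampleDev { uint32_t reg; } ExampleDev;

static uint64_t example_dev_read(void *opaque, hwaddr addr, unsigned size)
{
    ExampleDev *d = opaque;
    return addr == 0 ? d->reg : 0;
}

static void example_dev_write(void *opaque, hwaddr addr,
                              uint64_t data, unsigned size)
{
    ExampleDev *d = opaque;
    if (addr == 0) {
        d->reg = (uint32_t)data;
    }
}

static const MemoryRegionOps example_dev_ops = {
    .read = example_dev_read,
    .write = example_dev_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 4,      /* only 32-bit guest accesses */
    .valid.max_access_size = 4,
};
#endif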
typedef struct MemoryRegionClass {
    /* private */
    ObjectClass parent_class;
} MemoryRegionClass;


enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};

/*
 * IOMMUMemoryRegionClass:
 *
 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 * and provide an implementation of at least the @translate method here
 * to handle requests to the memory region. Other methods are optional.
 *
 * The IOMMU implementation must use the IOMMU notifier infrastructure
 * to report whenever mappings are changed, by calling
 * memory_region_notify_iommu() (or, if necessary, by calling
 * memory_region_notify_iommu_one() for each registered notifier).
 *
 * Conceptually an IOMMU provides a mapping from input address
 * to an output TLB entry. If the IOMMU is aware of memory transaction
 * attributes and the output TLB entry depends on the transaction
 * attributes, we represent this using IOMMU indexes. Each index
 * selects a particular translation table that the IOMMU has:
 *
 *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
 *
 *   @translate takes an input address and an IOMMU index
 *
 * and the mapping returned can only depend on the input address and the
 * IOMMU index.
 *
 * Most IOMMUs don't care about the transaction attributes and support
 * only a single IOMMU index. A more complex IOMMU might have one index
 * for secure transactions and one for non-secure transactions.
 */
struct IOMMUMemoryRegionClass {
    /* private: */
    MemoryRegionClass parent_class;

    /* public: */
    /**
     * @translate:
     *
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @addr: address to be translated within the memory region
     *
     * @flag: requested access permission
     *
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);
    /**
     * @get_min_page_size:
     *
     * Returns minimum supported page size in bytes.
     *
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /**
     * @notify_flag_changed:
     *
     * Called when IOMMU Notifier flag changes (i.e. when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @old_flags: events which previously needed to be notified
     *
     * @new_flags: events which now need to be notified
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL if the new flag bitmap is not supported by the
     * IOMMU memory region. In case of failure, the error object
     * must be created.
     */
    int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                               IOMMUNotifierFlag old_flags,
                               IOMMUNotifierFlag new_flags,
                               Error **errp);
    /**
     * @replay:
     *
     * Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /**
     * @get_attr:
     *
     * Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @attr: attribute being queried
     *
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /**
     * @attrs_to_index:
     *
     * Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /**
     * @num_indexes:
     *
     * Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);

    /**
     * @iommu_set_page_size_mask:
     *
     * Restrict the page size mask that can be supported with a given IOMMU
     * memory region. Used for example to propagate host physical IOMMU page
     * size mask limitations to the virtual IOMMU.
     *
     * Optional method: if this method is not provided, then the default global
     * page mask is used.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @page_size_mask: a bitmask of supported page sizes. At least one bit,
     * representing the smallest page size, must be set. Additional set bits
     * represent supported block sizes. For example a host physical IOMMU that
     * uses page tables with a page size of 4kB, and supports 2MB and 4GB
     * blocks, will set mask 0x40201000. A granule of 4kB with indiscriminate
     * block sizes is specified with mask 0xfffffffffffff000.
     *
     * Returns 0 on success, or a negative error. In case of failure, the error
     * object must be created.
     */
    int (*iommu_set_page_size_mask)(IOMMUMemoryRegion *iommu,
                                    uint64_t page_size_mask,
                                    Error **errp);
    /**
     * @iommu_set_iova_ranges:
     *
     * Propagate information about the usable IOVA ranges for a given IOMMU
     * memory region. Used for example to propagate host physical device
     * reserved memory region constraints to the virtual IOMMU.
     *
     * Optional method: if this method is not provided, then the default IOVA
     * aperture is used.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @iova_ranges: list of ordered IOVA ranges (at least one range)
     *
     * Returns 0 on success, or a negative error. In case of failure, the error
     * object must be created.
     */
    int (*iommu_set_iova_ranges)(IOMMUMemoryRegion *iommu,
                                 GList *iova_ranges,
                                 Error **errp);
};
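/*
 * Example (illustrative sketch): the skeleton of an IOMMU subclass.  The
 * function names, the identity translation, and the use of
 * address_space_memory are invented for documentation; a real
 * implementation would walk its page tables in @translate and call
 * memory_region_notify_iommu() whenever mappings change.
 */
#if 0
static IOMMUTLBEntry example_iommu_translate(IOMMUMemoryRegion *iommu,
                                             hwaddr addr,
                                             IOMMUAccessFlags flag,
                                             int iommu_idx)
{
    /* Identity-map every 4 KiB page, read-write. */
    return (IOMMUTLBEntry) {
        .target_as = &address_space_memory,
        .iova = addr & ~(hwaddr)0xfff,
        .translated_addr = addr & ~(hwaddr)0xfff,
        .addr_mask = 0xfff,
        .perm = IOMMU_RW,
    };
}

static void example_iommu_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = example_iommu_translate;
}
#endif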
typedef struct RamDiscardListener RamDiscardListener;
typedef int (*NotifyRamPopulate)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);
typedef void (*NotifyRamDiscard)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);

struct RamDiscardListener {
    /*
     * @notify_populate:
     *
     * Notification that previously discarded memory is about to get populated.
     * Listeners are able to object. If any listener objects, already
     * successfully notified listeners are notified about a discard again.
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get populated. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     *
     * Returns 0 on success. If the notification is rejected by the listener,
     * an error is returned.
     */
    NotifyRamPopulate notify_populate;

    /*
     * @notify_discard:
     *
     * Notification that previously populated memory was discarded successfully
     * and listeners should drop all references to such memory and prevent
     * new population (e.g., unmap).
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection that was discarded. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     */
    NotifyRamDiscard notify_discard;

    /*
     * @double_discard_supported:
     *
     * The listener supports getting @notify_discard notifications that span
     * already discarded parts.
     */
    bool double_discard_supported;

    MemoryRegionSection *section;
    QLIST_ENTRY(RamDiscardListener) next;
};

static inline void ram_discard_listener_init(RamDiscardListener *rdl,
                                             NotifyRamPopulate populate_fn,
                                             NotifyRamDiscard discard_fn,
                                             bool double_discard_supported)
{
    rdl->notify_populate = populate_fn;
    rdl->notify_discard = discard_fn;
    rdl->double_discard_supported = double_discard_supported;
}
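/*
 * Example (illustrative sketch): wiring up a listener.  The callbacks are
 * hypothetical; after initialization the listener would be attached with
 * ram_discard_manager_register_listener(), declared below.
 */
#if 0
static int example_populate(RamDiscardListener *rdl,
                            MemoryRegionSection *section)
{
    /* E.g. map the section for DMA; return a negative errno to object. */
    return 0;
}

static void example_discard(RamDiscardListener *rdl,
                            MemoryRegionSection *section)
{
    /* E.g. unmap the section and drop references to it. */
}

static void example_init(RamDiscardListener *rdl)
{
    ram_discard_listener_init(rdl, example_populate, example_discard,
                              false /* double_discard_supported */);
}
#endif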
typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque);
typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque);

/*
 * RamDiscardManagerClass:
 *
 * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
 * regions are currently populated to be used/accessed by the VM, notifying
 * after parts were discarded (freeing up memory) and before parts will be
 * populated (consuming memory).
 *
 * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
 * #MemoryRegion isn't mapped into an address space yet (either directly
 * or via an alias); it cannot change while the #MemoryRegion is
 * mapped into an address space.
 *
 * The #RamDiscardManager is intended to be used by technologies that are
 * incompatible with discarding of RAM (e.g., VFIO, which may pin all
 * memory inside a #MemoryRegion), and require proper coordination to only
 * map the currently populated parts, to prevent parts that are expected to
 * remain discarded from silently getting populated and consuming memory.
 * Technologies that support discarding of RAM don't have to bother and can
 * simply map the whole #MemoryRegion.
 *
 * An example #RamDiscardManager is virtio-mem, which logically (un)plugs
 * memory within an assigned RAM #MemoryRegion, coordinated with the VM.
 * Logically unplugging memory consists of discarding RAM. The VM agreed to not
 * access unplugged (discarded) memory - especially via DMA. virtio-mem will
 * properly coordinate with listeners before memory is plugged (populated),
 * and after memory is unplugged (discarded).
 *
 * Listeners are called in multiples of the minimum granularity (unless it
 * would exceed the registered range) and changes are aligned to the minimum
 * granularity within the #MemoryRegion. Listeners have to prepare for memory
 * becoming discarded in a different granularity than it was populated and the
 * other way around.
 */
struct RamDiscardManagerClass {
    /* private */
    InterfaceClass parent_class;

    /* public */

    /**
     * @get_min_granularity:
     *
     * Get the minimum granularity in which listeners will get notified
     * about changes within the #MemoryRegion via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @mr: the #MemoryRegion
     *
     * Returns the minimum granularity.
     */
    uint64_t (*get_min_granularity)(const RamDiscardManager *rdm,
                                    const MemoryRegion *mr);

    /**
     * @is_populated:
     *
     * Check whether the given #MemoryRegionSection is completely populated
     * (i.e., no parts are currently discarded) via the #RamDiscardManager.
     * There are no alignment requirements.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     *
     * Returns whether the given range is completely populated.
     */
    bool (*is_populated)(const RamDiscardManager *rdm,
                         const MemoryRegionSection *section);

    /**
     * @replay_populated:
     *
     * Call the #ReplayRamPopulate callback for all populated parts within the
     * #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any call fails, no further calls are made.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamPopulate callback
     * @opaque: pointer to forward to the callback
     *
     * Returns 0 on success, or a negative error if any notification failed.
     */
    int (*replay_populated)(const RamDiscardManager *rdm,
                            MemoryRegionSection *section,
                            ReplayRamPopulate replay_fn, void *opaque);

    /**
     * @replay_discarded:
     *
     * Call the #ReplayRamDiscard callback for all discarded parts within the
     * #MemoryRegionSection via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamDiscard callback
     * @opaque: pointer to forward to the callback
     */
    void (*replay_discarded)(const RamDiscardManager *rdm,
                             MemoryRegionSection *section,
                             ReplayRamDiscard replay_fn, void *opaque);

    /**
     * @register_listener:
     *
     * Register a #RamDiscardListener for the given #MemoryRegionSection and
     * immediately notify the #RamDiscardListener about all populated parts
     * within the #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any notification fails, no further notifications are triggered
     * and an error is logged.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     * @section: the #MemoryRegionSection
     */
    void (*register_listener)(RamDiscardManager *rdm,
                              RamDiscardListener *rdl,
                              MemoryRegionSection *section);

    /**
     * @unregister_listener:
     *
     * Unregister a previously registered #RamDiscardListener via the
     * #RamDiscardManager after notifying the #RamDiscardListener about all
     * populated parts becoming unpopulated within the registered
     * #MemoryRegionSection.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     */
    void (*unregister_listener)(RamDiscardManager *rdm,
                                RamDiscardListener *rdl);
};

uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
                                                 const MemoryRegion *mr);

bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
                                      const MemoryRegionSection *section);

int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamPopulate replay_fn,
                                         void *opaque);

void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
                                          MemoryRegionSection *section,
                                          ReplayRamDiscard replay_fn,
                                          void *opaque);

void ram_discard_manager_register_listener(RamDiscardManager *rdm,
                                           RamDiscardListener *rdl,
                                           MemoryRegionSection *section);

void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                             RamDiscardListener *rdl);

bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                          ram_addr_t *ram_addr, bool *read_only,
                          bool *mr_has_discard_manager);
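/*
 * Example (illustrative sketch): replaying all currently populated parts
 * of a section through a hypothetical callback, e.g. to (re)build a DMA
 * mapping after registration.
 */
#if 0
static int example_replay_populate(MemoryRegionSection *section,
                                   void *opaque)
{
    /* Handle one populated piece of the section; 0 means success. */
    return 0;
}

static int example_map_populated(RamDiscardManager *rdm,
                                 MemoryRegionSection *section)
{
    return ram_discard_manager_replay_populated(rdm, section,
                                                example_replay_populate,
                                                NULL);
}
#endif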
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

/**
 * struct MemoryRegion:
 *
 * A struct representing a memory region.
 */
struct MemoryRegion {
    Object parent_obj;

    /* private: */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool nonvolatile;
    bool rom_device;
    bool flush_coalesced_mmio;
    bool unmergeable;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;
    /* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */
    DeviceState *dev;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    int mapped_via_alias; /* Mapped via an alias, container might be NULL */
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    RamDiscardManager *rdm; /* Only for RAM */

    /* For devices designed to perform re-entrant IO into their own IO MRs */
    bool disable_reentrancy_guard;
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

#define MEMORY_LISTENER_PRIORITY_MIN            0
#define MEMORY_LISTENER_PRIORITY_ACCEL          10
#define MEMORY_LISTENER_PRIORITY_DEV_BACKEND    10
/**
 * struct MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    /**
     * @begin:
     *
     * Called at the beginning of an address space update transaction.
     * Followed by calls to #MemoryListener.region_add(),
     * #MemoryListener.region_del(), #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop() in
     * increasing address order.
     *
     * @listener: The #MemoryListener.
     */
    void (*begin)(MemoryListener *listener);

    /**
     * @commit:
     *
     * Called at the end of an address space update transaction,
     * after the last call to #MemoryListener.region_add(),
     * #MemoryListener.region_del() or #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop().
     *
     * @listener: The #MemoryListener.
     */
    void (*commit)(MemoryListener *listener);

    /**
     * @region_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is new in this address space
     * since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     */
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has disappeared in the address
     * space since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The old #MemoryRegionSection.
     */
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_nop:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is in the same place in the
     * address space as in the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_start:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop(), if dirty memory logging clients have
     * become active since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old: A bitmap of dirty memory logging clients that were active in
     * the previous transaction.
     * @new: A bitmap of dirty memory logging clients that are active in
     * the current transaction.
     */
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);

    /**
     * @log_stop:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop() and possibly after
     * #MemoryListener.log_start(), if dirty memory logging clients have
     * become inactive since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old: A bitmap of dirty memory logging clients that were active in
     * the previous transaction.
     * @new: A bitmap of dirty memory logging clients that are active in
     * the current transaction.
     */
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);

    /**
     * @log_sync:
     *
     * Called by memory_region_snapshot_and_clear_dirty() and
     * memory_global_dirty_log_sync(), before accessing QEMU's "official"
     * copy of the dirty memory bitmap for a #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_sync_global:
     *
     * This is the global version of @log_sync when the listener does
     * not have a way to synchronize the log with finer granularity.
     * When the listener registers with @log_sync_global defined, then
     * its @log_sync must be NULL.  Vice versa.
     *
     * @listener: The #MemoryListener.
     * @last_stage: The last stage to synchronize the log during migration.
     * The caller should guarantee that the synchronization with @last_stage
     * set to true is triggered only once, after all VCPUs have been stopped.
     */
    void (*log_sync_global)(MemoryListener *listener, bool last_stage);

    /**
     * @log_clear:
     *
     * Called before reading the dirty memory bitmap for a
     * #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_global_start:
     *
     * Called by memory_global_dirty_log_start(), which
     * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space.  #MemoryListener.log_global_start() is also
     * called when a #MemoryListener is added, if global dirty logging is
     * active at that time.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_start)(MemoryListener *listener);

    /**
     * @log_global_stop:
     *
     * Called by memory_global_dirty_log_stop(), which
     * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_stop)(MemoryListener *listener);

    /**
     * @log_global_after_sync:
     *
     * Called after reading the dirty memory bitmap
     * for any #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_after_sync)(MemoryListener *listener);

    /**
     * @eventfd_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @match_data: The @match_data parameter for the new ioeventfd.
     * @data: The @data parameter for the new ioeventfd.
     * @e: The #EventNotifier parameter for the new ioeventfd.
     */
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @eventfd_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped an ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @match_data: The @match_data parameter for the dropped ioeventfd.
     * @data: The @data parameter for the dropped ioeventfd.
     * @e: The #EventNotifier parameter for the dropped ioeventfd.
     */
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @coalesced_io_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new coalesced
     * MMIO range registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);

    /**
     * @coalesced_io_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped a coalesced
     * MMIO range since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);
    /**
     * @priority:
     *
     * Govern the order in which memory listeners are invoked. Lower priorities
     * are invoked earlier for "add" or "start" callbacks, and later for "delete"
     * or "stop" callbacks.
     */
    unsigned priority;

    /**
     * @name:
     *
     * Name of the listener.  It can be used in contexts where we'd like to
     * distinguish one memory listener from the rest.
     */
    const char *name;

    /* private: */
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};
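/*
 * Example (illustrative sketch): a minimal listener that only tracks
 * sections coming and going.  The callbacks are hypothetical; the listener
 * would be attached to an address space with memory_listener_register().
 */
#if 0
static void example_region_add(MemoryListener *listener,
                               MemoryRegionSection *section)
{
    /* A new section appeared in the guest-visible memory map. */
}

static void example_region_del(MemoryListener *listener,
                               MemoryRegionSection *section)
{
    /* A section disappeared from the guest-visible memory map. */
}

static MemoryListener example_listener = {
    .name = "example",
    .region_add = example_region_add,
    .region_del = example_region_del,
    .priority = MEMORY_LISTENER_PRIORITY_MIN,
};
#endif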
/**
 * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* private: */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;

    /* Accessed via RCU.  */
    struct FlatView *current_map;

    int ioeventfd_nb;
    int ioeventfd_notifiers;
    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;
typedef struct FlatRange FlatRange;

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
    return qatomic_rcu_read(&as->current_map);
}

/**
 * typedef flatview_cb: callback for flatview_for_each_range()
 *
 * @start: start address of the range within the FlatView
 * @len: length of the range in bytes
 * @mr: MemoryRegion covering this range
 * @offset_in_region: offset of the first byte of the range within @mr
 * @opaque: data pointer passed to flatview_for_each_range()
 *
 * Returns: true to stop the iteration, false to keep going.
 */
typedef bool (*flatview_cb)(Int128 start,
                            Int128 len,
                            const MemoryRegion *mr,
                            hwaddr offset_in_region,
                            void *opaque);

/**
 * flatview_for_each_range: Iterate through a FlatView
 * @fv: the FlatView to iterate through
 * @cb: function to call for each range
 * @opaque: opaque data pointer to pass to @cb
 *
 * A FlatView is made up of a list of non-overlapping ranges, each of
 * which is a slice of a MemoryRegion. This function iterates through
 * each range in @fv, calling @cb. The callback function can terminate
 * iteration early by returning 'true'.
 */
void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);
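/*
 * Example (illustrative sketch): counting the ranges of an address
 * space's current flat view.  The FlatView is looked up under RCU, per
 * the "Accessed via RCU" note on @current_map above; the callback and
 * helper names are invented for documentation.
 */
#if 0
static bool example_count_cb(Int128 start, Int128 len,
                             const MemoryRegion *mr,
                             hwaddr offset_in_region, void *opaque)
{
    (*(unsigned *)opaque)++;
    return false;             /* keep iterating */
}

static unsigned example_count_ranges(AddressSpace *as)
{
    unsigned count = 0;

    RCU_READ_LOCK_GUARD();
    flatview_for_each_range(address_space_to_flatview(as),
                            example_count_cb, &count);
    return count;
}
#endif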
static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
                                          MemoryRegionSection *b)
{
    return a->mr == b->mr &&
           a->fv == b->fv &&
           a->offset_within_region == b->offset_within_region &&
           a->offset_within_address_space == b->offset_within_address_space &&
           int128_eq(a->size, b->size) &&
           a->readonly == b->readonly &&
           a->nonvolatile == b->nonvolatile;
}

/**
 * memory_region_section_new_copy: Copy a memory region section
 *
 * Allocate memory for a new copy, copy the memory region section, and
 * properly take a reference on all relevant members.
 *
 * @s: the #MemoryRegionSection to copy
 */
MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s);

/**
 * memory_region_section_free_copy: Free a copied memory region section
 *
 * Free a copy of a memory section created via memory_region_section_new_copy(),
 * properly dropping references on all relevant members.
 *
 * @s: the #MemoryRegionSection copy to free
 */
void memory_region_section_free_copy(MemoryRegionSection *s);

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size);
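/*
 * Example (illustrative sketch): a container with one subregion.  The
 * names are invented; memory_region_add_subregion() is the call the
 * documentation above refers to.
 */
#if 0
static void example_build_container(Object *owner, MemoryRegion *container,
                                    MemoryRegion *child)
{
    memory_region_init(container, owner, "example-container", 0x10000);
    /* Map @child (initialized elsewhere) at offset 0x1000. */
    memory_region_add_subregion(container, 0x1000, child);
}
#endif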
/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);
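/*
 * Example (illustrative sketch): creating an MMIO region backed by the
 * hypothetical example_dev_ops table sketched after MemoryRegionOps above.
 */
#if 0
static void example_dev_realize(ExampleDev *d, Object *owner,
                                MemoryRegion *mr)
{
    memory_region_init_io(mr, owner, &example_dev_ops, d,
                          "example-dev-mmio", 0x1000);
}
#endif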
/**
 * memory_region_init_ram_nomigrate:  Initialize RAM memory region.  Accesses
 *                                    into the region will modify memory
 *                                    directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_ram_flags_nomigrate:  Initialize RAM memory region.
 *                                          Accesses into the region will
 *                                          modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
                                            Object *owner,
                                            const char *name,
                                            uint64_t size,
                                            uint32_t ram_flags,
                                            Error **errp);

/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     Changing the size while migrating
 *                                     can result in the migration being
 *                                     canceled.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef CONFIG_POSIX

/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *             RAM_READONLY_FD
 * @path: the path in which to allocate the RAM.
 * @offset: offset within the file referenced by path
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_from_file(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      ram_addr_t offset,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
 *                                  mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *             RAM_READONLY_FD
 * @fd: the fd to mmap.
 * @offset: offset within the file referenced by fd
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    uint32_t ram_flags,
                                    int fd,
                                    ram_addr_t offset,
                                    Error **errp);
#endif
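/*
 * Example (illustrative sketch, Linux-specific): backing a region with an
 * anonymous memfd.  memfd_create()/ftruncate() and error_fatal come from
 * outside this header and are assumed here; a real caller would handle
 * errors properly instead of aborting.
 */
#if 0
static void example_init_memfd_ram(MemoryRegion *mr, Object *owner,
                                   uint64_t size)
{
    int fd = memfd_create("example-ram", 0);

    if (fd < 0 || ftruncate(fd, size) < 0) {
        abort();
    }
    memory_region_init_ram_from_fd(mr, owner, "example-ram", size,
                                   RAM_SHARED, fd, 0, &error_fatal);
}
#endif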
/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
 *                                     a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly.  However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided.  Replaces
 * skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);
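/*
 * Example (illustrative sketch): aliasing the low 1 MiB of a RAM region,
 * e.g. so the same bytes appear at a second guest-physical address.  The
 * names are invented for documentation.
 */
#if 0
static void example_alias_low_ram(MemoryRegion *alias, Object *owner,
                                  MemoryRegion *ram)
{
    memory_region_init_alias(alias, owner, "example-low-ram-alias",
                             ram, 0, 0x100000 /* 1 MiB */);
}
#endif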
/**
 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram_nomigrate()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
 *                                           Writes are handled via callbacks.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp);
/**
 * memory_region_init_iommu: Initialize a memory region of a custom type
 *                           that translates addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
 * @_iommu_mr should be a pointer to enough memory for an instance of
 * that subclass, @instance_size is the size of that subclass, and
 * @mrtypename is its name. This function will initialize @_iommu_mr as an
 * instance of the subclass, and its methods will then be called to handle
 * accesses to the memory region. See the documentation of
 * #IOMMUMemoryRegionClass for further details.
 *
 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
 * @instance_size: the IOMMUMemoryRegion subclass instance size
 * @mrtypename: the type name of the #IOMMUMemoryRegion
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_init_ram: Initialize RAM memory region.  Accesses into the
 *                         region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count (must be
 *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
 * @name: name of the memory region
 * @size: size of the region in bytes
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * This function allocates RAM for a board model or device, and
 * arranges for it to be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);
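/*
 * Example (illustrative sketch): allocating migratable board RAM and
 * mapping it at guest-physical address 0.  get_system_memory() and
 * error_fatal come from other QEMU headers and are assumed here.
 */
#if 0
static void example_board_ram(MemoryRegion *ram, uint64_t size)
{
    memory_region_init_ram(ram, NULL, "example.board.ram", size,
                           &error_fatal);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}
#endif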
/**
 * memory_region_init_rom: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly(). This includes arranging for the
 * contents to be migrated.
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_rom(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.
 *                                 Writes are handled via callbacks.
 *
 * This function initializes a memory region backed by RAM for reads
 * and callbacks for writes, and arranges for the RAM backing to
 * be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);

/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device backed ram region
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}
 */
bool memory_region_init_rom(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_rom_device: Initialize a ROM memory region.
 *                                Writes are handled via callbacks.
 *
 * This function initializes a memory region backed by RAM for reads
 * and callbacks for writes, and arranges for the RAM backing to
 * be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);


/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device-backed RAM region.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_is_protected: check whether a memory region is protected
 *
 * Returns %true if a memory region is protected RAM and cannot be accessed
 * via standard mechanisms, e.g. DMA.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_protected(MemoryRegion *mr);

/**
 * memory_region_get_iommu: check whether a memory region is an iommu
 *
 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
 * otherwise NULL.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_iommu(mr->alias);
    }
    if (mr->is_iommu) {
        return (IOMMUMemoryRegion *) mr;
    }
    return NULL;
}

/**
 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
 *                                        if an iommu or NULL if not
 *
 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
 * otherwise NULL. This is a fast path that avoids QOM checks; use with
 * caution.
 *
 * @iommu_mr: the memory region being queried
 */
static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
    IOMMUMemoryRegion *iommu_mr)
{
    return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
}

#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)

/**
 * memory_region_iommu_get_min_page_size: get minimum supported page size
 *                                        for an iommu
 *
 * Returns minimum supported page size for an iommu.
 *
 * @iommu_mr: the memory region being queried
 */
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * Note: for any IOMMU implementation, an in-place mapping change
 * should be notified with an UNMAP followed by a MAP.
 *
 * @iommu_mr: the memory region that was changed
 * @iommu_idx: the IOMMU index for the translation table which has changed
 * @event: TLB event with the new entry in the IOMMU translation table.
 *         The entry replaces all old entries for the same virtual I/O address
 *         range.
 */
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEvent event);

/**
 * memory_region_notify_iommu_one: notify a change in an IOMMU translation
 *                                 entry to a single notifier
 *
 * This works just like memory_region_notify_iommu(), but it only
 * notifies a specific notifier, not all of them.
 *
 * @notifier: the notifier to be notified
 * @event: TLB event with the new entry in the IOMMU translation table.
 *         The entry replaces all old entries for the same virtual I/O address
 *         range.
 */
void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
                                    IOMMUTLBEvent *event);

/**
 * memory_region_unmap_iommu_notifier_range: notify an unmap for an IOMMU
 *                                           translation that covers the
 *                                           range of a notifier
 *
 * @notifier: the notifier to be notified
 */
void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier);


/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * Returns 0 on success, or a negative errno otherwise. In particular,
 * -EINVAL indicates that at least one of the attributes of the notifier
 * is not supported (flag/range) by the IOMMU memory region. In case of
 * error, @errp is also set.
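 *
 * A registration sketch (illustrative; "vdev", its notifier field and the
 * callback are hypothetical, and iommu_notifier_init() is assumed to be
 * available to fill in the #IOMMUNotifier):
 *
 *     static void vdev_iommu_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 *     {
 *         ... handle the (un)map event described by iotlb ...
 *     }
 *
 *     iommu_notifier_init(&vdev->n, vdev_iommu_notify,
 *                         IOMMU_NOTIFIER_IOTLB_EVENTS, 0, HWADDR_MAX, 0);
 *     if (memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr),
 *                                               &vdev->n, errp)) {
 *         return;
 *     }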
 *
 * @mr: the memory region to observe
 * @n: the IOMMUNotifier to be added; the notify callback receives a
 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
 *     ceases to be valid on exit from the notifier.
 * @errp: pointer to Error*, to store an error if it happens.
 */
int memory_region_register_iommu_notifier(MemoryRegion *mr,
                                          IOMMUNotifier *n, Error **errp);

/**
 * memory_region_iommu_replay: replay existing IOMMU translations to
 * a notifier with the minimum page granularity returned by
 * the IOMMU's get_min_page_size() callback.
 *
 * Note: this is not related to record-and-replay functionality.
 *
 * @iommu_mr: the memory region to observe
 * @n: the notifier to which to replay iommu mappings
 */
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @mr: the memory region which was observed and for which notify_stopped()
 *      needs to be called
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n);

/**
 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
 * defined on the IOMMU.
 *
 * Returns 0 on success, or a negative errno otherwise. In particular,
 * -EINVAL indicates that the IOMMU does not support the requested
 * attribute.
 *
 * @iommu_mr: the memory region
 * @attr: the requested attribute
 * @data: a pointer to the requested attribute data
 */
int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data);

/**
 * memory_region_iommu_attrs_to_index: return the IOMMU index to
 * use for translations with the given memory transaction attributes.
 *
 * @iommu_mr: the memory region
 * @attrs: the memory transaction attributes
 */
int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs);

/**
 * memory_region_iommu_num_indexes: return the total number of IOMMU
 * indexes that this IOMMU supports.
 *
 * @iommu_mr: the memory region
 */
int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_iommu_set_page_size_mask: set the supported page
 * sizes for a given IOMMU memory region
 *
 * @iommu_mr: IOMMU memory region
 * @page_size_mask: supported page size mask
 * @errp: pointer to Error*, to store an error if it happens.
 */
int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
                                           uint64_t page_size_mask,
                                           Error **errp);

/**
 * memory_region_iommu_set_iova_ranges: set the usable IOVA ranges
 * for a given IOMMU memory region
 *
 * @iommu: IOMMU memory region
 * @iova_ranges: list of ordered IOVA ranges (at least one range)
 * @errp: pointer to Error*, to store an error if it happens.
 */
int memory_region_iommu_set_iova_ranges(IOMMUMemoryRegion *iommu,
                                        GList *iova_ranges,
                                        Error **errp);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes for the given client
 *
 * @mr: the memory region being queried
 * @client: the client being queried
 */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);

/**
 * memory_region_get_dirty_log_mask: return the clients for which a
 * memory region is logging writes.
 *
 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 * are the bit indices.
 *
 * @mr: the memory region being queried
 */
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

/**
 * memory_region_is_nonvolatile: check whether a memory region is non-volatile
 *
 * Returns %true if a memory region is non-volatile memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
{
    return mr->nonvolatile;
}

/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_from_host: Convert a pointer into a RAM memory region
 * and an offset within it.
 *
 * Given a host pointer inside a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 * the MemoryRegion and the offset within it.
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore. If the caller is not within an RCU critical
 * section and does not hold the BQL, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @ptr: the host pointer to be converted
 * @offset: the offset within memory region
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore. If the caller is not within an RCU critical
 * section and does not hold the BQL, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/* memory_region_ram_resize: Resize a RAM region.
 *
 * Resizing RAM while migrating can result in the migration being canceled.
 * Care has to be taken if the guest might have already detected the memory.
 *
 * @mr: a memory region created with memory_region_init_resizeable_ram().
 * @newsize: the new size of the region
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
                              Error **errp);

/**
 * memory_region_msync: Synchronize selected address range of
 * a memory mapped region
 *
 * @mr: the memory region to be synced
 * @addr: the initial address of the range to be synced
 * @size: the size of the range to be synced
 */
void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);

/**
 * memory_region_writeback: Trigger cache writeback for
 * selected address range
 *
 * @mr: the memory region to be updated
 * @addr: the initial address of the range to be written back
 * @size: the size of the range to be written back
 */
void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_clear_dirty_bitmap: clear dirty bitmap for memory range
 *
 * This function is called when the caller wants to clear the remote
 * dirty bitmap of a memory range within the memory region. This can
 * be used by e.g. KVM to manually clear the dirty log when
 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is supported by the host
 * kernel.
 *
 * @mr: the memory region to clear the dirty log upon
 * @start: start address offset within the memory region
 * @len: length of the memory region to clear dirty bitmap
 */
void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
                                      hwaddr len);

/**
 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
 *                                         bitmap and clear it.
 *
 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
 * returns the snapshot. The snapshot can then be used to query dirty
 * status, using memory_region_snapshot_get_dirty. Snapshotting allows
 * querying the same page multiple times, which is especially useful for
 * display updates where the scanlines often are not page aligned.
 *
 * The dirty bitmap region which gets copied into the snapshot (and
 * cleared afterwards) can be larger than requested. The boundaries
 * are rounded up/down so complete bitmap longs (covering 64 pages on
 * 64-bit hosts) can be copied over into the bitmap snapshot,
 * which isn't a problem for display updates as the extra pages are outside
 * the visible area, and in case the visible area changes a full
 * display redraw is due anyway. Should other use cases for this
 * function emerge we might have to revisit this implementation
 * detail.
 *
 * Use g_free() to release the DirtyBitmapSnapshot.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client);

/**
 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
 *                                   in the specified dirty bitmap snapshot.
 *
 * @mr: the memory region being queried.
 * @snap: the dirty bitmap snapshot
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 */
bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
                                      DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);

/**
 * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
 *                                 TBs (for self-modifying code).
 *
 * The MemoryRegionOps->write() callback of a ROM device must use this function
 * to mark byte ranges that have been modified internally, such as by directly
 * accessing the memory returned by memory_region_get_ram_ptr().
 *
 * This function marks the range dirty and invalidates TBs so that TCG can
 * detect self-modifying code.
 *
 * @mr: the region being flushed.
 * @addr: the start, relative to the start of the region, of the range being
 *        flushed.
 * @size: the size, in bytes, of the range being flushed.
 */
void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_set_nonvolatile: Turn a memory region non-volatile
 *
 * Allows a memory region to be marked as non-volatile.
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @nonvolatile: whether the region is to be non-volatile.
 */
void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read accesses directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing. MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions. Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
 * in hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed. This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced. Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves. For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event. The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 **/
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset. The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping). A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset. The subregion may overlap with other
 * subregions. Conflicts are resolved by having a higher @priority hide a
 * lower @priority. Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
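 *
 * For example (a sketch; the container and regions are hypothetical), a
 * higher-priority "mmio" region can punch a hole into a full-size "ram"
 * mapping:
 *
 *     memory_region_add_subregion_overlap(container, 0x0, ram, 0);
 *     memory_region_add_subregion_overlap(container, 0x8000, mmio, 1);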
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);

/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 *
 * @mr: the region to be queried
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

uint64_t memory_region_get_alignment(const MemoryRegion *mr);
/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region. A disabled memory region
 * ignores all accesses to itself and its subregions. It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to container region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

/*
 * memory_region_set_size: dynamically update the size of a region.
 *
 * Dynamically updates the size of a region.
 *
 * @mr: the region to be updated
 * @size: the new size of the region.
 */
void memory_region_set_size(MemoryRegion *mr, uint64_t size);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() had changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);

/*
 * memory_region_set_unmergeable: mark a memory region unmergeable
 *
 * Marks a memory region unmergeable, resulting in the memory region (or
 * everything contained in a memory region container) not getting merged when
 * simplifying the address space and notifying memory listeners. Consequently,
 * memory listeners will never get notified about ranges that are larger than
 * the original memory regions.
 *
 * This is primarily useful when multiple aliases to a RAM memory region are
 * mapped into a memory region container, and updates (e.g., enable/disable or
 * map/unmap) of individual memory region aliases are not supposed to affect
 * other memory regions in the same container.
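 *
 * A sketch (all names hypothetical): two aliases of the same RAM block are
 * mapped back-to-back into a container, and the container is marked
 * unmergeable so that listeners keep seeing the two ranges separately:
 *
 *     memory_region_add_subregion(container, 0x0, alias0);
 *     memory_region_add_subregion(container, 0x10000, alias1);
 *     memory_region_set_unmergeable(container, true);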
 *
 * @mr: the #MemoryRegion to be updated
 * @unmergeable: whether to mark the #MemoryRegion unmergeable
 */
void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable);

/**
 * memory_region_present: checks if an address relative to a @container
 * translates into #MemoryRegion within @container
 *
 * Answer whether a #MemoryRegion within @container covers the address
 * @addr.
 *
 * @container: a #MemoryRegion within which @addr is a relative address
 * @addr: the area within @container to be searched
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr);

/**
 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
 * into another memory region, which does not necessarily imply that it is
 * mapped into an address space.
 *
 * @mr: a #MemoryRegion which should be checked if it's mapped
 */
bool memory_region_is_mapped(MemoryRegion *mr);

/**
 * memory_region_get_ram_discard_manager: get the #RamDiscardManager for a
 *                                        #MemoryRegion
 *
 * The #RamDiscardManager cannot change while a memory region is mapped.
 *
 * @mr: the #MemoryRegion
 */
RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr);

/**
 * memory_region_has_ram_discard_manager: check whether a #MemoryRegion has a
 *                                        #RamDiscardManager assigned
 *
 * @mr: the #MemoryRegion
 */
static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr)
{
    return !!memory_region_get_ram_discard_manager(mr);
}

/**
 * memory_region_set_ram_discard_manager: set the #RamDiscardManager for a
 *                                        #MemoryRegion
 *
 * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion
 * that does not cover RAM, or a #MemoryRegion that already has a
 * #RamDiscardManager assigned.
 *
 * @mr: the #MemoryRegion
 * @rdm: #RamDiscardManager to set
 */
void memory_region_set_ram_discard_manager(MemoryRegion *mr,
                                           RamDiscardManager *rdm);

/**
 * memory_region_find: translate an address/size relative to a
 * MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 * - @size = 0 iff no overlap was found
 * - @mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one.
 * However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
 * - @offset_within_address_space >= @addr
 * - @offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @mr to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);

/**
 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for all address spaces.
 *
 * @last_stage: whether this is the last stage of live migration
 */
void memory_global_dirty_log_sync(bool last_stage);

/**
 * memory_global_after_dirty_log_sync: complete a round of dirty log
 * synchronization
 *
 * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
 * This function must be called after the dirty log bitmap is cleared, and
 * before dirty guest memory pages are read. If you are using
 * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
 * care of doing this.
 */
void memory_global_after_dirty_log_sync(void);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
 */
void memory_region_transaction_commit(void);

/**
 * memory_listener_register: register callbacks to be called when memory
 *                           sections are mapped into or unmapped from an
 *                           address space
 *
 * @listener: an object containing the callbacks to be called
 * @filter: if non-%NULL, only regions in this address space will be observed
 */
void memory_listener_register(MemoryListener *listener, AddressSpace *filter);

/**
 * memory_listener_unregister: undo the effect of memory_listener_register()
 *
 * @listener: an object containing the callbacks to be removed
 */
void memory_listener_unregister(MemoryListener *listener);

/**
 * memory_global_dirty_log_start: begin dirty logging for all regions
 *
 * @flags: purpose of starting dirty log, migration or dirty rate
 */
void memory_global_dirty_log_start(unsigned int flags);

/**
 * memory_global_dirty_log_stop: end dirty logging for all regions
 *
 * @flags: purpose of stopping dirty log, migration or dirty rate
 */
void memory_global_dirty_log_stop(unsigned int flags);

void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);

bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
                                unsigned size, bool is_write,
                                MemTxAttrs attrs);

/**
 * memory_region_dispatch_read: perform a read directly to the specified
 *                              MemoryRegion.
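 *
 * A calling sketch (illustrative; "mr" and the offset are hypothetical):
 *
 *     uint64_t val;
 *     MemTxResult r = memory_region_dispatch_read(mr, 0x0, &val,
 *                                                 MO_32 | MO_LE,
 *                                                 MEMTXATTRS_UNSPECIFIED);
 *     if (r != MEMTX_OK) {
 *         ... the access failed ...
 *     }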
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @pval: pointer to uint64_t which the data is written to
 * @op: size, sign, and endianness of the memory operation
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        MemOp op,
                                        MemTxAttrs attrs);
/**
 * memory_region_dispatch_write: perform a write directly to the specified
 *                               MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @data: data to write
 * @op: size, sign, and endianness of the memory operation
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         MemOp op,
                                         MemTxAttrs attrs);

/**
 * address_space_init: initializes an address space
 *
 * @as: an uninitialized #AddressSpace
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name. The name is only used for debugging
 *        output.
 */
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);

/**
 * address_space_destroy: destroy an address space
 *
 * Releases all resources associated with an address space. After an address space
 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
 * as well.
 *
 * @as: address space to be destroyed
 */
void address_space_destroy(AddressSpace *as);

/**
 * address_space_remove_listeners: unregister all listeners of an address space
 *
 * Removes all callbacks previously registered with memory_listener_register()
 * for @as.
 *
 * @as: an initialized #AddressSpace
 */
void address_space_remove_listeners(AddressSpace *as);

/**
 * address_space_rw: read from or write to an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to read or write
 * @is_write: indicates the transfer direction
 */
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
                             MemTxAttrs attrs, void *buf,
                             hwaddr len, bool is_write);

/**
 * address_space_write: write to address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 */
MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs,
                                const void *buf, hwaddr len);

/**
 * address_space_write_rom: write to address space, including ROM.
 *
 * This function writes to the specified address space, but will
 * write data to both ROM and RAM.
 * This is used for non-guest
 * writes like writes from the gdb debug stub or initial loading
 * of ROM contents.
 *
 * Note that portions of the write which attempt to write data to
 * a device will be silently ignored -- only real RAM and ROM will
 * be written to.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 */
MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs,
                                    const void *buf, hwaddr len);

/* address_space_ld*: load from an address space
 * address_space_st*: store to an address space
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address within the AddressSpace.
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *          if NULL, this information is discarded
 */

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#include "exec/memory_ldst.h.inc"

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#include "exec/memory_ldst_phys.h.inc"

struct MemoryRegionCache {
    void *ptr;
    hwaddr xlat;
    hwaddr len;
    FlatView *fv;
    MemoryRegionSection mrs;
    bool is_write;
};

/* address_space_ld*_cached: load from a cached #MemoryRegion
 * address_space_st*_cached: store into a cached #MemoryRegion
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address. The address is
 * a physical address in the AddressSpace, but it must lie within
 * a #MemoryRegion that was mapped with address_space_cache_init().
 *
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @cache: previously initialized #MemoryRegionCache to be accessed
 * @addr: address within the address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *          if NULL, this information is discarded
 */

#define SUFFIX       _cached_slow
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#include "exec/memory_ldst.h.inc"

/* Inline fast path for direct RAM access.
 */
static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len);
    if (likely(cache->ptr)) {
        return ldub_p(cache->ptr + addr);
    } else {
        return address_space_ldub_cached_slow(cache, addr, attrs, result);
    }
}

static inline void address_space_stb_cached(MemoryRegionCache *cache,
    hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len);
    if (likely(cache->ptr)) {
        stb_p(cache->ptr + addr, val);
    } else {
        address_space_stb_cached_slow(cache, addr, val, attrs, result);
    }
}

#define ENDIANNESS   _le
#include "exec/memory_ldst_cached.h.inc"

#define ENDIANNESS   _be
#include "exec/memory_ldst_cached.h.inc"

#define SUFFIX       _cached
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#include "exec/memory_ldst_phys.h.inc"

/* address_space_cache_init: prepare for repeated access to a physical
 * memory region
 *
 * @cache: #MemoryRegionCache to be filled
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of buffer
 * @is_write: indicates the transfer direction
 *
 * Will only work with RAM, and may map a subset of the requested range by
 * returning a value that is less than @len. On failure, return a negative
 * errno value.
 *
 * Because it only works with RAM, this function can be used for
 * read-modify-write operations. In this case, is_write should be %true.
 *
 * Note that addresses passed to the address_space_*_cached functions
 * are relative to @addr.
 */
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write);

/**
 * address_space_cache_init_empty: Initialize empty #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache to operate on.
 *
 * Initializes a #MemoryRegionCache structure without a memory region attached.
 * A cache initialized this way can only be safely destroyed, but not used.
 */
static inline void address_space_cache_init_empty(MemoryRegionCache *cache)
{
    cache->mrs.mr = NULL;
    /* There is no real need to initialize fv, but it makes Coverity happy. */
    cache->fv = NULL;
}

/**
 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache to operate on.
 * @addr: The first physical address that was written, relative to the
 *        address that was passed to address_space_cache_init().
 * @access_len: The number of bytes that were written starting at @addr.
 */
void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len);

/**
 * address_space_cache_destroy: free a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache whose memory should be released.
 */
void address_space_cache_destroy(MemoryRegionCache *cache);

/* address_space_get_iotlb_entry: translate an address into an IOTLB
 * entry. Should be called from an RCU critical section.
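 *
 * A calling sketch (illustrative; "as" and the address are hypothetical):
 *
 *     IOMMUTLBEntry entry;
 *
 *     RCU_READ_LOCK_GUARD();
 *     entry = address_space_get_iotlb_entry(as, 0x10000, false,
 *                                           MEMTXATTRS_UNSPECIFIED);
 *     if (entry.perm == IOMMU_NONE) {
 *         ... no valid translation exists for the address ...
 *     }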
 */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write, MemTxAttrs attrs);

/* address_space_translate: translate an address range within an address space
 * into a MemoryRegion and an address range within that region. Should be
 * called from an RCU critical section, so that the last reference to the
 * returned region does not disappear after address_space_translate returns.
 *
 * @fv: #FlatView to be accessed
 * @addr: address within that address space
 * @xlat: pointer to address within the returned memory region section's
 *        #MemoryRegion.
 * @len: pointer to length
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
MemoryRegion *flatview_translate(FlatView *fv,
                                 hwaddr addr, hwaddr *xlat,
                                 hwaddr *len, bool is_write,
                                 MemTxAttrs attrs);

static inline MemoryRegion *address_space_translate(AddressSpace *as,
                                                    hwaddr addr, hwaddr *xlat,
                                                    hwaddr *len, bool is_write,
                                                    MemTxAttrs attrs)
{
    return flatview_translate(address_space_to_flatview(as),
                              addr, xlat, len, is_write, attrs);
}

/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, addr and len should be aligned to a page size. This limitation
 * will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
                                bool is_write, MemTxAttrs attrs);

/* address_space_map: map a physical memory region into a host virtual address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL and set *@plen to zero if resources needed to perform
 * the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
void *address_space_map(AddressSpace *as, hwaddr addr,
                        hwaddr *plen, bool is_write, MemTxAttrs attrs);

/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
 *
 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
 * the amount of memory that was actually read or written by the caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by address_space_map()
 * @len: buffer length as returned by address_space_map()
 * @access_len: amount of data actually transferred
 * @is_write: indicates the transfer direction
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         bool is_write, hwaddr access_len);


/* Internal functions, part of the implementation of address_space_read.
 */
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, void *buf, hwaddr len);
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
                                   MemTxAttrs attrs, void *buf,
                                   hwaddr len, hwaddr addr1, hwaddr l,
                                   MemoryRegion *mr);
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);

/* Internal functions, part of the implementation of address_space_read_cached
 * and address_space_write_cached. */
MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
                                           hwaddr addr, void *buf, hwaddr len);
MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
                                            hwaddr addr, const void *buf,
                                            hwaddr len);

int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr);
bool prepare_mmio_access(MemoryRegion *mr);

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (is_write) {
        return memory_region_is_ram(mr) && !mr->readonly &&
               !mr->rom_device && !memory_region_is_ram_device(mr);
    } else {
        return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
               memory_region_is_romd(mr);
    }
}

/**
 * address_space_read: read from an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault). Called within RCU critical section.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline __attribute__((__always_inline__))
MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, void *buf,
                               hwaddr len)
{
    MemTxResult result = MEMTX_OK;
    hwaddr l, addr1;
    void *ptr;
    MemoryRegion *mr;
    FlatView *fv;

    if (__builtin_constant_p(len)) {
        if (len) {
            RCU_READ_LOCK_GUARD();
            fv = address_space_to_flatview(as);
            l = len;
            mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
            if (len == l && memory_access_is_direct(mr, false)) {
                ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
                memcpy(buf, ptr, len);
            } else {
                result = flatview_read_continue(fv, addr, attrs, buf, len,
                                                addr1, l, mr);
            }
        }
    } else {
        result = address_space_read_full(as, addr, attrs, buf, len);
    }
    return result;
}

/**
 * address_space_read_cached: read from a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline MemTxResult
address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
                          void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
    if (likely(cache->ptr)) {
        memcpy(buf, cache->ptr + addr, len);
        return MEMTX_OK;
    } else {
        return address_space_read_cached_slow(cache, addr, buf, len);
    }
}

/**
 * address_space_write_cached: write to a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline MemTxResult
address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
                           const void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    if (likely(cache->ptr)) {
        memcpy(cache->ptr + addr, buf, len);
        return MEMTX_OK;
    } else {
        return address_space_write_cached_slow(cache, addr, buf, len);
    }
}

/**
 * address_space_set: Fill address space with a constant byte.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @c: constant byte to fill the memory
 * @len: the number of bytes to fill with the constant byte
 * @attrs: memory transaction attributes
 */
MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
                              uint8_t c, hwaddr len, MemTxAttrs attrs);

#ifdef NEED_CPU_H
/* enum device_endian to MemOp. */
static inline MemOp devend_memop(enum device_endian end)
{
    QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
                      DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);

#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
    /* Swap if non-host endianness or native (target) endianness */
    return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
#else
    const int non_host_endianness =
        DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN;

    /* In this case, native (target) endianness needs no swap. */
    return (end == non_host_endianness) ? MO_BSWAP : 0;
#endif
}
#endif

/*
 * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
 * to manage the actual amount of memory consumed by the VM (then, the memory
 * provided by RAM blocks might be bigger than the desired memory consumption).
 * This *must* be set if:
 * - Discarding parts of a RAM block does not result in the change being
 *   reflected in the VM and the pages getting freed.
 * - All memory in RAM blocks is pinned or duplicated, invalidating any
 *   previous discards.
 * - Discarding parts of a RAM block will result in integrity issues (e.g.,
 *   encrypted VMs).
 * Technologies that only temporarily pin the current working set of a
 * driver are fine, because we don't expect such pages to be discarded
 * (esp. based on guest action like balloon inflation).
 *
 * This is *not* to be used to protect from concurrent discards (esp.,
 * postcopy).
 *
 * Returns 0 if successful. Returns -EBUSY if a technology that relies on
 * discards to work reliably is active.
 */
int ram_block_discard_disable(bool state);

/*
 * See ram_block_discard_disable(): only disable uncoordinated discards,
 * keeping coordinated discards (via the RamDiscardManager) enabled.
 */
int ram_block_uncoordinated_discard_disable(bool state);

/*
 * Inhibit technologies that disable discarding of pages in RAM blocks.
 *
 * Returns 0 if successful. Returns -EBUSY if discarding of pages in RAM
 * blocks has already been disabled.
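 *
 * A typical pairing (sketch; the error message is illustrative): acquire
 * while the feature is in use, release on teardown:
 *
 *     if (ram_block_discard_require(true)) {
 *         error_setg(errp, "discarding of RAM is currently disabled");
 *         return;
 *     }
 *     ...
 *     ram_block_discard_require(false);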
3144 */ 3145 int ram_block_discard_require(bool state); 3146 3147 /* 3148 * See ram_block_discard_require(): only inhibit technologies that disable 3149 * uncoordinated discarding of pages in RAM blocks, allowing co-existance with 3150 * technologies that only inhibit uncoordinated discards (via the 3151 * RamDiscardManager). 3152 */ 3153 int ram_block_coordinated_discard_require(bool state); 3154 3155 /* 3156 * Test if any discarding of memory in ram blocks is disabled. 3157 */ 3158 bool ram_block_discard_is_disabled(void); 3159 3160 /* 3161 * Test if any discarding of memory in ram blocks is required to work reliably. 3162 */ 3163 bool ram_block_discard_is_required(void); 3164 3165 #endif 3166 3167 #endif 3168