/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/memop.h"
#include "exec/ramlist.h"
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/range.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "memory-region"
DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
                         TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
                     IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)

#define TYPE_RAM_DISCARD_MANAGER "ram-discard-manager"
typedef struct RamDiscardManagerClass RamDiscardManagerClass;
typedef struct RamDiscardManager RamDiscardManager;
DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
                     RAM_DISCARD_MANAGER, TYPE_RAM_DISCARD_MANAGER);

#ifdef CONFIG_FUZZ
void fuzz_dma_read_cb(size_t addr,
                      size_t len,
                      MemoryRegion *mr);
#else
static inline void fuzz_dma_read_cb(size_t addr,
                                    size_t len,
                                    MemoryRegion *mr)
{
    /* Do Nothing */
}
#endif

/* Possible bits for global_dirty_log_{start|stop} */

/* Dirty tracking enabled because migration is running */
#define GLOBAL_DIRTY_MIGRATION  (1U << 0)

/* Dirty tracking enabled because measuring dirty rate */
#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)

/* Dirty tracking enabled because dirty limit */
#define GLOBAL_DIRTY_LIMIT      (1U << 2)

#define GLOBAL_DIRTY_MASK  (0x7)

extern unsigned int global_dirty_tracking;

typedef struct MemoryRegionOps MemoryRegionOps;

struct ReservedRegion {
    Range range;
    unsigned type;
};

/**
 * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 * @nonvolatile: this section is non-volatile
 * @unmergeable: this section should not get merged with adjacent sections
 */
struct MemoryRegionSection {
    Int128 size;
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    hwaddr offset_within_address_space;
    bool readonly;
    bool nonvolatile;
    bool unmergeable;
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;
#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};
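/*
 * Illustrative sketch (not part of the API): how the fields of an
 * IOMMUTLBEntry relate for a single 4 KiB read-write mapping.  The
 * addr_mask covers the low bits shared by the input and the output of
 * the translation, so a 4 KiB page uses 0xfff.  The function name is
 * made up for the example.
 */
static inline IOMMUTLBEntry example_make_4k_mapping(AddressSpace *target_as,
                                                    hwaddr iova,
                                                    hwaddr paddr)
{
    IOMMUTLBEntry entry = {
        .target_as = target_as,
        /* Both addresses must be aligned to the translation granule. */
        .iova = iova & ~(hwaddr)0xfff,
        .translated_addr = paddr & ~(hwaddr)0xfff,
        .addr_mask = 0xfff,                    /* 4 KiB page */
        .perm = IOMMU_ACCESS_FLAG(true, true), /* == IOMMU_RW */
    };
    return entry;
}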
/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
 *
 * Normally there are two use cases for the notifiers:
 *
 *   (1) When the device needs accurate synchronizations of the vIOMMU page
 *       tables, it needs to register with both MAP|UNMAP notifies (which
 *       is defined as IOMMU_NOTIFIER_IOTLB_EVENTS below).
 *
 *       Regarding accurate synchronization, it's when the notified
 *       device maintains a shadow page table and must be notified on each
 *       guest MAP (page table entry creation) and UNMAP (invalidation)
 *       events (e.g. VFIO). Both notifications must be accurate so that
 *       the shadow page table is fully in sync with the guest view.
 *
 *   (2) When the device doesn't need accurate synchronizations of the
 *       vIOMMU page tables, it needs to register only with UNMAP or
 *       DEVIOTLB_UNMAP notifies.
 *
 *       It's when the device maintains a cache of IOMMU translations
 *       (IOTLB) and is able to fill that cache by requesting translations
 *       from the vIOMMU through a protocol similar to ATS (Address
 *       Translation Service).
 *
 *       Note that in this mode the vIOMMU will not maintain a shadowed
 *       page table for the address space, and the UNMAP messages can cover
 *       more than the pages that used to get mapped. The IOMMU notifiee
 *       should be able to take care of over-sized invalidations.
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
    /* Notify changes on device IOTLB entries */
    IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
#define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
                            IOMMU_NOTIFIER_DEVIOTLB_EVENTS)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

typedef struct IOMMUTLBEvent {
    IOMMUNotifierFlag type;
    IOMMUTLBEntry entry;
} IOMMUTLBEvent;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * Resizing RAM while migrating can result in the migration being canceled.
 */
#define RAM_RESIZEABLE (1 << 2)

/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
 * zero the page and wake waiting processes.
 * (Set during postcopy)
 */
#define RAM_UF_ZEROPAGE (1 << 3)

/* RAM can be migrated */
#define RAM_MIGRATABLE (1 << 4)

/* RAM is a persistent kind of memory */
#define RAM_PMEM (1 << 5)


/*
 * UFFDIO_WRITEPROTECT is used on this RAMBlock to
 * support 'write-tracking' migration type.
 * Implies ram_state->ram_wt_enabled.
 */
#define RAM_UF_WRITEPROTECT (1 << 6)

/*
 * RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or huge
 * pages if applicable) is skipped: will bail out if not supported. When not
 * set, the OS will do the reservation, if supported for the memory type.
 */
#define RAM_NORESERVE (1 << 7)

/* RAM that isn't accessible through normal means. */
#define RAM_PROTECTED (1 << 8)

/* RAM is an mmap-ed named file */
#define RAM_NAMED_FILE (1 << 9)

/* RAM is mmap-ed read-only */
#define RAM_READONLY (1 << 10)

/* RAM FD is opened read-only */
#define RAM_READONLY_FD (1 << 11)

/* RAM can be private, with a KVM guest_memfd backend */
#define RAM_GUEST_MEMFD (1 << 12)

static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end,
                                       int iommu_idx)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
    n->iommu_idx = iommu_idx;
}
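/*
 * Illustrative sketch (not part of the API): initializing an UNMAP-only
 * notifier for use case (2) above.  The function and callback names are
 * made up for the example.
 */
static inline void example_watch_unmaps(IOMMUNotifier *n, IOMMUNotify unmap_cb)
{
    /* Watch the whole address range, for IOMMU index 0. */
    iommu_notifier_init(n, unmap_cb, IOMMU_NOTIFIER_UNMAP,
                        0, HWADDR_MAX, 0);
    /*
     * The notifier would then be attached to an IOMMU memory region with
     * memory_region_register_iommu_notifier(), declared later in this
     * header.
     */
}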
/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    /* Same as @read and @write above, but taking memory transaction
     * attributes and returning a transaction result. */
    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specifies bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};
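/*
 * Illustrative sketch (not part of the API): a minimal MMIO device built
 * on MemoryRegionOps.  A single 32-bit register is exposed at offset 0;
 * the device struct, callbacks and names are made up for the example.
 */
typedef struct ExampleDevState {
    uint32_t reg0;
} ExampleDevState;

static inline uint64_t example_dev_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    ExampleDevState *s = opaque;

    /* @addr is relative to the start of the region. */
    return addr == 0 ? s->reg0 : 0;
}

static inline void example_dev_write(void *opaque, hwaddr addr,
                                     uint64_t data, unsigned size)
{
    ExampleDevState *s = opaque;

    if (addr == 0) {
        s->reg0 = data;
    }
}

/*
 * With these constraints, only aligned 4-byte accesses reach the
 * callbacks: other guest accesses are rejected (valid) and the core
 * splits or widens whatever remains (impl).
 */
static const MemoryRegionOps example_dev_ops = {
    .read = example_dev_read,
    .write = example_dev_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .impl.min_access_size = 4,
    .impl.max_access_size = 4,
};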
typedef struct MemoryRegionClass {
    /* private */
    ObjectClass parent_class;
} MemoryRegionClass;


enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};

/*
 * IOMMUMemoryRegionClass:
 *
 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 * and provide an implementation of at least the @translate method here
 * to handle requests to the memory region. Other methods are optional.
 *
 * The IOMMU implementation must use the IOMMU notifier infrastructure
 * to report whenever mappings are changed, by calling
 * memory_region_notify_iommu() (or, if necessary, by calling
 * memory_region_notify_iommu_one() for each registered notifier).
 *
 * Conceptually an IOMMU provides a mapping from input address
 * to an output TLB entry. If the IOMMU is aware of memory transaction
 * attributes and the output TLB entry depends on the transaction
 * attributes, we represent this using IOMMU indexes. Each index
 * selects a particular translation table that the IOMMU has:
 *
 *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
 *
 *   @translate takes an input address and an IOMMU index
 *
 * and the mapping returned can only depend on the input address and the
 * IOMMU index.
 *
 * Most IOMMUs don't care about the transaction attributes and support
 * only a single IOMMU index. A more complex IOMMU might have one index
 * for secure transactions and one for non-secure transactions.
 */
struct IOMMUMemoryRegionClass {
    /* private: */
    MemoryRegionClass parent_class;

    /* public: */
    /**
     * @translate:
     *
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @addr: address to be translated within the memory region
     *
     * @flag: requested access permission
     *
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);
    /**
     * @get_min_page_size:
     *
     * Returns minimum supported page size in bytes.
     *
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /**
     * @notify_flag_changed:
     *
     * Called when IOMMU Notifier flag changes (ie when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @old_flags: events which previously needed to be notified
     *
     * @new_flags: events which now need to be notified
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL if the new flag bitmap is not supported by the
     * IOMMU memory region. In case of failure, the error object
     * must be created and stored in @errp.
     */
    int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                               IOMMUNotifierFlag old_flags,
                               IOMMUNotifierFlag new_flags,
                               Error **errp);
    /**
     * @replay:
     *
     * Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /**
     * @get_attr:
     *
     * Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @attr: attribute being queried
     *
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /**
     * @attrs_to_index:
     *
     * Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /**
     * @num_indexes:
     *
     * Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);
};
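/*
 * Illustrative sketch (not part of the API): the shape of a trivial
 * @translate implementation that maps every input address to itself,
 * read-write, with 4 KiB granularity.  A real IOMMU would walk its page
 * tables here and honour @flag and @iommu_idx; the function name is made
 * up for the example.
 */
static inline IOMMUTLBEntry example_identity_translate(IOMMUMemoryRegion *iommu,
                                                       hwaddr addr,
                                                       IOMMUAccessFlags flag,
                                                       int iommu_idx)
{
    IOMMUTLBEntry entry = {
        .target_as = NULL,  /* a real implementation supplies one */
        .iova = addr & ~(hwaddr)0xfff,
        .translated_addr = addr & ~(hwaddr)0xfff,
        .addr_mask = 0xfff,
        .perm = IOMMU_RW,   /* IOMMU_NONE here would mean "no mapping" */
    };
    return entry;
}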
typedef struct RamDiscardListener RamDiscardListener;
typedef int (*NotifyRamPopulate)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);
typedef void (*NotifyRamDiscard)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);

struct RamDiscardListener {
    /*
     * @notify_populate:
     *
     * Notification that previously discarded memory is about to get populated.
     * Listeners are able to object. If any listener objects, already
     * successfully notified listeners are notified about a discard again.
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get populated. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     *
     * Returns 0 on success. If the notification is rejected by the listener,
     * an error is returned.
     */
    NotifyRamPopulate notify_populate;

    /*
     * @notify_discard:
     *
     * Notification that previously populated memory was discarded successfully
     * and listeners should drop all references to such memory and prevent
     * new population (e.g., unmap).
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection that was discarded. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     */
    NotifyRamDiscard notify_discard;

    /*
     * @double_discard_supported:
     *
     * The listener supports getting @notify_discard notifications that span
     * already discarded parts.
     */
    bool double_discard_supported;

    MemoryRegionSection *section;
    QLIST_ENTRY(RamDiscardListener) next;
};

static inline void ram_discard_listener_init(RamDiscardListener *rdl,
                                             NotifyRamPopulate populate_fn,
                                             NotifyRamDiscard discard_fn,
                                             bool double_discard_supported)
{
    rdl->notify_populate = populate_fn;
    rdl->notify_discard = discard_fn;
    rdl->double_discard_supported = double_discard_supported;
}

typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque);
typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque);
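/*
 * Illustrative sketch (not part of the API): wiring up a listener that
 * maps populated parts and unmaps discarded parts, the way a VFIO-like
 * consumer would.  The callbacks are assumed to exist elsewhere; the
 * names are made up for the example.
 */
static inline void example_init_discard_listener(RamDiscardListener *rdl,
                                                 NotifyRamPopulate map_cb,
                                                 NotifyRamDiscard unmap_cb)
{
    /*
     * false: this listener does not want discard notifications that cover
     * ranges it already knows to be discarded.
     */
    ram_discard_listener_init(rdl, map_cb, unmap_cb, false);
    /*
     * The listener would then be attached to a manager with
     * ram_discard_manager_register_listener(), declared below, which
     * immediately replays all currently populated parts.
     */
}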
/*
 * RamDiscardManagerClass:
 *
 * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
 * regions are currently populated to be used/accessed by the VM, notifying
 * after parts were discarded (freeing up memory) and before parts will be
 * populated (consuming memory).
 *
 * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
 * #MemoryRegion isn't mapped into an address space yet (either directly
 * or via an alias); it cannot change while the #MemoryRegion is
 * mapped into an address space.
 *
 * The #RamDiscardManager is intended to be used by technologies that are
 * incompatible with discarding of RAM (e.g., VFIO, which may pin all
 * memory inside a #MemoryRegion), and require proper coordination to only
 * map the currently populated parts, to hinder parts that are expected to
 * remain discarded from silently getting populated and consuming memory.
 * Technologies that support discarding of RAM don't have to bother and can
 * simply map the whole #MemoryRegion.
 *
 * An example #RamDiscardManager is virtio-mem, which logically (un)plugs
 * memory within an assigned RAM #MemoryRegion, coordinated with the VM.
 * Logically unplugging memory consists of discarding RAM. The VM agreed to not
 * access unplugged (discarded) memory - especially via DMA. virtio-mem will
 * properly coordinate with listeners before memory is plugged (populated),
 * and after memory is unplugged (discarded).
 *
 * Listeners are called in multiples of the minimum granularity (unless it
 * would exceed the registered range) and changes are aligned to the minimum
 * granularity within the #MemoryRegion. Listeners have to prepare for memory
 * becoming discarded in a different granularity than it was populated and the
 * other way around.
 */
struct RamDiscardManagerClass {
    /* private */
    InterfaceClass parent_class;

    /* public */

    /**
     * @get_min_granularity:
     *
     * Get the minimum granularity in which listeners will get notified
     * about changes within the #MemoryRegion via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @mr: the #MemoryRegion
     *
     * Returns the minimum granularity.
     */
    uint64_t (*get_min_granularity)(const RamDiscardManager *rdm,
                                    const MemoryRegion *mr);

    /**
     * @is_populated:
     *
     * Check whether the given #MemoryRegionSection is completely populated
     * (i.e., no parts are currently discarded) via the #RamDiscardManager.
     * There are no alignment requirements.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     *
     * Returns whether the given range is completely populated.
     */
    bool (*is_populated)(const RamDiscardManager *rdm,
                         const MemoryRegionSection *section);

    /**
     * @replay_populated:
     *
     * Call the #ReplayRamPopulate callback for all populated parts within the
     * #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any call fails, no further calls are made.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamPopulate callback
     * @opaque: pointer to forward to the callback
     *
     * Returns 0 on success, or a negative error if any notification failed.
     */
    int (*replay_populated)(const RamDiscardManager *rdm,
                            MemoryRegionSection *section,
                            ReplayRamPopulate replay_fn, void *opaque);

    /**
     * @replay_discarded:
     *
     * Call the #ReplayRamDiscard callback for all discarded parts within the
     * #MemoryRegionSection via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamDiscard callback
     * @opaque: pointer to forward to the callback
     */
    void (*replay_discarded)(const RamDiscardManager *rdm,
                             MemoryRegionSection *section,
                             ReplayRamDiscard replay_fn, void *opaque);

    /**
     * @register_listener:
     *
     * Register a #RamDiscardListener for the given #MemoryRegionSection and
     * immediately notify the #RamDiscardListener about all populated parts
     * within the #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any notification fails, no further notifications are triggered
     * and an error is logged.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     * @section: the #MemoryRegionSection
     */
    void (*register_listener)(RamDiscardManager *rdm,
                              RamDiscardListener *rdl,
                              MemoryRegionSection *section);

    /**
     * @unregister_listener:
     *
     * Unregister a previously registered #RamDiscardListener via the
     * #RamDiscardManager after notifying the #RamDiscardListener about all
     * populated parts becoming unpopulated within the registered
     * #MemoryRegionSection.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     */
    void (*unregister_listener)(RamDiscardManager *rdm,
                                RamDiscardListener *rdl);
};

uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
                                                 const MemoryRegion *mr);

bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
                                      const MemoryRegionSection *section);

int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamPopulate replay_fn,
                                         void *opaque);

void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
                                          MemoryRegionSection *section,
                                          ReplayRamDiscard replay_fn,
                                          void *opaque);

void ram_discard_manager_register_listener(RamDiscardManager *rdm,
                                           RamDiscardListener *rdl,
                                           MemoryRegionSection *section);

void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                             RamDiscardListener *rdl);
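/*
 * Illustrative sketch (not part of the API): a #ReplayRamPopulate callback
 * that counts populated bytes, driven through
 * ram_discard_manager_replay_populated().  The callback and variable
 * names are made up for the example.
 */
static inline int example_count_populated_cb(MemoryRegionSection *section,
                                             void *opaque)
{
    uint64_t *total = opaque;

    /* @size is an Int128; RAM sections fit comfortably in 64 bits. */
    *total += int128_get64(section->size);
    return 0;   /* a nonzero return would abort the replay */
}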
/**
 * memory_get_xlat_addr: Extract addresses from a TLB entry
 *
 * @iotlb: pointer to an #IOMMUTLBEntry
 * @vaddr: virtual address
 * @ram_addr: RAM address
 * @read_only: whether the mapping is read-only (writes are not allowed)
 * @mr_has_discard_manager: indicates memory is controlled by a
 *                          RamDiscardManager
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                          ram_addr_t *ram_addr, bool *read_only,
                          bool *mr_has_discard_manager, Error **errp);

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

/** MemoryRegion:
 *
 * A struct representing a memory region.
 */
struct MemoryRegion {
    Object parent_obj;

    /* private: */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool nonvolatile;
    bool rom_device;
    bool flush_coalesced_mmio;
    bool unmergeable;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;
    /* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */
    DeviceState *dev;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    int mapped_via_alias; /* Mapped via an alias, container might be NULL */
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    RamDiscardManager *rdm; /* Only for RAM */

    /* For devices designed to perform re-entrant IO into their own IO MRs */
    bool disable_reentrancy_guard;
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

#define MEMORY_LISTENER_PRIORITY_MIN            0
#define MEMORY_LISTENER_PRIORITY_ACCEL          10
#define MEMORY_LISTENER_PRIORITY_DEV_BACKEND    10
/**
 * struct MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    /**
     * @begin:
     *
     * Called at the beginning of an address space update transaction.
     * Followed by calls to #MemoryListener.region_add(),
     * #MemoryListener.region_del(), #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop() in
     * increasing address order.
     *
     * @listener: The #MemoryListener.
     */
    void (*begin)(MemoryListener *listener);

    /**
     * @commit:
     *
     * Called at the end of an address space update transaction,
     * after the last call to #MemoryListener.region_add(),
     * #MemoryListener.region_del() or #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop().
     *
     * @listener: The #MemoryListener.
     */
    void (*commit)(MemoryListener *listener);

    /**
     * @region_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is new in this address space
     * since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     */
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has disappeared in the address
     * space since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The old #MemoryRegionSection.
     */
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_nop:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is in the same place in the
     * address space as in the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_start:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop(), if dirty memory logging clients have
     * become active since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old_val: A bitmap of dirty memory logging clients that were active in
     *           the previous transaction.
     * @new_val: A bitmap of dirty memory logging clients that are active in
     *           the current transaction.
     */
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old_val, int new_val);

    /**
     * @log_stop:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop() and possibly after
     * #MemoryListener.log_start(), if dirty memory logging clients have
     * become inactive since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old_val: A bitmap of dirty memory logging clients that were active in
     *           the previous transaction.
     * @new_val: A bitmap of dirty memory logging clients that are active in
     *           the current transaction.
     */
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old_val, int new_val);

    /**
     * @log_sync:
     *
     * Called by memory_region_snapshot_and_clear_dirty() and
     * memory_global_dirty_log_sync(), before accessing QEMU's "official"
     * copy of the dirty memory bitmap for a #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_sync_global:
     *
     * This is the global version of @log_sync when the listener does
     * not have a way to synchronize the log with finer granularity.
     * When the listener registers with @log_sync_global defined, then
     * its @log_sync must be NULL.  Vice versa.
     *
     * @listener: The #MemoryListener.
     * @last_stage: The last stage to synchronize the log during migration.
     *     The caller should guarantee that the synchronization with true for
     *     @last_stage is triggered only once after all VCPUs have been stopped.
     */
    void (*log_sync_global)(MemoryListener *listener, bool last_stage);
    /**
     * @log_clear:
     *
     * Called before reading the dirty memory bitmap for a
     * #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_global_start:
     *
     * Called by memory_global_dirty_log_start(), which
     * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space.  #MemoryListener.log_global_start() is also
     * called when a #MemoryListener is added, if global dirty logging is
     * active at that time.
     *
     * @listener: The #MemoryListener.
     * @errp: pointer to Error*, to store an error if it happens.
     *
     * Return: true on success, else false setting @errp with error.
     */
    bool (*log_global_start)(MemoryListener *listener, Error **errp);

    /**
     * @log_global_stop:
     *
     * Called by memory_global_dirty_log_stop(), which
     * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_stop)(MemoryListener *listener);

    /**
     * @log_global_after_sync:
     *
     * Called after reading the dirty memory bitmap
     * for any #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_after_sync)(MemoryListener *listener);

    /**
     * @eventfd_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @match_data: The @match_data parameter for the new ioeventfd.
     * @data: The @data parameter for the new ioeventfd.
     * @e: The #EventNotifier parameter for the new ioeventfd.
     */
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @eventfd_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped an ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @match_data: The @match_data parameter for the dropped ioeventfd.
     * @data: The @data parameter for the dropped ioeventfd.
     * @e: The #EventNotifier parameter for the dropped ioeventfd.
     */
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @coalesced_io_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new coalesced
     * MMIO range registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);

    /**
     * @coalesced_io_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped a coalesced
     * MMIO range since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);
    /**
     * @priority:
     *
     * Governs the order in which memory listeners are invoked. Lower priorities
     * are invoked earlier for "add" or "start" callbacks, and later for "delete"
     * or "stop" callbacks.
     */
    unsigned priority;

    /**
     * @name:
     *
     * Name of the listener.  It can be used in contexts where we'd like to
     * identify one memory listener among the rest.
     */
    const char *name;

    /* private: */
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};
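/*
 * Illustrative sketch (not part of the API): a listener that reacts to
 * sections appearing in and disappearing from an address space.  The
 * callbacks and names are made up for the example;
 * memory_listener_register() is declared later in this header, so it is
 * redeclared here for the example.
 */
void memory_listener_register(MemoryListener *listener, AddressSpace *as);

static inline void example_trace_region_add(MemoryListener *listener,
                                            MemoryRegionSection *section)
{
    /* React to a newly mapped section, e.g. set up an accelerator slot. */
}

static inline void example_trace_region_del(MemoryListener *listener,
                                            MemoryRegionSection *section)
{
    /* Undo whatever @region_add did for this section. */
}

static inline void example_register_trace_listener(MemoryListener *ml,
                                                   AddressSpace *as)
{
    ml->name = "example-trace";
    ml->region_add = example_trace_region_add;
    ml->region_del = example_trace_region_del;
    ml->priority = MEMORY_LISTENER_PRIORITY_MIN;
    /* Replays region_add for all existing sections, then tracks updates. */
    memory_listener_register(ml, as);
}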
typedef struct AddressSpaceMapClient {
    QEMUBH *bh;
    QLIST_ENTRY(AddressSpaceMapClient) link;
} AddressSpaceMapClient;

#define DEFAULT_MAX_BOUNCE_BUFFER_SIZE (4096)

/**
 * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* private: */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;

    /* Accessed via RCU.  */
    struct FlatView *current_map;

    int ioeventfd_nb;
    int ioeventfd_notifiers;
    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;

    /*
     * Maximum DMA bounce buffer size used for indirect memory map requests.
     * This limits the total size of bounce buffer allocations made for
     * DMA requests to indirect memory regions within this AddressSpace. DMA
     * requests that exceed the limit (e.g. due to overly large requested size
     * or concurrent DMA requests having claimed too much buffer space) will be
     * rejected and left to the caller to handle.
     */
    size_t max_bounce_buffer_size;
    /* Total size of bounce buffers currently allocated, atomically accessed */
    size_t bounce_buffer_size;
    /* List of callbacks to invoke when buffers free up */
    QemuMutex map_client_list_lock;
    QLIST_HEAD(, AddressSpaceMapClient) map_client_list;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;
typedef struct FlatRange FlatRange;

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
    return qatomic_rcu_read(&as->current_map);
}

/**
 * typedef flatview_cb: callback for flatview_for_each_range()
 *
 * @start: start address of the range within the FlatView
 * @len: length of the range in bytes
 * @mr: MemoryRegion covering this range
 * @offset_in_region: offset of the first byte of the range within @mr
 * @opaque: data pointer passed to flatview_for_each_range()
 *
 * Returns: true to stop the iteration, false to keep going.
 */
typedef bool (*flatview_cb)(Int128 start,
                            Int128 len,
                            const MemoryRegion *mr,
                            hwaddr offset_in_region,
                            void *opaque);

/**
 * flatview_for_each_range: Iterate through a FlatView
 * @fv: the FlatView to iterate through
 * @cb: function to call for each range
 * @opaque: opaque data pointer to pass to @cb
 *
 * A FlatView is made up of a list of non-overlapping ranges, each of
 * which is a slice of a MemoryRegion. This function iterates through
 * each range in @fv, calling @cb. The callback function can terminate
 * iteration early by returning 'true'.
 */
void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);
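/*
 * Illustrative sketch (not part of the API): counting the ranges of the
 * current flat view of an address space.  The FlatView must only be used
 * inside an RCU critical section, since the map may be replaced at any
 * time; the names below are made up for the example.
 */
static inline bool example_count_range_cb(Int128 start, Int128 len,
                                          const MemoryRegion *mr,
                                          hwaddr offset_in_region,
                                          void *opaque)
{
    unsigned *count = opaque;

    (*count)++;
    return false;   /* keep iterating */
}

static inline unsigned example_count_ranges(AddressSpace *as)
{
    unsigned count = 0;

    RCU_READ_LOCK_GUARD();
    flatview_for_each_range(address_space_to_flatview(as),
                            example_count_range_cb, &count);
    return count;
}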
static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
                                          MemoryRegionSection *b)
{
    return a->mr == b->mr &&
           a->fv == b->fv &&
           a->offset_within_region == b->offset_within_region &&
           a->offset_within_address_space == b->offset_within_address_space &&
           int128_eq(a->size, b->size) &&
           a->readonly == b->readonly &&
           a->nonvolatile == b->nonvolatile;
}

/**
 * memory_region_section_new_copy: Copy a memory region section
 *
 * Allocate memory for a new copy, copy the memory region section, and
 * properly take a reference on all relevant members.
 *
 * @s: the #MemoryRegionSection to copy
 */
MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s);

/**
 * memory_region_section_free_copy: Free a copied memory region section
 *
 * Free a copy of a memory section created via memory_region_section_new_copy(),
 * properly dropping references on all relevant members.
 *
 * @s: the #MemoryRegionSection to free
 */
void memory_region_section_free_copy(MemoryRegionSection *s);

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size);

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);
/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);
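/*
 * Illustrative sketch (not part of the API): building a small hierarchy, a
 * 4 KiB container holding one MMIO subregion at offset 0x100.  Reuses the
 * hypothetical example_dev_ops/ExampleDevState from the sketch further
 * above; memory_region_add_subregion() is declared later in this header,
 * so it is redeclared here for the example.
 */
void memory_region_add_subregion(MemoryRegion *mr, hwaddr offset,
                                 MemoryRegion *subregion);

static inline void example_build_hierarchy(MemoryRegion *container,
                                           MemoryRegion *mmio,
                                           ExampleDevState *s,
                                           Object *owner)
{
    memory_region_init(container, owner, "example-container", 0x1000);
    memory_region_init_io(mmio, owner, &example_dev_ops, s,
                          "example-mmio", 4);
    /* The MMIO register now appears at offset 0x100 of the container. */
    memory_region_add_subregion(container, 0x100, mmio);
}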
/**
 * memory_region_init_ram_nomigrate:  Initialize RAM memory region.
 *                                    Accesses into the region will
 *                                    modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_ram_flags_nomigrate:  Initialize RAM memory region.
 *                                          Accesses into the region will
 *                                          modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE,
 *             RAM_GUEST_MEMFD.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
                                            Object *owner,
                                            const char *name,
                                            uint64_t size,
                                            uint32_t ram_flags,
                                            Error **errp);

/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     Changing the size while migrating
 *                                     can result in the migration being
 *                                     canceled.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef CONFIG_POSIX

/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *             RAM_READONLY_FD, RAM_GUEST_MEMFD
 * @path: the path in which to allocate the RAM.
 * @offset: offset within the file referenced by path
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_from_file(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      ram_addr_t offset,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
 *                                  mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *             RAM_READONLY_FD, RAM_GUEST_MEMFD
 * @fd: the fd to mmap.
 * @offset: offset within the file referenced by fd
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    uint32_t ram_flags,
                                    int fd,
                                    ram_addr_t offset,
                                    Error **errp);
#endif

/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
 *                                     a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly.  However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided.  Replaces
 * skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);
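/*
 * Illustrative sketch (not part of the API): aliasing the first 1 MiB of a
 * larger RAM region, e.g. to mirror low memory at another guest-physical
 * address.  The names are made up for the example; mapping the alias into
 * a container works exactly as for any other region.
 */
static inline void example_alias_low_ram(MemoryRegion *alias,
                                         MemoryRegion *ram,
                                         Object *owner)
{
    /* @alias covers bytes [0, 1 MiB) of @ram. */
    memory_region_init_alias(alias, owner, "example-low-ram-alias",
                             ram, 0, 1 * 1024 * 1024);
}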
/**
 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram_nomigrate()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
 *                                           Writes are handled via callbacks.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp);

/**
 * memory_region_init_iommu: Initialize a memory region of a custom type
 *                           that translates addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
 * @_iommu_mr should be a pointer to enough memory for an instance of
 * that subclass, @instance_size is the size of that subclass, and
 * @mrtypename is its name. This function will initialize @_iommu_mr as an
 * instance of the subclass, and its methods will then be called to handle
 * accesses to the memory region. See the documentation of
 * #IOMMUMemoryRegionClass for further details.
 *
 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
 * @instance_size: the IOMMUMemoryRegion subclass instance size
 * @mrtypename: the type name of the #IOMMUMemoryRegion
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram_nomigrate()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_rom_device_nomigrate: Initialize a ROM memory region.
 *                                          Writes are handled via callbacks.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp);

/**
 * memory_region_init_iommu: Initialize a memory region of a custom type
 *                           that translates addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
 * @_iommu_mr should be a pointer to enough memory for an instance of
 * that subclass, @instance_size is the size of that subclass, and
 * @mrtypename is its name. This function will initialize @_iommu_mr as an
 * instance of the subclass, and its methods will then be called to handle
 * accesses to the memory region. See the documentation of
 * #IOMMUMemoryRegionClass for further details.
 *
 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
 * @instance_size: the IOMMUMemoryRegion subclass instance size
 * @mrtypename: the type name of the #IOMMUMemoryRegion
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_init_ram: Initialize RAM memory region. Accesses into the
 *                         region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count (must be
 *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
 * @name: name of the memory region
 * @size: size of the region in bytes
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * This function allocates RAM for a board model or device, and
 * arranges for it to be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);
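
/*
 * A minimal sketch of typical board code (illustrative, not upstream code);
 * the name and the use of machine->ram_size are hypothetical:
 *
 *     MemoryRegion *ram = g_new0(MemoryRegion, 1);
 *     memory_region_init_ram(ram, NULL, "board.ram", machine->ram_size,
 *                            &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */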

bool memory_region_init_ram_guest_memfd(MemoryRegion *mr,
                                        Object *owner,
                                        const char *name,
                                        uint64_t size,
                                        Error **errp);

/**
 * memory_region_init_rom: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly(). This includes arranging for the
 * contents to be migrated.
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_rom(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_rom_device: Initialize a ROM memory region.
 *                                Writes are handled via callbacks.
 *
 * This function initializes a memory region backed by RAM for reads
 * and callbacks for writes, and arranges for the RAM backing to
 * be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);
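
/*
 * A sketch of a flash-like device using write callbacks (illustrative,
 * not upstream code; "myflash" and the state struct are hypothetical):
 *
 *     static void myflash_write(void *opaque, hwaddr addr,
 *                               uint64_t data, unsigned size)
 *     {
 *         // decode a programming command; reads bypass this callback
 *     }
 *
 *     static const MemoryRegionOps myflash_ops = {
 *         .write = myflash_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *     };
 *
 *     memory_region_init_rom_device(&s->mem, OBJECT(s), &myflash_ops, s,
 *                                   "myflash", s->size, &error_fatal);
 */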

/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device-backed RAM region.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_is_protected: check whether a memory region is protected
 *
 * Returns %true if a memory region is protected RAM and cannot be accessed
 * via standard mechanisms, e.g. DMA.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_protected(MemoryRegion *mr);

/**
 * memory_region_has_guest_memfd: check whether a memory region has guest_memfd
 *                                associated
 *
 * Returns %true if a memory region's ram_block has valid guest_memfd assigned.
 *
 * @mr: the memory region being queried
 */
bool memory_region_has_guest_memfd(MemoryRegion *mr);

/**
 * memory_region_get_iommu: check whether a memory region is an iommu
 *
 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
 * otherwise NULL.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_iommu(mr->alias);
    }
    if (mr->is_iommu) {
        return (IOMMUMemoryRegion *) mr;
    }
    return NULL;
}

/**
 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
 *                                        if an iommu or NULL if not
 *
 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
 * otherwise NULL. This is a fast path avoiding QOM checking; use with caution.
 *
 * @iommu_mr: the memory region being queried
 */
static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
        IOMMUMemoryRegion *iommu_mr)
{
    return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
}

#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)

/**
 * memory_region_iommu_get_min_page_size: get minimum supported page size
 *                                        for an iommu
 *
 * Returns minimum supported page size for an iommu.
 *
 * @iommu_mr: the memory region being queried
 */
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * Note: for any IOMMU implementation, an in-place mapping change
 * should be notified with an UNMAP followed by a MAP.
 *
 * @iommu_mr: the memory region that was changed
 * @iommu_idx: the IOMMU index for the translation table which has changed
 * @event: TLB event with the new entry in the IOMMU translation table.
 *         The entry replaces all old entries for the same virtual I/O address
 *         range.
 */
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                const IOMMUTLBEvent event);

/**
 * memory_region_notify_iommu_one: notify a change in an IOMMU translation
 *                                 entry to a single notifier
 *
 * This works just like memory_region_notify_iommu(), but it only
 * notifies a specific notifier, not all of them.
 *
 * @notifier: the notifier to be notified
 * @event: TLB event with the new entry in the IOMMU translation table.
 *         The entry replaces all old entries for the same virtual I/O address
 *         range.
 */
void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
                                    const IOMMUTLBEvent *event);

/**
 * memory_region_unmap_iommu_notifier_range: notify an unmap for an IOMMU
 *                                           translation that covers the
 *                                           range of a notifier
 *
 * @notifier: the notifier to be notified
 */
void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier);


/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * Returns 0 on success, or a negative errno otherwise. In particular,
 * -EINVAL indicates that at least one of the attributes of the notifier
 * is not supported (flag/range) by the IOMMU memory region. On failure,
 * an error object is set in @errp.
 *
 * @mr: the memory region to observe
 * @n: the IOMMUNotifier to be added; the notify callback receives a
 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
 *     ceases to be valid on exit from the notifier.
 * @errp: pointer to Error*, to store an error if it happens.
 */
int memory_region_register_iommu_notifier(MemoryRegion *mr,
                                          IOMMUNotifier *n, Error **errp);
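
/*
 * A sketch of a device registering for UNMAP events (illustrative, not
 * upstream code; the callback, state struct and range are hypothetical,
 * and iommu_notifier_init() is the helper defined earlier in this header):
 *
 *     static void mydev_iommu_unmap(IOMMUNotifier *n, IOMMUTLBEntry *entry)
 *     {
 *         // drop cached translations overlapping [iova, iova | addr_mask]
 *     }
 *
 *     iommu_notifier_init(&s->n, mydev_iommu_unmap, IOMMU_NOTIFIER_UNMAP,
 *                         0, HWADDR_MAX, 0);
 *     memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), &s->n,
 *                                           &error_fatal);
 */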

/**
 * memory_region_iommu_replay: replay existing IOMMU translations to
 * a notifier with the minimum page granularity supported by the IOMMU
 * (see memory_region_iommu_get_min_page_size()).
 *
 * Note: this is not related to record-and-replay functionality.
 *
 * @iommu_mr: the memory region to observe
 * @n: the notifier to which to replay iommu mappings
 */
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @mr: the memory region which was observed and for which notify_stopped()
 *      needs to be called
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n);

/**
 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
 * defined on the IOMMU.
 *
 * Returns 0 on success, or a negative errno otherwise. In particular,
 * -EINVAL indicates that the IOMMU does not support the requested
 * attribute.
 *
 * @iommu_mr: the memory region
 * @attr: the requested attribute
 * @data: a pointer to the requested attribute data
 */
int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data);

/**
 * memory_region_iommu_attrs_to_index: return the IOMMU index to
 * use for translations with the given memory transaction attributes.
 *
 * @iommu_mr: the memory region
 * @attrs: the memory transaction attributes
 */
int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs);

/**
 * memory_region_iommu_num_indexes: return the total number of IOMMU
 * indexes that this IOMMU supports.
 *
 * @iommu_mr: the memory region
 */
int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes for the given client
 *
 * @mr: the memory region being queried
 * @client: the client being queried
 */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);

/**
 * memory_region_get_dirty_log_mask: return the clients for which a
 * memory region is logging writes.
 *
 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 * are the bit indices.
 *
 * @mr: the memory region being queried
 */
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

/**
 * memory_region_is_nonvolatile: check whether a memory region is non-volatile
 *
 * Returns %true if a memory region is non-volatile memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
{
    return mr->nonvolatile;
}

/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_from_host: Convert a pointer into a RAM memory region
 * and an offset within it.
 *
 * Given a host pointer inside a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 * the MemoryRegion and the offset within it.
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore. If the caller is not within an RCU critical
 * section and does not hold the BQL, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @ptr: the host pointer to be converted
 * @offset: the offset within memory region
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore. If the caller is not within an RCU critical
 * section and does not hold the BQL, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/* memory_region_ram_resize: Resize a RAM region.
 *
 * Resizing RAM while migrating can result in the migration being canceled.
 * Care has to be taken if the guest might have already detected the memory.
 *
 * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size of the region
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
                              Error **errp);

/**
 * memory_region_msync: Synchronize selected address range of
 * a memory mapped region
 *
 * @mr: the memory region to be synchronized
 * @addr: the initial address of the range to be synchronized
 * @size: the size of the range to be synchronized
 */
void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);

/**
 * memory_region_writeback: Trigger cache writeback for
 * selected address range
 *
 * @mr: the memory region to be updated
 * @addr: the initial address of the range to be written back
 * @size: the size of the range to be written back
 */
void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
 *
 * This function is called when the caller wants to clear the remote
 * dirty bitmap of a memory range within the memory region. This can
 * be used by e.g. KVM to manually clear dirty log when
 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is declared as supported by the host
 * kernel.
 *
 * @mr: the memory region to clear the dirty log upon
 * @start: start address offset within the memory region
 * @len: length of the memory region to clear dirty bitmap
 */
void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
                                      hwaddr len);

/**
 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
 *                                         bitmap and clear it.
 *
 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
 * returns the snapshot. The snapshot can then be used to query dirty
 * status, using memory_region_snapshot_get_dirty. Snapshotting allows
 * querying the same page multiple times, which is especially useful for
 * display updates where the scanlines often are not page aligned.
 *
 * The dirty bitmap region which gets copied into the snapshot (and
 * cleared afterwards) can be larger than requested. The boundaries
 * are rounded up/down so complete bitmap longs (covering 64 pages on
 * 64-bit hosts) can be copied over into the bitmap snapshot. This
 * isn't a problem for display updates as the extra pages are outside
 * the visible area, and in case the visible area changes a full
 * display redraw is due anyway. Should other use cases for this
 * function emerge we might have to revisit this implementation
 * detail.
 *
 * Use g_free to release DirtyBitmapSnapshot.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client);

/**
 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
 *                                   in the specified dirty bitmap snapshot.
 *
 * @mr: the memory region being queried.
 * @snap: the dirty bitmap snapshot
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 */
bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
                                      DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size);
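
/*
 * A sketch of a display-update loop using the snapshot API (illustrative,
 * not upstream code; "vram", the geometry and redraw_scanline() are
 * hypothetical):
 *
 *     DirtyBitmapSnapshot *snap;
 *     snap = memory_region_snapshot_and_clear_dirty(vram, 0, vram_size,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (int y = 0; y < height; y++) {
 *         if (memory_region_snapshot_get_dirty(vram, snap,
 *                                              y * stride, stride)) {
 *             redraw_scanline(y);
 *         }
 *     }
 *     g_free(snap);
 */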

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);

/**
 * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
 *                                 TBs (for self-modifying code).
 *
 * The MemoryRegionOps->write() callback of a ROM device must use this function
 * to mark byte ranges that have been modified internally, such as by directly
 * accessing the memory returned by memory_region_get_ram_ptr().
 *
 * This function marks the range dirty and invalidates TBs so that TCG can
 * detect self-modifying code.
 *
 * @mr: the region being flushed.
 * @addr: the start, relative to the start of the region, of the range being
 *        flushed.
 * @size: the size, in bytes, of the range being flushed.
 */
void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_set_nonvolatile: Turn a memory region non-volatile
 *
 * Allows a memory region to be marked as non-volatile.
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @nonvolatile: whether the region is to be non-volatile.
 */
void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing. MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions. Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed. This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced. Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves. For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event. The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 **/
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);
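
/*
 * A sketch of wiring a doorbell register to an eventfd, virtio-style
 * (illustrative, not upstream code; the notifier field, DOORBELL_OFFSET
 * and queue_index are hypothetical):
 *
 *     event_notifier_init(&s->notifier, 0);
 *     memory_region_add_eventfd(&s->mmio, DOORBELL_OFFSET, 2,
 *                               true, queue_index, &s->notifier);
 *     // a matching guest 16-bit write of queue_index to DOORBELL_OFFSET
 *     // now signals s->notifier without invoking the MMIO write callback
 */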

/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset. The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping). A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset. The subregion may overlap with other
 * subregions. Conflicts are resolved by having a higher @priority hide a
 * lower @priority. Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);
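
/*
 * A sketch of overlap priorities (illustrative, not upstream code; names
 * and addresses are hypothetical): a ROM window that hides the RAM
 * underneath it while enabled:
 *
 *     memory_region_add_subregion(container, 0, ram);           // priority 0
 *     memory_region_add_subregion_overlap(container, 0xf0000,
 *                                         rom_window, 1);       // hides RAM
 *     memory_region_set_enabled(rom_window, false);   // RAM visible again
 */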

/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 *
 * @mr: the region to be queried
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

uint64_t memory_region_get_alignment(const MemoryRegion *mr);
/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region. A disabled memory region
 * ignores all accesses to itself and its subregions. It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to container region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

/*
 * memory_region_set_size: dynamically update the size of a region.
 *
 * Dynamically updates the size of a region.
 *
 * @mr: the region to be updated
 * @size: the new size of the region.
 */
void memory_region_set_size(MemoryRegion *mr, uint64_t size);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() had changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);

/*
 * memory_region_set_unmergeable: Set a memory region unmergeable
 *
 * Mark a memory region unmergeable, resulting in the memory region (or
 * everything contained in a memory region container) not getting merged when
 * simplifying the address space and notifying memory listeners. Consequently,
 * memory listeners will never get notified about ranges that are larger than
 * the original memory regions.
 *
 * This is primarily useful when multiple aliases to a RAM memory region are
 * mapped into a memory region container, and updates (e.g., enable/disable or
 * map/unmap) of individual memory region aliases are not supposed to affect
 * other memory regions in the same container.
 *
 * @mr: the #MemoryRegion to be updated
 * @unmergeable: whether to mark the #MemoryRegion unmergeable
 */
void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable);

/**
 * memory_region_present: checks if an address relative to a @container
 * translates into #MemoryRegion within @container
 *
 * Answer whether a #MemoryRegion within @container covers the address
 * @addr.
 *
 * @container: a #MemoryRegion within which @addr is a relative address
 * @addr: the area within @container to be searched
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr);

/**
 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
 * into another memory region, which does not necessarily imply that it is
 * mapped into an address space.
 *
 * @mr: a #MemoryRegion which should be checked if it's mapped
 */
bool memory_region_is_mapped(MemoryRegion *mr);

/**
 * memory_region_get_ram_discard_manager: get the #RamDiscardManager for a
 *                                        #MemoryRegion
 *
 * The #RamDiscardManager cannot change while a memory region is mapped.
 *
 * @mr: the #MemoryRegion
 */
RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr);

/**
 * memory_region_has_ram_discard_manager: check whether a #MemoryRegion has a
 *                                        #RamDiscardManager assigned
 *
 * @mr: the #MemoryRegion
 */
static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr)
{
    return !!memory_region_get_ram_discard_manager(mr);
}

/**
 * memory_region_set_ram_discard_manager: set the #RamDiscardManager for a
 *                                        #MemoryRegion
 *
 * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion
 * that does not cover RAM, or a #MemoryRegion that already has a
 * #RamDiscardManager assigned.
 *
 * @mr: the #MemoryRegion
 * @rdm: #RamDiscardManager to set
 */
void memory_region_set_ram_discard_manager(MemoryRegion *mr,
                                           RamDiscardManager *rdm);

/**
 * memory_region_find: translate an address/size relative to a
 * MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 * - @size = 0 iff no overlap was found
 * - @mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one. However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
 * - @offset_within_address_space >= @addr
 * - @offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @mr to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);

/**
 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for all address spaces.
 *
 * @last_stage: whether this is the last stage of live migration
 */
void memory_global_dirty_log_sync(bool last_stage);

/**
 * memory_global_after_dirty_log_sync: synchronize the vCPUs with the dirty log
 *
 * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
 * This function must be called after the dirty log bitmap is cleared, and
 * before dirty guest memory pages are read. If you are using
 * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
 * care of doing this.
 */
void memory_global_after_dirty_log_sync(void);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
 */
void memory_region_transaction_commit(void);
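
/*
 * A sketch of batching several topology updates into a single commit
 * (illustrative, not upstream code; "bank0"/"bank1" and the address are
 * hypothetical). Memory listeners then observe one consistent update:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bank0, false);
 *     memory_region_set_address(bank1, 0x40000000);
 *     memory_region_transaction_commit();
 */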

/**
 * memory_listener_register: register callbacks to be called when memory
 *                           sections are mapped or unmapped into an address
 *                           space
 *
 * @listener: an object containing the callbacks to be called
 * @filter: if non-%NULL, only regions in this address space will be observed
 */
void memory_listener_register(MemoryListener *listener, AddressSpace *filter);

/**
 * memory_listener_unregister: undo the effect of memory_listener_register()
 *
 * @listener: an object containing the callbacks to be removed
 */
void memory_listener_unregister(MemoryListener *listener);

/**
 * memory_global_dirty_log_start: begin dirty logging for all regions
 *
 * @flags: purpose of starting dirty log, migration or dirty rate
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_global_dirty_log_start(unsigned int flags, Error **errp);

/**
 * memory_global_dirty_log_stop: end dirty logging for all regions
 *
 * @flags: purpose of stopping dirty log, migration or dirty rate
 */
void memory_global_dirty_log_stop(unsigned int flags);

void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);

bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
                                unsigned size, bool is_write,
                                MemTxAttrs attrs);

/**
 * memory_region_dispatch_read: perform a read directly to the specified
 * MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @pval: pointer to uint64_t which the data is written to
 * @op: size, sign, and endianness of the memory operation
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        MemOp op,
                                        MemTxAttrs attrs);
/**
 * memory_region_dispatch_write: perform a write directly to the specified
 * MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @data: data to write
 * @op: size, sign, and endianness of the memory operation
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         MemOp op,
                                         MemTxAttrs attrs);

/**
 * address_space_init: initializes an address space
 *
 * @as: an uninitialized #AddressSpace
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name. The name is only used for debugging
 *        output.
 */
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);

/**
 * address_space_destroy: destroy an address space
 *
 * Releases all resources associated with an address space. After an address
 * space is destroyed, its root memory region (given by address_space_init())
 * may be destroyed as well.
 *
 * @as: address space to be destroyed
 */
void address_space_destroy(AddressSpace *as);

/**
 * address_space_remove_listeners: unregister all listeners of an address space
 *
 * Removes all callbacks previously registered with memory_listener_register()
 * for @as.
 *
 * @as: an initialized #AddressSpace
 */
void address_space_remove_listeners(AddressSpace *as);

/**
 * address_space_rw: read from or write to an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to read or write
 * @is_write: indicates the transfer direction
 */
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
                             MemTxAttrs attrs, void *buf,
                             hwaddr len, bool is_write);

/**
 * address_space_write: write to address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 */
MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs,
                                const void *buf, hwaddr len);

/**
 * address_space_write_rom: write to address space, including ROM.
 *
 * This function writes to the specified address space, but will
 * write data to both ROM and RAM. This is used for non-guest
 * writes like writes from the gdb debug stub or initial loading
 * of ROM contents.
 *
 * Note that portions of the write which attempt to write data to
 * a device will be silently ignored -- only real RAM and ROM will
 * be written to.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 */
MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs,
                                    const void *buf, hwaddr len);
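
/*
 * A sketch of a device performing DMA into guest memory (illustrative,
 * not upstream code; "buf" and "dma_addr" are hypothetical). Always check
 * the MemTxResult:
 *
 *     if (address_space_write(&address_space_memory, dma_addr,
 *                             MEMTXATTRS_UNSPECIFIED, buf,
 *                             sizeof(buf)) != MEMTX_OK) {
 *         // raise a device-specific DMA error
 *     }
 */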

/* address_space_ld*: load from an address space
 * address_space_st*: store to an address space
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address within the AddressSpace.
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *          if NULL, this information is discarded
 */

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#include "exec/memory_ldst.h.inc"

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#include "exec/memory_ldst_phys.h.inc"

struct MemoryRegionCache {
    uint8_t *ptr;
    hwaddr xlat;
    hwaddr len;
    FlatView *fv;
    MemoryRegionSection mrs;
    bool is_write;
};

/* address_space_ld*_cached: load from a cached #MemoryRegion
 * address_space_st*_cached: store into a cached #MemoryRegion
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address. The address is
 * a physical address in the AddressSpace, but it must lie within
 * a #MemoryRegion that was mapped with address_space_cache_init.
 *
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @cache: previously initialized #MemoryRegionCache to be accessed
 * @addr: address within the address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *          if NULL, this information is discarded
 */

#define SUFFIX       _cached_slow
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#include "exec/memory_ldst.h.inc"

/* Inline fast path for direct RAM access. */
static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len);
    if (likely(cache->ptr)) {
        return ldub_p(cache->ptr + addr);
    } else {
        return address_space_ldub_cached_slow(cache, addr, attrs, result);
    }
}

static inline void address_space_stb_cached(MemoryRegionCache *cache,
    hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len);
    if (likely(cache->ptr)) {
        stb_p(cache->ptr + addr, val);
    } else {
        address_space_stb_cached_slow(cache, addr, val, attrs, result);
    }
}

#define ENDIANNESS   _le
#include "exec/memory_ldst_cached.h.inc"

#define ENDIANNESS   _be
#include "exec/memory_ldst_cached.h.inc"

#define SUFFIX       _cached
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#include "exec/memory_ldst_phys.h.inc"

/* address_space_cache_init: prepare for repeated access to a physical
 * memory region
 *
 * @cache: #MemoryRegionCache to be filled
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of buffer
 * @is_write: indicates the transfer direction
 *
 * Will only work with RAM, and may map a subset of the requested range by
 * returning a value that is less than @len. On failure, return a negative
 * errno value.
 *
 * Because it only works with RAM, this function can be used for
 * read-modify-write operations. In this case, is_write should be %true.
 *
 * Note that addresses passed to the address_space_*_cached functions
 * are relative to @addr.
 */
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write);

/**
 * address_space_cache_init_empty: Initialize empty #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache to operate on.
 *
 * Initializes #MemoryRegionCache structure without memory region attached.
 * A cache initialized this way can only be safely destroyed, but not used.
 */
static inline void address_space_cache_init_empty(MemoryRegionCache *cache)
{
    cache->mrs.mr = NULL;
    /* There is no real need to initialize fv, but it makes Coverity happy. */
    cache->fv = NULL;
}

/**
 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache to operate on.
 * @addr: The first physical address that was written, relative to the
 *        address that was passed to @address_space_cache_init.
 * @access_len: The number of bytes that were written starting at @addr.
 */
void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len);

/**
 * address_space_cache_destroy: free a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache whose memory should be released.
 */
void address_space_cache_destroy(MemoryRegionCache *cache);
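
/*
 * A sketch of repeated reads through a cache (illustrative, not upstream
 * code; "ring_addr" and "ring_len" are hypothetical, and the example
 * assumes the lduw_le_phys_cached() accessor generated by the includes
 * above):
 *
 *     MemoryRegionCache cache;
 *     int64_t mapped = address_space_cache_init(&cache,
 *                                               &address_space_memory,
 *                                               ring_addr, ring_len, false);
 *     if (mapped >= (int64_t)ring_len) {
 *         uint16_t avail_idx = lduw_le_phys_cached(&cache, 2);
 *         address_space_cache_destroy(&cache);
 *     }
 */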

/* address_space_get_iotlb_entry: translate an address into an IOTLB
 * entry. Should be called from an RCU critical section.
 */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write, MemTxAttrs attrs);

/* address_space_translate: translate an address range within an address space
 * into a MemoryRegion and an address range within that region. Should be
 * called from an RCU critical section, to avoid that the last reference
 * to the returned region disappears after address_space_translate returns.
 *
 * @fv: #FlatView to be accessed
 * @addr: address within that address space
 * @xlat: pointer to address within the returned memory region section's
 *        #MemoryRegion.
 * @len: pointer to length
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
MemoryRegion *flatview_translate(FlatView *fv,
                                 hwaddr addr, hwaddr *xlat,
                                 hwaddr *len, bool is_write,
                                 MemTxAttrs attrs);

static inline MemoryRegion *address_space_translate(AddressSpace *as,
                                                    hwaddr addr, hwaddr *xlat,
                                                    hwaddr *len, bool is_write,
                                                    MemTxAttrs attrs)
{
    return flatview_translate(address_space_to_flatview(as),
                              addr, xlat, len, is_write, attrs);
}

/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, addr and len should be aligned to a page size. This limitation
 * will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
                                bool is_write, MemTxAttrs attrs);

/* address_space_map: map a physical memory region into a host virtual address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL and set *@plen to zero(0), if resources needed to perform
 * the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use address_space_register_map_client() to know when retrying the map
 * operation is likely to succeed.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
void *address_space_map(AddressSpace *as, hwaddr addr,
                        hwaddr *plen, bool is_write, MemTxAttrs attrs);

/* address_space_unmap: Unmaps a memory region previously mapped by
 * address_space_map()
 *
 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
 * the amount of memory that was actually read or written by the caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by address_space_map()
 * @len: buffer length as returned by address_space_map()
 * @access_len: amount of data actually transferred
 * @is_write: indicates the transfer direction
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         bool is_write, hwaddr access_len);
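
/*
 * A sketch of a zero-copy read using map/unmap (illustrative, not upstream
 * code; "dma_addr", "req_len" and consume_data() are hypothetical). Note
 * the mapped length may be shorter than requested:
 *
 *     hwaddr plen = req_len;
 *     void *p = address_space_map(&address_space_memory, dma_addr, &plen,
 *                                 false, MEMTXATTRS_UNSPECIFIED);
 *     if (p) {
 *         consume_data(p, plen);
 *         address_space_unmap(&address_space_memory, p, plen, false, plen);
 *     }
 */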

/*
 * address_space_register_map_client: Register a callback to invoke when
 * resources for address_space_map() are available again.
 *
 * address_space_map may fail when there are not enough resources available,
 * such as when bounce buffer memory would exceed the limit. The callback can
 * be used to retry the address_space_map operation. Note that the callback
 * gets automatically removed after firing.
 *
 * @as: #AddressSpace to be accessed
 * @bh: callback to invoke when address_space_map() retry is appropriate
 */
void address_space_register_map_client(AddressSpace *as, QEMUBH *bh);

/*
 * address_space_unregister_map_client: Unregister a callback that has
 * previously been registered and not fired yet.
 *
 * @as: #AddressSpace to be accessed
 * @bh: callback to unregister
 */
void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh);

/* Internal functions, part of the implementation of address_space_read. */
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, void *buf, hwaddr len);
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
                                   MemTxAttrs attrs, void *buf,
                                   hwaddr len, hwaddr addr1, hwaddr l,
                                   MemoryRegion *mr);
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);

/* Internal functions, part of the implementation of address_space_read_cached
 * and address_space_write_cached. */
MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
                                           hwaddr addr, void *buf, hwaddr len);
MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
                                            hwaddr addr, const void *buf,
                                            hwaddr len);

int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr);
bool prepare_mmio_access(MemoryRegion *mr);

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (is_write) {
        return memory_region_is_ram(mr) && !mr->readonly &&
               !mr->rom_device && !memory_region_is_ram_device(mr);
    } else {
        return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
               memory_region_is_romd(mr);
    }
}

/**
 * address_space_read: read from an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault). Called within RCU critical section.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline __attribute__((__always_inline__))
MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, void *buf,
                               hwaddr len)
{
    MemTxResult result = MEMTX_OK;
    hwaddr l, addr1;
    void *ptr;
    MemoryRegion *mr;
    FlatView *fv;

    if (__builtin_constant_p(len)) {
        if (len) {
            RCU_READ_LOCK_GUARD();
            fv = address_space_to_flatview(as);
            l = len;
            mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
            if (len == l && memory_access_is_direct(mr, false)) {
                ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
                memcpy(buf, ptr, len);
            } else {
                result = flatview_read_continue(fv, addr, attrs, buf, len,
                                                addr1, l, mr);
            }
        }
    } else {
        result = address_space_read_full(as, addr, attrs, buf, len);
    }
    return result;
}

/**
 * address_space_read_cached: read from a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline MemTxResult
address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
                          void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
    if (likely(cache->ptr)) {
        memcpy(buf, cache->ptr + addr, len);
        return MEMTX_OK;
    } else {
        return address_space_read_cached_slow(cache, addr, buf, len);
    }
}

/**
 * address_space_write_cached: write to a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline MemTxResult
address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
                           const void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    if (likely(cache->ptr)) {
        memcpy(cache->ptr + addr, buf, len);
        return MEMTX_OK;
    } else {
        return address_space_write_cached_slow(cache, addr, buf, len);
    }
}

/**
 * address_space_set: Fill address space with a constant byte.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @c: constant byte to fill the memory
 * @len: the number of bytes to fill with the constant byte
 * @attrs: memory transaction attributes
 */
MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
                              uint8_t c, hwaddr len, MemTxAttrs attrs);

#ifdef COMPILING_PER_TARGET
/* enum device_endian to MemOp. */
static inline MemOp devend_memop(enum device_endian end)
{
    QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
                      DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);

#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
    /* Swap if non-host endianness or native (target) endianness */
    return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
#else
    const int non_host_endianness =
        DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN;

    /* In this case, native (target) endianness needs no swap. */
    return (end == non_host_endianness) ? MO_BSWAP : 0;
#endif
}
#endif /* COMPILING_PER_TARGET */

/*
 * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
 * to manage the actual amount of memory consumed by the VM (then, the memory
 * provided by RAM blocks might be bigger than the desired memory consumption).
 * This *must* be set if:
 * - Discarding parts of a RAM block does not result in the change being
 *   reflected in the VM and the pages getting freed.
 * - All memory in RAM blocks is pinned or duplicated, invalidating any previous
 *   discards blindly.
 * - Discarding parts of a RAM block will result in integrity issues (e.g.,
 *   encrypted VMs).
 * Technologies that only temporarily pin the current working set of a
 * driver are fine, because we don't expect such pages to be discarded
 * (esp. based on guest action like balloon inflation).
 *
 * This is *not* to be used to protect from concurrent discards (esp.,
 * postcopy).
 *
 * Returns 0 if successful. Returns -EBUSY if a technology that relies on
 * discards to work reliably is active.
 */
int ram_block_discard_disable(bool state);

/*
 * See ram_block_discard_disable(): only disable uncoordinated discards,
 * keeping coordinated discards (via the RamDiscardManager) enabled.
 */
int ram_block_uncoordinated_discard_disable(bool state);

/*
 * Inhibit technologies that disable discarding of pages in RAM blocks.
 *
 * Returns 0 if successful. Returns -EBUSY if discards are already set to
 * broken.
 */
int ram_block_discard_require(bool state);

/*
 * See ram_block_discard_require(): only inhibit technologies that disable
 * uncoordinated discarding of pages in RAM blocks, allowing co-existence with
 * technologies that only inhibit uncoordinated discards (via the
 * RamDiscardManager).
 */
int ram_block_coordinated_discard_require(bool state);

/*
 * Test if any discarding of memory in ram blocks is disabled.
 */
bool ram_block_discard_is_disabled(void);

/*
 * Test if any discarding of memory in ram blocks is required to work reliably.
 */
bool ram_block_discard_is_required(void);

#endif

#endif