xref: /openbmc/qemu/include/system/memory.h (revision f47a672a72acd6e2712031f0bc4d4f3ae4b6302c)
18be545baSRichard Henderson /*
28be545baSRichard Henderson  * Physical memory management API
38be545baSRichard Henderson  *
48be545baSRichard Henderson  * Copyright 2011 Red Hat, Inc. and/or its affiliates
58be545baSRichard Henderson  *
68be545baSRichard Henderson  * Authors:
78be545baSRichard Henderson  *  Avi Kivity <avi@redhat.com>
88be545baSRichard Henderson  *
98be545baSRichard Henderson  * This work is licensed under the terms of the GNU GPL, version 2.  See
108be545baSRichard Henderson  * the COPYING file in the top-level directory.
118be545baSRichard Henderson  *
128be545baSRichard Henderson  */
138be545baSRichard Henderson 
148be545baSRichard Henderson #ifndef SYSTEM_MEMORY_H
158be545baSRichard Henderson #define SYSTEM_MEMORY_H
168be545baSRichard Henderson 
178be545baSRichard Henderson #include "exec/cpu-common.h"
188be545baSRichard Henderson #include "exec/hwaddr.h"
198be545baSRichard Henderson #include "exec/memattrs.h"
208be545baSRichard Henderson #include "exec/memop.h"
218be545baSRichard Henderson #include "exec/ramlist.h"
228be545baSRichard Henderson #include "exec/tswap.h"
238be545baSRichard Henderson #include "qemu/bswap.h"
248be545baSRichard Henderson #include "qemu/queue.h"
258be545baSRichard Henderson #include "qemu/int128.h"
268be545baSRichard Henderson #include "qemu/range.h"
278be545baSRichard Henderson #include "qemu/notify.h"
288be545baSRichard Henderson #include "qom/object.h"
298be545baSRichard Henderson #include "qemu/rcu.h"
308be545baSRichard Henderson 
318be545baSRichard Henderson #define RAM_ADDR_INVALID (~(ram_addr_t)0)
328be545baSRichard Henderson 
338be545baSRichard Henderson #define MAX_PHYS_ADDR_SPACE_BITS 62
348be545baSRichard Henderson #define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
358be545baSRichard Henderson 
368be545baSRichard Henderson #define TYPE_MEMORY_REGION "memory-region"
378be545baSRichard Henderson DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
388be545baSRichard Henderson                          TYPE_MEMORY_REGION)
398be545baSRichard Henderson 
408be545baSRichard Henderson #define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
418be545baSRichard Henderson typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
428be545baSRichard Henderson DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
438be545baSRichard Henderson                      IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)
448be545baSRichard Henderson 
458be545baSRichard Henderson #define TYPE_RAM_DISCARD_MANAGER "ram-discard-manager"
468be545baSRichard Henderson typedef struct RamDiscardManagerClass RamDiscardManagerClass;
478be545baSRichard Henderson typedef struct RamDiscardManager RamDiscardManager;
488be545baSRichard Henderson DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
498be545baSRichard Henderson                      RAM_DISCARD_MANAGER, TYPE_RAM_DISCARD_MANAGER);
508be545baSRichard Henderson 
#ifdef CONFIG_FUZZ
/*
 * Instrumentation hook invoked on DMA reads when QEMU is built for
 * fuzzing: receives the address, length and memory region being read.
 */
void fuzz_dma_read_cb(size_t addr,
                      size_t len,
                      MemoryRegion *mr);
#else
/*
 * No-op stub for non-fuzzing builds, so call sites do not need to be
 * guarded with CONFIG_FUZZ.
 */
static inline void fuzz_dma_read_cb(size_t addr,
                                    size_t len,
                                    MemoryRegion *mr)
{
    /* Do Nothing */
}
#endif
638be545baSRichard Henderson 
648be545baSRichard Henderson /* Possible bits for global_dirty_log_{start|stop} */
658be545baSRichard Henderson 
668be545baSRichard Henderson /* Dirty tracking enabled because migration is running */
678be545baSRichard Henderson #define GLOBAL_DIRTY_MIGRATION  (1U << 0)
688be545baSRichard Henderson 
698be545baSRichard Henderson /* Dirty tracking enabled because measuring dirty rate */
708be545baSRichard Henderson #define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)
718be545baSRichard Henderson 
728be545baSRichard Henderson /* Dirty tracking enabled because dirty limit */
738be545baSRichard Henderson #define GLOBAL_DIRTY_LIMIT      (1U << 2)
748be545baSRichard Henderson 
758be545baSRichard Henderson #define GLOBAL_DIRTY_MASK  (0x7)
768be545baSRichard Henderson 
778be545baSRichard Henderson extern unsigned int global_dirty_tracking;
788be545baSRichard Henderson 
798be545baSRichard Henderson typedef struct MemoryRegionOps MemoryRegionOps;
808be545baSRichard Henderson 
/*
 * A reserved address range tagged with a type whose meaning is defined
 * by the consumer of the region.
 */
struct ReservedRegion {
    Range range;    /* the reserved address range */
    unsigned type;  /* consumer-defined type tag for the range */
};
858be545baSRichard Henderson 
/**
 * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * Fields are documented in declaration order:
 *
 * @size: the size of the section; will not exceed @mr's boundaries
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 * @nonvolatile: this section is non-volatile
 * @unmergeable: this section should not get merged with adjacent sections
 */
struct MemoryRegionSection {
    Int128 size;
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    hwaddr offset_within_address_space;
    bool readonly;
    bool nonvolatile;
    bool unmergeable;
};
1098be545baSRichard Henderson 
1108be545baSRichard Henderson typedef struct IOMMUTLBEntry IOMMUTLBEntry;
1118be545baSRichard Henderson 
/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,  /* no access / caller wants full permissions info */
    IOMMU_RO   = 1,  /* read-only */
    IOMMU_WO   = 2,  /* write-only */
    IOMMU_RW   = 3,  /* read-write */
} IOMMUAccessFlags;

/* Build an IOMMUAccessFlags value from separate read/write booleans. */
#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))
1218be545baSRichard Henderson 
/*
 * One IOMMU translation result: the I/O virtual address range selected
 * by @iova/@addr_mask maps to @translated_addr with permissions @perm.
 */
struct IOMMUTLBEntry {
    AddressSpace    *target_as;  /* address space the translation targets */
    hwaddr           iova;       /* input I/O virtual address */
    hwaddr           translated_addr;  /* output address within @target_as */
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;       /* permitted access for this mapping */
};
1298be545baSRichard Henderson 
1308be545baSRichard Henderson /*
1318be545baSRichard Henderson  * Bitmap for different IOMMUNotifier capabilities. Each notifier can
1328be545baSRichard Henderson  * register with one or multiple IOMMU Notifier capability bit(s).
1338be545baSRichard Henderson  *
 * Normally there are two use cases for the notifiers:
1358be545baSRichard Henderson  *
1368be545baSRichard Henderson  *   (1) When the device needs accurate synchronizations of the vIOMMU page
1378be545baSRichard Henderson  *       tables, it needs to register with both MAP|UNMAP notifies (which
1388be545baSRichard Henderson  *       is defined as IOMMU_NOTIFIER_IOTLB_EVENTS below).
1398be545baSRichard Henderson  *
 *       Regarding accurate synchronization, it's when the notified
1418be545baSRichard Henderson  *       device maintains a shadow page table and must be notified on each
1428be545baSRichard Henderson  *       guest MAP (page table entry creation) and UNMAP (invalidation)
1438be545baSRichard Henderson  *       events (e.g. VFIO). Both notifications must be accurate so that
1448be545baSRichard Henderson  *       the shadow page table is fully in sync with the guest view.
1458be545baSRichard Henderson  *
1468be545baSRichard Henderson  *   (2) When the device doesn't need accurate synchronizations of the
1478be545baSRichard Henderson  *       vIOMMU page tables, it needs to register only with UNMAP or
1488be545baSRichard Henderson  *       DEVIOTLB_UNMAP notifies.
1498be545baSRichard Henderson  *
1508be545baSRichard Henderson  *       It's when the device maintains a cache of IOMMU translations
1518be545baSRichard Henderson  *       (IOTLB) and is able to fill that cache by requesting translations
1528be545baSRichard Henderson  *       from the vIOMMU through a protocol similar to ATS (Address
1538be545baSRichard Henderson  *       Translation Service).
1548be545baSRichard Henderson  *
1558be545baSRichard Henderson  *       Note that in this mode the vIOMMU will not maintain a shadowed
1568be545baSRichard Henderson  *       page table for the address space, and the UNMAP messages can cover
1578be545baSRichard Henderson  *       more than the pages that used to get mapped.  The IOMMU notifiee
1588be545baSRichard Henderson  *       should be able to take care of over-sized invalidations.
1598be545baSRichard Henderson  */
/* Capability bits a notifier registers for; see the discussion above. */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
    /* Notify changes on device IOTLB entries */
    IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
} IOMMUNotifierFlag;

/* Convenience masks over the individual IOMMUNotifierFlag bits. */
#define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
#define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
                            IOMMU_NOTIFIER_DEVIOTLB_EVENTS)
1748be545baSRichard Henderson 
struct IOMMUNotifier;
/* Callback invoked with the IOMMUTLBEntry describing the changed mapping. */
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

/* A registered listener for IOMMU mapping-change events. */
struct IOMMUNotifier {
    IOMMUNotify notify;                /* callback to invoke */
    IOMMUNotifierFlag notifier_flags;  /* events this notifier wants */
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;                     /* IOMMU index this notifier covers */
    /* presumably opaque data for the notifier's owner; not read here */
    void *opaque;
    QLIST_ENTRY(IOMMUNotifier) node;   /* linkage in the region's list */
};
typedef struct IOMMUNotifier IOMMUNotifier;
1908be545baSRichard Henderson 
/* An IOMMU notification event together with the entry it applies to. */
typedef struct IOMMUTLBEvent {
    IOMMUNotifierFlag type;  /* which IOMMU_NOTIFIER_* event this is */
    IOMMUTLBEntry entry;     /* the affected translation */
} IOMMUTLBEvent;
1958be545baSRichard Henderson 
1968be545baSRichard Henderson /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
1978be545baSRichard Henderson #define RAM_PREALLOC   (1 << 0)
1988be545baSRichard Henderson 
1998be545baSRichard Henderson /* RAM is mmap-ed with MAP_SHARED */
2008be545baSRichard Henderson #define RAM_SHARED     (1 << 1)
2018be545baSRichard Henderson 
2028be545baSRichard Henderson /* Only a portion of RAM (used_length) is actually used, and migrated.
2038be545baSRichard Henderson  * Resizing RAM while migrating can result in the migration being canceled.
2048be545baSRichard Henderson  */
2058be545baSRichard Henderson #define RAM_RESIZEABLE (1 << 2)
2068be545baSRichard Henderson 
2078be545baSRichard Henderson /* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
2088be545baSRichard Henderson  * zero the page and wake waiting processes.
2098be545baSRichard Henderson  * (Set during postcopy)
2108be545baSRichard Henderson  */
2118be545baSRichard Henderson #define RAM_UF_ZEROPAGE (1 << 3)
2128be545baSRichard Henderson 
2138be545baSRichard Henderson /* RAM can be migrated */
2148be545baSRichard Henderson #define RAM_MIGRATABLE (1 << 4)
2158be545baSRichard Henderson 
2168be545baSRichard Henderson /* RAM is a persistent kind memory */
2178be545baSRichard Henderson #define RAM_PMEM (1 << 5)
2188be545baSRichard Henderson 
2198be545baSRichard Henderson 
2208be545baSRichard Henderson /*
2218be545baSRichard Henderson  * UFFDIO_WRITEPROTECT is used on this RAMBlock to
2228be545baSRichard Henderson  * support 'write-tracking' migration type.
2238be545baSRichard Henderson  * Implies ram_state->ram_wt_enabled.
2248be545baSRichard Henderson  */
2258be545baSRichard Henderson #define RAM_UF_WRITEPROTECT (1 << 6)
2268be545baSRichard Henderson 
2278be545baSRichard Henderson /*
2288be545baSRichard Henderson  * RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or huge
2298be545baSRichard Henderson  * pages if applicable) is skipped: will bail out if not supported. When not
2308be545baSRichard Henderson  * set, the OS will do the reservation, if supported for the memory type.
2318be545baSRichard Henderson  */
2328be545baSRichard Henderson #define RAM_NORESERVE (1 << 7)
2338be545baSRichard Henderson 
2348be545baSRichard Henderson /* RAM that isn't accessible through normal means. */
2358be545baSRichard Henderson #define RAM_PROTECTED (1 << 8)
2368be545baSRichard Henderson 
2378be545baSRichard Henderson /* RAM is an mmap-ed named file */
2388be545baSRichard Henderson #define RAM_NAMED_FILE (1 << 9)
2398be545baSRichard Henderson 
2408be545baSRichard Henderson /* RAM is mmap-ed read-only */
2418be545baSRichard Henderson #define RAM_READONLY (1 << 10)
2428be545baSRichard Henderson 
2438be545baSRichard Henderson /* RAM FD is opened read-only */
2448be545baSRichard Henderson #define RAM_READONLY_FD (1 << 11)
2458be545baSRichard Henderson 
2468be545baSRichard Henderson /* RAM can be private that has kvm guest memfd backend */
2478be545baSRichard Henderson #define RAM_GUEST_MEMFD   (1 << 12)
2488be545baSRichard Henderson 
/*
 * In RAMBlock creation functions, if RAM_SHARED is 0 in the flags parameter,
 * the implementation may still create a shared mapping if other conditions
 * require it.  Callers who specifically want a private mapping, eg objects
 * specified by the user, must pass RAM_PRIVATE.
 * After RAMBlock creation, RAM_SHARED in the block's flags indicates whether
 * the block is shared or private, and RAM_PRIVATE is omitted.
 */
2578be545baSRichard Henderson #define RAM_PRIVATE (1 << 13)
2588be545baSRichard Henderson 
2598be545baSRichard Henderson static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
2608be545baSRichard Henderson                                        IOMMUNotifierFlag flags,
2618be545baSRichard Henderson                                        hwaddr start, hwaddr end,
2628be545baSRichard Henderson                                        int iommu_idx)
2638be545baSRichard Henderson {
2648be545baSRichard Henderson     n->notify = fn;
2658be545baSRichard Henderson     n->notifier_flags = flags;
2668be545baSRichard Henderson     n->start = start;
2678be545baSRichard Henderson     n->end = end;
2688be545baSRichard Henderson     n->iommu_idx = iommu_idx;
2698be545baSRichard Henderson }
2708be545baSRichard Henderson 
/*
 * Memory region callbacks: device-supplied access handlers and the
 * constraints under which they may be called.
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    /*
     * Variants of @read/@write that carry the memory transaction
     * attributes and can report failure via MemTxResult.
     * NOTE(review): presumably a device implements either the plain or
     * the _with_attrs form of each accessor — confirm against the
     * dispatch code in memory.c.
     */
    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    /* Endianness of the data as seen through these accessors. */
    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
         bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};
3358be545baSRichard Henderson 
/* Class data for the TYPE_MEMORY_REGION QOM type. */
typedef struct MemoryRegionClass {
    /* private */
    ObjectClass parent_class;
} MemoryRegionClass;
3408be545baSRichard Henderson 
3418be545baSRichard Henderson 
/* Attribute keys accepted by memory_region_iommu_get_attr(). */
enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};
3458be545baSRichard Henderson 
/*
 * IOMMUMemoryRegionClass:
 *
 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 * and provide an implementation of at least the @translate method here
 * to handle requests to the memory region. Other methods are optional.
 *
 * The IOMMU implementation must use the IOMMU notifier infrastructure
 * to report whenever mappings are changed, by calling
 * memory_region_notify_iommu() (or, if necessary, by calling
 * memory_region_notify_iommu_one() for each registered notifier).
 *
 * Conceptually an IOMMU provides a mapping from input address
 * to an output TLB entry. If the IOMMU is aware of memory transaction
 * attributes and the output TLB entry depends on the transaction
 * attributes, we represent this using IOMMU indexes. Each index
 * selects a particular translation table that the IOMMU has:
 *
 *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
 *
 *   @translate takes an input address and an IOMMU index
 *
 * and the mapping returned can only depend on the input address and the
 * IOMMU index.
 *
 * Most IOMMUs don't care about the transaction attributes and support
 * only a single IOMMU index. A more complex IOMMU might have one index
 * for secure transactions and one for non-secure transactions.
 */
struct IOMMUMemoryRegionClass {
    /* private: */
    MemoryRegionClass parent_class;

    /* public: */
    /**
     * @translate:
     *
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @addr: address to be translated within the memory region
     *
     * @flag: requested access permission
     *
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);
    /**
     * @get_min_page_size:
     *
     * Returns minimum supported page size in bytes.
     *
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /**
     * @notify_flag_changed:
     *
     * Called when IOMMU Notifier flag changes (ie when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @old_flags: events which previously needed to be notified
     *
     * @new_flags: events which now need to be notified
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL if the new flag bitmap is not supported by the
     * IOMMU memory region. On failure, the method must fill in @errp
     * with the error.
     */
    int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                               IOMMUNotifierFlag old_flags,
                               IOMMUNotifierFlag new_flags,
                               Error **errp);
    /**
     * @replay:
     *
     * Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /**
     * @get_attr:
     *
     * Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @attr: attribute being queried
     *
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /**
     * @attrs_to_index:
     *
     * Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /**
     * @num_indexes:
     *
     * Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);
};
5188be545baSRichard Henderson 
5198be545baSRichard Henderson typedef struct RamDiscardListener RamDiscardListener;
5208be545baSRichard Henderson typedef int (*NotifyRamPopulate)(RamDiscardListener *rdl,
5218be545baSRichard Henderson                                  MemoryRegionSection *section);
5228be545baSRichard Henderson typedef void (*NotifyRamDiscard)(RamDiscardListener *rdl,
5238be545baSRichard Henderson                                  MemoryRegionSection *section);
5248be545baSRichard Henderson 
5258be545baSRichard Henderson struct RamDiscardListener {
5268be545baSRichard Henderson     /*
5278be545baSRichard Henderson      * @notify_populate:
5288be545baSRichard Henderson      *
5298be545baSRichard Henderson      * Notification that previously discarded memory is about to get populated.
5308be545baSRichard Henderson      * Listeners are able to object. If any listener objects, already
5318be545baSRichard Henderson      * successfully notified listeners are notified about a discard again.
5328be545baSRichard Henderson      *
5338be545baSRichard Henderson      * @rdl: the #RamDiscardListener getting notified
5348be545baSRichard Henderson      * @section: the #MemoryRegionSection to get populated. The section
5358be545baSRichard Henderson      *           is aligned within the memory region to the minimum granularity
5368be545baSRichard Henderson      *           unless it would exceed the registered section.
5378be545baSRichard Henderson      *
5388be545baSRichard Henderson      * Returns 0 on success. If the notification is rejected by the listener,
5398be545baSRichard Henderson      * an error is returned.
5408be545baSRichard Henderson      */
5418be545baSRichard Henderson     NotifyRamPopulate notify_populate;
5428be545baSRichard Henderson 
5438be545baSRichard Henderson     /*
5448be545baSRichard Henderson      * @notify_discard:
5458be545baSRichard Henderson      *
5468be545baSRichard Henderson      * Notification that previously populated memory was discarded successfully
5478be545baSRichard Henderson      * and listeners should drop all references to such memory and prevent
5488be545baSRichard Henderson      * new population (e.g., unmap).
5498be545baSRichard Henderson      *
5508be545baSRichard Henderson      * @rdl: the #RamDiscardListener getting notified
5518be545baSRichard Henderson      * @section: the #MemoryRegionSection to get populated. The section
5528be545baSRichard Henderson      *           is aligned within the memory region to the minimum granularity
5538be545baSRichard Henderson      *           unless it would exceed the registered section.
5548be545baSRichard Henderson      */
5558be545baSRichard Henderson     NotifyRamDiscard notify_discard;
5568be545baSRichard Henderson 
5578be545baSRichard Henderson     /*
5588be545baSRichard Henderson      * @double_discard_supported:
5598be545baSRichard Henderson      *
5608be545baSRichard Henderson      * The listener suppors getting @notify_discard notifications that span
5618be545baSRichard Henderson      * already discarded parts.
5628be545baSRichard Henderson      */
5638be545baSRichard Henderson     bool double_discard_supported;
5648be545baSRichard Henderson 
5658be545baSRichard Henderson     MemoryRegionSection *section;
5668be545baSRichard Henderson     QLIST_ENTRY(RamDiscardListener) next;
5678be545baSRichard Henderson };
5688be545baSRichard Henderson 
5698be545baSRichard Henderson static inline void ram_discard_listener_init(RamDiscardListener *rdl,
5708be545baSRichard Henderson                                              NotifyRamPopulate populate_fn,
5718be545baSRichard Henderson                                              NotifyRamDiscard discard_fn,
5728be545baSRichard Henderson                                              bool double_discard_supported)
5738be545baSRichard Henderson {
5748be545baSRichard Henderson     rdl->notify_populate = populate_fn;
5758be545baSRichard Henderson     rdl->notify_discard = discard_fn;
5768be545baSRichard Henderson     rdl->double_discard_supported = double_discard_supported;
5778be545baSRichard Henderson }
5788be545baSRichard Henderson 
5798be545baSRichard Henderson typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque);
5808be545baSRichard Henderson typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque);
5818be545baSRichard Henderson 
5828be545baSRichard Henderson /*
5838be545baSRichard Henderson  * RamDiscardManagerClass:
5848be545baSRichard Henderson  *
5858be545baSRichard Henderson  * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
5868be545baSRichard Henderson  * regions are currently populated to be used/accessed by the VM, notifying
5878be545baSRichard Henderson  * after parts were discarded (freeing up memory) and before parts will be
5888be545baSRichard Henderson  * populated (consuming memory), to be used/accessed by the VM.
5898be545baSRichard Henderson  *
5908be545baSRichard Henderson  * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
5918be545baSRichard Henderson  * #MemoryRegion isn't mapped into an address space yet (either directly
5928be545baSRichard Henderson  * or via an alias); it cannot change while the #MemoryRegion is
5938be545baSRichard Henderson  * mapped into an address space.
5948be545baSRichard Henderson  *
5958be545baSRichard Henderson  * The #RamDiscardManager is intended to be used by technologies that are
5968be545baSRichard Henderson  * incompatible with discarding of RAM (e.g., VFIO, which may pin all
5978be545baSRichard Henderson  * memory inside a #MemoryRegion), and require proper coordination to only
5988be545baSRichard Henderson  * map the currently populated parts, to hinder parts that are expected to
5998be545baSRichard Henderson  * remain discarded from silently getting populated and consuming memory.
6008be545baSRichard Henderson  * Technologies that support discarding of RAM don't have to bother and can
6018be545baSRichard Henderson  * simply map the whole #MemoryRegion.
6028be545baSRichard Henderson  *
6038be545baSRichard Henderson  * An example #RamDiscardManager is virtio-mem, which logically (un)plugs
6048be545baSRichard Henderson  * memory within an assigned RAM #MemoryRegion, coordinated with the VM.
6058be545baSRichard Henderson  * Logically unplugging memory consists of discarding RAM. The VM agreed to not
6068be545baSRichard Henderson  * access unplugged (discarded) memory - especially via DMA. virtio-mem will
6078be545baSRichard Henderson  * properly coordinate with listeners before memory is plugged (populated),
6088be545baSRichard Henderson  * and after memory is unplugged (discarded).
6098be545baSRichard Henderson  *
6108be545baSRichard Henderson  * Listeners are called in multiples of the minimum granularity (unless it
6118be545baSRichard Henderson  * would exceed the registered range) and changes are aligned to the minimum
6128be545baSRichard Henderson  * granularity within the #MemoryRegion. Listeners have to prepare for memory
6138be545baSRichard Henderson  * becoming discarded in a different granularity than it was populated and the
6148be545baSRichard Henderson  * other way around.
6158be545baSRichard Henderson  */
6168be545baSRichard Henderson struct RamDiscardManagerClass {
6178be545baSRichard Henderson     /* private */
6188be545baSRichard Henderson     InterfaceClass parent_class;
6198be545baSRichard Henderson 
6208be545baSRichard Henderson     /* public */
6218be545baSRichard Henderson 
6228be545baSRichard Henderson     /**
6238be545baSRichard Henderson      * @get_min_granularity:
6248be545baSRichard Henderson      *
6258be545baSRichard Henderson      * Get the minimum granularity in which listeners will get notified
6268be545baSRichard Henderson      * about changes within the #MemoryRegion via the #RamDiscardManager.
6278be545baSRichard Henderson      *
6288be545baSRichard Henderson      * @rdm: the #RamDiscardManager
6298be545baSRichard Henderson      * @mr: the #MemoryRegion
6308be545baSRichard Henderson      *
6318be545baSRichard Henderson      * Returns the minimum granularity.
6328be545baSRichard Henderson      */
6338be545baSRichard Henderson     uint64_t (*get_min_granularity)(const RamDiscardManager *rdm,
6348be545baSRichard Henderson                                     const MemoryRegion *mr);
6358be545baSRichard Henderson 
6368be545baSRichard Henderson     /**
6378be545baSRichard Henderson      * @is_populated:
6388be545baSRichard Henderson      *
6398be545baSRichard Henderson      * Check whether the given #MemoryRegionSection is completely populated
6408be545baSRichard Henderson      * (i.e., no parts are currently discarded) via the #RamDiscardManager.
6418be545baSRichard Henderson      * There are no alignment requirements.
6428be545baSRichard Henderson      *
6438be545baSRichard Henderson      * @rdm: the #RamDiscardManager
6448be545baSRichard Henderson      * @section: the #MemoryRegionSection
6458be545baSRichard Henderson      *
6468be545baSRichard Henderson      * Returns whether the given range is completely populated.
6478be545baSRichard Henderson      */
6488be545baSRichard Henderson     bool (*is_populated)(const RamDiscardManager *rdm,
6498be545baSRichard Henderson                          const MemoryRegionSection *section);
6508be545baSRichard Henderson 
6518be545baSRichard Henderson     /**
6528be545baSRichard Henderson      * @replay_populated:
6538be545baSRichard Henderson      *
6548be545baSRichard Henderson      * Call the #ReplayRamPopulate callback for all populated parts within the
6558be545baSRichard Henderson      * #MemoryRegionSection via the #RamDiscardManager.
6568be545baSRichard Henderson      *
6578be545baSRichard Henderson      * In case any call fails, no further calls are made.
6588be545baSRichard Henderson      *
6598be545baSRichard Henderson      * @rdm: the #RamDiscardManager
6608be545baSRichard Henderson      * @section: the #MemoryRegionSection
6618be545baSRichard Henderson      * @replay_fn: the #ReplayRamPopulate callback
6628be545baSRichard Henderson      * @opaque: pointer to forward to the callback
6638be545baSRichard Henderson      *
6648be545baSRichard Henderson      * Returns 0 on success, or a negative error if any notification failed.
6658be545baSRichard Henderson      */
6668be545baSRichard Henderson     int (*replay_populated)(const RamDiscardManager *rdm,
6678be545baSRichard Henderson                             MemoryRegionSection *section,
6688be545baSRichard Henderson                             ReplayRamPopulate replay_fn, void *opaque);
6698be545baSRichard Henderson 
6708be545baSRichard Henderson     /**
6718be545baSRichard Henderson      * @replay_discarded:
6728be545baSRichard Henderson      *
6738be545baSRichard Henderson      * Call the #ReplayRamDiscard callback for all discarded parts within the
6748be545baSRichard Henderson      * #MemoryRegionSection via the #RamDiscardManager.
6758be545baSRichard Henderson      *
6768be545baSRichard Henderson      * @rdm: the #RamDiscardManager
6778be545baSRichard Henderson      * @section: the #MemoryRegionSection
6788be545baSRichard Henderson      * @replay_fn: the #ReplayRamDiscard callback
6798be545baSRichard Henderson      * @opaque: pointer to forward to the callback
6808be545baSRichard Henderson      */
6818be545baSRichard Henderson     void (*replay_discarded)(const RamDiscardManager *rdm,
6828be545baSRichard Henderson                              MemoryRegionSection *section,
6838be545baSRichard Henderson                              ReplayRamDiscard replay_fn, void *opaque);
6848be545baSRichard Henderson 
6858be545baSRichard Henderson     /**
6868be545baSRichard Henderson      * @register_listener:
6878be545baSRichard Henderson      *
6888be545baSRichard Henderson      * Register a #RamDiscardListener for the given #MemoryRegionSection and
6898be545baSRichard Henderson      * immediately notify the #RamDiscardListener about all populated parts
6908be545baSRichard Henderson      * within the #MemoryRegionSection via the #RamDiscardManager.
6918be545baSRichard Henderson      *
6928be545baSRichard Henderson      * In case any notification fails, no further notifications are triggered
6938be545baSRichard Henderson      * and an error is logged.
6948be545baSRichard Henderson      *
6958be545baSRichard Henderson      * @rdm: the #RamDiscardManager
6968be545baSRichard Henderson      * @rdl: the #RamDiscardListener
6978be545baSRichard Henderson      * @section: the #MemoryRegionSection
6988be545baSRichard Henderson      */
6998be545baSRichard Henderson     void (*register_listener)(RamDiscardManager *rdm,
7008be545baSRichard Henderson                               RamDiscardListener *rdl,
7018be545baSRichard Henderson                               MemoryRegionSection *section);
7028be545baSRichard Henderson 
7038be545baSRichard Henderson     /**
7048be545baSRichard Henderson      * @unregister_listener:
7058be545baSRichard Henderson      *
7068be545baSRichard Henderson      * Unregister a previously registered #RamDiscardListener via the
7078be545baSRichard Henderson      * #RamDiscardManager after notifying the #RamDiscardListener about all
7088be545baSRichard Henderson      * populated parts becoming unpopulated within the registered
7098be545baSRichard Henderson      * #MemoryRegionSection.
7108be545baSRichard Henderson      *
7118be545baSRichard Henderson      * @rdm: the #RamDiscardManager
7128be545baSRichard Henderson      * @rdl: the #RamDiscardListener
7138be545baSRichard Henderson      */
7148be545baSRichard Henderson     void (*unregister_listener)(RamDiscardManager *rdm,
7158be545baSRichard Henderson                                 RamDiscardListener *rdl);
7168be545baSRichard Henderson };
7178be545baSRichard Henderson 
7188be545baSRichard Henderson uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
7198be545baSRichard Henderson                                                  const MemoryRegion *mr);
7208be545baSRichard Henderson 
7218be545baSRichard Henderson bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
7228be545baSRichard Henderson                                       const MemoryRegionSection *section);
7238be545baSRichard Henderson 
7248be545baSRichard Henderson int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
7258be545baSRichard Henderson                                          MemoryRegionSection *section,
7268be545baSRichard Henderson                                          ReplayRamPopulate replay_fn,
7278be545baSRichard Henderson                                          void *opaque);
7288be545baSRichard Henderson 
7298be545baSRichard Henderson void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
7308be545baSRichard Henderson                                           MemoryRegionSection *section,
7318be545baSRichard Henderson                                           ReplayRamDiscard replay_fn,
7328be545baSRichard Henderson                                           void *opaque);
7338be545baSRichard Henderson 
7348be545baSRichard Henderson void ram_discard_manager_register_listener(RamDiscardManager *rdm,
7358be545baSRichard Henderson                                            RamDiscardListener *rdl,
7368be545baSRichard Henderson                                            MemoryRegionSection *section);
7378be545baSRichard Henderson 
7388be545baSRichard Henderson void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
7398be545baSRichard Henderson                                              RamDiscardListener *rdl);
7408be545baSRichard Henderson 
7418be545baSRichard Henderson /**
742e3353d63SSteve Sistare  * memory_translate_iotlb: Extract addresses from a TLB entry.
743e3353d63SSteve Sistare  *                         Called with rcu_read_lock held.
7448be545baSRichard Henderson  *
7458be545baSRichard Henderson  * @iotlb: pointer to an #IOMMUTLBEntry
746e3353d63SSteve Sistare  * @xlat_p: return the offset of the entry from the start of the returned
747e3353d63SSteve Sistare  *          MemoryRegion.
7488be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
7498be545baSRichard Henderson  *
750e3353d63SSteve Sistare  * Return: On success, return the MemoryRegion containing the @iotlb translated
751e3353d63SSteve Sistare  *         addr.  The MemoryRegion must not be accessed after rcu_read_unlock.
752e3353d63SSteve Sistare  *         On failure, return NULL, setting @errp with error.
7538be545baSRichard Henderson  */
754e3353d63SSteve Sistare MemoryRegion *memory_translate_iotlb(IOMMUTLBEntry *iotlb, hwaddr *xlat_p,
755e3353d63SSteve Sistare                                      Error **errp);
7568be545baSRichard Henderson 
7578be545baSRichard Henderson typedef struct CoalescedMemoryRange CoalescedMemoryRange;
7588be545baSRichard Henderson typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
7598be545baSRichard Henderson 
7608be545baSRichard Henderson /** MemoryRegion:
7618be545baSRichard Henderson  *
7628be545baSRichard Henderson  * A struct representing a memory region.
7638be545baSRichard Henderson  */
7648be545baSRichard Henderson struct MemoryRegion {
7658be545baSRichard Henderson     Object parent_obj;
7668be545baSRichard Henderson 
7678be545baSRichard Henderson     /* private: */
7688be545baSRichard Henderson 
7698be545baSRichard Henderson     /* The following fields should fit in a cache line */
7708be545baSRichard Henderson     bool romd_mode;
7718be545baSRichard Henderson     bool ram;
7728be545baSRichard Henderson     bool subpage;
7738be545baSRichard Henderson     bool readonly; /* For RAM regions */
7748be545baSRichard Henderson     bool nonvolatile;
7758be545baSRichard Henderson     bool rom_device;
7768be545baSRichard Henderson     bool flush_coalesced_mmio;
7778be545baSRichard Henderson     bool unmergeable;
7788be545baSRichard Henderson     uint8_t dirty_log_mask;
7798be545baSRichard Henderson     bool is_iommu;
7808be545baSRichard Henderson     RAMBlock *ram_block;
7818be545baSRichard Henderson     Object *owner;
7828be545baSRichard Henderson     /* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */
7838be545baSRichard Henderson     DeviceState *dev;
7848be545baSRichard Henderson 
7858be545baSRichard Henderson     const MemoryRegionOps *ops;
7868be545baSRichard Henderson     void *opaque;
7878be545baSRichard Henderson     MemoryRegion *container;
7888be545baSRichard Henderson     int mapped_via_alias; /* Mapped via an alias, container might be NULL */
7898be545baSRichard Henderson     Int128 size;
7908be545baSRichard Henderson     hwaddr addr;
7918be545baSRichard Henderson     void (*destructor)(MemoryRegion *mr);
7928be545baSRichard Henderson     uint64_t align;
7938be545baSRichard Henderson     bool terminates;
7948be545baSRichard Henderson     bool ram_device;
7958be545baSRichard Henderson     bool enabled;
7968be545baSRichard Henderson     uint8_t vga_logging_count;
7978be545baSRichard Henderson     MemoryRegion *alias;
7988be545baSRichard Henderson     hwaddr alias_offset;
7998be545baSRichard Henderson     int32_t priority;
8008be545baSRichard Henderson     QTAILQ_HEAD(, MemoryRegion) subregions;
8018be545baSRichard Henderson     QTAILQ_ENTRY(MemoryRegion) subregions_link;
8028be545baSRichard Henderson     QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
8038be545baSRichard Henderson     const char *name;
8048be545baSRichard Henderson     unsigned ioeventfd_nb;
8058be545baSRichard Henderson     MemoryRegionIoeventfd *ioeventfds;
8068be545baSRichard Henderson     RamDiscardManager *rdm; /* Only for RAM */
8078be545baSRichard Henderson 
8088be545baSRichard Henderson     /* For devices designed to perform re-entrant IO into their own IO MRs */
8098be545baSRichard Henderson     bool disable_reentrancy_guard;
8108be545baSRichard Henderson };
8118be545baSRichard Henderson 
8128be545baSRichard Henderson struct IOMMUMemoryRegion {
8138be545baSRichard Henderson     MemoryRegion parent_obj;
8148be545baSRichard Henderson 
8158be545baSRichard Henderson     QLIST_HEAD(, IOMMUNotifier) iommu_notify;
8168be545baSRichard Henderson     IOMMUNotifierFlag iommu_notify_flags;
8178be545baSRichard Henderson };
8188be545baSRichard Henderson 
8198be545baSRichard Henderson #define IOMMU_NOTIFIER_FOREACH(n, mr) \
8208be545baSRichard Henderson     QLIST_FOREACH((n), &(mr)->iommu_notify, node)
8218be545baSRichard Henderson 
8228be545baSRichard Henderson #define MEMORY_LISTENER_PRIORITY_MIN            0
8238be545baSRichard Henderson #define MEMORY_LISTENER_PRIORITY_ACCEL          10
8248be545baSRichard Henderson #define MEMORY_LISTENER_PRIORITY_DEV_BACKEND    10
8258be545baSRichard Henderson 
8268be545baSRichard Henderson /**
8278be545baSRichard Henderson  * struct MemoryListener: callbacks structure for updates to the physical memory map
8288be545baSRichard Henderson  *
8298be545baSRichard Henderson  * Allows a component to adjust to changes in the guest-visible memory map.
8308be545baSRichard Henderson  * Use with memory_listener_register() and memory_listener_unregister().
8318be545baSRichard Henderson  */
8328be545baSRichard Henderson struct MemoryListener {
8338be545baSRichard Henderson     /**
8348be545baSRichard Henderson      * @begin:
8358be545baSRichard Henderson      *
8368be545baSRichard Henderson      * Called at the beginning of an address space update transaction.
8378be545baSRichard Henderson      * Followed by calls to #MemoryListener.region_add(),
8388be545baSRichard Henderson      * #MemoryListener.region_del(), #MemoryListener.region_nop(),
8398be545baSRichard Henderson      * #MemoryListener.log_start() and #MemoryListener.log_stop() in
8408be545baSRichard Henderson      * increasing address order.
8418be545baSRichard Henderson      *
8428be545baSRichard Henderson      * @listener: The #MemoryListener.
8438be545baSRichard Henderson      */
8448be545baSRichard Henderson     void (*begin)(MemoryListener *listener);
8458be545baSRichard Henderson 
8468be545baSRichard Henderson     /**
8478be545baSRichard Henderson      * @commit:
8488be545baSRichard Henderson      *
8498be545baSRichard Henderson      * Called at the end of an address space update transaction,
8508be545baSRichard Henderson      * after the last call to #MemoryListener.region_add(),
8518be545baSRichard Henderson      * #MemoryListener.region_del() or #MemoryListener.region_nop(),
8528be545baSRichard Henderson      * #MemoryListener.log_start() and #MemoryListener.log_stop().
8538be545baSRichard Henderson      *
8548be545baSRichard Henderson      * @listener: The #MemoryListener.
8558be545baSRichard Henderson      */
8568be545baSRichard Henderson     void (*commit)(MemoryListener *listener);
8578be545baSRichard Henderson 
8588be545baSRichard Henderson     /**
8598be545baSRichard Henderson      * @region_add:
8608be545baSRichard Henderson      *
8618be545baSRichard Henderson      * Called during an address space update transaction,
8628be545baSRichard Henderson      * for a section of the address space that is new in this address space
8638be545baSRichard Henderson      * space since the last transaction.
8648be545baSRichard Henderson      *
8658be545baSRichard Henderson      * @listener: The #MemoryListener.
8668be545baSRichard Henderson      * @section: The new #MemoryRegionSection.
8678be545baSRichard Henderson      */
8688be545baSRichard Henderson     void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
8698be545baSRichard Henderson 
8708be545baSRichard Henderson     /**
8718be545baSRichard Henderson      * @region_del:
8728be545baSRichard Henderson      *
8738be545baSRichard Henderson      * Called during an address space update transaction,
8748be545baSRichard Henderson      * for a section of the address space that has disappeared in the address
8758be545baSRichard Henderson      * space since the last transaction.
8768be545baSRichard Henderson      *
8778be545baSRichard Henderson      * @listener: The #MemoryListener.
8788be545baSRichard Henderson      * @section: The old #MemoryRegionSection.
8798be545baSRichard Henderson      */
8808be545baSRichard Henderson     void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
8818be545baSRichard Henderson 
8828be545baSRichard Henderson     /**
8838be545baSRichard Henderson      * @region_nop:
8848be545baSRichard Henderson      *
8858be545baSRichard Henderson      * Called during an address space update transaction,
8868be545baSRichard Henderson      * for a section of the address space that is in the same place in the address
8878be545baSRichard Henderson      * space as in the last transaction.
8888be545baSRichard Henderson      *
8898be545baSRichard Henderson      * @listener: The #MemoryListener.
8908be545baSRichard Henderson      * @section: The #MemoryRegionSection.
8918be545baSRichard Henderson      */
8928be545baSRichard Henderson     void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
8938be545baSRichard Henderson 
8948be545baSRichard Henderson     /**
8958be545baSRichard Henderson      * @log_start:
8968be545baSRichard Henderson      *
8978be545baSRichard Henderson      * Called during an address space update transaction, after
8988be545baSRichard Henderson      * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
8998be545baSRichard Henderson      * #MemoryListener.region_nop(), if dirty memory logging clients have
9008be545baSRichard Henderson      * become active since the last transaction.
9018be545baSRichard Henderson      *
9028be545baSRichard Henderson      * @listener: The #MemoryListener.
9038be545baSRichard Henderson      * @section: The #MemoryRegionSection.
9048be545baSRichard Henderson      * @old: A bitmap of dirty memory logging clients that were active in
9058be545baSRichard Henderson      * the previous transaction.
9068be545baSRichard Henderson      * @new: A bitmap of dirty memory logging clients that are active in
9078be545baSRichard Henderson      * the current transaction.
9088be545baSRichard Henderson      */
9098be545baSRichard Henderson     void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
9108be545baSRichard Henderson                       int old_val, int new_val);
9118be545baSRichard Henderson 
9128be545baSRichard Henderson     /**
9138be545baSRichard Henderson      * @log_stop:
9148be545baSRichard Henderson      *
9158be545baSRichard Henderson      * Called during an address space update transaction, after
9168be545baSRichard Henderson      * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
9178be545baSRichard Henderson      * #MemoryListener.region_nop() and possibly after
9188be545baSRichard Henderson      * #MemoryListener.log_start(), if dirty memory logging clients have
9198be545baSRichard Henderson      * become inactive since the last transaction.
9208be545baSRichard Henderson      *
9218be545baSRichard Henderson      * @listener: The #MemoryListener.
9228be545baSRichard Henderson      * @section: The #MemoryRegionSection.
9238be545baSRichard Henderson      * @old: A bitmap of dirty memory logging clients that were active in
9248be545baSRichard Henderson      * the previous transaction.
9258be545baSRichard Henderson      * @new: A bitmap of dirty memory logging clients that are active in
9268be545baSRichard Henderson      * the current transaction.
9278be545baSRichard Henderson      */
9288be545baSRichard Henderson     void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
9298be545baSRichard Henderson                      int old_val, int new_val);
9308be545baSRichard Henderson 
9318be545baSRichard Henderson     /**
9328be545baSRichard Henderson      * @log_sync:
9338be545baSRichard Henderson      *
9348be545baSRichard Henderson      * Called by memory_region_snapshot_and_clear_dirty() and
9358be545baSRichard Henderson      * memory_global_dirty_log_sync(), before accessing QEMU's "official"
9368be545baSRichard Henderson      * copy of the dirty memory bitmap for a #MemoryRegionSection.
9378be545baSRichard Henderson      *
9388be545baSRichard Henderson      * @listener: The #MemoryListener.
9398be545baSRichard Henderson      * @section: The #MemoryRegionSection.
9408be545baSRichard Henderson      */
9418be545baSRichard Henderson     void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
9428be545baSRichard Henderson 
9438be545baSRichard Henderson     /**
9448be545baSRichard Henderson      * @log_sync_global:
9458be545baSRichard Henderson      *
9468be545baSRichard Henderson      * This is the global version of @log_sync when the listener does
9478be545baSRichard Henderson      * not have a way to synchronize the log with finer granularity.
9488be545baSRichard Henderson      * When the listener registers with @log_sync_global defined, then
9498be545baSRichard Henderson      * its @log_sync must be NULL.  Vice versa.
9508be545baSRichard Henderson      *
9518be545baSRichard Henderson      * @listener: The #MemoryListener.
9528be545baSRichard Henderson      * @last_stage: The last stage to synchronize the log during migration.
9538be545baSRichard Henderson      * The caller should guarantee that the synchronization with true for
9548be545baSRichard Henderson      * @last_stage is triggered for once after all VCPUs have been stopped.
9558be545baSRichard Henderson      */
9568be545baSRichard Henderson     void (*log_sync_global)(MemoryListener *listener, bool last_stage);
9578be545baSRichard Henderson 
9588be545baSRichard Henderson     /**
9598be545baSRichard Henderson      * @log_clear:
9608be545baSRichard Henderson      *
9618be545baSRichard Henderson      * Called before reading the dirty memory bitmap for a
9628be545baSRichard Henderson      * #MemoryRegionSection.
9638be545baSRichard Henderson      *
9648be545baSRichard Henderson      * @listener: The #MemoryListener.
9658be545baSRichard Henderson      * @section: The #MemoryRegionSection.
9668be545baSRichard Henderson      */
9678be545baSRichard Henderson     void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);
9688be545baSRichard Henderson 
9698be545baSRichard Henderson     /**
9708be545baSRichard Henderson      * @log_global_start:
9718be545baSRichard Henderson      *
9728be545baSRichard Henderson      * Called by memory_global_dirty_log_start(), which
9738be545baSRichard Henderson      * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
9748be545baSRichard Henderson      * the address space.  #MemoryListener.log_global_start() is also
9758be545baSRichard Henderson      * called when a #MemoryListener is added, if global dirty logging is
9768be545baSRichard Henderson      * active at that time.
9778be545baSRichard Henderson      *
9788be545baSRichard Henderson      * @listener: The #MemoryListener.
9798be545baSRichard Henderson      * @errp: pointer to Error*, to store an error if it happens.
9808be545baSRichard Henderson      *
9818be545baSRichard Henderson      * Return: true on success, else false setting @errp with error.
9828be545baSRichard Henderson      */
9838be545baSRichard Henderson     bool (*log_global_start)(MemoryListener *listener, Error **errp);
9848be545baSRichard Henderson 
9858be545baSRichard Henderson     /**
9868be545baSRichard Henderson      * @log_global_stop:
9878be545baSRichard Henderson      *
9888be545baSRichard Henderson      * Called by memory_global_dirty_log_stop(), which
9898be545baSRichard Henderson      * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
9908be545baSRichard Henderson      * the address space.
9918be545baSRichard Henderson      *
9928be545baSRichard Henderson      * @listener: The #MemoryListener.
9938be545baSRichard Henderson      */
9948be545baSRichard Henderson     void (*log_global_stop)(MemoryListener *listener);
9958be545baSRichard Henderson 
9968be545baSRichard Henderson     /**
9978be545baSRichard Henderson      * @log_global_after_sync:
9988be545baSRichard Henderson      *
9998be545baSRichard Henderson      * Called after reading the dirty memory bitmap
10008be545baSRichard Henderson      * for any #MemoryRegionSection.
10018be545baSRichard Henderson      *
10028be545baSRichard Henderson      * @listener: The #MemoryListener.
10038be545baSRichard Henderson      */
10048be545baSRichard Henderson     void (*log_global_after_sync)(MemoryListener *listener);
10058be545baSRichard Henderson 
10068be545baSRichard Henderson     /**
10078be545baSRichard Henderson      * @eventfd_add:
10088be545baSRichard Henderson      *
10098be545baSRichard Henderson      * Called during an address space update transaction,
10108be545baSRichard Henderson      * for a section of the address space that has had a new ioeventfd
10118be545baSRichard Henderson      * registration since the last transaction.
10128be545baSRichard Henderson      *
10138be545baSRichard Henderson      * @listener: The #MemoryListener.
10148be545baSRichard Henderson      * @section: The new #MemoryRegionSection.
10158be545baSRichard Henderson      * @match_data: The @match_data parameter for the new ioeventfd.
10168be545baSRichard Henderson      * @data: The @data parameter for the new ioeventfd.
10178be545baSRichard Henderson      * @e: The #EventNotifier parameter for the new ioeventfd.
10188be545baSRichard Henderson      */
10198be545baSRichard Henderson     void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
10208be545baSRichard Henderson                         bool match_data, uint64_t data, EventNotifier *e);
10218be545baSRichard Henderson 
10228be545baSRichard Henderson     /**
10238be545baSRichard Henderson      * @eventfd_del:
10248be545baSRichard Henderson      *
10258be545baSRichard Henderson      * Called during an address space update transaction,
10268be545baSRichard Henderson      * for a section of the address space that has dropped an ioeventfd
10278be545baSRichard Henderson      * registration since the last transaction.
10288be545baSRichard Henderson      *
10298be545baSRichard Henderson      * @listener: The #MemoryListener.
10308be545baSRichard Henderson      * @section: The new #MemoryRegionSection.
10318be545baSRichard Henderson      * @match_data: The @match_data parameter for the dropped ioeventfd.
10328be545baSRichard Henderson      * @data: The @data parameter for the dropped ioeventfd.
10338be545baSRichard Henderson      * @e: The #EventNotifier parameter for the dropped ioeventfd.
10348be545baSRichard Henderson      */
10358be545baSRichard Henderson     void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
10368be545baSRichard Henderson                         bool match_data, uint64_t data, EventNotifier *e);
10378be545baSRichard Henderson 
10388be545baSRichard Henderson     /**
10398be545baSRichard Henderson      * @coalesced_io_add:
10408be545baSRichard Henderson      *
10418be545baSRichard Henderson      * Called during an address space update transaction,
10428be545baSRichard Henderson      * for a section of the address space that has had a new coalesced
10438be545baSRichard Henderson      * MMIO range registration since the last transaction.
10448be545baSRichard Henderson      *
10458be545baSRichard Henderson      * @listener: The #MemoryListener.
10468be545baSRichard Henderson      * @section: The new #MemoryRegionSection.
10478be545baSRichard Henderson      * @addr: The starting address for the coalesced MMIO range.
10488be545baSRichard Henderson      * @len: The length of the coalesced MMIO range.
10498be545baSRichard Henderson      */
10508be545baSRichard Henderson     void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
10518be545baSRichard Henderson                                hwaddr addr, hwaddr len);
10528be545baSRichard Henderson 
10538be545baSRichard Henderson     /**
10548be545baSRichard Henderson      * @coalesced_io_del:
10558be545baSRichard Henderson      *
10568be545baSRichard Henderson      * Called during an address space update transaction,
10578be545baSRichard Henderson      * for a section of the address space that has dropped a coalesced
10588be545baSRichard Henderson      * MMIO range since the last transaction.
10598be545baSRichard Henderson      *
10608be545baSRichard Henderson      * @listener: The #MemoryListener.
10618be545baSRichard Henderson      * @section: The new #MemoryRegionSection.
10628be545baSRichard Henderson      * @addr: The starting address for the coalesced MMIO range.
10638be545baSRichard Henderson      * @len: The length of the coalesced MMIO range.
10648be545baSRichard Henderson      */
10658be545baSRichard Henderson     void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
10668be545baSRichard Henderson                                hwaddr addr, hwaddr len);
10678be545baSRichard Henderson     /**
10688be545baSRichard Henderson      * @priority:
10698be545baSRichard Henderson      *
10708be545baSRichard Henderson      * Govern the order in which memory listeners are invoked. Lower priorities
10718be545baSRichard Henderson      * are invoked earlier for "add" or "start" callbacks, and later for "delete"
10728be545baSRichard Henderson      * or "stop" callbacks.
10738be545baSRichard Henderson      */
10748be545baSRichard Henderson     unsigned priority;
10758be545baSRichard Henderson 
10768be545baSRichard Henderson     /**
10778be545baSRichard Henderson      * @name:
10788be545baSRichard Henderson      *
10798be545baSRichard Henderson      * Name of the listener.  It can be used in contexts where we'd like to
10808be545baSRichard Henderson      * identify one memory listener with the rest.
10818be545baSRichard Henderson      */
10828be545baSRichard Henderson     const char *name;
10838be545baSRichard Henderson 
10848be545baSRichard Henderson     /* private: */
10858be545baSRichard Henderson     AddressSpace *address_space;
10868be545baSRichard Henderson     QTAILQ_ENTRY(MemoryListener) link;
10878be545baSRichard Henderson     QTAILQ_ENTRY(MemoryListener) link_as;
10888be545baSRichard Henderson };
10898be545baSRichard Henderson 
/*
 * A client waiting for bounce buffer space in an AddressSpace to free up.
 * The bottom half @bh is the callback invoked when buffer space becomes
 * available again (see AddressSpace.map_client_list).
 */
typedef struct AddressSpaceMapClient {
    QEMUBH *bh;                              /* invoked when buffers free up */
    QLIST_ENTRY(AddressSpaceMapClient) link; /* entry in AddressSpace.map_client_list */
} AddressSpaceMapClient;
10948be545baSRichard Henderson 
/* Default value for AddressSpace.max_bounce_buffer_size, in bytes */
#define DEFAULT_MAX_BOUNCE_BUFFER_SIZE (4096)

/**
 * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* private: */
    struct rcu_head rcu;          /* for RCU-deferred reclamation of this struct */
    char *name;                   /* for debugging purposes */
    MemoryRegion *root;           /* root region of this address space */

    /* Accessed via RCU.  */
    struct FlatView *current_map; /* flattened view of the current topology */

    int ioeventfd_nb;             /* number of entries in @ioeventfds */
    int ioeventfd_notifiers;
    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(, MemoryListener) listeners;        /* listeners attached to this AS */
    QTAILQ_ENTRY(AddressSpace) address_spaces_link; /* entry in global AS list */

    /*
     * Maximum DMA bounce buffer size used for indirect memory map requests.
     * This limits the total size of bounce buffer allocations made for
     * DMA requests to indirect memory regions within this AddressSpace. DMA
     * requests that exceed the limit (e.g. due to overly large requested size
     * or concurrent DMA requests having claimed too much buffer space) will be
     * rejected and left to the caller to handle.
     */
    size_t max_bounce_buffer_size;
    /* Total size of bounce buffers currently allocated, atomically accessed */
    size_t bounce_buffer_size;
    /* List of callbacks to invoke when buffers free up */
    QemuMutex map_client_list_lock;
    QLIST_HEAD(, AddressSpaceMapClient) map_client_list;
};
11308be545baSRichard Henderson 
11318be545baSRichard Henderson typedef struct AddressSpaceDispatch AddressSpaceDispatch;
11328be545baSRichard Henderson typedef struct FlatRange FlatRange;
11338be545baSRichard Henderson 
/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;                   /* for RCU-deferred destruction */
    unsigned ref;                          /* reference count */
    FlatRange *ranges;                     /* sorted array of non-overlapping ranges */
    unsigned nr;                           /* number of used entries in @ranges */
    unsigned nr_allocated;                 /* allocated capacity of @ranges */
    struct AddressSpaceDispatch *dispatch; /* associated dispatch state */
    MemoryRegion *root;                    /* root region this view was rendered from */
};
11468be545baSRichard Henderson 
11478be545baSRichard Henderson static inline FlatView *address_space_to_flatview(AddressSpace *as)
11488be545baSRichard Henderson {
11498be545baSRichard Henderson     return qatomic_rcu_read(&as->current_map);
11508be545baSRichard Henderson }
11518be545baSRichard Henderson 
/**
 * typedef flatview_cb: callback for flatview_for_each_range()
 *
 * @start: start address of the range within the FlatView
 * @len: length of the range in bytes
 * @mr: MemoryRegion covering this range
 * @offset_in_region: offset of the first byte of the range within @mr
 * @opaque: data pointer passed to flatview_for_each_range()
 *
 * Returns: true to stop the iteration early, false to keep going.
 */
typedef bool (*flatview_cb)(Int128 start,
                            Int128 len,
                            const MemoryRegion *mr,
                            hwaddr offset_in_region,
                            void *opaque);
11688be545baSRichard Henderson 
/**
 * flatview_for_each_range: Iterate through a FlatView
 * @fv: the FlatView to iterate through
 * @cb: function to call for each range
 * @opaque: opaque data pointer to pass to @cb
 *
 * A FlatView is made up of a list of non-overlapping ranges, each of
 * which is a slice of a MemoryRegion. This function iterates through
 * each range in @fv, calling @cb. The callback function can terminate
 * iteration early by returning 'true'.
 */
void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);
11818be545baSRichard Henderson 
11828be545baSRichard Henderson static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
11838be545baSRichard Henderson                                           MemoryRegionSection *b)
11848be545baSRichard Henderson {
11858be545baSRichard Henderson     return a->mr == b->mr &&
11868be545baSRichard Henderson            a->fv == b->fv &&
11878be545baSRichard Henderson            a->offset_within_region == b->offset_within_region &&
11888be545baSRichard Henderson            a->offset_within_address_space == b->offset_within_address_space &&
11898be545baSRichard Henderson            int128_eq(a->size, b->size) &&
11908be545baSRichard Henderson            a->readonly == b->readonly &&
11918be545baSRichard Henderson            a->nonvolatile == b->nonvolatile;
11928be545baSRichard Henderson }
11938be545baSRichard Henderson 
/**
 * memory_region_section_new_copy: Copy a memory region section
 *
 * Allocate memory for a new copy, copy the memory region section, and
 * properly take a reference on all relevant members.
 *
 * @s: the #MemoryRegionSection to copy
 *
 * Return: the newly allocated copy; release it with
 * memory_region_section_free_copy().
 */
MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s);
12038be545baSRichard Henderson 
/**
 * memory_region_section_free_copy: Free a copied memory region section
 *
 * Free a copy of a memory section created via memory_region_section_new_copy(),
 * properly dropping references on all relevant members.
 *
 * @s: the #MemoryRegionSection to free
 */
void memory_region_section_free_copy(MemoryRegionSection *s);
12138be545baSRichard Henderson 
12148be545baSRichard Henderson /**
1215*f47a672aSChenyi Qiang  * memory_region_section_intersect_range: Adjust the memory section to cover
1216*f47a672aSChenyi Qiang  * the intersection with the given range.
1217*f47a672aSChenyi Qiang  *
1218*f47a672aSChenyi Qiang  * @s: the #MemoryRegionSection to be adjusted
1219*f47a672aSChenyi Qiang  * @offset: the offset of the given range in the memory region
1220*f47a672aSChenyi Qiang  * @size: the size of the given range
1221*f47a672aSChenyi Qiang  *
1222*f47a672aSChenyi Qiang  * Returns false if the intersection is empty, otherwise returns true.
1223*f47a672aSChenyi Qiang  */
1224*f47a672aSChenyi Qiang static inline bool memory_region_section_intersect_range(MemoryRegionSection *s,
1225*f47a672aSChenyi Qiang                                                          uint64_t offset,
1226*f47a672aSChenyi Qiang                                                          uint64_t size)
1227*f47a672aSChenyi Qiang {
1228*f47a672aSChenyi Qiang     uint64_t start = MAX(s->offset_within_region, offset);
1229*f47a672aSChenyi Qiang     Int128 end = int128_min(int128_add(int128_make64(s->offset_within_region),
1230*f47a672aSChenyi Qiang                                        s->size),
1231*f47a672aSChenyi Qiang                             int128_add(int128_make64(offset),
1232*f47a672aSChenyi Qiang                                        int128_make64(size)));
1233*f47a672aSChenyi Qiang 
1234*f47a672aSChenyi Qiang     if (int128_le(end, int128_make64(start))) {
1235*f47a672aSChenyi Qiang         return false;
1236*f47a672aSChenyi Qiang     }
1237*f47a672aSChenyi Qiang 
1238*f47a672aSChenyi Qiang     s->offset_within_address_space += start - s->offset_within_region;
1239*f47a672aSChenyi Qiang     s->offset_within_region = start;
1240*f47a672aSChenyi Qiang     s->size = int128_sub(end, int128_make64(start));
1241*f47a672aSChenyi Qiang     return true;
1242*f47a672aSChenyi Qiang }
1243*f47a672aSChenyi Qiang 
/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size);
12598be545baSRichard Henderson 
/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);
12768be545baSRichard Henderson 
/**
 * memory_region_unref: Subtract 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);
12888be545baSRichard Henderson 
/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);
13098be545baSRichard Henderson 
/**
 * memory_region_init_ram_nomigrate:  Initialize RAM memory region.  Accesses
 *                                    into the region will modify memory
 *                                    directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);
13328be545baSRichard Henderson 
/**
 * memory_region_init_ram_flags_nomigrate:  Initialize RAM memory region.
 *                                          Accesses into the region will
 *                                          modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE,
 *             RAM_GUEST_MEMFD.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
                                            Object *owner,
                                            const char *name,
                                            uint64_t size,
                                            uint32_t ram_flags,
                                            Error **errp);
13588be545baSRichard Henderson 
/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     Changing the size while migrating
 *                                     can result in the migration being
 *                                     canceled.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
13918be545baSRichard Henderson #ifdef CONFIG_POSIX
13928be545baSRichard Henderson 
/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *             RAM_READONLY_FD, RAM_GUEST_MEMFD
 * @path: the path in which to allocate the RAM.
 * @offset: offset within the file referenced by path
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_from_file(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      ram_addr_t offset,
                                      Error **errp);
14258be545baSRichard Henderson 
/**
 * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
 *                                  mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *             RAM_READONLY_FD, RAM_GUEST_MEMFD
 * @fd: the fd to mmap.
 * @offset: offset within the file referenced by fd
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    uint32_t ram_flags,
                                    int fd,
                                    ram_addr_t offset,
                                    Error **errp);
14548be545baSRichard Henderson #endif
14558be545baSRichard Henderson 
14568be545baSRichard Henderson /**
14578be545baSRichard Henderson  * memory_region_init_ram_ptr:  Initialize RAM memory region from a
14588be545baSRichard Henderson  *                              user-provided pointer.  Accesses into the
14598be545baSRichard Henderson  *                              region will modify memory directly.
14608be545baSRichard Henderson  *
14618be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
14628be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
14638be545baSRichard Henderson  * @name: Region name, becomes part of RAMBlock name used in migration stream
14648be545baSRichard Henderson  *        must be unique within any device
14658be545baSRichard Henderson  * @size: size of the region.
14668be545baSRichard Henderson  * @ptr: memory to be mapped; must contain at least @size bytes.
14678be545baSRichard Henderson  *
14688be545baSRichard Henderson  * Note that this function does not do anything to cause the data in the
14698be545baSRichard Henderson  * RAM memory region to be migrated; that is the responsibility of the caller.
14708be545baSRichard Henderson  */
14718be545baSRichard Henderson void memory_region_init_ram_ptr(MemoryRegion *mr,
14728be545baSRichard Henderson                                 Object *owner,
14738be545baSRichard Henderson                                 const char *name,
14748be545baSRichard Henderson                                 uint64_t size,
14758be545baSRichard Henderson                                 void *ptr);
14768be545baSRichard Henderson 
14778be545baSRichard Henderson /**
14788be545baSRichard Henderson  * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
14798be545baSRichard Henderson  *                                     a user-provided pointer.
14808be545baSRichard Henderson  *
14818be545baSRichard Henderson  * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
14838be545baSRichard Henderson  * into the VM address space and access to the region will modify memory
14848be545baSRichard Henderson  * directly.  However, the memory region should not be included in a memory
14858be545baSRichard Henderson  * dump (device may not be enabled/mapped at the time of the dump), and
14868be545baSRichard Henderson  * operations incompatible with manipulating MMIO should be avoided.  Replaces
14878be545baSRichard Henderson  * skip_dump flag.
14888be545baSRichard Henderson  *
14898be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
14908be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
14918be545baSRichard Henderson  * @name: the name of the region.
14928be545baSRichard Henderson  * @size: size of the region.
14938be545baSRichard Henderson  * @ptr: memory to be mapped; must contain at least @size bytes.
14948be545baSRichard Henderson  *
14958be545baSRichard Henderson  * Note that this function does not do anything to cause the data in the
14968be545baSRichard Henderson  * RAM memory region to be migrated; that is the responsibility of the caller.
14978be545baSRichard Henderson  * (For RAM device memory regions, migrating the contents rarely makes sense.)
14988be545baSRichard Henderson  */
14998be545baSRichard Henderson void memory_region_init_ram_device_ptr(MemoryRegion *mr,
15008be545baSRichard Henderson                                        Object *owner,
15018be545baSRichard Henderson                                        const char *name,
15028be545baSRichard Henderson                                        uint64_t size,
15038be545baSRichard Henderson                                        void *ptr);
15048be545baSRichard Henderson 
15058be545baSRichard Henderson /**
15068be545baSRichard Henderson  * memory_region_init_alias: Initialize a memory region that aliases all or a
15078be545baSRichard Henderson  *                           part of another memory region.
15088be545baSRichard Henderson  *
15098be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
15108be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
15118be545baSRichard Henderson  * @name: used for debugging; not visible to the user or ABI
15128be545baSRichard Henderson  * @orig: the region to be referenced; @mr will be equivalent to
15138be545baSRichard Henderson  *        @orig between @offset and @offset + @size - 1.
15148be545baSRichard Henderson  * @offset: start of the section in @orig to be referenced.
15158be545baSRichard Henderson  * @size: size of the region.
15168be545baSRichard Henderson  */
15178be545baSRichard Henderson void memory_region_init_alias(MemoryRegion *mr,
15188be545baSRichard Henderson                               Object *owner,
15198be545baSRichard Henderson                               const char *name,
15208be545baSRichard Henderson                               MemoryRegion *orig,
15218be545baSRichard Henderson                               hwaddr offset,
15228be545baSRichard Henderson                               uint64_t size);
15238be545baSRichard Henderson 
15248be545baSRichard Henderson /**
15258be545baSRichard Henderson  * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
15268be545baSRichard Henderson  *
15278be545baSRichard Henderson  * This has the same effect as calling memory_region_init_ram_nomigrate()
15288be545baSRichard Henderson  * and then marking the resulting region read-only with
15298be545baSRichard Henderson  * memory_region_set_readonly().
15308be545baSRichard Henderson  *
15318be545baSRichard Henderson  * Note that this function does not do anything to cause the data in the
15328be545baSRichard Henderson  * RAM side of the memory region to be migrated; that is the responsibility
15338be545baSRichard Henderson  * of the caller.
15348be545baSRichard Henderson  *
15358be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
15368be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
15378be545baSRichard Henderson  * @name: Region name, becomes part of RAMBlock name used in migration stream
15388be545baSRichard Henderson  *        must be unique within any device
15398be545baSRichard Henderson  * @size: size of the region.
15408be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
15418be545baSRichard Henderson  *
15428be545baSRichard Henderson  * Return: true on success, else false setting @errp with error.
15438be545baSRichard Henderson  */
15448be545baSRichard Henderson bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
15458be545baSRichard Henderson                                       Object *owner,
15468be545baSRichard Henderson                                       const char *name,
15478be545baSRichard Henderson                                       uint64_t size,
15488be545baSRichard Henderson                                       Error **errp);
15498be545baSRichard Henderson 
15508be545baSRichard Henderson /**
15518be545baSRichard Henderson  * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
15528be545baSRichard Henderson  *                                 Writes are handled via callbacks.
15538be545baSRichard Henderson  *
15548be545baSRichard Henderson  * Note that this function does not do anything to cause the data in the
15558be545baSRichard Henderson  * RAM side of the memory region to be migrated; that is the responsibility
15568be545baSRichard Henderson  * of the caller.
15578be545baSRichard Henderson  *
15588be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
15598be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
15608be545baSRichard Henderson  * @ops: callbacks for write access handling (must not be NULL).
15618be545baSRichard Henderson  * @opaque: passed to the read and write callbacks of the @ops structure.
15628be545baSRichard Henderson  * @name: Region name, becomes part of RAMBlock name used in migration stream
15638be545baSRichard Henderson  *        must be unique within any device
15648be545baSRichard Henderson  * @size: size of the region.
15658be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
15668be545baSRichard Henderson  *
15678be545baSRichard Henderson  * Return: true on success, else false setting @errp with error.
15688be545baSRichard Henderson  */
15698be545baSRichard Henderson bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
15708be545baSRichard Henderson                                              Object *owner,
15718be545baSRichard Henderson                                              const MemoryRegionOps *ops,
15728be545baSRichard Henderson                                              void *opaque,
15738be545baSRichard Henderson                                              const char *name,
15748be545baSRichard Henderson                                              uint64_t size,
15758be545baSRichard Henderson                                              Error **errp);
15768be545baSRichard Henderson 
15778be545baSRichard Henderson /**
15788be545baSRichard Henderson  * memory_region_init_iommu: Initialize a memory region of a custom type
15798be545baSRichard Henderson  * that translates addresses
15808be545baSRichard Henderson  *
15818be545baSRichard Henderson  * An IOMMU region translates addresses and forwards accesses to a target
15828be545baSRichard Henderson  * memory region.
15838be545baSRichard Henderson  *
15848be545baSRichard Henderson  * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
15858be545baSRichard Henderson  * @_iommu_mr should be a pointer to enough memory for an instance of
15868be545baSRichard Henderson  * that subclass, @instance_size is the size of that subclass, and
15878be545baSRichard Henderson  * @mrtypename is its name. This function will initialize @_iommu_mr as an
15888be545baSRichard Henderson  * instance of the subclass, and its methods will then be called to handle
15898be545baSRichard Henderson  * accesses to the memory region. See the documentation of
15908be545baSRichard Henderson  * #IOMMUMemoryRegionClass for further details.
15918be545baSRichard Henderson  *
15928be545baSRichard Henderson  * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
15938be545baSRichard Henderson  * @instance_size: the IOMMUMemoryRegion subclass instance size
15948be545baSRichard Henderson  * @mrtypename: the type name of the #IOMMUMemoryRegion
15958be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
15968be545baSRichard Henderson  * @name: used for debugging; not visible to the user or ABI
15978be545baSRichard Henderson  * @size: size of the region.
15988be545baSRichard Henderson  */
15998be545baSRichard Henderson void memory_region_init_iommu(void *_iommu_mr,
16008be545baSRichard Henderson                               size_t instance_size,
16018be545baSRichard Henderson                               const char *mrtypename,
16028be545baSRichard Henderson                               Object *owner,
16038be545baSRichard Henderson                               const char *name,
16048be545baSRichard Henderson                               uint64_t size);
16058be545baSRichard Henderson 
16068be545baSRichard Henderson /**
16078be545baSRichard Henderson  * memory_region_init_ram - Initialize RAM memory region.  Accesses into the
16088be545baSRichard Henderson  *                          region will modify memory directly.
16098be545baSRichard Henderson  *
16108be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized
16118be545baSRichard Henderson  * @owner: the object that tracks the region's reference count (must be
16128be545baSRichard Henderson  *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
16138be545baSRichard Henderson  * @name: name of the memory region
16148be545baSRichard Henderson  * @size: size of the region in bytes
16158be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
16168be545baSRichard Henderson  *
16178be545baSRichard Henderson  * This function allocates RAM for a board model or device, and
16188be545baSRichard Henderson  * arranges for it to be migrated (by calling vmstate_register_ram()
16198be545baSRichard Henderson  * if @owner is a DeviceState, or vmstate_register_ram_global() if
16208be545baSRichard Henderson  * @owner is NULL).
16218be545baSRichard Henderson  *
16228be545baSRichard Henderson  * TODO: Currently we restrict @owner to being either NULL (for
16238be545baSRichard Henderson  * global RAM regions with no owner) or devices, so that we can
16248be545baSRichard Henderson  * give the RAM block a unique name for migration purposes.
16258be545baSRichard Henderson  * We should lift this restriction and allow arbitrary Objects.
16268be545baSRichard Henderson  * If you pass a non-NULL non-device @owner then we will assert.
16278be545baSRichard Henderson  *
16288be545baSRichard Henderson  * Return: true on success, else false setting @errp with error.
16298be545baSRichard Henderson  */
16308be545baSRichard Henderson bool memory_region_init_ram(MemoryRegion *mr,
16318be545baSRichard Henderson                             Object *owner,
16328be545baSRichard Henderson                             const char *name,
16338be545baSRichard Henderson                             uint64_t size,
16348be545baSRichard Henderson                             Error **errp);
16358be545baSRichard Henderson 
/**
 * memory_region_init_ram_guest_memfd:  Initialize RAM memory region with
 *                                      guest_memfd backing.
 *
 * Counterpart to memory_region_init_ram(); presumably the RAM block gets a
 * guest_memfd associated (see the RAM_GUEST_MEMFD flag and
 * memory_region_has_guest_memfd()) — NOTE(review): confirm against the
 * implementation.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_guest_memfd(MemoryRegion *mr,
                                        Object *owner,
                                        const char *name,
                                        uint64_t size,
                                        Error **errp);
16418be545baSRichard Henderson 
16428be545baSRichard Henderson /**
16438be545baSRichard Henderson  * memory_region_init_rom: Initialize a ROM memory region.
16448be545baSRichard Henderson  *
16458be545baSRichard Henderson  * This has the same effect as calling memory_region_init_ram()
16468be545baSRichard Henderson  * and then marking the resulting region read-only with
16478be545baSRichard Henderson  * memory_region_set_readonly(). This includes arranging for the
16488be545baSRichard Henderson  * contents to be migrated.
16498be545baSRichard Henderson  *
16508be545baSRichard Henderson  * TODO: Currently we restrict @owner to being either NULL (for
16518be545baSRichard Henderson  * global RAM regions with no owner) or devices, so that we can
16528be545baSRichard Henderson  * give the RAM block a unique name for migration purposes.
16538be545baSRichard Henderson  * We should lift this restriction and allow arbitrary Objects.
16548be545baSRichard Henderson  * If you pass a non-NULL non-device @owner then we will assert.
16558be545baSRichard Henderson  *
16568be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
16578be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
16588be545baSRichard Henderson  * @name: Region name, becomes part of RAMBlock name used in migration stream
16598be545baSRichard Henderson  *        must be unique within any device
16608be545baSRichard Henderson  * @size: size of the region.
16618be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
16628be545baSRichard Henderson  *
16638be545baSRichard Henderson  * Return: true on success, else false setting @errp with error.
16648be545baSRichard Henderson  */
16658be545baSRichard Henderson bool memory_region_init_rom(MemoryRegion *mr,
16668be545baSRichard Henderson                             Object *owner,
16678be545baSRichard Henderson                             const char *name,
16688be545baSRichard Henderson                             uint64_t size,
16698be545baSRichard Henderson                             Error **errp);
16708be545baSRichard Henderson 
16718be545baSRichard Henderson /**
16728be545baSRichard Henderson  * memory_region_init_rom_device:  Initialize a ROM memory region.
16738be545baSRichard Henderson  *                                 Writes are handled via callbacks.
16748be545baSRichard Henderson  *
16758be545baSRichard Henderson  * This function initializes a memory region backed by RAM for reads
16768be545baSRichard Henderson  * and callbacks for writes, and arranges for the RAM backing to
16778be545baSRichard Henderson  * be migrated (by calling vmstate_register_ram()
16788be545baSRichard Henderson  * if @owner is a DeviceState, or vmstate_register_ram_global() if
16798be545baSRichard Henderson  * @owner is NULL).
16808be545baSRichard Henderson  *
16818be545baSRichard Henderson  * TODO: Currently we restrict @owner to being either NULL (for
16828be545baSRichard Henderson  * global RAM regions with no owner) or devices, so that we can
16838be545baSRichard Henderson  * give the RAM block a unique name for migration purposes.
16848be545baSRichard Henderson  * We should lift this restriction and allow arbitrary Objects.
16858be545baSRichard Henderson  * If you pass a non-NULL non-device @owner then we will assert.
16868be545baSRichard Henderson  *
16878be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
16888be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
16898be545baSRichard Henderson  * @ops: callbacks for write access handling (must not be NULL).
16908be545baSRichard Henderson  * @opaque: passed to the read and write callbacks of the @ops structure.
16918be545baSRichard Henderson  * @name: Region name, becomes part of RAMBlock name used in migration stream
16928be545baSRichard Henderson  *        must be unique within any device
16938be545baSRichard Henderson  * @size: size of the region.
16948be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
16958be545baSRichard Henderson  *
16968be545baSRichard Henderson  * Return: true on success, else false setting @errp with error.
16978be545baSRichard Henderson  */
16988be545baSRichard Henderson bool memory_region_init_rom_device(MemoryRegion *mr,
16998be545baSRichard Henderson                                    Object *owner,
17008be545baSRichard Henderson                                    const MemoryRegionOps *ops,
17018be545baSRichard Henderson                                    void *opaque,
17028be545baSRichard Henderson                                    const char *name,
17038be545baSRichard Henderson                                    uint64_t size,
17048be545baSRichard Henderson                                    Error **errp);
17058be545baSRichard Henderson 
17068be545baSRichard Henderson 
17078be545baSRichard Henderson /**
17088be545baSRichard Henderson  * memory_region_owner: get a memory region's owner.
17098be545baSRichard Henderson  *
17108be545baSRichard Henderson  * @mr: the memory region being queried.
17118be545baSRichard Henderson  */
17128be545baSRichard Henderson Object *memory_region_owner(MemoryRegion *mr);
17138be545baSRichard Henderson 
17148be545baSRichard Henderson /**
17158be545baSRichard Henderson  * memory_region_size: get a memory region's size.
17168be545baSRichard Henderson  *
17178be545baSRichard Henderson  * @mr: the memory region being queried.
17188be545baSRichard Henderson  */
17198be545baSRichard Henderson uint64_t memory_region_size(MemoryRegion *mr);
17208be545baSRichard Henderson 
17218be545baSRichard Henderson /**
17228be545baSRichard Henderson  * memory_region_is_ram: check whether a memory region is random access
17238be545baSRichard Henderson  *
17248be545baSRichard Henderson  * Returns %true if a memory region is random access.
17258be545baSRichard Henderson  *
17268be545baSRichard Henderson  * @mr: the memory region being queried
17278be545baSRichard Henderson  */
17288be545baSRichard Henderson static inline bool memory_region_is_ram(MemoryRegion *mr)
17298be545baSRichard Henderson {
17308be545baSRichard Henderson     return mr->ram;
17318be545baSRichard Henderson }
17328be545baSRichard Henderson 
17338be545baSRichard Henderson /**
17348be545baSRichard Henderson  * memory_region_is_ram_device: check whether a memory region is a ram device
17358be545baSRichard Henderson  *
17368be545baSRichard Henderson  * Returns %true if a memory region is a device backed ram region
17378be545baSRichard Henderson  *
17388be545baSRichard Henderson  * @mr: the memory region being queried
17398be545baSRichard Henderson  */
17408be545baSRichard Henderson bool memory_region_is_ram_device(MemoryRegion *mr);
17418be545baSRichard Henderson 
17428be545baSRichard Henderson /**
17438be545baSRichard Henderson  * memory_region_is_romd: check whether a memory region is in ROMD mode
17448be545baSRichard Henderson  *
17458be545baSRichard Henderson  * Returns %true if a memory region is a ROM device and currently set to allow
17468be545baSRichard Henderson  * direct reads.
17478be545baSRichard Henderson  *
17488be545baSRichard Henderson  * @mr: the memory region being queried
17498be545baSRichard Henderson  */
17508be545baSRichard Henderson static inline bool memory_region_is_romd(MemoryRegion *mr)
17518be545baSRichard Henderson {
17528be545baSRichard Henderson     return mr->rom_device && mr->romd_mode;
17538be545baSRichard Henderson }
17548be545baSRichard Henderson 
17558be545baSRichard Henderson /**
17568be545baSRichard Henderson  * memory_region_is_protected: check whether a memory region is protected
17578be545baSRichard Henderson  *
17588be545baSRichard Henderson  * Returns %true if a memory region is protected RAM and cannot be accessed
17598be545baSRichard Henderson  * via standard mechanisms, e.g. DMA.
17608be545baSRichard Henderson  *
17618be545baSRichard Henderson  * @mr: the memory region being queried
17628be545baSRichard Henderson  */
17638be545baSRichard Henderson bool memory_region_is_protected(MemoryRegion *mr);
17648be545baSRichard Henderson 
17658be545baSRichard Henderson /**
17668be545baSRichard Henderson  * memory_region_has_guest_memfd: check whether a memory region has guest_memfd
17678be545baSRichard Henderson  *     associated
17688be545baSRichard Henderson  *
17698be545baSRichard Henderson  * Returns %true if a memory region's ram_block has valid guest_memfd assigned.
17708be545baSRichard Henderson  *
17718be545baSRichard Henderson  * @mr: the memory region being queried
17728be545baSRichard Henderson  */
17738be545baSRichard Henderson bool memory_region_has_guest_memfd(MemoryRegion *mr);
17748be545baSRichard Henderson 
17758be545baSRichard Henderson /**
17768be545baSRichard Henderson  * memory_region_get_iommu: check whether a memory region is an iommu
17778be545baSRichard Henderson  *
17788be545baSRichard Henderson  * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
17798be545baSRichard Henderson  * otherwise NULL.
17808be545baSRichard Henderson  *
17818be545baSRichard Henderson  * @mr: the memory region being queried
17828be545baSRichard Henderson  */
17838be545baSRichard Henderson static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
17848be545baSRichard Henderson {
17858be545baSRichard Henderson     if (mr->alias) {
17868be545baSRichard Henderson         return memory_region_get_iommu(mr->alias);
17878be545baSRichard Henderson     }
17888be545baSRichard Henderson     if (mr->is_iommu) {
17898be545baSRichard Henderson         return (IOMMUMemoryRegion *) mr;
17908be545baSRichard Henderson     }
17918be545baSRichard Henderson     return NULL;
17928be545baSRichard Henderson }
17938be545baSRichard Henderson 
17948be545baSRichard Henderson /**
17958be545baSRichard Henderson  * memory_region_get_iommu_class_nocheck: returns iommu memory region class
17968be545baSRichard Henderson  *   if an iommu or NULL if not
17978be545baSRichard Henderson  *
17988be545baSRichard Henderson  * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
17998be545baSRichard Henderson  * otherwise NULL. This is fast path avoiding QOM checking, use with caution.
18008be545baSRichard Henderson  *
18018be545baSRichard Henderson  * @iommu_mr: the memory region being queried
18028be545baSRichard Henderson  */
18038be545baSRichard Henderson static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
18048be545baSRichard Henderson         IOMMUMemoryRegion *iommu_mr)
18058be545baSRichard Henderson {
18068be545baSRichard Henderson     return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
18078be545baSRichard Henderson }
18088be545baSRichard Henderson 
18098be545baSRichard Henderson #define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
18108be545baSRichard Henderson 
18118be545baSRichard Henderson /**
18128be545baSRichard Henderson  * memory_region_iommu_get_min_page_size: get minimum supported page size
18138be545baSRichard Henderson  * for an iommu
18148be545baSRichard Henderson  *
18158be545baSRichard Henderson  * Returns minimum supported page size for an iommu.
18168be545baSRichard Henderson  *
18178be545baSRichard Henderson  * @iommu_mr: the memory region being queried
18188be545baSRichard Henderson  */
18198be545baSRichard Henderson uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
18208be545baSRichard Henderson 
18218be545baSRichard Henderson /**
18228be545baSRichard Henderson  * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
18238be545baSRichard Henderson  *
18248be545baSRichard Henderson  * Note: for any IOMMU implementation, an in-place mapping change
18258be545baSRichard Henderson  * should be notified with an UNMAP followed by a MAP.
18268be545baSRichard Henderson  *
18278be545baSRichard Henderson  * @iommu_mr: the memory region that was changed
18288be545baSRichard Henderson  * @iommu_idx: the IOMMU index for the translation table which has changed
18298be545baSRichard Henderson  * @event: TLB event with the new entry in the IOMMU translation table.
18308be545baSRichard Henderson  *         The entry replaces all old entries for the same virtual I/O address
18318be545baSRichard Henderson  *         range.
18328be545baSRichard Henderson  */
18338be545baSRichard Henderson void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
18348be545baSRichard Henderson                                 int iommu_idx,
18358be545baSRichard Henderson                                 const IOMMUTLBEvent event);
18368be545baSRichard Henderson 
18378be545baSRichard Henderson /**
18388be545baSRichard Henderson  * memory_region_notify_iommu_one: notify a change in an IOMMU translation
18398be545baSRichard Henderson  *                           entry to a single notifier
18408be545baSRichard Henderson  *
18418be545baSRichard Henderson  * This works just like memory_region_notify_iommu(), but it only
18428be545baSRichard Henderson  * notifies a specific notifier, not all of them.
18438be545baSRichard Henderson  *
18448be545baSRichard Henderson  * @notifier: the notifier to be notified
18458be545baSRichard Henderson  * @event: TLB event with the new entry in the IOMMU translation table.
18468be545baSRichard Henderson  *         The entry replaces all old entries for the same virtual I/O address
18478be545baSRichard Henderson  *         range.
18488be545baSRichard Henderson  */
18498be545baSRichard Henderson void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
18508be545baSRichard Henderson                                     const IOMMUTLBEvent *event);
18518be545baSRichard Henderson 
18528be545baSRichard Henderson /**
 * memory_region_unmap_iommu_notifier_range: notify an unmap for an IOMMU
18548be545baSRichard Henderson  *                                           translation that covers the
18558be545baSRichard Henderson  *                                           range of a notifier
18568be545baSRichard Henderson  *
18578be545baSRichard Henderson  * @notifier: the notifier to be notified
18588be545baSRichard Henderson  */
18598be545baSRichard Henderson void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier);
18608be545baSRichard Henderson 
18618be545baSRichard Henderson 
18628be545baSRichard Henderson /**
18638be545baSRichard Henderson  * memory_region_register_iommu_notifier: register a notifier for changes to
18648be545baSRichard Henderson  * IOMMU translation entries.
18658be545baSRichard Henderson  *
18668be545baSRichard Henderson  * Returns 0 on success, or a negative errno otherwise. In particular,
18678be545baSRichard Henderson  * -EINVAL indicates that at least one of the attributes of the notifier
18688be545baSRichard Henderson  * is not supported (flag/range) by the IOMMU memory region. In case of error
18698be545baSRichard Henderson  * the error object must be created.
18708be545baSRichard Henderson  *
18718be545baSRichard Henderson  * @mr: the memory region to observe
18728be545baSRichard Henderson  * @n: the IOMMUNotifier to be added; the notify callback receives a
18738be545baSRichard Henderson  *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
18748be545baSRichard Henderson  *     ceases to be valid on exit from the notifier.
18758be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
18768be545baSRichard Henderson  */
18778be545baSRichard Henderson int memory_region_register_iommu_notifier(MemoryRegion *mr,
18788be545baSRichard Henderson                                           IOMMUNotifier *n, Error **errp);
18798be545baSRichard Henderson 
18808be545baSRichard Henderson /**
18818be545baSRichard Henderson  * memory_region_iommu_replay: replay existing IOMMU translations to
18828be545baSRichard Henderson  * a notifier with the minimum page granularity returned by
18838be545baSRichard Henderson  * mr->iommu_ops->get_page_size().
18848be545baSRichard Henderson  *
18858be545baSRichard Henderson  * Note: this is not related to record-and-replay functionality.
18868be545baSRichard Henderson  *
18878be545baSRichard Henderson  * @iommu_mr: the memory region to observe
18888be545baSRichard Henderson  * @n: the notifier to which to replay iommu mappings
18898be545baSRichard Henderson  */
18908be545baSRichard Henderson void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
18918be545baSRichard Henderson 
18928be545baSRichard Henderson /**
18938be545baSRichard Henderson  * memory_region_unregister_iommu_notifier: unregister a notifier for
18948be545baSRichard Henderson  * changes to IOMMU translation entries.
18958be545baSRichard Henderson  *
18968be545baSRichard Henderson  * @mr: the memory region which was observed and for which notify_stopped()
18978be545baSRichard Henderson  *      needs to be called
18988be545baSRichard Henderson  * @n: the notifier to be removed.
18998be545baSRichard Henderson  */
19008be545baSRichard Henderson void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
19018be545baSRichard Henderson                                              IOMMUNotifier *n);
19028be545baSRichard Henderson 
19038be545baSRichard Henderson /**
19048be545baSRichard Henderson  * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
19058be545baSRichard Henderson  * defined on the IOMMU.
19068be545baSRichard Henderson  *
19078be545baSRichard Henderson  * Returns 0 on success, or a negative errno otherwise. In particular,
19088be545baSRichard Henderson  * -EINVAL indicates that the IOMMU does not support the requested
19098be545baSRichard Henderson  * attribute.
19108be545baSRichard Henderson  *
19118be545baSRichard Henderson  * @iommu_mr: the memory region
19128be545baSRichard Henderson  * @attr: the requested attribute
19138be545baSRichard Henderson  * @data: a pointer to the requested attribute data
19148be545baSRichard Henderson  */
19158be545baSRichard Henderson int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
19168be545baSRichard Henderson                                  enum IOMMUMemoryRegionAttr attr,
19178be545baSRichard Henderson                                  void *data);
19188be545baSRichard Henderson 
19198be545baSRichard Henderson /**
19208be545baSRichard Henderson  * memory_region_iommu_attrs_to_index: return the IOMMU index to
19218be545baSRichard Henderson  * use for translations with the given memory transaction attributes.
19228be545baSRichard Henderson  *
19238be545baSRichard Henderson  * @iommu_mr: the memory region
19248be545baSRichard Henderson  * @attrs: the memory transaction attributes
19258be545baSRichard Henderson  */
19268be545baSRichard Henderson int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
19278be545baSRichard Henderson                                        MemTxAttrs attrs);
19288be545baSRichard Henderson 
19298be545baSRichard Henderson /**
19308be545baSRichard Henderson  * memory_region_iommu_num_indexes: return the total number of IOMMU
19318be545baSRichard Henderson  * indexes that this IOMMU supports.
19328be545baSRichard Henderson  *
19338be545baSRichard Henderson  * @iommu_mr: the memory region
19348be545baSRichard Henderson  */
19358be545baSRichard Henderson int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
19368be545baSRichard Henderson 
19378be545baSRichard Henderson /**
19388be545baSRichard Henderson  * memory_region_name: get a memory region's name
19398be545baSRichard Henderson  *
19408be545baSRichard Henderson  * Returns the string that was used to initialize the memory region.
19418be545baSRichard Henderson  *
19428be545baSRichard Henderson  * @mr: the memory region being queried
19438be545baSRichard Henderson  */
19448be545baSRichard Henderson const char *memory_region_name(const MemoryRegion *mr);
19458be545baSRichard Henderson 
19468be545baSRichard Henderson /**
19478be545baSRichard Henderson  * memory_region_is_logging: return whether a memory region is logging writes
19488be545baSRichard Henderson  *
19498be545baSRichard Henderson  * Returns %true if the memory region is logging writes for the given client
19508be545baSRichard Henderson  *
19518be545baSRichard Henderson  * @mr: the memory region being queried
19528be545baSRichard Henderson  * @client: the client being queried
19538be545baSRichard Henderson  */
19548be545baSRichard Henderson bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
19558be545baSRichard Henderson 
19568be545baSRichard Henderson /**
19578be545baSRichard Henderson  * memory_region_get_dirty_log_mask: return the clients for which a
19588be545baSRichard Henderson  * memory region is logging writes.
19598be545baSRichard Henderson  *
19608be545baSRichard Henderson  * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
19618be545baSRichard Henderson  * are the bit indices.
19628be545baSRichard Henderson  *
19638be545baSRichard Henderson  * @mr: the memory region being queried
19648be545baSRichard Henderson  */
19658be545baSRichard Henderson uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
19668be545baSRichard Henderson 
19678be545baSRichard Henderson /**
19688be545baSRichard Henderson  * memory_region_is_rom: check whether a memory region is ROM
19698be545baSRichard Henderson  *
19708be545baSRichard Henderson  * Returns %true if a memory region is read-only memory.
19718be545baSRichard Henderson  *
19728be545baSRichard Henderson  * @mr: the memory region being queried
19738be545baSRichard Henderson  */
19748be545baSRichard Henderson static inline bool memory_region_is_rom(MemoryRegion *mr)
19758be545baSRichard Henderson {
19768be545baSRichard Henderson     return mr->ram && mr->readonly;
19778be545baSRichard Henderson }
19788be545baSRichard Henderson 
/**
 * memory_region_is_nonvolatile: check whether a memory region is non-volatile
 *
 * Returns %true if a memory region is non-volatile memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
{
    return mr->nonvolatile;
}
19908be545baSRichard Henderson 
19918be545baSRichard Henderson /**
19928be545baSRichard Henderson  * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
19938be545baSRichard Henderson  *
19948be545baSRichard Henderson  * Returns a file descriptor backing a file-based RAM memory region,
19958be545baSRichard Henderson  * or -1 if the region is not a file-based RAM memory region.
19968be545baSRichard Henderson  *
19978be545baSRichard Henderson  * @mr: the RAM or alias memory region being queried.
19988be545baSRichard Henderson  */
19998be545baSRichard Henderson int memory_region_get_fd(MemoryRegion *mr);
20008be545baSRichard Henderson 
20018be545baSRichard Henderson /**
20028be545baSRichard Henderson  * memory_region_from_host: Convert a pointer into a RAM memory region
20038be545baSRichard Henderson  * and an offset within it.
20048be545baSRichard Henderson  *
20058be545baSRichard Henderson  * Given a host pointer inside a RAM memory region (created with
20068be545baSRichard Henderson  * memory_region_init_ram() or memory_region_init_ram_ptr()), return
20078be545baSRichard Henderson  * the MemoryRegion and the offset within it.
20088be545baSRichard Henderson  *
20098be545baSRichard Henderson  * Use with care; by the time this function returns, the returned pointer is
20108be545baSRichard Henderson  * not protected by RCU anymore.  If the caller is not within an RCU critical
20118be545baSRichard Henderson  * section and does not hold the BQL, it must have other means of
20128be545baSRichard Henderson  * protecting the pointer, such as a reference to the region that includes
20138be545baSRichard Henderson  * the incoming ram_addr_t.
20148be545baSRichard Henderson  *
20158be545baSRichard Henderson  * @ptr: the host pointer to be converted
20168be545baSRichard Henderson  * @offset: the offset within memory region
20178be545baSRichard Henderson  */
20188be545baSRichard Henderson MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
20198be545baSRichard Henderson 
20208be545baSRichard Henderson /**
20218be545baSRichard Henderson  * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
20228be545baSRichard Henderson  *
20238be545baSRichard Henderson  * Returns a host pointer to a RAM memory region (created with
20248be545baSRichard Henderson  * memory_region_init_ram() or memory_region_init_ram_ptr()).
20258be545baSRichard Henderson  *
20268be545baSRichard Henderson  * Use with care; by the time this function returns, the returned pointer is
20278be545baSRichard Henderson  * not protected by RCU anymore.  If the caller is not within an RCU critical
20288be545baSRichard Henderson  * section and does not hold the BQL, it must have other means of
20298be545baSRichard Henderson  * protecting the pointer, such as a reference to the region that includes
20308be545baSRichard Henderson  * the incoming ram_addr_t.
20318be545baSRichard Henderson  *
20328be545baSRichard Henderson  * @mr: the memory region being queried.
20338be545baSRichard Henderson  */
20348be545baSRichard Henderson void *memory_region_get_ram_ptr(MemoryRegion *mr);
20358be545baSRichard Henderson 
/**
 * memory_region_ram_resize: Resize a RAM region.
20378be545baSRichard Henderson  *
20388be545baSRichard Henderson  * Resizing RAM while migrating can result in the migration being canceled.
20398be545baSRichard Henderson  * Care has to be taken if the guest might have already detected the memory.
20408be545baSRichard Henderson  *
20418be545baSRichard Henderson  * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size of the region
20438be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
20448be545baSRichard Henderson  */
20458be545baSRichard Henderson void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
20468be545baSRichard Henderson                               Error **errp);
20478be545baSRichard Henderson 
20488be545baSRichard Henderson /**
20498be545baSRichard Henderson  * memory_region_msync: Synchronize selected address range of
20508be545baSRichard Henderson  * a memory mapped region
20518be545baSRichard Henderson  *
20528be545baSRichard Henderson  * @mr: the memory region to be msync
20538be545baSRichard Henderson  * @addr: the initial address of the range to be sync
20548be545baSRichard Henderson  * @size: the size of the range to be sync
20558be545baSRichard Henderson  */
20568be545baSRichard Henderson void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);
20578be545baSRichard Henderson 
20588be545baSRichard Henderson /**
20598be545baSRichard Henderson  * memory_region_writeback: Trigger cache writeback for
20608be545baSRichard Henderson  * selected address range
20618be545baSRichard Henderson  *
20628be545baSRichard Henderson  * @mr: the memory region to be updated
20638be545baSRichard Henderson  * @addr: the initial address of the range to be written back
20648be545baSRichard Henderson  * @size: the size of the range to be written back
20658be545baSRichard Henderson  */
20668be545baSRichard Henderson void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);
20678be545baSRichard Henderson 
20688be545baSRichard Henderson /**
20698be545baSRichard Henderson  * memory_region_set_log: Turn dirty logging on or off for a region.
20708be545baSRichard Henderson  *
20718be545baSRichard Henderson  * Turns dirty logging on or off for a specified client (display, migration).
20728be545baSRichard Henderson  * Only meaningful for RAM regions.
20738be545baSRichard Henderson  *
20748be545baSRichard Henderson  * @mr: the memory region being updated.
20758be545baSRichard Henderson  * @log: whether dirty logging is to be enabled or disabled.
20768be545baSRichard Henderson  * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
20778be545baSRichard Henderson  */
20788be545baSRichard Henderson void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
20798be545baSRichard Henderson 
20808be545baSRichard Henderson /**
20818be545baSRichard Henderson  * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
20828be545baSRichard Henderson  *
20838be545baSRichard Henderson  * Marks a range of bytes as dirty, after it has been dirtied outside
20848be545baSRichard Henderson  * guest code.
20858be545baSRichard Henderson  *
20868be545baSRichard Henderson  * @mr: the memory region being dirtied.
20878be545baSRichard Henderson  * @addr: the address (relative to the start of the region) being dirtied.
20888be545baSRichard Henderson  * @size: size of the range being dirtied.
20898be545baSRichard Henderson  */
20908be545baSRichard Henderson void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
20918be545baSRichard Henderson                              hwaddr size);
20928be545baSRichard Henderson 
20938be545baSRichard Henderson /**
20948be545baSRichard Henderson  * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
20958be545baSRichard Henderson  *
20968be545baSRichard Henderson  * This function is called when the caller wants to clear the remote
20978be545baSRichard Henderson  * dirty bitmap of a memory range within the memory region.  This can
20988be545baSRichard Henderson  * be used by e.g. KVM to manually clear dirty log when
 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is declared supported by the host
21008be545baSRichard Henderson  * kernel.
21018be545baSRichard Henderson  *
21028be545baSRichard Henderson  * @mr:     the memory region to clear the dirty log upon
21038be545baSRichard Henderson  * @start:  start address offset within the memory region
21048be545baSRichard Henderson  * @len:    length of the memory region to clear dirty bitmap
21058be545baSRichard Henderson  */
21068be545baSRichard Henderson void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
21078be545baSRichard Henderson                                       hwaddr len);
21088be545baSRichard Henderson 
21098be545baSRichard Henderson /**
21108be545baSRichard Henderson  * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
21118be545baSRichard Henderson  *                                         bitmap and clear it.
21128be545baSRichard Henderson  *
21138be545baSRichard Henderson  * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
21148be545baSRichard Henderson  * returns the snapshot.  The snapshot can then be used to query dirty
21158be545baSRichard Henderson  * status, using memory_region_snapshot_get_dirty.  Snapshotting allows
21168be545baSRichard Henderson  * querying the same page multiple times, which is especially useful for
21178be545baSRichard Henderson  * display updates where the scanlines often are not page aligned.
21188be545baSRichard Henderson  *
21198be545baSRichard Henderson  * The dirty bitmap region which gets copied into the snapshot (and
21208be545baSRichard Henderson  * cleared afterwards) can be larger than requested.  The boundaries
21218be545baSRichard Henderson  * are rounded up/down so complete bitmap longs (covering 64 pages on
21228be545baSRichard Henderson  * 64bit hosts) can be copied over into the bitmap snapshot.  Which
21238be545baSRichard Henderson  * isn't a problem for display updates as the extra pages are outside
21248be545baSRichard Henderson  * the visible area, and in case the visible area changes a full
21258be545baSRichard Henderson  * display redraw is due anyway.  Should other use cases for this
21268be545baSRichard Henderson  * function emerge we might have to revisit this implementation
21278be545baSRichard Henderson  * detail.
21288be545baSRichard Henderson  *
21298be545baSRichard Henderson  * Use g_free to release DirtyBitmapSnapshot.
21308be545baSRichard Henderson  *
21318be545baSRichard Henderson  * @mr: the memory region being queried.
21328be545baSRichard Henderson  * @addr: the address (relative to the start of the region) being queried.
21338be545baSRichard Henderson  * @size: the size of the range being queried.
21348be545baSRichard Henderson  * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
21358be545baSRichard Henderson  */
21368be545baSRichard Henderson DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
21378be545baSRichard Henderson                                                             hwaddr addr,
21388be545baSRichard Henderson                                                             hwaddr size,
21398be545baSRichard Henderson                                                             unsigned client);
21408be545baSRichard Henderson 
21418be545baSRichard Henderson /**
21428be545baSRichard Henderson  * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
21438be545baSRichard Henderson  *                                   in the specified dirty bitmap snapshot.
21448be545baSRichard Henderson  *
21458be545baSRichard Henderson  * @mr: the memory region being queried.
21468be545baSRichard Henderson  * @snap: the dirty bitmap snapshot
21478be545baSRichard Henderson  * @addr: the address (relative to the start of the region) being queried.
21488be545baSRichard Henderson  * @size: the size of the range being queried.
21498be545baSRichard Henderson  */
21508be545baSRichard Henderson bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
21518be545baSRichard Henderson                                       DirtyBitmapSnapshot *snap,
21528be545baSRichard Henderson                                       hwaddr addr, hwaddr size);
21538be545baSRichard Henderson 
21548be545baSRichard Henderson /**
21558be545baSRichard Henderson  * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
21568be545baSRichard Henderson  *                            client.
21578be545baSRichard Henderson  *
21588be545baSRichard Henderson  * Marks a range of pages as no longer dirty.
21598be545baSRichard Henderson  *
21608be545baSRichard Henderson  * @mr: the region being updated.
21618be545baSRichard Henderson  * @addr: the start of the subrange being cleaned.
21628be545baSRichard Henderson  * @size: the size of the subrange being cleaned.
21638be545baSRichard Henderson  * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
21648be545baSRichard Henderson  *          %DIRTY_MEMORY_VGA.
21658be545baSRichard Henderson  */
21668be545baSRichard Henderson void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
21678be545baSRichard Henderson                                hwaddr size, unsigned client);
21688be545baSRichard Henderson 
21698be545baSRichard Henderson /**
21708be545baSRichard Henderson  * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
21718be545baSRichard Henderson  *                                 TBs (for self-modifying code).
21728be545baSRichard Henderson  *
21738be545baSRichard Henderson  * The MemoryRegionOps->write() callback of a ROM device must use this function
21748be545baSRichard Henderson  * to mark byte ranges that have been modified internally, such as by directly
21758be545baSRichard Henderson  * accessing the memory returned by memory_region_get_ram_ptr().
21768be545baSRichard Henderson  *
21778be545baSRichard Henderson  * This function marks the range dirty and invalidates TBs so that TCG can
21788be545baSRichard Henderson  * detect self-modifying code.
21798be545baSRichard Henderson  *
21808be545baSRichard Henderson  * @mr: the region being flushed.
21818be545baSRichard Henderson  * @addr: the start, relative to the start of the region, of the range being
21828be545baSRichard Henderson  *        flushed.
21838be545baSRichard Henderson  * @size: the size, in bytes, of the range being flushed.
21848be545baSRichard Henderson  */
21858be545baSRichard Henderson void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
21868be545baSRichard Henderson 
21878be545baSRichard Henderson /**
21888be545baSRichard Henderson  * memory_region_set_readonly: Turn a memory region read-only (or read-write)
21898be545baSRichard Henderson  *
21908be545baSRichard Henderson  * Allows a memory region to be marked as read-only (turning it into a ROM).
21918be545baSRichard Henderson  * only useful on RAM regions.
21928be545baSRichard Henderson  *
21938be545baSRichard Henderson  * @mr: the region being updated.
21948be545baSRichard Henderson  * @readonly: whether the region is to be ROM or RAM.
21958be545baSRichard Henderson  */
21968be545baSRichard Henderson void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
21978be545baSRichard Henderson 
21988be545baSRichard Henderson /**
21998be545baSRichard Henderson  * memory_region_set_nonvolatile: Turn a memory region non-volatile
22008be545baSRichard Henderson  *
22018be545baSRichard Henderson  * Allows a memory region to be marked as non-volatile.
22028be545baSRichard Henderson  * only useful on RAM regions.
22038be545baSRichard Henderson  *
22048be545baSRichard Henderson  * @mr: the region being updated.
22058be545baSRichard Henderson  * @nonvolatile: whether the region is to be non-volatile.
22068be545baSRichard Henderson  */
22078be545baSRichard Henderson void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);
22088be545baSRichard Henderson 
22098be545baSRichard Henderson /**
22108be545baSRichard Henderson  * memory_region_rom_device_set_romd: enable/disable ROMD mode
22118be545baSRichard Henderson  *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
22148be545baSRichard Henderson  * device is mapped to guest memory and satisfies read access directly.
22158be545baSRichard Henderson  * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
22168be545baSRichard Henderson  * Writes are always handled by the #MemoryRegion.write function.
22178be545baSRichard Henderson  *
22188be545baSRichard Henderson  * @mr: the memory region to be updated
22198be545baSRichard Henderson  * @romd_mode: %true to put the region into ROMD mode
22208be545baSRichard Henderson  */
22218be545baSRichard Henderson void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
22228be545baSRichard Henderson 
22238be545baSRichard Henderson /**
22248be545baSRichard Henderson  * memory_region_set_coalescing: Enable memory coalescing for the region.
22258be545baSRichard Henderson  *
 * Enables writes to a region to be queued for later processing. MMIO ->write
22278be545baSRichard Henderson  * callbacks may be delayed until a non-coalesced MMIO is issued.
22288be545baSRichard Henderson  * Only useful for IO regions.  Roughly similar to write-combining hardware.
22298be545baSRichard Henderson  *
22308be545baSRichard Henderson  * @mr: the memory region to be write coalesced
22318be545baSRichard Henderson  */
22328be545baSRichard Henderson void memory_region_set_coalescing(MemoryRegion *mr);
22338be545baSRichard Henderson 
22348be545baSRichard Henderson /**
22358be545baSRichard Henderson  * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
22368be545baSRichard Henderson  *                               a region.
22378be545baSRichard Henderson  *
22388be545baSRichard Henderson  * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
22408be545baSRichard Henderson  *
22418be545baSRichard Henderson  * @mr: the memory region to be updated.
22428be545baSRichard Henderson  * @offset: the start of the range within the region to be coalesced.
22438be545baSRichard Henderson  * @size: the size of the subrange to be coalesced.
22448be545baSRichard Henderson  */
22458be545baSRichard Henderson void memory_region_add_coalescing(MemoryRegion *mr,
22468be545baSRichard Henderson                                   hwaddr offset,
22478be545baSRichard Henderson                                   uint64_t size);
22488be545baSRichard Henderson 
22498be545baSRichard Henderson /**
22508be545baSRichard Henderson  * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
22518be545baSRichard Henderson  *
22528be545baSRichard Henderson  * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
22548be545baSRichard Henderson  * hardware.
22558be545baSRichard Henderson  *
22568be545baSRichard Henderson  * @mr: the memory region to be updated.
22578be545baSRichard Henderson  */
22588be545baSRichard Henderson void memory_region_clear_coalescing(MemoryRegion *mr);
22598be545baSRichard Henderson 
22608be545baSRichard Henderson /**
22618be545baSRichard Henderson  * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
22628be545baSRichard Henderson  *                                    accesses.
22638be545baSRichard Henderson  *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
22658be545baSRichard Henderson  * region is accessed. This property is automatically enabled for all regions
22668be545baSRichard Henderson  * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
22678be545baSRichard Henderson  *
22688be545baSRichard Henderson  * @mr: the memory region to be updated.
22698be545baSRichard Henderson  */
22708be545baSRichard Henderson void memory_region_set_flush_coalesced(MemoryRegion *mr);
22718be545baSRichard Henderson 
22728be545baSRichard Henderson /**
22738be545baSRichard Henderson  * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
22748be545baSRichard Henderson  *                                      accesses.
22758be545baSRichard Henderson  *
22768be545baSRichard Henderson  * Clear the automatic coalesced MMIO flushing enabled via
22778be545baSRichard Henderson  * memory_region_set_flush_coalesced. Note that this service has no effect on
22788be545baSRichard Henderson  * memory regions that have MMIO coalescing enabled for themselves. For them,
22798be545baSRichard Henderson  * automatic flushing will stop once coalescing is disabled.
22808be545baSRichard Henderson  *
22818be545baSRichard Henderson  * @mr: the memory region to be updated.
22828be545baSRichard Henderson  */
22838be545baSRichard Henderson void memory_region_clear_flush_coalesced(MemoryRegion *mr);
22848be545baSRichard Henderson 
22858be545baSRichard Henderson /**
22868be545baSRichard Henderson  * memory_region_add_eventfd: Request an eventfd to be triggered when a word
22878be545baSRichard Henderson  *                            is written to a location.
22888be545baSRichard Henderson  *
22898be545baSRichard Henderson  * Marks a word in an IO region (initialized with memory_region_init_io())
22908be545baSRichard Henderson  * as a trigger for an eventfd event.  The I/O callback will not be called.
22918be545baSRichard Henderson  * The caller must be prepared to handle failure (that is, take the required
22928be545baSRichard Henderson  * action if the callback _is_ called).
22938be545baSRichard Henderson  *
22948be545baSRichard Henderson  * @mr: the memory region being updated.
22958be545baSRichard Henderson  * @addr: the address within @mr that is to be monitored
22968be545baSRichard Henderson  * @size: the size of the access to trigger the eventfd
22978be545baSRichard Henderson  * @match_data: whether to match against @data, instead of just @addr
22988be545baSRichard Henderson  * @data: the data to match against the guest write
22998be545baSRichard Henderson  * @e: event notifier to be triggered when @addr, @size, and @data all match.
23008be545baSRichard Henderson  **/
23018be545baSRichard Henderson void memory_region_add_eventfd(MemoryRegion *mr,
23028be545baSRichard Henderson                                hwaddr addr,
23038be545baSRichard Henderson                                unsigned size,
23048be545baSRichard Henderson                                bool match_data,
23058be545baSRichard Henderson                                uint64_t data,
23068be545baSRichard Henderson                                EventNotifier *e);
23078be545baSRichard Henderson 
23088be545baSRichard Henderson /**
23098be545baSRichard Henderson  * memory_region_del_eventfd: Cancel an eventfd.
23108be545baSRichard Henderson  *
23118be545baSRichard Henderson  * Cancels an eventfd trigger requested by a previous
23128be545baSRichard Henderson  * memory_region_add_eventfd() call.
23138be545baSRichard Henderson  *
23148be545baSRichard Henderson  * @mr: the memory region being updated.
23158be545baSRichard Henderson  * @addr: the address within @mr that is to be monitored
23168be545baSRichard Henderson  * @size: the size of the access to trigger the eventfd
23178be545baSRichard Henderson  * @match_data: whether to match against @data, instead of just @addr
23188be545baSRichard Henderson  * @data: the data to match against the guest write
23198be545baSRichard Henderson  * @e: event notifier to be triggered when @addr, @size, and @data all match.
23208be545baSRichard Henderson  */
23218be545baSRichard Henderson void memory_region_del_eventfd(MemoryRegion *mr,
23228be545baSRichard Henderson                                hwaddr addr,
23238be545baSRichard Henderson                                unsigned size,
23248be545baSRichard Henderson                                bool match_data,
23258be545baSRichard Henderson                                uint64_t data,
23268be545baSRichard Henderson                                EventNotifier *e);
23278be545baSRichard Henderson 
23288be545baSRichard Henderson /**
23298be545baSRichard Henderson  * memory_region_add_subregion: Add a subregion to a container.
23308be545baSRichard Henderson  *
23318be545baSRichard Henderson  * Adds a subregion at @offset.  The subregion may not overlap with other
23328be545baSRichard Henderson  * subregions (except for those explicitly marked as overlapping).  A region
23338be545baSRichard Henderson  * may only be added once as a subregion (unless removed with
23348be545baSRichard Henderson  * memory_region_del_subregion()); use memory_region_init_alias() if you
23358be545baSRichard Henderson  * want a region to be a subregion in multiple locations.
23368be545baSRichard Henderson  *
23378be545baSRichard Henderson  * @mr: the region to contain the new subregion; must be a container
23388be545baSRichard Henderson  *      initialized with memory_region_init().
23398be545baSRichard Henderson  * @offset: the offset relative to @mr where @subregion is added.
23408be545baSRichard Henderson  * @subregion: the subregion to be added.
23418be545baSRichard Henderson  */
23428be545baSRichard Henderson void memory_region_add_subregion(MemoryRegion *mr,
23438be545baSRichard Henderson                                  hwaddr offset,
23448be545baSRichard Henderson                                  MemoryRegion *subregion);
23458be545baSRichard Henderson /**
23468be545baSRichard Henderson  * memory_region_add_subregion_overlap: Add a subregion to a container
23478be545baSRichard Henderson  *                                      with overlap.
23488be545baSRichard Henderson  *
23498be545baSRichard Henderson  * Adds a subregion at @offset.  The subregion may overlap with other
23508be545baSRichard Henderson  * subregions.  Conflicts are resolved by having a higher @priority hide a
23518be545baSRichard Henderson  * lower @priority. Subregions without priority are taken as @priority 0.
23528be545baSRichard Henderson  * A region may only be added once as a subregion (unless removed with
23538be545baSRichard Henderson  * memory_region_del_subregion()); use memory_region_init_alias() if you
23548be545baSRichard Henderson  * want a region to be a subregion in multiple locations.
23558be545baSRichard Henderson  *
23568be545baSRichard Henderson  * @mr: the region to contain the new subregion; must be a container
23578be545baSRichard Henderson  *      initialized with memory_region_init().
23588be545baSRichard Henderson  * @offset: the offset relative to @mr where @subregion is added.
23598be545baSRichard Henderson  * @subregion: the subregion to be added.
23608be545baSRichard Henderson  * @priority: used for resolving overlaps; highest priority wins.
23618be545baSRichard Henderson  */
23628be545baSRichard Henderson void memory_region_add_subregion_overlap(MemoryRegion *mr,
23638be545baSRichard Henderson                                          hwaddr offset,
23648be545baSRichard Henderson                                          MemoryRegion *subregion,
23658be545baSRichard Henderson                                          int priority);
23668be545baSRichard Henderson 
23678be545baSRichard Henderson /**
23688be545baSRichard Henderson  * memory_region_get_ram_addr: Get the ram address associated with a memory
23698be545baSRichard Henderson  *                             region
23708be545baSRichard Henderson  *
23718be545baSRichard Henderson  * @mr: the region to be queried
23728be545baSRichard Henderson  */
23738be545baSRichard Henderson ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
23748be545baSRichard Henderson 
23758be545baSRichard Henderson uint64_t memory_region_get_alignment(const MemoryRegion *mr);
23768be545baSRichard Henderson /**
23778be545baSRichard Henderson  * memory_region_del_subregion: Remove a subregion.
23788be545baSRichard Henderson  *
23798be545baSRichard Henderson  * Removes a subregion from its container.
23808be545baSRichard Henderson  *
23818be545baSRichard Henderson  * @mr: the container to be updated.
23828be545baSRichard Henderson  * @subregion: the region being removed; must be a current subregion of @mr.
23838be545baSRichard Henderson  */
23848be545baSRichard Henderson void memory_region_del_subregion(MemoryRegion *mr,
23858be545baSRichard Henderson                                  MemoryRegion *subregion);
23868be545baSRichard Henderson 
23878be545baSRichard Henderson /*
23888be545baSRichard Henderson  * memory_region_set_enabled: dynamically enable or disable a region
23898be545baSRichard Henderson  *
23908be545baSRichard Henderson  * Enables or disables a memory region.  A disabled memory region
23918be545baSRichard Henderson  * ignores all accesses to itself and its subregions.  It does not
23928be545baSRichard Henderson  * obscure sibling subregions with lower priority - it simply behaves as
23938be545baSRichard Henderson  * if it was removed from the hierarchy.
23948be545baSRichard Henderson  *
23958be545baSRichard Henderson  * Regions default to being enabled.
23968be545baSRichard Henderson  *
23978be545baSRichard Henderson  * @mr: the region to be updated
23988be545baSRichard Henderson  * @enabled: whether to enable or disable the region
23998be545baSRichard Henderson  */
24008be545baSRichard Henderson void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
24018be545baSRichard Henderson 
24028be545baSRichard Henderson /*
24038be545baSRichard Henderson  * memory_region_set_address: dynamically update the address of a region
24048be545baSRichard Henderson  *
24058be545baSRichard Henderson  * Dynamically updates the address of a region, relative to its container.
24068be545baSRichard Henderson  * May be used on regions that are currently part of a memory hierarchy.
24078be545baSRichard Henderson  *
24088be545baSRichard Henderson  * @mr: the region to be updated
24098be545baSRichard Henderson  * @addr: new address, relative to container region
24108be545baSRichard Henderson  */
24118be545baSRichard Henderson void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
24128be545baSRichard Henderson 
24138be545baSRichard Henderson /*
24148be545baSRichard Henderson  * memory_region_set_size: dynamically update the size of a region.
24158be545baSRichard Henderson  *
24168be545baSRichard Henderson  * Dynamically updates the size of a region.
24178be545baSRichard Henderson  *
24188be545baSRichard Henderson  * @mr: the region to be updated
24198be545baSRichard Henderson  * @size: used size of the region.
24208be545baSRichard Henderson  */
24218be545baSRichard Henderson void memory_region_set_size(MemoryRegion *mr, uint64_t size);
24228be545baSRichard Henderson 
24238be545baSRichard Henderson /*
24248be545baSRichard Henderson  * memory_region_set_alias_offset: dynamically update a memory alias's offset
24258be545baSRichard Henderson  *
24268be545baSRichard Henderson  * Dynamically updates the offset into the target region that an alias points
24278be545baSRichard Henderson  * to, as if the fourth argument to memory_region_init_alias() has changed.
24288be545baSRichard Henderson  *
24298be545baSRichard Henderson  * @mr: the #MemoryRegion to be updated; should be an alias.
24308be545baSRichard Henderson  * @offset: the new offset into the target memory region
24318be545baSRichard Henderson  */
24328be545baSRichard Henderson void memory_region_set_alias_offset(MemoryRegion *mr,
24338be545baSRichard Henderson                                     hwaddr offset);
24348be545baSRichard Henderson 
24358be545baSRichard Henderson /*
24368be545baSRichard Henderson  * memory_region_set_unmergeable: Set a memory region unmergeable
24378be545baSRichard Henderson  *
24388be545baSRichard Henderson  * Mark a memory region unmergeable, resulting in the memory region (or
24398be545baSRichard Henderson  * everything contained in a memory region container) not getting merged when
24408be545baSRichard Henderson  * simplifying the address space and notifying memory listeners. Consequently,
24418be545baSRichard Henderson  * memory listeners will never get notified about ranges that are larger than
24428be545baSRichard Henderson  * the original memory regions.
24438be545baSRichard Henderson  *
24448be545baSRichard Henderson  * This is primarily useful when multiple aliases to a RAM memory region are
24458be545baSRichard Henderson  * mapped into a memory region container, and updates (e.g., enable/disable or
24468be545baSRichard Henderson  * map/unmap) of individual memory region aliases are not supposed to affect
24478be545baSRichard Henderson  * other memory regions in the same container.
24488be545baSRichard Henderson  *
24498be545baSRichard Henderson  * @mr: the #MemoryRegion to be updated
24508be545baSRichard Henderson  * @unmergeable: whether to mark the #MemoryRegion unmergeable
24518be545baSRichard Henderson  */
24528be545baSRichard Henderson void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable);
24538be545baSRichard Henderson 
24548be545baSRichard Henderson /**
24558be545baSRichard Henderson  * memory_region_present: checks if an address relative to a @container
24568be545baSRichard Henderson  * translates into #MemoryRegion within @container
24578be545baSRichard Henderson  *
24588be545baSRichard Henderson  * Answer whether a #MemoryRegion within @container covers the address
24598be545baSRichard Henderson  * @addr.
24608be545baSRichard Henderson  *
24618be545baSRichard Henderson  * @container: a #MemoryRegion within which @addr is a relative address
24628be545baSRichard Henderson  * @addr: the area within @container to be searched
24638be545baSRichard Henderson  */
24648be545baSRichard Henderson bool memory_region_present(MemoryRegion *container, hwaddr addr);
24658be545baSRichard Henderson 
24668be545baSRichard Henderson /**
24678be545baSRichard Henderson  * memory_region_is_mapped: returns true if #MemoryRegion is mapped
24688be545baSRichard Henderson  * into another memory region, which does not necessarily imply that it is
24698be545baSRichard Henderson  * mapped into an address space.
24708be545baSRichard Henderson  *
24718be545baSRichard Henderson  * @mr: a #MemoryRegion which should be checked if it's mapped
24728be545baSRichard Henderson  */
24738be545baSRichard Henderson bool memory_region_is_mapped(MemoryRegion *mr);
24748be545baSRichard Henderson 
24758be545baSRichard Henderson /**
24768be545baSRichard Henderson  * memory_region_get_ram_discard_manager: get the #RamDiscardManager for a
24778be545baSRichard Henderson  * #MemoryRegion
24788be545baSRichard Henderson  *
24798be545baSRichard Henderson  * The #RamDiscardManager cannot change while a memory region is mapped.
24808be545baSRichard Henderson  *
24818be545baSRichard Henderson  * @mr: the #MemoryRegion
24828be545baSRichard Henderson  */
24838be545baSRichard Henderson RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr);
24848be545baSRichard Henderson 
24858be545baSRichard Henderson /**
24868be545baSRichard Henderson  * memory_region_has_ram_discard_manager: check whether a #MemoryRegion has a
24878be545baSRichard Henderson  * #RamDiscardManager assigned
24888be545baSRichard Henderson  *
24898be545baSRichard Henderson  * @mr: the #MemoryRegion
24908be545baSRichard Henderson  */
24918be545baSRichard Henderson static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr)
24928be545baSRichard Henderson {
24938be545baSRichard Henderson     return !!memory_region_get_ram_discard_manager(mr);
24948be545baSRichard Henderson }
24958be545baSRichard Henderson 
24968be545baSRichard Henderson /**
24978be545baSRichard Henderson  * memory_region_set_ram_discard_manager: set the #RamDiscardManager for a
24988be545baSRichard Henderson  * #MemoryRegion
24998be545baSRichard Henderson  *
25008be545baSRichard Henderson  * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion
25018be545baSRichard Henderson  * that does not cover RAM, or a #MemoryRegion that already has a
25028be545baSRichard Henderson  * #RamDiscardManager assigned.
25038be545baSRichard Henderson  *
25048be545baSRichard Henderson  * @mr: the #MemoryRegion
25058be545baSRichard Henderson  * @rdm: #RamDiscardManager to set
25068be545baSRichard Henderson  */
25078be545baSRichard Henderson void memory_region_set_ram_discard_manager(MemoryRegion *mr,
25088be545baSRichard Henderson                                            RamDiscardManager *rdm);
25098be545baSRichard Henderson 
25108be545baSRichard Henderson /**
25118be545baSRichard Henderson  * memory_region_find: translate an address/size relative to a
25128be545baSRichard Henderson  * MemoryRegion into a #MemoryRegionSection.
25138be545baSRichard Henderson  *
25148be545baSRichard Henderson  * Locates the first #MemoryRegion within @mr that overlaps the range
25158be545baSRichard Henderson  * given by @addr and @size.
25168be545baSRichard Henderson  *
25178be545baSRichard Henderson  * Returns a #MemoryRegionSection that describes a contiguous overlap.
25188be545baSRichard Henderson  * It will have the following characteristics:
25198be545baSRichard Henderson  * - @size = 0 iff no overlap was found
25208be545baSRichard Henderson  * - @mr is non-%NULL iff an overlap was found
25218be545baSRichard Henderson  *
25228be545baSRichard Henderson  * Remember that in the return value the @offset_within_region is
25238be545baSRichard Henderson  * relative to the returned region (in the .@mr field), not to the
25248be545baSRichard Henderson  * @mr argument.
25258be545baSRichard Henderson  *
25268be545baSRichard Henderson  * Similarly, the .@offset_within_address_space is relative to the
25278be545baSRichard Henderson  * address space that contains both regions, the passed and the
25288be545baSRichard Henderson  * returned one.  However, in the special case where the @mr argument
25298be545baSRichard Henderson  * has no container (and thus is the root of the address space), the
25308be545baSRichard Henderson  * following will hold:
25318be545baSRichard Henderson  * - @offset_within_address_space >= @addr
25328be545baSRichard Henderson  * - @offset_within_address_space + .@size <= @addr + @size
25338be545baSRichard Henderson  *
25348be545baSRichard Henderson  * @mr: a MemoryRegion within which @addr is a relative address
25358be545baSRichard Henderson  * @addr: start of the area within @mr to be searched
25368be545baSRichard Henderson  * @size: size of the area to be searched
25378be545baSRichard Henderson  */
25388be545baSRichard Henderson MemoryRegionSection memory_region_find(MemoryRegion *mr,
25398be545baSRichard Henderson                                        hwaddr addr, uint64_t size);
25408be545baSRichard Henderson 
25418be545baSRichard Henderson /**
25428be545baSRichard Henderson  * memory_global_dirty_log_sync: synchronize the dirty log for all memory
25438be545baSRichard Henderson  *
25448be545baSRichard Henderson  * Synchronizes the dirty page log for all address spaces.
25458be545baSRichard Henderson  *
25468be545baSRichard Henderson  * @last_stage: whether this is the last stage of live migration
25478be545baSRichard Henderson  */
25488be545baSRichard Henderson void memory_global_dirty_log_sync(bool last_stage);
25498be545baSRichard Henderson 
25508be545baSRichard Henderson /**
25518be545baSRichard Henderson  * memory_global_after_dirty_log_sync: synchronize the dirty log for all memory
25528be545baSRichard Henderson  *
25538be545baSRichard Henderson  * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
25548be545baSRichard Henderson  * This function must be called after the dirty log bitmap is cleared, and
25558be545baSRichard Henderson  * before dirty guest memory pages are read.  If you are using
25568be545baSRichard Henderson  * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
25578be545baSRichard Henderson  * care of doing this.
25588be545baSRichard Henderson  */
25598be545baSRichard Henderson void memory_global_after_dirty_log_sync(void);
25608be545baSRichard Henderson 
25618be545baSRichard Henderson /**
25628be545baSRichard Henderson  * memory_region_transaction_begin: Start a transaction.
25638be545baSRichard Henderson  *
25648be545baSRichard Henderson  * During a transaction, changes will be accumulated and made visible
25658be545baSRichard Henderson  * only when the transaction ends (is committed).
25668be545baSRichard Henderson  */
25678be545baSRichard Henderson void memory_region_transaction_begin(void);
25688be545baSRichard Henderson 
25698be545baSRichard Henderson /**
25708be545baSRichard Henderson  * memory_region_transaction_commit: Commit a transaction and make changes
25718be545baSRichard Henderson  *                                   visible to the guest.
25728be545baSRichard Henderson  */
25738be545baSRichard Henderson void memory_region_transaction_commit(void);
25748be545baSRichard Henderson 
25758be545baSRichard Henderson /**
25768be545baSRichard Henderson  * memory_listener_register: register callbacks to be called when memory
25778be545baSRichard Henderson  *                           sections are mapped or unmapped into an address
25788be545baSRichard Henderson  *                           space
25798be545baSRichard Henderson  *
25808be545baSRichard Henderson  * @listener: an object containing the callbacks to be called
25818be545baSRichard Henderson  * @filter: if non-%NULL, only regions in this address space will be observed
25828be545baSRichard Henderson  */
25838be545baSRichard Henderson void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
25848be545baSRichard Henderson 
25858be545baSRichard Henderson /**
25868be545baSRichard Henderson  * memory_listener_unregister: undo the effect of memory_listener_register()
25878be545baSRichard Henderson  *
25888be545baSRichard Henderson  * @listener: an object containing the callbacks to be removed
25898be545baSRichard Henderson  */
25908be545baSRichard Henderson void memory_listener_unregister(MemoryListener *listener);
25918be545baSRichard Henderson 
25928be545baSRichard Henderson /**
25938be545baSRichard Henderson  * memory_global_dirty_log_start: begin dirty logging for all regions
25948be545baSRichard Henderson  *
25958be545baSRichard Henderson  * @flags: purpose of starting dirty log, migration or dirty rate
25968be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
25978be545baSRichard Henderson  *
25988be545baSRichard Henderson  * Return: true on success, else false setting @errp with error.
25998be545baSRichard Henderson  */
26008be545baSRichard Henderson bool memory_global_dirty_log_start(unsigned int flags, Error **errp);
26018be545baSRichard Henderson 
26028be545baSRichard Henderson /**
26038be545baSRichard Henderson  * memory_global_dirty_log_stop: end dirty logging for all regions
26048be545baSRichard Henderson  *
26058be545baSRichard Henderson  * @flags: purpose of stopping dirty log, migration or dirty rate
26068be545baSRichard Henderson  */
26078be545baSRichard Henderson void memory_global_dirty_log_stop(unsigned int flags);
26088be545baSRichard Henderson 
26098be545baSRichard Henderson void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
26108be545baSRichard Henderson 
26118be545baSRichard Henderson bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
26128be545baSRichard Henderson                                 unsigned size, bool is_write,
26138be545baSRichard Henderson                                 MemTxAttrs attrs);
26148be545baSRichard Henderson 
26158be545baSRichard Henderson /**
26168be545baSRichard Henderson  * memory_region_dispatch_read: perform a read directly to the specified
26178be545baSRichard Henderson  * MemoryRegion.
26188be545baSRichard Henderson  *
26198be545baSRichard Henderson  * @mr: #MemoryRegion to access
26208be545baSRichard Henderson  * @addr: address within that region
26218be545baSRichard Henderson  * @pval: pointer to uint64_t which the data is written to
26228be545baSRichard Henderson  * @op: size, sign, and endianness of the memory operation
26238be545baSRichard Henderson  * @attrs: memory transaction attributes to use for the access
26248be545baSRichard Henderson  */
26258be545baSRichard Henderson MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
26268be545baSRichard Henderson                                         hwaddr addr,
26278be545baSRichard Henderson                                         uint64_t *pval,
26288be545baSRichard Henderson                                         MemOp op,
26298be545baSRichard Henderson                                         MemTxAttrs attrs);
26308be545baSRichard Henderson /**
26318be545baSRichard Henderson  * memory_region_dispatch_write: perform a write directly to the specified
26328be545baSRichard Henderson  * MemoryRegion.
26338be545baSRichard Henderson  *
26348be545baSRichard Henderson  * @mr: #MemoryRegion to access
26358be545baSRichard Henderson  * @addr: address within that region
26368be545baSRichard Henderson  * @data: data to write
26378be545baSRichard Henderson  * @op: size, sign, and endianness of the memory operation
26388be545baSRichard Henderson  * @attrs: memory transaction attributes to use for the access
26398be545baSRichard Henderson  */
26408be545baSRichard Henderson MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
26418be545baSRichard Henderson                                          hwaddr addr,
26428be545baSRichard Henderson                                          uint64_t data,
26438be545baSRichard Henderson                                          MemOp op,
26448be545baSRichard Henderson                                          MemTxAttrs attrs);
26458be545baSRichard Henderson 
26468be545baSRichard Henderson /**
26478be545baSRichard Henderson  * address_space_init: initializes an address space
26488be545baSRichard Henderson  *
26498be545baSRichard Henderson  * @as: an uninitialized #AddressSpace
26508be545baSRichard Henderson  * @root: a #MemoryRegion that routes addresses for the address space
26518be545baSRichard Henderson  * @name: an address space name.  The name is only used for debugging
26528be545baSRichard Henderson  *        output.
26538be545baSRichard Henderson  */
26548be545baSRichard Henderson void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
26558be545baSRichard Henderson 
26568be545baSRichard Henderson /**
26578be545baSRichard Henderson  * address_space_destroy: destroy an address space
26588be545baSRichard Henderson  *
26598be545baSRichard Henderson  * Releases all resources associated with an address space.  After an address space
26608be545baSRichard Henderson  * is destroyed, its root memory region (given by address_space_init()) may be destroyed
26618be545baSRichard Henderson  * as well.
26628be545baSRichard Henderson  *
26638be545baSRichard Henderson  * @as: address space to be destroyed
26648be545baSRichard Henderson  */
26658be545baSRichard Henderson void address_space_destroy(AddressSpace *as);
26668be545baSRichard Henderson 
26678be545baSRichard Henderson /**
26688be545baSRichard Henderson  * address_space_remove_listeners: unregister all listeners of an address space
26698be545baSRichard Henderson  *
26708be545baSRichard Henderson  * Removes all callbacks previously registered with memory_listener_register()
26718be545baSRichard Henderson  * for @as.
26728be545baSRichard Henderson  *
26738be545baSRichard Henderson  * @as: an initialized #AddressSpace
26748be545baSRichard Henderson  */
26758be545baSRichard Henderson void address_space_remove_listeners(AddressSpace *as);
26768be545baSRichard Henderson 
26778be545baSRichard Henderson /**
26788be545baSRichard Henderson  * address_space_rw: read from or write to an address space.
26798be545baSRichard Henderson  *
26808be545baSRichard Henderson  * Return a MemTxResult indicating whether the operation succeeded
26818be545baSRichard Henderson  * or failed (eg unassigned memory, device rejected the transaction,
26828be545baSRichard Henderson  * IOMMU fault).
26838be545baSRichard Henderson  *
26848be545baSRichard Henderson  * @as: #AddressSpace to be accessed
26858be545baSRichard Henderson  * @addr: address within that address space
26868be545baSRichard Henderson  * @attrs: memory transaction attributes
26878be545baSRichard Henderson  * @buf: buffer with the data transferred
26888be545baSRichard Henderson  * @len: the number of bytes to read or write
26898be545baSRichard Henderson  * @is_write: indicates the transfer direction
26908be545baSRichard Henderson  */
26918be545baSRichard Henderson MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
26928be545baSRichard Henderson                              MemTxAttrs attrs, void *buf,
26938be545baSRichard Henderson                              hwaddr len, bool is_write);
26948be545baSRichard Henderson 
26958be545baSRichard Henderson /**
26968be545baSRichard Henderson  * address_space_write: write to address space.
26978be545baSRichard Henderson  *
26988be545baSRichard Henderson  * Return a MemTxResult indicating whether the operation succeeded
26998be545baSRichard Henderson  * or failed (eg unassigned memory, device rejected the transaction,
27008be545baSRichard Henderson  * IOMMU fault).
27018be545baSRichard Henderson  *
27028be545baSRichard Henderson  * @as: #AddressSpace to be accessed
27038be545baSRichard Henderson  * @addr: address within that address space
27048be545baSRichard Henderson  * @attrs: memory transaction attributes
27058be545baSRichard Henderson  * @buf: buffer with the data transferred
27068be545baSRichard Henderson  * @len: the number of bytes to write
27078be545baSRichard Henderson  */
27088be545baSRichard Henderson MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
27098be545baSRichard Henderson                                 MemTxAttrs attrs,
27108be545baSRichard Henderson                                 const void *buf, hwaddr len);
27118be545baSRichard Henderson 
27128be545baSRichard Henderson /**
27138be545baSRichard Henderson  * address_space_write_rom: write to address space, including ROM.
27148be545baSRichard Henderson  *
27158be545baSRichard Henderson  * This function writes to the specified address space, but will
27168be545baSRichard Henderson  * write data to both ROM and RAM. This is used for non-guest
27178be545baSRichard Henderson  * writes like writes from the gdb debug stub or initial loading
27188be545baSRichard Henderson  * of ROM contents.
27198be545baSRichard Henderson  *
27208be545baSRichard Henderson  * Note that portions of the write which attempt to write data to
27218be545baSRichard Henderson  * a device will be silently ignored -- only real RAM and ROM will
27228be545baSRichard Henderson  * be written to.
27238be545baSRichard Henderson  *
27248be545baSRichard Henderson  * Return a MemTxResult indicating whether the operation succeeded
27258be545baSRichard Henderson  * or failed (eg unassigned memory, device rejected the transaction,
27268be545baSRichard Henderson  * IOMMU fault).
27278be545baSRichard Henderson  *
27288be545baSRichard Henderson  * @as: #AddressSpace to be accessed
27298be545baSRichard Henderson  * @addr: address within that address space
27308be545baSRichard Henderson  * @attrs: memory transaction attributes
27318be545baSRichard Henderson  * @buf: buffer with the data transferred
27328be545baSRichard Henderson  * @len: the number of bytes to write
27338be545baSRichard Henderson  */
27348be545baSRichard Henderson MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
27358be545baSRichard Henderson                                     MemTxAttrs attrs,
27368be545baSRichard Henderson                                     const void *buf, hwaddr len);
27378be545baSRichard Henderson 
27388be545baSRichard Henderson /* address_space_ld*: load from an address space
27398be545baSRichard Henderson  * address_space_st*: store to an address space
27408be545baSRichard Henderson  *
27418be545baSRichard Henderson  * These functions perform a load or store of the byte, word,
27428be545baSRichard Henderson  * longword or quad to the specified address within the AddressSpace.
27438be545baSRichard Henderson  * The _le suffixed functions treat the data as little endian;
27448be545baSRichard Henderson  * _be indicates big endian; no suffix indicates "same endianness
27458be545baSRichard Henderson  * as guest CPU".
27468be545baSRichard Henderson  *
27478be545baSRichard Henderson  * The "guest CPU endianness" accessors are deprecated for use outside
27488be545baSRichard Henderson  * target-* code; devices should be CPU-agnostic and use either the LE
27498be545baSRichard Henderson  * or the BE accessors.
27508be545baSRichard Henderson  *
27518be545baSRichard Henderson  * @as: #AddressSpace to be accessed
27528be545baSRichard Henderson  * @addr: address within that address space
27538be545baSRichard Henderson  * @val: data value, for stores
27548be545baSRichard Henderson  * @attrs: memory transaction attributes
27558be545baSRichard Henderson  * @result: location to write the success/failure of the transaction;
27568be545baSRichard Henderson  *   if NULL, this information is discarded
27578be545baSRichard Henderson  */
27588be545baSRichard Henderson 
27598be545baSRichard Henderson #define SUFFIX
27608be545baSRichard Henderson #define ARG1         as
27618be545baSRichard Henderson #define ARG1_DECL    AddressSpace *as
27628be545baSRichard Henderson #include "exec/memory_ldst.h.inc"
27638be545baSRichard Henderson 
27648be545baSRichard Henderson static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
27658be545baSRichard Henderson {
27668be545baSRichard Henderson     address_space_stl_notdirty(as, addr, val,
27678be545baSRichard Henderson                                MEMTXATTRS_UNSPECIFIED, NULL);
27688be545baSRichard Henderson }
27698be545baSRichard Henderson 
27708be545baSRichard Henderson #define SUFFIX
27718be545baSRichard Henderson #define ARG1         as
27728be545baSRichard Henderson #define ARG1_DECL    AddressSpace *as
27738be545baSRichard Henderson #include "exec/memory_ldst_phys.h.inc"
27748be545baSRichard Henderson 
27758be545baSRichard Henderson struct MemoryRegionCache {
27768be545baSRichard Henderson     uint8_t *ptr;
27778be545baSRichard Henderson     hwaddr xlat;
27788be545baSRichard Henderson     hwaddr len;
27798be545baSRichard Henderson     FlatView *fv;
27808be545baSRichard Henderson     MemoryRegionSection mrs;
27818be545baSRichard Henderson     bool is_write;
27828be545baSRichard Henderson };
27838be545baSRichard Henderson 
27848be545baSRichard Henderson /* address_space_ld*_cached: load from a cached #MemoryRegion
27858be545baSRichard Henderson  * address_space_st*_cached: store into a cached #MemoryRegion
27868be545baSRichard Henderson  *
27878be545baSRichard Henderson  * These functions perform a load or store of the byte, word,
27888be545baSRichard Henderson  * longword or quad to the specified address.  The address is
27898be545baSRichard Henderson  * a physical address in the AddressSpace, but it must lie within
27908be545baSRichard Henderson  * a #MemoryRegion that was mapped with address_space_cache_init.
27918be545baSRichard Henderson  *
27928be545baSRichard Henderson  * The _le suffixed functions treat the data as little endian;
27938be545baSRichard Henderson  * _be indicates big endian; no suffix indicates "same endianness
27948be545baSRichard Henderson  * as guest CPU".
27958be545baSRichard Henderson  *
27968be545baSRichard Henderson  * The "guest CPU endianness" accessors are deprecated for use outside
27978be545baSRichard Henderson  * target-* code; devices should be CPU-agnostic and use either the LE
27988be545baSRichard Henderson  * or the BE accessors.
27998be545baSRichard Henderson  *
28008be545baSRichard Henderson  * @cache: previously initialized #MemoryRegionCache to be accessed
28018be545baSRichard Henderson  * @addr: address within the address space
28028be545baSRichard Henderson  * @val: data value, for stores
28038be545baSRichard Henderson  * @attrs: memory transaction attributes
28048be545baSRichard Henderson  * @result: location to write the success/failure of the transaction;
28058be545baSRichard Henderson  *   if NULL, this information is discarded
28068be545baSRichard Henderson  */
28078be545baSRichard Henderson 
28088be545baSRichard Henderson #define SUFFIX       _cached_slow
28098be545baSRichard Henderson #define ARG1         cache
28108be545baSRichard Henderson #define ARG1_DECL    MemoryRegionCache *cache
28118be545baSRichard Henderson #include "exec/memory_ldst.h.inc"
28128be545baSRichard Henderson 
28138be545baSRichard Henderson /* Inline fast path for direct RAM access.  */
28148be545baSRichard Henderson static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
28158be545baSRichard Henderson     hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
28168be545baSRichard Henderson {
28178be545baSRichard Henderson     assert(addr < cache->len);
28188be545baSRichard Henderson     if (likely(cache->ptr)) {
28198be545baSRichard Henderson         return ldub_p(cache->ptr + addr);
28208be545baSRichard Henderson     } else {
28218be545baSRichard Henderson         return address_space_ldub_cached_slow(cache, addr, attrs, result);
28228be545baSRichard Henderson     }
28238be545baSRichard Henderson }
28248be545baSRichard Henderson 
28258be545baSRichard Henderson static inline void address_space_stb_cached(MemoryRegionCache *cache,
28268be545baSRichard Henderson     hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
28278be545baSRichard Henderson {
28288be545baSRichard Henderson     assert(addr < cache->len);
28298be545baSRichard Henderson     if (likely(cache->ptr)) {
28308be545baSRichard Henderson         stb_p(cache->ptr + addr, val);
28318be545baSRichard Henderson     } else {
28328be545baSRichard Henderson         address_space_stb_cached_slow(cache, addr, val, attrs, result);
28338be545baSRichard Henderson     }
28348be545baSRichard Henderson }
28358be545baSRichard Henderson 
/*
 * Instantiate the inline cached load/store accessors three times:
 * once with no suffix (host-native order), once little-endian (_le)
 * and once big-endian (_be).  ENDIANNESS selects the name suffix and
 * byte-swapping behavior inside the template.
 */
#define ENDIANNESS
#include "exec/memory_ldst_cached.h.inc"

#define ENDIANNESS   _le
#include "exec/memory_ldst_cached.h.inc"

#define ENDIANNESS   _be
#include "exec/memory_ldst_cached.h.inc"
28448be545baSRichard Henderson 
/*
 * Instantiate the physical-address load/store wrappers for
 * MemoryRegionCache: expands to the address_space_{ld,st}*_cached()
 * convenience functions built on top of the accessors above.
 */
#define SUFFIX       _cached
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#include "exec/memory_ldst_phys.h.inc"
28498be545baSRichard Henderson 
/* address_space_cache_init: prepare for repeated access to a physical
 * memory region
 *
 * @cache: #MemoryRegionCache to be filled
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of buffer
 * @is_write: indicates the transfer direction
 *
 * Will only work with RAM, and may map a subset of the requested range by
 * returning a value that is less than @len.  On failure, return a negative
 * errno value.
 *
 * Because it only works with RAM, this function can be used for
 * read-modify-write operations.  In this case, is_write should be %true.
 *
 * Note that addresses passed to the address_space_*_cached functions
 * are relative to @addr, not to the start of the address space.
 *
 * Returns: the number of bytes actually cached (<= @len), or a negative
 * errno value on failure.
 */
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write);
28748be545baSRichard Henderson 
28758be545baSRichard Henderson /**
28768be545baSRichard Henderson  * address_space_cache_init_empty: Initialize empty #MemoryRegionCache
28778be545baSRichard Henderson  *
28788be545baSRichard Henderson  * @cache: The #MemoryRegionCache to operate on.
28798be545baSRichard Henderson  *
28808be545baSRichard Henderson  * Initializes #MemoryRegionCache structure without memory region attached.
28818be545baSRichard Henderson  * Cache initialized this way can only be safely destroyed, but not used.
28828be545baSRichard Henderson  */
28838be545baSRichard Henderson static inline void address_space_cache_init_empty(MemoryRegionCache *cache)
28848be545baSRichard Henderson {
28858be545baSRichard Henderson     cache->mrs.mr = NULL;
28868be545baSRichard Henderson     /* There is no real need to initialize fv, but it makes Coverity happy. */
28878be545baSRichard Henderson     cache->fv = NULL;
28888be545baSRichard Henderson }
28898be545baSRichard Henderson 
/**
 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
 *
 * Marks the written range dirty so that e.g. migration and TCG see the
 * update.
 *
 * @cache: The #MemoryRegionCache to operate on.
 * @addr: The first physical address that was written, relative to the
 * address that was passed to @address_space_cache_init.
 * @access_len: The number of bytes that were written starting at @addr.
 */
void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len);

/**
 * address_space_cache_destroy: free a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache whose memory should be released.
 */
void address_space_cache_destroy(MemoryRegionCache *cache);

/* address_space_get_iotlb_entry: translate an address into an IOTLB
 * entry. Should be called from an RCU critical section.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write, MemTxAttrs attrs);
29148be545baSRichard Henderson 
/* flatview_translate: translate an address range within a #FlatView
 * into a MemoryRegion and an address range into that section.  Should be
 * called from an RCU critical section, to avoid that the last reference
 * to the returned region disappears after flatview_translate returns.
 * (address_space_translate() below is the #AddressSpace-based wrapper.)
 *
 * @fv: #FlatView to be accessed
 * @addr: address within that flat view
 * @xlat: pointer to address within the returned memory region section's
 * #MemoryRegion; filled in on return.
 * @len: pointer to length; may be reduced on return if the range crosses
 * a section boundary
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
MemoryRegion *flatview_translate(FlatView *fv,
                                 hwaddr addr, hwaddr *xlat,
                                 hwaddr *len, bool is_write,
                                 MemTxAttrs attrs);
29328be545baSRichard Henderson 
29338be545baSRichard Henderson static inline MemoryRegion *address_space_translate(AddressSpace *as,
29348be545baSRichard Henderson                                                     hwaddr addr, hwaddr *xlat,
29358be545baSRichard Henderson                                                     hwaddr *len, bool is_write,
29368be545baSRichard Henderson                                                     MemTxAttrs attrs)
29378be545baSRichard Henderson {
29388be545baSRichard Henderson     return flatview_translate(address_space_to_flatview(as),
29398be545baSRichard Henderson                               addr, xlat, len, is_write, attrs);
29408be545baSRichard Henderson }
29418be545baSRichard Henderson 
/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, addr and len should be aligned to a page size.  This limitation
 * will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 *
 * Returns: %true if the whole range can be accessed, %false otherwise.
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
                                bool is_write, MemTxAttrs attrs);
29608be545baSRichard Henderson 
/* address_space_map: map a physical memory region into a host virtual address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL and set *@plen to zero (0), if resources needed to perform
 * the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use address_space_register_map_client() to know when retrying the map
 * operation is likely to succeed.
 *
 * The returned mapping must be released with address_space_unmap().
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return with the length
 * actually mapped
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
void *address_space_map(AddressSpace *as, hwaddr addr,
                        hwaddr *plen, bool is_write, MemTxAttrs attrs);
29788be545baSRichard Henderson 
/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
 *
 * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
 * the amount of memory that was actually read or written by the caller,
 * which may be smaller than the mapped length.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by address_space_map()
 * @len: buffer length as returned by address_space_map()
 * @access_len: amount of data actually transferred
 * @is_write: indicates the transfer direction
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         bool is_write, hwaddr access_len);
29928be545baSRichard Henderson 
/*
 * address_space_register_map_client: Register a callback to invoke when
 * resources for address_space_map() are available again.
 *
 * address_space_map may fail when there are not enough resources available,
 * such as when bounce buffer memory would exceed the limit. The callback can
 * be used to retry the address_space_map operation. Note that the callback
 * gets automatically removed after firing.
 *
 * @as: #AddressSpace to be accessed
 * @bh: callback to invoke when address_space_map() retry is appropriate
 */
void address_space_register_map_client(AddressSpace *as, QEMUBH *bh);

/*
 * address_space_unregister_map_client: Unregister a callback that has
 * previously been registered with address_space_register_map_client()
 * and has not fired yet.
 *
 * @as: #AddressSpace to be accessed
 * @bh: callback to unregister
 */
void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh);
30158be545baSRichard Henderson 
/* Internal functions, part of the implementation of address_space_read.  */
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, void *buf, hwaddr len);
/* Continue a read after the first segment has already been translated
 * (@addr1/@l/@mr describe that first segment). */
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
                                   MemTxAttrs attrs, void *buf,
                                   hwaddr len, hwaddr addr1, hwaddr l,
                                   MemoryRegion *mr);
/* Map a ram_addr_t offset within @ram_block to a host pointer. */
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);

/* Internal functions, part of the implementation of address_space_read_cached
 * and address_space_write_cached.  */
MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
                                           hwaddr addr, void *buf, hwaddr len);
MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
                                            hwaddr addr, const void *buf,
                                            hwaddr len);

int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr);
bool prepare_mmio_access(MemoryRegion *mr);
30358be545baSRichard Henderson 
30368be545baSRichard Henderson static inline bool memory_region_supports_direct_access(MemoryRegion *mr)
30378be545baSRichard Henderson {
30388be545baSRichard Henderson     /* ROM DEVICE regions only allow direct access if in ROMD mode. */
30398be545baSRichard Henderson     if (memory_region_is_romd(mr)) {
30408be545baSRichard Henderson         return true;
30418be545baSRichard Henderson     }
30428be545baSRichard Henderson     if (!memory_region_is_ram(mr)) {
30438be545baSRichard Henderson         return false;
30448be545baSRichard Henderson     }
30458be545baSRichard Henderson     /*
30468be545baSRichard Henderson      * RAM DEVICE regions can be accessed directly using memcpy, but it might
30478be545baSRichard Henderson      * be MMIO and access using mempy can be wrong (e.g., using instructions not
30488be545baSRichard Henderson      * intended for MMIO access). So we treat this as IO.
30498be545baSRichard Henderson      */
30508be545baSRichard Henderson     return !memory_region_is_ram_device(mr);
30518be545baSRichard Henderson }
30528be545baSRichard Henderson 
30538be545baSRichard Henderson static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write,
30548be545baSRichard Henderson                                            MemTxAttrs attrs)
30558be545baSRichard Henderson {
30568be545baSRichard Henderson     if (!memory_region_supports_direct_access(mr)) {
30578be545baSRichard Henderson         return false;
30588be545baSRichard Henderson     }
30598be545baSRichard Henderson     /* Debug access can write to ROM. */
30608be545baSRichard Henderson     if (is_write && !attrs.debug) {
30618be545baSRichard Henderson         return !mr->readonly && !mr->rom_device;
30628be545baSRichard Henderson     }
30638be545baSRichard Henderson     return true;
30648be545baSRichard Henderson }
30658be545baSRichard Henderson 
/**
 * address_space_read: read from an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).  Called within RCU critical section.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline __attribute__((__always_inline__))
MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, void *buf,
                               hwaddr len)
{
    MemTxResult result = MEMTX_OK;
    hwaddr l, addr1;
    void *ptr;
    MemoryRegion *mr;
    FlatView *fv;

    /*
     * Only expand the fast path when the compiler knows @len at compile
     * time; otherwise the out-of-line address_space_read_full() keeps
     * code size down.  The always_inline attribute above makes this
     * check meaningful at every call site.
     */
    if (__builtin_constant_p(len)) {
        if (len) {
            RCU_READ_LOCK_GUARD();
            fv = address_space_to_flatview(as);
            l = len;
            mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
            /*
             * Fast path: the whole range fits in one directly-accessible
             * (RAM) region, so a plain memcpy suffices.
             */
            if (len == l && memory_access_is_direct(mr, false, attrs)) {
                ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
                memcpy(buf, ptr, len);
            } else {
                /* Slow path: MMIO and/or a range crossing regions. */
                result = flatview_read_continue(fv, addr, attrs, buf, len,
                                                addr1, l, mr);
            }
        }
    } else {
        result = address_space_read_full(as, addr, attrs, buf, len);
    }
    return result;
}
31098be545baSRichard Henderson 
31108be545baSRichard Henderson /**
31118be545baSRichard Henderson  * address_space_read_cached: read from a cached RAM region
31128be545baSRichard Henderson  *
31138be545baSRichard Henderson  * @cache: Cached region to be addressed
31148be545baSRichard Henderson  * @addr: address relative to the base of the RAM region
31158be545baSRichard Henderson  * @buf: buffer with the data transferred
31168be545baSRichard Henderson  * @len: length of the data transferred
31178be545baSRichard Henderson  */
31188be545baSRichard Henderson static inline MemTxResult
31198be545baSRichard Henderson address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
31208be545baSRichard Henderson                           void *buf, hwaddr len)
31218be545baSRichard Henderson {
31228be545baSRichard Henderson     assert(addr < cache->len && len <= cache->len - addr);
31238be545baSRichard Henderson     fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
31248be545baSRichard Henderson     if (likely(cache->ptr)) {
31258be545baSRichard Henderson         memcpy(buf, cache->ptr + addr, len);
31268be545baSRichard Henderson         return MEMTX_OK;
31278be545baSRichard Henderson     } else {
31288be545baSRichard Henderson         return address_space_read_cached_slow(cache, addr, buf, len);
31298be545baSRichard Henderson     }
31308be545baSRichard Henderson }
31318be545baSRichard Henderson 
31328be545baSRichard Henderson /**
31338be545baSRichard Henderson  * address_space_write_cached: write to a cached RAM region
31348be545baSRichard Henderson  *
31358be545baSRichard Henderson  * @cache: Cached region to be addressed
31368be545baSRichard Henderson  * @addr: address relative to the base of the RAM region
31378be545baSRichard Henderson  * @buf: buffer with the data transferred
31388be545baSRichard Henderson  * @len: length of the data transferred
31398be545baSRichard Henderson  */
31408be545baSRichard Henderson static inline MemTxResult
31418be545baSRichard Henderson address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
31428be545baSRichard Henderson                            const void *buf, hwaddr len)
31438be545baSRichard Henderson {
31448be545baSRichard Henderson     assert(addr < cache->len && len <= cache->len - addr);
31458be545baSRichard Henderson     if (likely(cache->ptr)) {
31468be545baSRichard Henderson         memcpy(cache->ptr + addr, buf, len);
31478be545baSRichard Henderson         return MEMTX_OK;
31488be545baSRichard Henderson     } else {
31498be545baSRichard Henderson         return address_space_write_cached_slow(cache, addr, buf, len);
31508be545baSRichard Henderson     }
31518be545baSRichard Henderson }
31528be545baSRichard Henderson 
/**
 * address_space_set: Fill address space with a constant byte.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @c: constant byte to fill the memory
 * @len: the number of bytes to fill with the constant byte
 * @attrs: memory transaction attributes
 */
MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
                              uint8_t c, hwaddr len, MemTxAttrs attrs);
31688be545baSRichard Henderson 
/*
 * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
 * to manage the actual amount of memory consumed by the VM (then, the memory
 * provided by RAM blocks might be bigger than the desired memory consumption).
 * This *must* be set if:
 * - Discarding parts of a RAM blocks does not result in the change being
 *   reflected in the VM and the pages getting freed.
 * - All memory in RAM blocks is pinned or duplicated, invalidating any previous
 *   discards blindly.
 * - Discarding parts of a RAM blocks will result in integrity issues (e.g.,
 *   encrypted VMs).
 * Technologies that only temporarily pin the current working set of a
 * driver are fine, because we don't expect such pages to be discarded
 * (esp. based on guest action like balloon inflation).
 *
 * This is *not* to be used to protect from concurrent discards (esp.,
 * postcopy).
 *
 * Returns 0 if successful. Returns -EBUSY if a technology that relies on
 * discards to work reliably is active.
 */
int ram_block_discard_disable(bool state);
31918be545baSRichard Henderson 
/*
 * See ram_block_discard_disable(): only disable uncoordinated discards,
 * keeping coordinated discards (via the RamDiscardManager) enabled.
 */
int ram_block_uncoordinated_discard_disable(bool state);

/*
 * Inhibit technologies that disable discarding of pages in RAM blocks.
 *
 * Returns 0 if successful. Returns -EBUSY if discards are already set to
 * broken.
 */
int ram_block_discard_require(bool state);

/*
 * See ram_block_discard_require(): only inhibit technologies that disable
 * uncoordinated discarding of pages in RAM blocks, allowing co-existence with
 * technologies that only inhibit uncoordinated discards (via the
 * RamDiscardManager).
 */
int ram_block_coordinated_discard_require(bool state);

/*
 * Test if any discarding of memory in ram blocks is disabled.
 */
bool ram_block_discard_is_disabled(void);

/*
 * Test if any discarding of memory in ram blocks is required to work reliably.
 */
bool ram_block_discard_is_required(void);

/* Add/remove a CPR (checkpoint-restart) migration blocker for @rb. */
void ram_block_add_cpr_blocker(RAMBlock *rb, Error **errp);
void ram_block_del_cpr_blocker(RAMBlock *rb);
32268be545baSRichard Henderson 
32278be545baSRichard Henderson #endif
3228