xref: /openbmc/qemu/include/system/memory.h (revision 2205b8466733f8c6e3306c964f31c5a7cac69dfa)
18be545baSRichard Henderson /*
28be545baSRichard Henderson  * Physical memory management API
38be545baSRichard Henderson  *
48be545baSRichard Henderson  * Copyright 2011 Red Hat, Inc. and/or its affiliates
58be545baSRichard Henderson  *
68be545baSRichard Henderson  * Authors:
78be545baSRichard Henderson  *  Avi Kivity <avi@redhat.com>
88be545baSRichard Henderson  *
98be545baSRichard Henderson  * This work is licensed under the terms of the GNU GPL, version 2.  See
108be545baSRichard Henderson  * the COPYING file in the top-level directory.
118be545baSRichard Henderson  *
128be545baSRichard Henderson  */
138be545baSRichard Henderson 
148be545baSRichard Henderson #ifndef SYSTEM_MEMORY_H
158be545baSRichard Henderson #define SYSTEM_MEMORY_H
168be545baSRichard Henderson 
178be545baSRichard Henderson #include "exec/cpu-common.h"
188be545baSRichard Henderson #include "exec/hwaddr.h"
198be545baSRichard Henderson #include "exec/memattrs.h"
208be545baSRichard Henderson #include "exec/memop.h"
218be545baSRichard Henderson #include "exec/ramlist.h"
228be545baSRichard Henderson #include "exec/tswap.h"
238be545baSRichard Henderson #include "qemu/bswap.h"
248be545baSRichard Henderson #include "qemu/queue.h"
258be545baSRichard Henderson #include "qemu/int128.h"
268be545baSRichard Henderson #include "qemu/range.h"
278be545baSRichard Henderson #include "qemu/notify.h"
288be545baSRichard Henderson #include "qom/object.h"
298be545baSRichard Henderson #include "qemu/rcu.h"
308be545baSRichard Henderson 
318be545baSRichard Henderson #define RAM_ADDR_INVALID (~(ram_addr_t)0)
328be545baSRichard Henderson 
338be545baSRichard Henderson #define MAX_PHYS_ADDR_SPACE_BITS 62
348be545baSRichard Henderson #define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
358be545baSRichard Henderson 
368be545baSRichard Henderson #define TYPE_MEMORY_REGION "memory-region"
378be545baSRichard Henderson DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
388be545baSRichard Henderson                          TYPE_MEMORY_REGION)
398be545baSRichard Henderson 
408be545baSRichard Henderson #define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
418be545baSRichard Henderson typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
428be545baSRichard Henderson DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
438be545baSRichard Henderson                      IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)
448be545baSRichard Henderson 
458be545baSRichard Henderson #define TYPE_RAM_DISCARD_MANAGER "ram-discard-manager"
468be545baSRichard Henderson typedef struct RamDiscardManagerClass RamDiscardManagerClass;
478be545baSRichard Henderson typedef struct RamDiscardManager RamDiscardManager;
488be545baSRichard Henderson DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
498be545baSRichard Henderson                      RAM_DISCARD_MANAGER, TYPE_RAM_DISCARD_MANAGER);
508be545baSRichard Henderson 
#ifdef CONFIG_FUZZ
/*
 * Hook for fuzzing builds: NOTE(review) — presumably invoked on DMA reads
 * so the fuzzer can observe which regions are accessed; confirm against
 * the fuzzer implementation.
 */
void fuzz_dma_read_cb(size_t addr,
                      size_t len,
                      MemoryRegion *mr);
#else
/* Non-fuzzing builds: stub that the compiler optimizes away entirely. */
static inline void fuzz_dma_read_cb(size_t addr,
                                    size_t len,
                                    MemoryRegion *mr)
{
    /* Do Nothing */
}
#endif
638be545baSRichard Henderson 
648be545baSRichard Henderson /* Possible bits for global_dirty_log_{start|stop} */
658be545baSRichard Henderson 
668be545baSRichard Henderson /* Dirty tracking enabled because migration is running */
678be545baSRichard Henderson #define GLOBAL_DIRTY_MIGRATION  (1U << 0)
688be545baSRichard Henderson 
698be545baSRichard Henderson /* Dirty tracking enabled because measuring dirty rate */
708be545baSRichard Henderson #define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)
718be545baSRichard Henderson 
728be545baSRichard Henderson /* Dirty tracking enabled because dirty limit */
738be545baSRichard Henderson #define GLOBAL_DIRTY_LIMIT      (1U << 2)
748be545baSRichard Henderson 
758be545baSRichard Henderson #define GLOBAL_DIRTY_MASK  (0x7)
768be545baSRichard Henderson 
778be545baSRichard Henderson extern unsigned int global_dirty_tracking;
788be545baSRichard Henderson 
798be545baSRichard Henderson typedef struct MemoryRegionOps MemoryRegionOps;
808be545baSRichard Henderson 
/* An address range reserved for a particular purpose. */
struct ReservedRegion {
    Range range;    /* the reserved address range */
    unsigned type;  /* reservation type; semantics defined by the consumer -- TODO confirm */
};
858be545baSRichard Henderson 
868be545baSRichard Henderson /**
878be545baSRichard Henderson  * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
888be545baSRichard Henderson  *
898be545baSRichard Henderson  * @mr: the region, or %NULL if empty
908be545baSRichard Henderson  * @fv: the flat view of the address space the region is mapped in
918be545baSRichard Henderson  * @offset_within_region: the beginning of the section, relative to @mr's start
928be545baSRichard Henderson  * @size: the size of the section; will not exceed @mr's boundaries
938be545baSRichard Henderson  * @offset_within_address_space: the address of the first byte of the section
948be545baSRichard Henderson  *     relative to the region's address space
958be545baSRichard Henderson  * @readonly: writes to this section are ignored
968be545baSRichard Henderson  * @nonvolatile: this section is non-volatile
978be545baSRichard Henderson  * @unmergeable: this section should not get merged with adjacent sections
988be545baSRichard Henderson  */
struct MemoryRegionSection {
    Int128 size;                        /* size of the section; will not exceed @mr's boundaries */
    MemoryRegion *mr;                   /* the region, or NULL if empty */
    FlatView *fv;                       /* flat view of the address space the region is mapped in */
    hwaddr offset_within_region;        /* beginning of the section, relative to @mr's start */
    hwaddr offset_within_address_space; /* address of the section's first byte in the address space */
    bool readonly;                      /* writes to this section are ignored */
    bool nonvolatile;                   /* this section is non-volatile */
    bool unmergeable;                   /* this section should not get merged with adjacent sections */
};
1098be545baSRichard Henderson 
1108be545baSRichard Henderson typedef struct IOMMUTLBEntry IOMMUTLBEntry;
1118be545baSRichard Henderson 
1128be545baSRichard Henderson /* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0, /* no permission requested (full translation wanted) */
    IOMMU_RO   = 1, /* bit 0: read access */
    IOMMU_WO   = 2, /* bit 1: write access */
    IOMMU_RW   = 3, /* read + write (IOMMU_RO | IOMMU_WO) */
} IOMMUAccessFlags;

/* Build an IOMMUAccessFlags value from separate read/write booleans. */
#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))
1218be545baSRichard Henderson 
/*
 * A single translation produced by an IOMMU: maps @iova to
 * @translated_addr within @target_as, covering the naturally aligned
 * range described by @addr_mask, with permissions @perm.
 */
struct IOMMUTLBEntry {
    AddressSpace    *target_as;       /* address space the output address belongs to */
    hwaddr           iova;            /* input (I/O virtual) address */
    hwaddr           translated_addr; /* output address in @target_as */
    hwaddr           addr_mask;       /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;            /* permitted access for this mapping */
};
1298be545baSRichard Henderson 
1308be545baSRichard Henderson /*
1318be545baSRichard Henderson  * Bitmap for different IOMMUNotifier capabilities. Each notifier can
1328be545baSRichard Henderson  * register with one or multiple IOMMU Notifier capability bit(s).
1338be545baSRichard Henderson  *
 * Normally there are two use cases for the notifiers:
1358be545baSRichard Henderson  *
1368be545baSRichard Henderson  *   (1) When the device needs accurate synchronizations of the vIOMMU page
1378be545baSRichard Henderson  *       tables, it needs to register with both MAP|UNMAP notifies (which
1388be545baSRichard Henderson  *       is defined as IOMMU_NOTIFIER_IOTLB_EVENTS below).
1398be545baSRichard Henderson  *
1408be545baSRichard Henderson  *       Regarding to accurate synchronization, it's when the notified
1418be545baSRichard Henderson  *       device maintains a shadow page table and must be notified on each
1428be545baSRichard Henderson  *       guest MAP (page table entry creation) and UNMAP (invalidation)
1438be545baSRichard Henderson  *       events (e.g. VFIO). Both notifications must be accurate so that
1448be545baSRichard Henderson  *       the shadow page table is fully in sync with the guest view.
1458be545baSRichard Henderson  *
1468be545baSRichard Henderson  *   (2) When the device doesn't need accurate synchronizations of the
1478be545baSRichard Henderson  *       vIOMMU page tables, it needs to register only with UNMAP or
1488be545baSRichard Henderson  *       DEVIOTLB_UNMAP notifies.
1498be545baSRichard Henderson  *
1508be545baSRichard Henderson  *       It's when the device maintains a cache of IOMMU translations
1518be545baSRichard Henderson  *       (IOTLB) and is able to fill that cache by requesting translations
1528be545baSRichard Henderson  *       from the vIOMMU through a protocol similar to ATS (Address
1538be545baSRichard Henderson  *       Translation Service).
1548be545baSRichard Henderson  *
1558be545baSRichard Henderson  *       Note that in this mode the vIOMMU will not maintain a shadowed
1568be545baSRichard Henderson  *       page table for the address space, and the UNMAP messages can cover
1578be545baSRichard Henderson  *       more than the pages that used to get mapped.  The IOMMU notifiee
1588be545baSRichard Henderson  *       should be able to take care of over-sized invalidations.
1598be545baSRichard Henderson  */
/* Bitmask of event kinds a notifier may register for (see discussion above). */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
    /* Notify changes on device IOTLB entries */
    IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
} IOMMUNotifierFlag;
1698be545baSRichard Henderson 
1708be545baSRichard Henderson #define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
1718be545baSRichard Henderson #define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
1728be545baSRichard Henderson #define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
1738be545baSRichard Henderson                             IOMMU_NOTIFIER_DEVIOTLB_EVENTS)
1748be545baSRichard Henderson 
struct IOMMUNotifier;
/* Callback invoked for each IOMMU translation-change event delivered to
 * the notifier. */
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;               /* callback to invoke on an event */
    IOMMUNotifierFlag notifier_flags; /* which event kinds are subscribed */
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;                    /* IOMMU index this notifier applies to */
    void *opaque;                     /* user data -- presumably for the notify callback; confirm at call sites */
    QLIST_ENTRY(IOMMUNotifier) node;  /* linkage on the owner's notifier list */
};
typedef struct IOMMUNotifier IOMMUNotifier;
1908be545baSRichard Henderson 
/* An IOMMU notification: which event fired plus the affected translation. */
typedef struct IOMMUTLBEvent {
    IOMMUNotifierFlag type;  /* event kind (MAP / UNMAP / DEVIOTLB_UNMAP) */
    IOMMUTLBEntry entry;     /* the translation being added or invalidated */
} IOMMUTLBEvent;
1958be545baSRichard Henderson 
1968be545baSRichard Henderson /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
1978be545baSRichard Henderson #define RAM_PREALLOC   (1 << 0)
1988be545baSRichard Henderson 
1998be545baSRichard Henderson /* RAM is mmap-ed with MAP_SHARED */
2008be545baSRichard Henderson #define RAM_SHARED     (1 << 1)
2018be545baSRichard Henderson 
2028be545baSRichard Henderson /* Only a portion of RAM (used_length) is actually used, and migrated.
2038be545baSRichard Henderson  * Resizing RAM while migrating can result in the migration being canceled.
2048be545baSRichard Henderson  */
2058be545baSRichard Henderson #define RAM_RESIZEABLE (1 << 2)
2068be545baSRichard Henderson 
2078be545baSRichard Henderson /* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
2088be545baSRichard Henderson  * zero the page and wake waiting processes.
2098be545baSRichard Henderson  * (Set during postcopy)
2108be545baSRichard Henderson  */
2118be545baSRichard Henderson #define RAM_UF_ZEROPAGE (1 << 3)
2128be545baSRichard Henderson 
2138be545baSRichard Henderson /* RAM can be migrated */
2148be545baSRichard Henderson #define RAM_MIGRATABLE (1 << 4)
2158be545baSRichard Henderson 
/* RAM is a persistent kind of memory */
2178be545baSRichard Henderson #define RAM_PMEM (1 << 5)
2188be545baSRichard Henderson 
2198be545baSRichard Henderson 
2208be545baSRichard Henderson /*
2218be545baSRichard Henderson  * UFFDIO_WRITEPROTECT is used on this RAMBlock to
2228be545baSRichard Henderson  * support 'write-tracking' migration type.
2238be545baSRichard Henderson  * Implies ram_state->ram_wt_enabled.
2248be545baSRichard Henderson  */
2258be545baSRichard Henderson #define RAM_UF_WRITEPROTECT (1 << 6)
2268be545baSRichard Henderson 
2278be545baSRichard Henderson /*
2288be545baSRichard Henderson  * RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or huge
2298be545baSRichard Henderson  * pages if applicable) is skipped: will bail out if not supported. When not
2308be545baSRichard Henderson  * set, the OS will do the reservation, if supported for the memory type.
2318be545baSRichard Henderson  */
2328be545baSRichard Henderson #define RAM_NORESERVE (1 << 7)
2338be545baSRichard Henderson 
2348be545baSRichard Henderson /* RAM that isn't accessible through normal means. */
2358be545baSRichard Henderson #define RAM_PROTECTED (1 << 8)
2368be545baSRichard Henderson 
2378be545baSRichard Henderson /* RAM is an mmap-ed named file */
2388be545baSRichard Henderson #define RAM_NAMED_FILE (1 << 9)
2398be545baSRichard Henderson 
2408be545baSRichard Henderson /* RAM is mmap-ed read-only */
2418be545baSRichard Henderson #define RAM_READONLY (1 << 10)
2428be545baSRichard Henderson 
2438be545baSRichard Henderson /* RAM FD is opened read-only */
2448be545baSRichard Henderson #define RAM_READONLY_FD (1 << 11)
2458be545baSRichard Henderson 
2468be545baSRichard Henderson /* RAM can be private that has kvm guest memfd backend */
2478be545baSRichard Henderson #define RAM_GUEST_MEMFD   (1 << 12)
2488be545baSRichard Henderson 
/*
 * In RAMBlock creation functions, if RAM_SHARED is 0 in the flags parameter,
 * the implementation may still create a shared mapping if other conditions
 * require it.  Callers who specifically want a private mapping, eg objects
 * specified by the user, must pass RAM_PRIVATE.
 * After RAMBlock creation, RAM_SHARED in the block's flags indicates whether
 * the block is shared or private, and RAM_PRIVATE is omitted.
 */
2578be545baSRichard Henderson #define RAM_PRIVATE (1 << 13)
2588be545baSRichard Henderson 
2598be545baSRichard Henderson static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
2608be545baSRichard Henderson                                        IOMMUNotifierFlag flags,
2618be545baSRichard Henderson                                        hwaddr start, hwaddr end,
2628be545baSRichard Henderson                                        int iommu_idx)
2638be545baSRichard Henderson {
2648be545baSRichard Henderson     n->notify = fn;
2658be545baSRichard Henderson     n->notifier_flags = flags;
2668be545baSRichard Henderson     n->start = start;
2678be545baSRichard Henderson     n->end = end;
2688be545baSRichard Henderson     n->iommu_idx = iommu_idx;
2698be545baSRichard Henderson }
2708be545baSRichard Henderson 
/*
 * Memory region callbacks
 *
 * Accessor and constraint table for a memory region backed by callbacks:
 * how the core memory system reads and writes the region, plus the
 * access-size and alignment constraints the implementation advertises.
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    /* Variants of @read/@write that carry memory transaction attributes
     * and report success/failure through a MemTxResult. */
    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    /* Byte order of the device registers (see enum device_endian). */
    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
         bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};
3358be545baSRichard Henderson 
/* Class struct for TYPE_MEMORY_REGION; adds nothing beyond ObjectClass. */
typedef struct MemoryRegionClass {
    /* private */
    ObjectClass parent_class;
} MemoryRegionClass;
3408be545baSRichard Henderson 
3418be545baSRichard Henderson 
/*
 * Attributes queryable through memory_region_iommu_get_attr(); handled
 * by the IOMMUMemoryRegionClass @get_attr method.
 */
enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};
3458be545baSRichard Henderson 
3468be545baSRichard Henderson /*
3478be545baSRichard Henderson  * IOMMUMemoryRegionClass:
3488be545baSRichard Henderson  *
3498be545baSRichard Henderson  * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
3508be545baSRichard Henderson  * and provide an implementation of at least the @translate method here
3518be545baSRichard Henderson  * to handle requests to the memory region. Other methods are optional.
3528be545baSRichard Henderson  *
3538be545baSRichard Henderson  * The IOMMU implementation must use the IOMMU notifier infrastructure
3548be545baSRichard Henderson  * to report whenever mappings are changed, by calling
3558be545baSRichard Henderson  * memory_region_notify_iommu() (or, if necessary, by calling
3568be545baSRichard Henderson  * memory_region_notify_iommu_one() for each registered notifier).
3578be545baSRichard Henderson  *
3588be545baSRichard Henderson  * Conceptually an IOMMU provides a mapping from input address
3598be545baSRichard Henderson  * to an output TLB entry. If the IOMMU is aware of memory transaction
3608be545baSRichard Henderson  * attributes and the output TLB entry depends on the transaction
3618be545baSRichard Henderson  * attributes, we represent this using IOMMU indexes. Each index
3628be545baSRichard Henderson  * selects a particular translation table that the IOMMU has:
3638be545baSRichard Henderson  *
3648be545baSRichard Henderson  *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
3658be545baSRichard Henderson  *
3668be545baSRichard Henderson  *   @translate takes an input address and an IOMMU index
3678be545baSRichard Henderson  *
3688be545baSRichard Henderson  * and the mapping returned can only depend on the input address and the
3698be545baSRichard Henderson  * IOMMU index.
3708be545baSRichard Henderson  *
3718be545baSRichard Henderson  * Most IOMMUs don't care about the transaction attributes and support
3728be545baSRichard Henderson  * only a single IOMMU index. A more complex IOMMU might have one index
3738be545baSRichard Henderson  * for secure transactions and one for non-secure transactions.
3748be545baSRichard Henderson  */
struct IOMMUMemoryRegionClass {
    /* private: */
    MemoryRegionClass parent_class;

    /* public: */
    /**
     * @translate:
     *
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @addr: address to be translated within the memory region
     *
     * @flag: requested access permission
     *
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);
    /**
     * @get_min_page_size:
     *
     * Returns minimum supported page size in bytes.
     *
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /**
     * @notify_flag_changed:
     *
     * Called when IOMMU Notifier flag changes (ie when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @old_flags: events which previously needed to be notified
     *
     * @new_flags: events which now need to be notified
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL if the new flag bitmap is not supported by the
     * IOMMU memory region. In case of failure, the error object
     * must be created
     */
    int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                               IOMMUNotifierFlag old_flags,
                               IOMMUNotifierFlag new_flags,
                               Error **errp);
    /**
     * @replay:
     *
     * Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /**
     * @get_attr:
     *
     * Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @attr: attribute being queried
     *
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /**
     * @attrs_to_index:
     *
     * Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /**
     * @num_indexes:
     *
     * Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);
};
5188be545baSRichard Henderson 
typedef struct RamDiscardListener RamDiscardListener;
/*
 * Notification that previously discarded memory is about to get populated;
 * returns 0 on success or a negative errno if the listener rejects the
 * transition (see RamDiscardListener.notify_populate).
 */
typedef int (*NotifyRamPopulate)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);
/*
 * Notification that previously populated memory was successfully discarded
 * (see RamDiscardListener.notify_discard).
 */
typedef void (*NotifyRamDiscard)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);
5248be545baSRichard Henderson 
struct RamDiscardListener {
    /*
     * @notify_populate:
     *
     * Notification that previously discarded memory is about to get populated.
     * Listeners are able to object. If any listener objects, already
     * successfully notified listeners are notified about a discard again.
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get populated. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     *
     * Returns 0 on success. If the notification is rejected by the listener,
     * an error is returned.
     */
    NotifyRamPopulate notify_populate;

    /*
     * @notify_discard:
     *
     * Notification that previously populated memory was discarded successfully
     * and listeners should drop all references to such memory and prevent
     * new population (e.g., unmap).
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection that got discarded. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     */
    NotifyRamDiscard notify_discard;

    /*
     * @double_discard_supported:
     *
     * The listener supports getting @notify_discard notifications that span
     * already discarded parts.
     */
    bool double_discard_supported;

    /* The section this listener was registered for (see @register_listener). */
    MemoryRegionSection *section;
    QLIST_ENTRY(RamDiscardListener) next;
};
5688be545baSRichard Henderson 
5698be545baSRichard Henderson static inline void ram_discard_listener_init(RamDiscardListener *rdl,
5708be545baSRichard Henderson                                              NotifyRamPopulate populate_fn,
5718be545baSRichard Henderson                                              NotifyRamDiscard discard_fn,
5728be545baSRichard Henderson                                              bool double_discard_supported)
5738be545baSRichard Henderson {
5748be545baSRichard Henderson     rdl->notify_populate = populate_fn;
5758be545baSRichard Henderson     rdl->notify_discard = discard_fn;
5768be545baSRichard Henderson     rdl->double_discard_supported = double_discard_supported;
5778be545baSRichard Henderson }
5788be545baSRichard Henderson 
/**
 * typedef ReplayRamDiscardState:
 *
 * The callback handler for #RamDiscardManagerClass.replay_populated/
 * #RamDiscardManagerClass.replay_discarded to invoke on populated/discarded
 * parts.
 *
 * @section: the #MemoryRegionSection of the populated/discarded part
 * @opaque: the opaque pointer that was passed to the replay function
 *
 * Returns 0 on success, or a negative errno on failure.
 */
typedef int (*ReplayRamDiscardState)(MemoryRegionSection *section,
                                     void *opaque);
5938be545baSRichard Henderson 
/*
 * RamDiscardManagerClass:
 *
 * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
 * regions are currently populated to be used/accessed by the VM, notifying
 * after parts were discarded (freeing up memory) and before parts will be
 * populated (consuming memory), to be used/accessed by the VM.
 *
 * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
 * #MemoryRegion isn't mapped into an address space yet (either directly
 * or via an alias); it cannot change while the #MemoryRegion is
 * mapped into an address space.
 *
 * The #RamDiscardManager is intended to be used by technologies that are
 * incompatible with discarding of RAM (e.g., VFIO, which may pin all
 * memory inside a #MemoryRegion), and require proper coordination to only
 * map the currently populated parts, to hinder parts that are expected to
 * remain discarded from silently getting populated and consuming memory.
 * Technologies that support discarding of RAM don't have to bother and can
 * simply map the whole #MemoryRegion.
 *
 * An example #RamDiscardManager is virtio-mem, which logically (un)plugs
 * memory within an assigned RAM #MemoryRegion, coordinated with the VM.
 * Logically unplugging memory consists of discarding RAM. The VM agreed to not
 * access unplugged (discarded) memory - especially via DMA. virtio-mem will
 * properly coordinate with listeners before memory is plugged (populated),
 * and after memory is unplugged (discarded).
 *
 * Listeners are called in multiples of the minimum granularity (unless it
 * would exceed the registered range) and changes are aligned to the minimum
 * granularity within the #MemoryRegion. Listeners have to prepare for memory
 * becoming discarded in a different granularity than it was populated and the
 * other way around.
 */
struct RamDiscardManagerClass {
    /* private */
    InterfaceClass parent_class;

    /* public */

    /**
     * @get_min_granularity:
     *
     * Get the minimum granularity in which listeners will get notified
     * about changes within the #MemoryRegion via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @mr: the #MemoryRegion
     *
     * Returns the minimum granularity.
     */
    uint64_t (*get_min_granularity)(const RamDiscardManager *rdm,
                                    const MemoryRegion *mr);

    /**
     * @is_populated:
     *
     * Check whether the given #MemoryRegionSection is completely populated
     * (i.e., no parts are currently discarded) via the #RamDiscardManager.
     * There are no alignment requirements.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     *
     * Returns whether the given range is completely populated.
     */
    bool (*is_populated)(const RamDiscardManager *rdm,
                         const MemoryRegionSection *section);

    /**
     * @replay_populated:
     *
     * Call the #ReplayRamDiscardState callback for all populated parts within
     * the #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any call fails, no further calls are made.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamDiscardState callback
     * @opaque: pointer to forward to the callback
     *
     * Returns 0 on success, or a negative error if any notification failed.
     */
    int (*replay_populated)(const RamDiscardManager *rdm,
                            MemoryRegionSection *section,
                            ReplayRamDiscardState replay_fn, void *opaque);

    /**
     * @replay_discarded:
     *
     * Call the #ReplayRamDiscardState callback for all discarded parts within
     * the #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any call fails, no further calls are made.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamDiscardState callback
     * @opaque: pointer to forward to the callback
     *
     * Returns 0 on success, or a negative error if any notification failed.
     */
    int (*replay_discarded)(const RamDiscardManager *rdm,
                            MemoryRegionSection *section,
                            ReplayRamDiscardState replay_fn, void *opaque);

    /**
     * @register_listener:
     *
     * Register a #RamDiscardListener for the given #MemoryRegionSection and
     * immediately notify the #RamDiscardListener about all populated parts
     * within the #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any notification fails, no further notifications are triggered
     * and an error is logged.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     * @section: the #MemoryRegionSection
     */
    void (*register_listener)(RamDiscardManager *rdm,
                              RamDiscardListener *rdl,
                              MemoryRegionSection *section);

    /**
     * @unregister_listener:
     *
     * Unregister a previously registered #RamDiscardListener via the
     * #RamDiscardManager after notifying the #RamDiscardListener about all
     * populated parts becoming unpopulated within the registered
     * #MemoryRegionSection.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     */
    void (*unregister_listener)(RamDiscardManager *rdm,
                                RamDiscardListener *rdl);
};
7318be545baSRichard Henderson 
/**
 * ram_discard_manager_get_min_granularity:
 *
 * A wrapper to call the #RamDiscardManagerClass.get_min_granularity callback
 * of the #RamDiscardManager.
 *
 * @rdm: the #RamDiscardManager
 * @mr: the #MemoryRegion
 *
 * Returns the minimum granularity.
 */
uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
                                                 const MemoryRegion *mr);

/**
 * ram_discard_manager_is_populated:
 *
 * A wrapper to call the #RamDiscardManagerClass.is_populated callback
 * of the #RamDiscardManager.
 *
 * @rdm: the #RamDiscardManager
 * @section: the #MemoryRegionSection
 *
 * Returns whether the given range is completely populated.
 */
bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
                                      const MemoryRegionSection *section);

/**
 * ram_discard_manager_replay_populated:
 *
 * A wrapper to call the #RamDiscardManagerClass.replay_populated callback
 * of the #RamDiscardManager.
 *
 * @rdm: the #RamDiscardManager
 * @section: the #MemoryRegionSection
 * @replay_fn: the #ReplayRamDiscardState callback
 * @opaque: pointer to forward to the callback
 *
 * Returns 0 on success, or a negative error if any notification failed.
 */
int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamDiscardState replay_fn,
                                         void *opaque);

/**
 * ram_discard_manager_replay_discarded:
 *
 * A wrapper to call the #RamDiscardManagerClass.replay_discarded callback
 * of the #RamDiscardManager.
 *
 * @rdm: the #RamDiscardManager
 * @section: the #MemoryRegionSection
 * @replay_fn: the #ReplayRamDiscardState callback
 * @opaque: pointer to forward to the callback
 *
 * Returns 0 on success, or a negative error if any notification failed.
 */
int ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamDiscardState replay_fn,
                                         void *opaque);

/**
 * ram_discard_manager_register_listener:
 *
 * A wrapper to call the #RamDiscardManagerClass.register_listener callback
 * of the #RamDiscardManager.
 *
 * @rdm: the #RamDiscardManager
 * @rdl: the #RamDiscardListener
 * @section: the #MemoryRegionSection
 */
void ram_discard_manager_register_listener(RamDiscardManager *rdm,
                                           RamDiscardListener *rdl,
                                           MemoryRegionSection *section);

/**
 * ram_discard_manager_unregister_listener:
 *
 * A wrapper to call the #RamDiscardManagerClass.unregister_listener callback
 * of the #RamDiscardManager.
 *
 * @rdm: the #RamDiscardManager
 * @rdl: the #RamDiscardListener
 */
void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                             RamDiscardListener *rdl);
7808be545baSRichard Henderson 
/**
 * memory_translate_iotlb: Extract addresses from a TLB entry.
 *                         Called with rcu_read_lock held.
 *
 * @iotlb: pointer to an #IOMMUTLBEntry
 * @xlat_p: return the offset of the entry from the start of the returned
 *          MemoryRegion.
 * @errp: pointer to Error*, filled with an error on failure.
 *
 * Return: On success, return the MemoryRegion containing the @iotlb translated
 *         addr.  The MemoryRegion must not be accessed after rcu_read_unlock.
 *         On failure, return NULL, setting @errp with error.
 */
MemoryRegion *memory_translate_iotlb(IOMMUTLBEntry *iotlb, hwaddr *xlat_p,
                                     Error **errp);
7968be545baSRichard Henderson 
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

/** MemoryRegion:
 *
 * A struct representing a memory region.
 *
 * All fields below the "private:" marker are internal to the memory core;
 * use the memory_region_*() API to query or modify a region rather than
 * touching the fields directly.
 */
struct MemoryRegion {
    Object parent_obj;

    /* private: */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool nonvolatile;
    bool rom_device;
    bool flush_coalesced_mmio;
    bool unmergeable;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;
    /* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */
    DeviceState *dev;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    int mapped_via_alias; /* Mapped via an alias, container might be NULL */
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    RamDiscardManager *rdm; /* Only for RAM */

    /* For devices designed to perform re-entrant IO into their own IO MRs */
    bool disable_reentrancy_guard;
};
8518be545baSRichard Henderson 
/* A MemoryRegion subclass for IOMMU regions; tracks registered notifiers. */
struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    /* List of registered IOMMUNotifiers */
    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

/* Iterate over all IOMMUNotifiers registered on IOMMU memory region @mr. */
#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

/* Well-known priority values for MemoryListener registration */
#define MEMORY_LISTENER_PRIORITY_MIN            0
#define MEMORY_LISTENER_PRIORITY_ACCEL          10
#define MEMORY_LISTENER_PRIORITY_DEV_BACKEND    10
8658be545baSRichard Henderson 
8668be545baSRichard Henderson /**
8678be545baSRichard Henderson  * struct MemoryListener: callbacks structure for updates to the physical memory map
8688be545baSRichard Henderson  *
8698be545baSRichard Henderson  * Allows a component to adjust to changes in the guest-visible memory map.
8708be545baSRichard Henderson  * Use with memory_listener_register() and memory_listener_unregister().
8718be545baSRichard Henderson  */
8728be545baSRichard Henderson struct MemoryListener {
8738be545baSRichard Henderson     /**
8748be545baSRichard Henderson      * @begin:
8758be545baSRichard Henderson      *
8768be545baSRichard Henderson      * Called at the beginning of an address space update transaction.
8778be545baSRichard Henderson      * Followed by calls to #MemoryListener.region_add(),
8788be545baSRichard Henderson      * #MemoryListener.region_del(), #MemoryListener.region_nop(),
8798be545baSRichard Henderson      * #MemoryListener.log_start() and #MemoryListener.log_stop() in
8808be545baSRichard Henderson      * increasing address order.
8818be545baSRichard Henderson      *
8828be545baSRichard Henderson      * @listener: The #MemoryListener.
8838be545baSRichard Henderson      */
8848be545baSRichard Henderson     void (*begin)(MemoryListener *listener);
8858be545baSRichard Henderson 
8868be545baSRichard Henderson     /**
8878be545baSRichard Henderson      * @commit:
8888be545baSRichard Henderson      *
8898be545baSRichard Henderson      * Called at the end of an address space update transaction,
8908be545baSRichard Henderson      * after the last call to #MemoryListener.region_add(),
8918be545baSRichard Henderson      * #MemoryListener.region_del() or #MemoryListener.region_nop(),
8928be545baSRichard Henderson      * #MemoryListener.log_start() and #MemoryListener.log_stop().
8938be545baSRichard Henderson      *
8948be545baSRichard Henderson      * @listener: The #MemoryListener.
8958be545baSRichard Henderson      */
8968be545baSRichard Henderson     void (*commit)(MemoryListener *listener);
8978be545baSRichard Henderson 
8988be545baSRichard Henderson     /**
8998be545baSRichard Henderson      * @region_add:
9008be545baSRichard Henderson      *
     * Called during an address space update transaction,
     * for a section of the address space that is new in this address
     * space since the last transaction.
9048be545baSRichard Henderson      *
9058be545baSRichard Henderson      * @listener: The #MemoryListener.
9068be545baSRichard Henderson      * @section: The new #MemoryRegionSection.
9078be545baSRichard Henderson      */
9088be545baSRichard Henderson     void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
9098be545baSRichard Henderson 
9108be545baSRichard Henderson     /**
9118be545baSRichard Henderson      * @region_del:
9128be545baSRichard Henderson      *
9138be545baSRichard Henderson      * Called during an address space update transaction,
9148be545baSRichard Henderson      * for a section of the address space that has disappeared in the address
9158be545baSRichard Henderson      * space since the last transaction.
9168be545baSRichard Henderson      *
9178be545baSRichard Henderson      * @listener: The #MemoryListener.
9188be545baSRichard Henderson      * @section: The old #MemoryRegionSection.
9198be545baSRichard Henderson      */
9208be545baSRichard Henderson     void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
9218be545baSRichard Henderson 
9228be545baSRichard Henderson     /**
9238be545baSRichard Henderson      * @region_nop:
9248be545baSRichard Henderson      *
9258be545baSRichard Henderson      * Called during an address space update transaction,
9268be545baSRichard Henderson      * for a section of the address space that is in the same place in the address
9278be545baSRichard Henderson      * space as in the last transaction.
9288be545baSRichard Henderson      *
9298be545baSRichard Henderson      * @listener: The #MemoryListener.
9308be545baSRichard Henderson      * @section: The #MemoryRegionSection.
9318be545baSRichard Henderson      */
9328be545baSRichard Henderson     void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
9338be545baSRichard Henderson 
9348be545baSRichard Henderson     /**
9358be545baSRichard Henderson      * @log_start:
9368be545baSRichard Henderson      *
9378be545baSRichard Henderson      * Called during an address space update transaction, after
9388be545baSRichard Henderson      * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
9398be545baSRichard Henderson      * #MemoryListener.region_nop(), if dirty memory logging clients have
9408be545baSRichard Henderson      * become active since the last transaction.
9418be545baSRichard Henderson      *
9428be545baSRichard Henderson      * @listener: The #MemoryListener.
9438be545baSRichard Henderson      * @section: The #MemoryRegionSection.
9448be545baSRichard Henderson      * @old: A bitmap of dirty memory logging clients that were active in
9458be545baSRichard Henderson      * the previous transaction.
9468be545baSRichard Henderson      * @new: A bitmap of dirty memory logging clients that are active in
9478be545baSRichard Henderson      * the current transaction.
9488be545baSRichard Henderson      */
9498be545baSRichard Henderson     void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
9508be545baSRichard Henderson                       int old_val, int new_val);
9518be545baSRichard Henderson 
9528be545baSRichard Henderson     /**
9538be545baSRichard Henderson      * @log_stop:
9548be545baSRichard Henderson      *
9558be545baSRichard Henderson      * Called during an address space update transaction, after
9568be545baSRichard Henderson      * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
9578be545baSRichard Henderson      * #MemoryListener.region_nop() and possibly after
9588be545baSRichard Henderson      * #MemoryListener.log_start(), if dirty memory logging clients have
9598be545baSRichard Henderson      * become inactive since the last transaction.
9608be545baSRichard Henderson      *
9618be545baSRichard Henderson      * @listener: The #MemoryListener.
9628be545baSRichard Henderson      * @section: The #MemoryRegionSection.
9638be545baSRichard Henderson      * @old: A bitmap of dirty memory logging clients that were active in
9648be545baSRichard Henderson      * the previous transaction.
9658be545baSRichard Henderson      * @new: A bitmap of dirty memory logging clients that are active in
9668be545baSRichard Henderson      * the current transaction.
9678be545baSRichard Henderson      */
9688be545baSRichard Henderson     void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
9698be545baSRichard Henderson                      int old_val, int new_val);
9708be545baSRichard Henderson 
9718be545baSRichard Henderson     /**
9728be545baSRichard Henderson      * @log_sync:
9738be545baSRichard Henderson      *
9748be545baSRichard Henderson      * Called by memory_region_snapshot_and_clear_dirty() and
9758be545baSRichard Henderson      * memory_global_dirty_log_sync(), before accessing QEMU's "official"
9768be545baSRichard Henderson      * copy of the dirty memory bitmap for a #MemoryRegionSection.
9778be545baSRichard Henderson      *
9788be545baSRichard Henderson      * @listener: The #MemoryListener.
9798be545baSRichard Henderson      * @section: The #MemoryRegionSection.
9808be545baSRichard Henderson      */
9818be545baSRichard Henderson     void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
9828be545baSRichard Henderson 
9838be545baSRichard Henderson     /**
9848be545baSRichard Henderson      * @log_sync_global:
9858be545baSRichard Henderson      *
9868be545baSRichard Henderson      * This is the global version of @log_sync when the listener does
9878be545baSRichard Henderson      * not have a way to synchronize the log with finer granularity.
9888be545baSRichard Henderson      * When the listener registers with @log_sync_global defined, then
9898be545baSRichard Henderson      * its @log_sync must be NULL.  Vice versa.
9908be545baSRichard Henderson      *
9918be545baSRichard Henderson      * @listener: The #MemoryListener.
9928be545baSRichard Henderson      * @last_stage: The last stage to synchronize the log during migration.
     * The caller should guarantee that a synchronization with @last_stage
     * set to true is triggered exactly once after all VCPUs have been stopped.
9958be545baSRichard Henderson      */
9968be545baSRichard Henderson     void (*log_sync_global)(MemoryListener *listener, bool last_stage);
9978be545baSRichard Henderson 
9988be545baSRichard Henderson     /**
9998be545baSRichard Henderson      * @log_clear:
10008be545baSRichard Henderson      *
10018be545baSRichard Henderson      * Called before reading the dirty memory bitmap for a
10028be545baSRichard Henderson      * #MemoryRegionSection.
10038be545baSRichard Henderson      *
10048be545baSRichard Henderson      * @listener: The #MemoryListener.
10058be545baSRichard Henderson      * @section: The #MemoryRegionSection.
10068be545baSRichard Henderson      */
10078be545baSRichard Henderson     void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);
10088be545baSRichard Henderson 
10098be545baSRichard Henderson     /**
10108be545baSRichard Henderson      * @log_global_start:
10118be545baSRichard Henderson      *
10128be545baSRichard Henderson      * Called by memory_global_dirty_log_start(), which
10138be545baSRichard Henderson      * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
10148be545baSRichard Henderson      * the address space.  #MemoryListener.log_global_start() is also
10158be545baSRichard Henderson      * called when a #MemoryListener is added, if global dirty logging is
10168be545baSRichard Henderson      * active at that time.
10178be545baSRichard Henderson      *
10188be545baSRichard Henderson      * @listener: The #MemoryListener.
10198be545baSRichard Henderson      * @errp: pointer to Error*, to store an error if it happens.
10208be545baSRichard Henderson      *
10218be545baSRichard Henderson      * Return: true on success, else false setting @errp with error.
10228be545baSRichard Henderson      */
10238be545baSRichard Henderson     bool (*log_global_start)(MemoryListener *listener, Error **errp);
10248be545baSRichard Henderson 
10258be545baSRichard Henderson     /**
10268be545baSRichard Henderson      * @log_global_stop:
10278be545baSRichard Henderson      *
10288be545baSRichard Henderson      * Called by memory_global_dirty_log_stop(), which
10298be545baSRichard Henderson      * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
10308be545baSRichard Henderson      * the address space.
10318be545baSRichard Henderson      *
10328be545baSRichard Henderson      * @listener: The #MemoryListener.
10338be545baSRichard Henderson      */
10348be545baSRichard Henderson     void (*log_global_stop)(MemoryListener *listener);
10358be545baSRichard Henderson 
10368be545baSRichard Henderson     /**
10378be545baSRichard Henderson      * @log_global_after_sync:
10388be545baSRichard Henderson      *
10398be545baSRichard Henderson      * Called after reading the dirty memory bitmap
10408be545baSRichard Henderson      * for any #MemoryRegionSection.
10418be545baSRichard Henderson      *
10428be545baSRichard Henderson      * @listener: The #MemoryListener.
10438be545baSRichard Henderson      */
10448be545baSRichard Henderson     void (*log_global_after_sync)(MemoryListener *listener);
10458be545baSRichard Henderson 
10468be545baSRichard Henderson     /**
10478be545baSRichard Henderson      * @eventfd_add:
10488be545baSRichard Henderson      *
10498be545baSRichard Henderson      * Called during an address space update transaction,
10508be545baSRichard Henderson      * for a section of the address space that has had a new ioeventfd
10518be545baSRichard Henderson      * registration since the last transaction.
10528be545baSRichard Henderson      *
10538be545baSRichard Henderson      * @listener: The #MemoryListener.
10548be545baSRichard Henderson      * @section: The new #MemoryRegionSection.
10558be545baSRichard Henderson      * @match_data: The @match_data parameter for the new ioeventfd.
10568be545baSRichard Henderson      * @data: The @data parameter for the new ioeventfd.
10578be545baSRichard Henderson      * @e: The #EventNotifier parameter for the new ioeventfd.
10588be545baSRichard Henderson      */
10598be545baSRichard Henderson     void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
10608be545baSRichard Henderson                         bool match_data, uint64_t data, EventNotifier *e);
10618be545baSRichard Henderson 
10628be545baSRichard Henderson     /**
10638be545baSRichard Henderson      * @eventfd_del:
10648be545baSRichard Henderson      *
10658be545baSRichard Henderson      * Called during an address space update transaction,
10668be545baSRichard Henderson      * for a section of the address space that has dropped an ioeventfd
10678be545baSRichard Henderson      * registration since the last transaction.
10688be545baSRichard Henderson      *
10698be545baSRichard Henderson      * @listener: The #MemoryListener.
10708be545baSRichard Henderson      * @section: The new #MemoryRegionSection.
10718be545baSRichard Henderson      * @match_data: The @match_data parameter for the dropped ioeventfd.
10728be545baSRichard Henderson      * @data: The @data parameter for the dropped ioeventfd.
10738be545baSRichard Henderson      * @e: The #EventNotifier parameter for the dropped ioeventfd.
10748be545baSRichard Henderson      */
10758be545baSRichard Henderson     void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
10768be545baSRichard Henderson                         bool match_data, uint64_t data, EventNotifier *e);
10778be545baSRichard Henderson 
10788be545baSRichard Henderson     /**
10798be545baSRichard Henderson      * @coalesced_io_add:
10808be545baSRichard Henderson      *
10818be545baSRichard Henderson      * Called during an address space update transaction,
10828be545baSRichard Henderson      * for a section of the address space that has had a new coalesced
10838be545baSRichard Henderson      * MMIO range registration since the last transaction.
10848be545baSRichard Henderson      *
10858be545baSRichard Henderson      * @listener: The #MemoryListener.
10868be545baSRichard Henderson      * @section: The new #MemoryRegionSection.
10878be545baSRichard Henderson      * @addr: The starting address for the coalesced MMIO range.
10888be545baSRichard Henderson      * @len: The length of the coalesced MMIO range.
10898be545baSRichard Henderson      */
10908be545baSRichard Henderson     void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
10918be545baSRichard Henderson                                hwaddr addr, hwaddr len);
10928be545baSRichard Henderson 
10938be545baSRichard Henderson     /**
10948be545baSRichard Henderson      * @coalesced_io_del:
10958be545baSRichard Henderson      *
10968be545baSRichard Henderson      * Called during an address space update transaction,
10978be545baSRichard Henderson      * for a section of the address space that has dropped a coalesced
10988be545baSRichard Henderson      * MMIO range since the last transaction.
10998be545baSRichard Henderson      *
11008be545baSRichard Henderson      * @listener: The #MemoryListener.
11018be545baSRichard Henderson      * @section: The new #MemoryRegionSection.
11028be545baSRichard Henderson      * @addr: The starting address for the coalesced MMIO range.
11038be545baSRichard Henderson      * @len: The length of the coalesced MMIO range.
11048be545baSRichard Henderson      */
11058be545baSRichard Henderson     void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
11068be545baSRichard Henderson                                hwaddr addr, hwaddr len);
11078be545baSRichard Henderson     /**
11088be545baSRichard Henderson      * @priority:
11098be545baSRichard Henderson      *
11108be545baSRichard Henderson      * Govern the order in which memory listeners are invoked. Lower priorities
11118be545baSRichard Henderson      * are invoked earlier for "add" or "start" callbacks, and later for "delete"
11128be545baSRichard Henderson      * or "stop" callbacks.
11138be545baSRichard Henderson      */
11148be545baSRichard Henderson     unsigned priority;
11158be545baSRichard Henderson 
11168be545baSRichard Henderson     /**
11178be545baSRichard Henderson      * @name:
11188be545baSRichard Henderson      *
11198be545baSRichard Henderson      * Name of the listener.  It can be used in contexts where we'd like to
11208be545baSRichard Henderson      * identify one memory listener with the rest.
11218be545baSRichard Henderson      */
11228be545baSRichard Henderson     const char *name;
11238be545baSRichard Henderson 
11248be545baSRichard Henderson     /* private: */
11258be545baSRichard Henderson     AddressSpace *address_space;
11268be545baSRichard Henderson     QTAILQ_ENTRY(MemoryListener) link;
11278be545baSRichard Henderson     QTAILQ_ENTRY(MemoryListener) link_as;
11288be545baSRichard Henderson };
11298be545baSRichard Henderson 
11308be545baSRichard Henderson typedef struct AddressSpaceMapClient {
11318be545baSRichard Henderson     QEMUBH *bh;
11328be545baSRichard Henderson     QLIST_ENTRY(AddressSpaceMapClient) link;
11338be545baSRichard Henderson } AddressSpaceMapClient;
11348be545baSRichard Henderson 
11358be545baSRichard Henderson #define DEFAULT_MAX_BOUNCE_BUFFER_SIZE (4096)
11368be545baSRichard Henderson 
11378be545baSRichard Henderson /**
11388be545baSRichard Henderson  * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
11398be545baSRichard Henderson  */
11408be545baSRichard Henderson struct AddressSpace {
11418be545baSRichard Henderson     /* private: */
11428be545baSRichard Henderson     struct rcu_head rcu;
11438be545baSRichard Henderson     char *name;
11448be545baSRichard Henderson     MemoryRegion *root;
11458be545baSRichard Henderson 
11468be545baSRichard Henderson     /* Accessed via RCU.  */
11478be545baSRichard Henderson     struct FlatView *current_map;
11488be545baSRichard Henderson 
11498be545baSRichard Henderson     int ioeventfd_nb;
11508be545baSRichard Henderson     int ioeventfd_notifiers;
11518be545baSRichard Henderson     struct MemoryRegionIoeventfd *ioeventfds;
11528be545baSRichard Henderson     QTAILQ_HEAD(, MemoryListener) listeners;
11538be545baSRichard Henderson     QTAILQ_ENTRY(AddressSpace) address_spaces_link;
11548be545baSRichard Henderson 
11558be545baSRichard Henderson     /*
11568be545baSRichard Henderson      * Maximum DMA bounce buffer size used for indirect memory map requests.
11578be545baSRichard Henderson      * This limits the total size of bounce buffer allocations made for
11588be545baSRichard Henderson      * DMA requests to indirect memory regions within this AddressSpace. DMA
11598be545baSRichard Henderson      * requests that exceed the limit (e.g. due to overly large requested size
11608be545baSRichard Henderson      * or concurrent DMA requests having claimed too much buffer space) will be
11618be545baSRichard Henderson      * rejected and left to the caller to handle.
11628be545baSRichard Henderson      */
11638be545baSRichard Henderson     size_t max_bounce_buffer_size;
11648be545baSRichard Henderson     /* Total size of bounce buffers currently allocated, atomically accessed */
11658be545baSRichard Henderson     size_t bounce_buffer_size;
11668be545baSRichard Henderson     /* List of callbacks to invoke when buffers free up */
11678be545baSRichard Henderson     QemuMutex map_client_list_lock;
11688be545baSRichard Henderson     QLIST_HEAD(, AddressSpaceMapClient) map_client_list;
11698be545baSRichard Henderson };
11708be545baSRichard Henderson 
11718be545baSRichard Henderson typedef struct AddressSpaceDispatch AddressSpaceDispatch;
11728be545baSRichard Henderson typedef struct FlatRange FlatRange;
11738be545baSRichard Henderson 
11748be545baSRichard Henderson /* Flattened global view of current active memory hierarchy.  Kept in sorted
11758be545baSRichard Henderson  * order.
11768be545baSRichard Henderson  */
11778be545baSRichard Henderson struct FlatView {
11788be545baSRichard Henderson     struct rcu_head rcu;
11798be545baSRichard Henderson     unsigned ref;
11808be545baSRichard Henderson     FlatRange *ranges;
11818be545baSRichard Henderson     unsigned nr;
11828be545baSRichard Henderson     unsigned nr_allocated;
11838be545baSRichard Henderson     struct AddressSpaceDispatch *dispatch;
11848be545baSRichard Henderson     MemoryRegion *root;
11858be545baSRichard Henderson };
11868be545baSRichard Henderson 
11878be545baSRichard Henderson static inline FlatView *address_space_to_flatview(AddressSpace *as)
11888be545baSRichard Henderson {
11898be545baSRichard Henderson     return qatomic_rcu_read(&as->current_map);
11908be545baSRichard Henderson }
11918be545baSRichard Henderson 
11928be545baSRichard Henderson /**
11938be545baSRichard Henderson  * typedef flatview_cb: callback for flatview_for_each_range()
11948be545baSRichard Henderson  *
11958be545baSRichard Henderson  * @start: start address of the range within the FlatView
11968be545baSRichard Henderson  * @len: length of the range in bytes
11978be545baSRichard Henderson  * @mr: MemoryRegion covering this range
11988be545baSRichard Henderson  * @offset_in_region: offset of the first byte of the range within @mr
11998be545baSRichard Henderson  * @opaque: data pointer passed to flatview_for_each_range()
12008be545baSRichard Henderson  *
12018be545baSRichard Henderson  * Returns: true to stop the iteration, false to keep going.
12028be545baSRichard Henderson  */
12038be545baSRichard Henderson typedef bool (*flatview_cb)(Int128 start,
12048be545baSRichard Henderson                             Int128 len,
12058be545baSRichard Henderson                             const MemoryRegion *mr,
12068be545baSRichard Henderson                             hwaddr offset_in_region,
12078be545baSRichard Henderson                             void *opaque);
12088be545baSRichard Henderson 
12098be545baSRichard Henderson /**
12108be545baSRichard Henderson  * flatview_for_each_range: Iterate through a FlatView
12118be545baSRichard Henderson  * @fv: the FlatView to iterate through
12128be545baSRichard Henderson  * @cb: function to call for each range
12138be545baSRichard Henderson  * @opaque: opaque data pointer to pass to @cb
12148be545baSRichard Henderson  *
12158be545baSRichard Henderson  * A FlatView is made up of a list of non-overlapping ranges, each of
12168be545baSRichard Henderson  * which is a slice of a MemoryRegion. This function iterates through
12178be545baSRichard Henderson  * each range in @fv, calling @cb. The callback function can terminate
12188be545baSRichard Henderson  * iteration early by returning 'true'.
12198be545baSRichard Henderson  */
12208be545baSRichard Henderson void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);
12218be545baSRichard Henderson 
12228be545baSRichard Henderson static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
12238be545baSRichard Henderson                                           MemoryRegionSection *b)
12248be545baSRichard Henderson {
12258be545baSRichard Henderson     return a->mr == b->mr &&
12268be545baSRichard Henderson            a->fv == b->fv &&
12278be545baSRichard Henderson            a->offset_within_region == b->offset_within_region &&
12288be545baSRichard Henderson            a->offset_within_address_space == b->offset_within_address_space &&
12298be545baSRichard Henderson            int128_eq(a->size, b->size) &&
12308be545baSRichard Henderson            a->readonly == b->readonly &&
12318be545baSRichard Henderson            a->nonvolatile == b->nonvolatile;
12328be545baSRichard Henderson }
12338be545baSRichard Henderson 
12348be545baSRichard Henderson /**
12358be545baSRichard Henderson  * memory_region_section_new_copy: Copy a memory region section
12368be545baSRichard Henderson  *
12378be545baSRichard Henderson  * Allocate memory for a new copy, copy the memory region section, and
12388be545baSRichard Henderson  * properly take a reference on all relevant members.
12398be545baSRichard Henderson  *
12408be545baSRichard Henderson  * @s: the #MemoryRegionSection to copy
12418be545baSRichard Henderson  */
12428be545baSRichard Henderson MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s);
12438be545baSRichard Henderson 
12448be545baSRichard Henderson /**
12458be545baSRichard Henderson  * memory_region_section_free_copy: Free a copied memory region section
12468be545baSRichard Henderson  *
 * Free a copy of a memory section created via memory_region_section_new_copy(),
 * properly dropping references on all relevant members.
 *
 * @s: the #MemoryRegionSection to free
12518be545baSRichard Henderson  */
12528be545baSRichard Henderson void memory_region_section_free_copy(MemoryRegionSection *s);
12538be545baSRichard Henderson 
12548be545baSRichard Henderson /**
1255f47a672aSChenyi Qiang  * memory_region_section_intersect_range: Adjust the memory section to cover
1256f47a672aSChenyi Qiang  * the intersection with the given range.
1257f47a672aSChenyi Qiang  *
1258f47a672aSChenyi Qiang  * @s: the #MemoryRegionSection to be adjusted
1259f47a672aSChenyi Qiang  * @offset: the offset of the given range in the memory region
1260f47a672aSChenyi Qiang  * @size: the size of the given range
1261f47a672aSChenyi Qiang  *
1262f47a672aSChenyi Qiang  * Returns false if the intersection is empty, otherwise returns true.
1263f47a672aSChenyi Qiang  */
1264f47a672aSChenyi Qiang static inline bool memory_region_section_intersect_range(MemoryRegionSection *s,
1265f47a672aSChenyi Qiang                                                          uint64_t offset,
1266f47a672aSChenyi Qiang                                                          uint64_t size)
1267f47a672aSChenyi Qiang {
1268f47a672aSChenyi Qiang     uint64_t start = MAX(s->offset_within_region, offset);
1269f47a672aSChenyi Qiang     Int128 end = int128_min(int128_add(int128_make64(s->offset_within_region),
1270f47a672aSChenyi Qiang                                        s->size),
1271f47a672aSChenyi Qiang                             int128_add(int128_make64(offset),
1272f47a672aSChenyi Qiang                                        int128_make64(size)));
1273f47a672aSChenyi Qiang 
1274f47a672aSChenyi Qiang     if (int128_le(end, int128_make64(start))) {
1275f47a672aSChenyi Qiang         return false;
1276f47a672aSChenyi Qiang     }
1277f47a672aSChenyi Qiang 
1278f47a672aSChenyi Qiang     s->offset_within_address_space += start - s->offset_within_region;
1279f47a672aSChenyi Qiang     s->offset_within_region = start;
1280f47a672aSChenyi Qiang     s->size = int128_sub(end, int128_make64(start));
1281f47a672aSChenyi Qiang     return true;
1282f47a672aSChenyi Qiang }
1283f47a672aSChenyi Qiang 
1284f47a672aSChenyi Qiang /**
12858be545baSRichard Henderson  * memory_region_init: Initialize a memory region
12868be545baSRichard Henderson  *
12878be545baSRichard Henderson  * The region typically acts as a container for other memory regions.  Use
12888be545baSRichard Henderson  * memory_region_add_subregion() to add subregions.
12898be545baSRichard Henderson  *
12908be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized
12918be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
12928be545baSRichard Henderson  * @name: used for debugging; not visible to the user or ABI
12938be545baSRichard Henderson  * @size: size of the region; any subregions beyond this size will be clipped
12948be545baSRichard Henderson  */
12958be545baSRichard Henderson void memory_region_init(MemoryRegion *mr,
12968be545baSRichard Henderson                         Object *owner,
12978be545baSRichard Henderson                         const char *name,
12988be545baSRichard Henderson                         uint64_t size);
12998be545baSRichard Henderson 
13008be545baSRichard Henderson /**
13018be545baSRichard Henderson  * memory_region_ref: Add 1 to a memory region's reference count
13028be545baSRichard Henderson  *
13038be545baSRichard Henderson  * Whenever memory regions are accessed outside the BQL, they need to be
13048be545baSRichard Henderson  * preserved against hot-unplug.  MemoryRegions actually do not have their
13058be545baSRichard Henderson  * own reference count; they piggyback on a QOM object, their "owner".
13068be545baSRichard Henderson  * This function adds a reference to the owner.
13078be545baSRichard Henderson  *
13088be545baSRichard Henderson  * All MemoryRegions must have an owner if they can disappear, even if the
13098be545baSRichard Henderson  * device they belong to operates exclusively under the BQL.  This is because
13108be545baSRichard Henderson  * the region could be returned at any time by memory_region_find, and this
13118be545baSRichard Henderson  * is usually under guest control.
13128be545baSRichard Henderson  *
13138be545baSRichard Henderson  * @mr: the #MemoryRegion
13148be545baSRichard Henderson  */
13158be545baSRichard Henderson void memory_region_ref(MemoryRegion *mr);
13168be545baSRichard Henderson 
13178be545baSRichard Henderson /**
 * memory_region_unref: Remove 1 from a memory region's reference count
13198be545baSRichard Henderson  *
13208be545baSRichard Henderson  * Whenever memory regions are accessed outside the BQL, they need to be
13218be545baSRichard Henderson  * preserved against hot-unplug.  MemoryRegions actually do not have their
13228be545baSRichard Henderson  * own reference count; they piggyback on a QOM object, their "owner".
13238be545baSRichard Henderson  * This function removes a reference to the owner and possibly destroys it.
13248be545baSRichard Henderson  *
13258be545baSRichard Henderson  * @mr: the #MemoryRegion
13268be545baSRichard Henderson  */
13278be545baSRichard Henderson void memory_region_unref(MemoryRegion *mr);
13288be545baSRichard Henderson 
13298be545baSRichard Henderson /**
13308be545baSRichard Henderson  * memory_region_init_io: Initialize an I/O memory region.
13318be545baSRichard Henderson  *
13328be545baSRichard Henderson  * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
13348be545baSRichard Henderson  *
13358be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
13368be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
13378be545baSRichard Henderson  * @ops: a structure containing read and write callbacks to be used when
13388be545baSRichard Henderson  *       I/O is performed on the region.
13398be545baSRichard Henderson  * @opaque: passed to the read and write callbacks of the @ops structure.
13408be545baSRichard Henderson  * @name: used for debugging; not visible to the user or ABI
13418be545baSRichard Henderson  * @size: size of the region.
13428be545baSRichard Henderson  */
13438be545baSRichard Henderson void memory_region_init_io(MemoryRegion *mr,
13448be545baSRichard Henderson                            Object *owner,
13458be545baSRichard Henderson                            const MemoryRegionOps *ops,
13468be545baSRichard Henderson                            void *opaque,
13478be545baSRichard Henderson                            const char *name,
13488be545baSRichard Henderson                            uint64_t size);
13498be545baSRichard Henderson 
13508be545baSRichard Henderson /**
13518be545baSRichard Henderson  * memory_region_init_ram_nomigrate:  Initialize RAM memory region.  Accesses
13528be545baSRichard Henderson  *                                    into the region will modify memory
13538be545baSRichard Henderson  *                                    directly.
13548be545baSRichard Henderson  *
13558be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
13568be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
13598be545baSRichard Henderson  * @size: size of the region.
13608be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
13618be545baSRichard Henderson  *
13628be545baSRichard Henderson  * Note that this function does not do anything to cause the data in the
13638be545baSRichard Henderson  * RAM memory region to be migrated; that is the responsibility of the caller.
13648be545baSRichard Henderson  *
13658be545baSRichard Henderson  * Return: true on success, else false setting @errp with error.
13668be545baSRichard Henderson  */
13678be545baSRichard Henderson bool memory_region_init_ram_nomigrate(MemoryRegion *mr,
13688be545baSRichard Henderson                                       Object *owner,
13698be545baSRichard Henderson                                       const char *name,
13708be545baSRichard Henderson                                       uint64_t size,
13718be545baSRichard Henderson                                       Error **errp);
13728be545baSRichard Henderson 
13738be545baSRichard Henderson /**
13748be545baSRichard Henderson  * memory_region_init_ram_flags_nomigrate:  Initialize RAM memory region.
13758be545baSRichard Henderson  *                                          Accesses into the region will
13768be545baSRichard Henderson  *                                          modify memory directly.
13778be545baSRichard Henderson  *
13788be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
13798be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
13828be545baSRichard Henderson  * @size: size of the region.
13838be545baSRichard Henderson  * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE,
13848be545baSRichard Henderson  *             RAM_GUEST_MEMFD.
13858be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
13868be545baSRichard Henderson  *
13878be545baSRichard Henderson  * Note that this function does not do anything to cause the data in the
13888be545baSRichard Henderson  * RAM memory region to be migrated; that is the responsibility of the caller.
13898be545baSRichard Henderson  *
13908be545baSRichard Henderson  * Return: true on success, else false setting @errp with error.
13918be545baSRichard Henderson  */
13928be545baSRichard Henderson bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
13938be545baSRichard Henderson                                             Object *owner,
13948be545baSRichard Henderson                                             const char *name,
13958be545baSRichard Henderson                                             uint64_t size,
13968be545baSRichard Henderson                                             uint32_t ram_flags,
13978be545baSRichard Henderson                                             Error **errp);
13988be545baSRichard Henderson 
13998be545baSRichard Henderson /**
14008be545baSRichard Henderson  * memory_region_init_resizeable_ram:  Initialize memory region with resizable
14018be545baSRichard Henderson  *                                     RAM.  Accesses into the region will
14028be545baSRichard Henderson  *                                     modify memory directly.  Only an initial
14038be545baSRichard Henderson  *                                     portion of this RAM is actually used.
14048be545baSRichard Henderson  *                                     Changing the size while migrating
14058be545baSRichard Henderson  *                                     can result in the migration being
14068be545baSRichard Henderson  *                                     canceled.
14078be545baSRichard Henderson  *
14088be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
14098be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
14128be545baSRichard Henderson  * @size: used size of the region.
14138be545baSRichard Henderson  * @max_size: max size of the region.
14148be545baSRichard Henderson  * @resized: callback to notify owner about used size change.
14158be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
14168be545baSRichard Henderson  *
14178be545baSRichard Henderson  * Note that this function does not do anything to cause the data in the
14188be545baSRichard Henderson  * RAM memory region to be migrated; that is the responsibility of the caller.
14198be545baSRichard Henderson  *
14208be545baSRichard Henderson  * Return: true on success, else false setting @errp with error.
14218be545baSRichard Henderson  */
14228be545baSRichard Henderson bool memory_region_init_resizeable_ram(MemoryRegion *mr,
14238be545baSRichard Henderson                                        Object *owner,
14248be545baSRichard Henderson                                        const char *name,
14258be545baSRichard Henderson                                        uint64_t size,
14268be545baSRichard Henderson                                        uint64_t max_size,
14278be545baSRichard Henderson                                        void (*resized)(const char*,
14288be545baSRichard Henderson                                                        uint64_t length,
14298be545baSRichard Henderson                                                        void *host),
14308be545baSRichard Henderson                                        Error **errp);
14318be545baSRichard Henderson #ifdef CONFIG_POSIX
14328be545baSRichard Henderson 
14338be545baSRichard Henderson /**
14348be545baSRichard Henderson  * memory_region_init_ram_from_file:  Initialize RAM memory region with a
14358be545baSRichard Henderson  *                                    mmap-ed backend.
14368be545baSRichard Henderson  *
14378be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
14388be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
14418be545baSRichard Henderson  * @size: size of the region.
14428be545baSRichard Henderson  * @align: alignment of the region base address; if 0, the default alignment
14438be545baSRichard Henderson  *         (getpagesize()) will be used.
14448be545baSRichard Henderson  * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
14458be545baSRichard Henderson  *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
14468be545baSRichard Henderson  *             RAM_READONLY_FD, RAM_GUEST_MEMFD
14478be545baSRichard Henderson  * @path: the path in which to allocate the RAM.
14488be545baSRichard Henderson  * @offset: offset within the file referenced by path
14498be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
14508be545baSRichard Henderson  *
14518be545baSRichard Henderson  * Note that this function does not do anything to cause the data in the
14528be545baSRichard Henderson  * RAM memory region to be migrated; that is the responsibility of the caller.
14538be545baSRichard Henderson  *
14548be545baSRichard Henderson  * Return: true on success, else false setting @errp with error.
14558be545baSRichard Henderson  */
14568be545baSRichard Henderson bool memory_region_init_ram_from_file(MemoryRegion *mr,
14578be545baSRichard Henderson                                       Object *owner,
14588be545baSRichard Henderson                                       const char *name,
14598be545baSRichard Henderson                                       uint64_t size,
14608be545baSRichard Henderson                                       uint64_t align,
14618be545baSRichard Henderson                                       uint32_t ram_flags,
14628be545baSRichard Henderson                                       const char *path,
14638be545baSRichard Henderson                                       ram_addr_t offset,
14648be545baSRichard Henderson                                       Error **errp);
14658be545baSRichard Henderson 
14668be545baSRichard Henderson /**
14678be545baSRichard Henderson  * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
14688be545baSRichard Henderson  *                                  mmap-ed backend.
14698be545baSRichard Henderson  *
14708be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
14718be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
14728be545baSRichard Henderson  * @name: the name of the region.
14738be545baSRichard Henderson  * @size: size of the region.
14748be545baSRichard Henderson  * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
14758be545baSRichard Henderson  *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
14768be545baSRichard Henderson  *             RAM_READONLY_FD, RAM_GUEST_MEMFD
14778be545baSRichard Henderson  * @fd: the fd to mmap.
14788be545baSRichard Henderson  * @offset: offset within the file referenced by fd
14798be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
14808be545baSRichard Henderson  *
14818be545baSRichard Henderson  * Note that this function does not do anything to cause the data in the
14828be545baSRichard Henderson  * RAM memory region to be migrated; that is the responsibility of the caller.
14838be545baSRichard Henderson  *
14848be545baSRichard Henderson  * Return: true on success, else false setting @errp with error.
14858be545baSRichard Henderson  */
14868be545baSRichard Henderson bool memory_region_init_ram_from_fd(MemoryRegion *mr,
14878be545baSRichard Henderson                                     Object *owner,
14888be545baSRichard Henderson                                     const char *name,
14898be545baSRichard Henderson                                     uint64_t size,
14908be545baSRichard Henderson                                     uint32_t ram_flags,
14918be545baSRichard Henderson                                     int fd,
14928be545baSRichard Henderson                                     ram_addr_t offset,
14938be545baSRichard Henderson                                     Error **errp);
14948be545baSRichard Henderson #endif
14958be545baSRichard Henderson 
14968be545baSRichard Henderson /**
14978be545baSRichard Henderson  * memory_region_init_ram_ptr:  Initialize RAM memory region from a
14988be545baSRichard Henderson  *                              user-provided pointer.  Accesses into the
14998be545baSRichard Henderson  *                              region will modify memory directly.
15008be545baSRichard Henderson  *
15018be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
15028be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
15038be545baSRichard Henderson  * @name: Region name, becomes part of RAMBlock name used in migration stream
15048be545baSRichard Henderson  *        must be unique within any device
15058be545baSRichard Henderson  * @size: size of the region.
15068be545baSRichard Henderson  * @ptr: memory to be mapped; must contain at least @size bytes.
15078be545baSRichard Henderson  *
15088be545baSRichard Henderson  * Note that this function does not do anything to cause the data in the
15098be545baSRichard Henderson  * RAM memory region to be migrated; that is the responsibility of the caller.
15108be545baSRichard Henderson  */
15118be545baSRichard Henderson void memory_region_init_ram_ptr(MemoryRegion *mr,
15128be545baSRichard Henderson                                 Object *owner,
15138be545baSRichard Henderson                                 const char *name,
15148be545baSRichard Henderson                                 uint64_t size,
15158be545baSRichard Henderson                                 void *ptr);
15168be545baSRichard Henderson 
15178be545baSRichard Henderson /**
15188be545baSRichard Henderson  * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
15198be545baSRichard Henderson  *                                     a user-provided pointer.
15208be545baSRichard Henderson  *
15218be545baSRichard Henderson  * A RAM device represents a mapping to a physical device, such as to a PCI
15228be545baSRichard Henderson  * MMIO BAR of an vfio-pci assigned device.  The memory region may be mapped
15238be545baSRichard Henderson  * into the VM address space and access to the region will modify memory
15248be545baSRichard Henderson  * directly.  However, the memory region should not be included in a memory
15258be545baSRichard Henderson  * dump (device may not be enabled/mapped at the time of the dump), and
15268be545baSRichard Henderson  * operations incompatible with manipulating MMIO should be avoided.  Replaces
15278be545baSRichard Henderson  * skip_dump flag.
15288be545baSRichard Henderson  *
15298be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
15308be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
15318be545baSRichard Henderson  * @name: the name of the region.
15328be545baSRichard Henderson  * @size: size of the region.
15338be545baSRichard Henderson  * @ptr: memory to be mapped; must contain at least @size bytes.
15348be545baSRichard Henderson  *
15358be545baSRichard Henderson  * Note that this function does not do anything to cause the data in the
15368be545baSRichard Henderson  * RAM memory region to be migrated; that is the responsibility of the caller.
15378be545baSRichard Henderson  * (For RAM device memory regions, migrating the contents rarely makes sense.)
15388be545baSRichard Henderson  */
15398be545baSRichard Henderson void memory_region_init_ram_device_ptr(MemoryRegion *mr,
15408be545baSRichard Henderson                                        Object *owner,
15418be545baSRichard Henderson                                        const char *name,
15428be545baSRichard Henderson                                        uint64_t size,
15438be545baSRichard Henderson                                        void *ptr);
15448be545baSRichard Henderson 
15458be545baSRichard Henderson /**
15468be545baSRichard Henderson  * memory_region_init_alias: Initialize a memory region that aliases all or a
15478be545baSRichard Henderson  *                           part of another memory region.
15488be545baSRichard Henderson  *
15498be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
15508be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
15518be545baSRichard Henderson  * @name: used for debugging; not visible to the user or ABI
15528be545baSRichard Henderson  * @orig: the region to be referenced; @mr will be equivalent to
15538be545baSRichard Henderson  *        @orig between @offset and @offset + @size - 1.
15548be545baSRichard Henderson  * @offset: start of the section in @orig to be referenced.
15558be545baSRichard Henderson  * @size: size of the region.
15568be545baSRichard Henderson  */
15578be545baSRichard Henderson void memory_region_init_alias(MemoryRegion *mr,
15588be545baSRichard Henderson                               Object *owner,
15598be545baSRichard Henderson                               const char *name,
15608be545baSRichard Henderson                               MemoryRegion *orig,
15618be545baSRichard Henderson                               hwaddr offset,
15628be545baSRichard Henderson                               uint64_t size);
15638be545baSRichard Henderson 
15648be545baSRichard Henderson /**
15658be545baSRichard Henderson  * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
15668be545baSRichard Henderson  *
15678be545baSRichard Henderson  * This has the same effect as calling memory_region_init_ram_nomigrate()
15688be545baSRichard Henderson  * and then marking the resulting region read-only with
15698be545baSRichard Henderson  * memory_region_set_readonly().
15708be545baSRichard Henderson  *
15718be545baSRichard Henderson  * Note that this function does not do anything to cause the data in the
15728be545baSRichard Henderson  * RAM side of the memory region to be migrated; that is the responsibility
15738be545baSRichard Henderson  * of the caller.
15748be545baSRichard Henderson  *
15758be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
15768be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
15778be545baSRichard Henderson  * @name: Region name, becomes part of RAMBlock name used in migration stream
15788be545baSRichard Henderson  *        must be unique within any device
15798be545baSRichard Henderson  * @size: size of the region.
15808be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
15818be545baSRichard Henderson  *
15828be545baSRichard Henderson  * Return: true on success, else false setting @errp with error.
15838be545baSRichard Henderson  */
15848be545baSRichard Henderson bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
15858be545baSRichard Henderson                                       Object *owner,
15868be545baSRichard Henderson                                       const char *name,
15878be545baSRichard Henderson                                       uint64_t size,
15888be545baSRichard Henderson                                       Error **errp);
15898be545baSRichard Henderson 
15908be545baSRichard Henderson /**
15918be545baSRichard Henderson  * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
15928be545baSRichard Henderson  *                                 Writes are handled via callbacks.
15938be545baSRichard Henderson  *
15948be545baSRichard Henderson  * Note that this function does not do anything to cause the data in the
15958be545baSRichard Henderson  * RAM side of the memory region to be migrated; that is the responsibility
15968be545baSRichard Henderson  * of the caller.
15978be545baSRichard Henderson  *
15988be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
15998be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
16008be545baSRichard Henderson  * @ops: callbacks for write access handling (must not be NULL).
16018be545baSRichard Henderson  * @opaque: passed to the read and write callbacks of the @ops structure.
16028be545baSRichard Henderson  * @name: Region name, becomes part of RAMBlock name used in migration stream
16038be545baSRichard Henderson  *        must be unique within any device
16048be545baSRichard Henderson  * @size: size of the region.
16058be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
16068be545baSRichard Henderson  *
16078be545baSRichard Henderson  * Return: true on success, else false setting @errp with error.
16088be545baSRichard Henderson  */
16098be545baSRichard Henderson bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
16108be545baSRichard Henderson                                              Object *owner,
16118be545baSRichard Henderson                                              const MemoryRegionOps *ops,
16128be545baSRichard Henderson                                              void *opaque,
16138be545baSRichard Henderson                                              const char *name,
16148be545baSRichard Henderson                                              uint64_t size,
16158be545baSRichard Henderson                                              Error **errp);
16168be545baSRichard Henderson 
16178be545baSRichard Henderson /**
16188be545baSRichard Henderson  * memory_region_init_iommu: Initialize a memory region of a custom type
16198be545baSRichard Henderson  * that translates addresses
16208be545baSRichard Henderson  *
16218be545baSRichard Henderson  * An IOMMU region translates addresses and forwards accesses to a target
16228be545baSRichard Henderson  * memory region.
16238be545baSRichard Henderson  *
16248be545baSRichard Henderson  * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
16258be545baSRichard Henderson  * @_iommu_mr should be a pointer to enough memory for an instance of
16268be545baSRichard Henderson  * that subclass, @instance_size is the size of that subclass, and
16278be545baSRichard Henderson  * @mrtypename is its name. This function will initialize @_iommu_mr as an
16288be545baSRichard Henderson  * instance of the subclass, and its methods will then be called to handle
16298be545baSRichard Henderson  * accesses to the memory region. See the documentation of
16308be545baSRichard Henderson  * #IOMMUMemoryRegionClass for further details.
16318be545baSRichard Henderson  *
16328be545baSRichard Henderson  * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
16338be545baSRichard Henderson  * @instance_size: the IOMMUMemoryRegion subclass instance size
16348be545baSRichard Henderson  * @mrtypename: the type name of the #IOMMUMemoryRegion
16358be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
16368be545baSRichard Henderson  * @name: used for debugging; not visible to the user or ABI
16378be545baSRichard Henderson  * @size: size of the region.
16388be545baSRichard Henderson  */
16398be545baSRichard Henderson void memory_region_init_iommu(void *_iommu_mr,
16408be545baSRichard Henderson                               size_t instance_size,
16418be545baSRichard Henderson                               const char *mrtypename,
16428be545baSRichard Henderson                               Object *owner,
16438be545baSRichard Henderson                               const char *name,
16448be545baSRichard Henderson                               uint64_t size);
16458be545baSRichard Henderson 
16468be545baSRichard Henderson /**
16478be545baSRichard Henderson  * memory_region_init_ram - Initialize RAM memory region.  Accesses into the
16488be545baSRichard Henderson  *                          region will modify memory directly.
16498be545baSRichard Henderson  *
16508be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized
16518be545baSRichard Henderson  * @owner: the object that tracks the region's reference count (must be
16528be545baSRichard Henderson  *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
16538be545baSRichard Henderson  * @name: name of the memory region
16548be545baSRichard Henderson  * @size: size of the region in bytes
16558be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
16568be545baSRichard Henderson  *
16578be545baSRichard Henderson  * This function allocates RAM for a board model or device, and
16588be545baSRichard Henderson  * arranges for it to be migrated (by calling vmstate_register_ram()
16598be545baSRichard Henderson  * if @owner is a DeviceState, or vmstate_register_ram_global() if
16608be545baSRichard Henderson  * @owner is NULL).
16618be545baSRichard Henderson  *
16628be545baSRichard Henderson  * TODO: Currently we restrict @owner to being either NULL (for
16638be545baSRichard Henderson  * global RAM regions with no owner) or devices, so that we can
16648be545baSRichard Henderson  * give the RAM block a unique name for migration purposes.
16658be545baSRichard Henderson  * We should lift this restriction and allow arbitrary Objects.
16668be545baSRichard Henderson  * If you pass a non-NULL non-device @owner then we will assert.
16678be545baSRichard Henderson  *
16688be545baSRichard Henderson  * Return: true on success, else false setting @errp with error.
16698be545baSRichard Henderson  */
16708be545baSRichard Henderson bool memory_region_init_ram(MemoryRegion *mr,
16718be545baSRichard Henderson                             Object *owner,
16728be545baSRichard Henderson                             const char *name,
16738be545baSRichard Henderson                             uint64_t size,
16748be545baSRichard Henderson                             Error **errp);
16758be545baSRichard Henderson 
16768be545baSRichard Henderson bool memory_region_init_ram_guest_memfd(MemoryRegion *mr,
16778be545baSRichard Henderson                                         Object *owner,
16788be545baSRichard Henderson                                         const char *name,
16798be545baSRichard Henderson                                         uint64_t size,
16808be545baSRichard Henderson                                         Error **errp);
16818be545baSRichard Henderson 
16828be545baSRichard Henderson /**
16838be545baSRichard Henderson  * memory_region_init_rom: Initialize a ROM memory region.
16848be545baSRichard Henderson  *
16858be545baSRichard Henderson  * This has the same effect as calling memory_region_init_ram()
16868be545baSRichard Henderson  * and then marking the resulting region read-only with
16878be545baSRichard Henderson  * memory_region_set_readonly(). This includes arranging for the
16888be545baSRichard Henderson  * contents to be migrated.
16898be545baSRichard Henderson  *
16908be545baSRichard Henderson  * TODO: Currently we restrict @owner to being either NULL (for
16918be545baSRichard Henderson  * global RAM regions with no owner) or devices, so that we can
16928be545baSRichard Henderson  * give the RAM block a unique name for migration purposes.
16938be545baSRichard Henderson  * We should lift this restriction and allow arbitrary Objects.
16948be545baSRichard Henderson  * If you pass a non-NULL non-device @owner then we will assert.
16958be545baSRichard Henderson  *
16968be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
16978be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
16988be545baSRichard Henderson  * @name: Region name, becomes part of RAMBlock name used in migration stream
16998be545baSRichard Henderson  *        must be unique within any device
17008be545baSRichard Henderson  * @size: size of the region.
17018be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
17028be545baSRichard Henderson  *
17038be545baSRichard Henderson  * Return: true on success, else false setting @errp with error.
17048be545baSRichard Henderson  */
17058be545baSRichard Henderson bool memory_region_init_rom(MemoryRegion *mr,
17068be545baSRichard Henderson                             Object *owner,
17078be545baSRichard Henderson                             const char *name,
17088be545baSRichard Henderson                             uint64_t size,
17098be545baSRichard Henderson                             Error **errp);
17108be545baSRichard Henderson 
17118be545baSRichard Henderson /**
17128be545baSRichard Henderson  * memory_region_init_rom_device:  Initialize a ROM memory region.
17138be545baSRichard Henderson  *                                 Writes are handled via callbacks.
17148be545baSRichard Henderson  *
17158be545baSRichard Henderson  * This function initializes a memory region backed by RAM for reads
17168be545baSRichard Henderson  * and callbacks for writes, and arranges for the RAM backing to
17178be545baSRichard Henderson  * be migrated (by calling vmstate_register_ram()
17188be545baSRichard Henderson  * if @owner is a DeviceState, or vmstate_register_ram_global() if
17198be545baSRichard Henderson  * @owner is NULL).
17208be545baSRichard Henderson  *
17218be545baSRichard Henderson  * TODO: Currently we restrict @owner to being either NULL (for
17228be545baSRichard Henderson  * global RAM regions with no owner) or devices, so that we can
17238be545baSRichard Henderson  * give the RAM block a unique name for migration purposes.
17248be545baSRichard Henderson  * We should lift this restriction and allow arbitrary Objects.
17258be545baSRichard Henderson  * If you pass a non-NULL non-device @owner then we will assert.
17268be545baSRichard Henderson  *
17278be545baSRichard Henderson  * @mr: the #MemoryRegion to be initialized.
17288be545baSRichard Henderson  * @owner: the object that tracks the region's reference count
17298be545baSRichard Henderson  * @ops: callbacks for write access handling (must not be NULL).
17308be545baSRichard Henderson  * @opaque: passed to the read and write callbacks of the @ops structure.
17318be545baSRichard Henderson  * @name: Region name, becomes part of RAMBlock name used in migration stream
17328be545baSRichard Henderson  *        must be unique within any device
17338be545baSRichard Henderson  * @size: size of the region.
17348be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
17358be545baSRichard Henderson  *
17368be545baSRichard Henderson  * Return: true on success, else false setting @errp with error.
17378be545baSRichard Henderson  */
17388be545baSRichard Henderson bool memory_region_init_rom_device(MemoryRegion *mr,
17398be545baSRichard Henderson                                    Object *owner,
17408be545baSRichard Henderson                                    const MemoryRegionOps *ops,
17418be545baSRichard Henderson                                    void *opaque,
17428be545baSRichard Henderson                                    const char *name,
17438be545baSRichard Henderson                                    uint64_t size,
17448be545baSRichard Henderson                                    Error **errp);
17458be545baSRichard Henderson 
17468be545baSRichard Henderson 
17478be545baSRichard Henderson /**
17488be545baSRichard Henderson  * memory_region_owner: get a memory region's owner.
17498be545baSRichard Henderson  *
17508be545baSRichard Henderson  * @mr: the memory region being queried.
17518be545baSRichard Henderson  */
17528be545baSRichard Henderson Object *memory_region_owner(MemoryRegion *mr);
17538be545baSRichard Henderson 
17548be545baSRichard Henderson /**
17558be545baSRichard Henderson  * memory_region_size: get a memory region's size.
17568be545baSRichard Henderson  *
17578be545baSRichard Henderson  * @mr: the memory region being queried.
17588be545baSRichard Henderson  */
17598be545baSRichard Henderson uint64_t memory_region_size(MemoryRegion *mr);
17608be545baSRichard Henderson 
17618be545baSRichard Henderson /**
17628be545baSRichard Henderson  * memory_region_is_ram: check whether a memory region is random access
17638be545baSRichard Henderson  *
17648be545baSRichard Henderson  * Returns %true if a memory region is random access.
17658be545baSRichard Henderson  *
17668be545baSRichard Henderson  * @mr: the memory region being queried
17678be545baSRichard Henderson  */
17688be545baSRichard Henderson static inline bool memory_region_is_ram(MemoryRegion *mr)
17698be545baSRichard Henderson {
17708be545baSRichard Henderson     return mr->ram;
17718be545baSRichard Henderson }
17728be545baSRichard Henderson 
17738be545baSRichard Henderson /**
17748be545baSRichard Henderson  * memory_region_is_ram_device: check whether a memory region is a ram device
17758be545baSRichard Henderson  *
17768be545baSRichard Henderson  * Returns %true if a memory region is a device backed ram region
17778be545baSRichard Henderson  *
17788be545baSRichard Henderson  * @mr: the memory region being queried
17798be545baSRichard Henderson  */
17808be545baSRichard Henderson bool memory_region_is_ram_device(MemoryRegion *mr);
17818be545baSRichard Henderson 
17828be545baSRichard Henderson /**
17838be545baSRichard Henderson  * memory_region_is_romd: check whether a memory region is in ROMD mode
17848be545baSRichard Henderson  *
17858be545baSRichard Henderson  * Returns %true if a memory region is a ROM device and currently set to allow
17868be545baSRichard Henderson  * direct reads.
17878be545baSRichard Henderson  *
17888be545baSRichard Henderson  * @mr: the memory region being queried
17898be545baSRichard Henderson  */
17908be545baSRichard Henderson static inline bool memory_region_is_romd(MemoryRegion *mr)
17918be545baSRichard Henderson {
17928be545baSRichard Henderson     return mr->rom_device && mr->romd_mode;
17938be545baSRichard Henderson }
17948be545baSRichard Henderson 
17958be545baSRichard Henderson /**
17968be545baSRichard Henderson  * memory_region_is_protected: check whether a memory region is protected
17978be545baSRichard Henderson  *
17988be545baSRichard Henderson  * Returns %true if a memory region is protected RAM and cannot be accessed
17998be545baSRichard Henderson  * via standard mechanisms, e.g. DMA.
18008be545baSRichard Henderson  *
18018be545baSRichard Henderson  * @mr: the memory region being queried
18028be545baSRichard Henderson  */
18038be545baSRichard Henderson bool memory_region_is_protected(MemoryRegion *mr);
18048be545baSRichard Henderson 
18058be545baSRichard Henderson /**
18068be545baSRichard Henderson  * memory_region_has_guest_memfd: check whether a memory region has guest_memfd
18078be545baSRichard Henderson  *     associated
18088be545baSRichard Henderson  *
18098be545baSRichard Henderson  * Returns %true if a memory region's ram_block has valid guest_memfd assigned.
18108be545baSRichard Henderson  *
18118be545baSRichard Henderson  * @mr: the memory region being queried
18128be545baSRichard Henderson  */
18138be545baSRichard Henderson bool memory_region_has_guest_memfd(MemoryRegion *mr);
18148be545baSRichard Henderson 
18158be545baSRichard Henderson /**
18168be545baSRichard Henderson  * memory_region_get_iommu: check whether a memory region is an iommu
18178be545baSRichard Henderson  *
18188be545baSRichard Henderson  * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
18198be545baSRichard Henderson  * otherwise NULL.
18208be545baSRichard Henderson  *
18218be545baSRichard Henderson  * @mr: the memory region being queried
18228be545baSRichard Henderson  */
18238be545baSRichard Henderson static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
18248be545baSRichard Henderson {
18258be545baSRichard Henderson     if (mr->alias) {
18268be545baSRichard Henderson         return memory_region_get_iommu(mr->alias);
18278be545baSRichard Henderson     }
18288be545baSRichard Henderson     if (mr->is_iommu) {
18298be545baSRichard Henderson         return (IOMMUMemoryRegion *) mr;
18308be545baSRichard Henderson     }
18318be545baSRichard Henderson     return NULL;
18328be545baSRichard Henderson }
18338be545baSRichard Henderson 
18348be545baSRichard Henderson /**
18358be545baSRichard Henderson  * memory_region_get_iommu_class_nocheck: returns iommu memory region class
18368be545baSRichard Henderson  *   if an iommu or NULL if not
18378be545baSRichard Henderson  *
18388be545baSRichard Henderson  * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
18398be545baSRichard Henderson  * otherwise NULL. This is fast path avoiding QOM checking, use with caution.
18408be545baSRichard Henderson  *
18418be545baSRichard Henderson  * @iommu_mr: the memory region being queried
18428be545baSRichard Henderson  */
18438be545baSRichard Henderson static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
18448be545baSRichard Henderson         IOMMUMemoryRegion *iommu_mr)
18458be545baSRichard Henderson {
18468be545baSRichard Henderson     return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
18478be545baSRichard Henderson }
18488be545baSRichard Henderson 
18498be545baSRichard Henderson #define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
18508be545baSRichard Henderson 
18518be545baSRichard Henderson /**
18528be545baSRichard Henderson  * memory_region_iommu_get_min_page_size: get minimum supported page size
18538be545baSRichard Henderson  * for an iommu
18548be545baSRichard Henderson  *
18558be545baSRichard Henderson  * Returns minimum supported page size for an iommu.
18568be545baSRichard Henderson  *
18578be545baSRichard Henderson  * @iommu_mr: the memory region being queried
18588be545baSRichard Henderson  */
18598be545baSRichard Henderson uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
18608be545baSRichard Henderson 
18618be545baSRichard Henderson /**
18628be545baSRichard Henderson  * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
18638be545baSRichard Henderson  *
18648be545baSRichard Henderson  * Note: for any IOMMU implementation, an in-place mapping change
18658be545baSRichard Henderson  * should be notified with an UNMAP followed by a MAP.
18668be545baSRichard Henderson  *
18678be545baSRichard Henderson  * @iommu_mr: the memory region that was changed
18688be545baSRichard Henderson  * @iommu_idx: the IOMMU index for the translation table which has changed
18698be545baSRichard Henderson  * @event: TLB event with the new entry in the IOMMU translation table.
18708be545baSRichard Henderson  *         The entry replaces all old entries for the same virtual I/O address
18718be545baSRichard Henderson  *         range.
18728be545baSRichard Henderson  */
18738be545baSRichard Henderson void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
18748be545baSRichard Henderson                                 int iommu_idx,
18758be545baSRichard Henderson                                 const IOMMUTLBEvent event);
18768be545baSRichard Henderson 
18778be545baSRichard Henderson /**
18788be545baSRichard Henderson  * memory_region_notify_iommu_one: notify a change in an IOMMU translation
18798be545baSRichard Henderson  *                           entry to a single notifier
18808be545baSRichard Henderson  *
18818be545baSRichard Henderson  * This works just like memory_region_notify_iommu(), but it only
18828be545baSRichard Henderson  * notifies a specific notifier, not all of them.
18838be545baSRichard Henderson  *
18848be545baSRichard Henderson  * @notifier: the notifier to be notified
18858be545baSRichard Henderson  * @event: TLB event with the new entry in the IOMMU translation table.
18868be545baSRichard Henderson  *         The entry replaces all old entries for the same virtual I/O address
18878be545baSRichard Henderson  *         range.
18888be545baSRichard Henderson  */
18898be545baSRichard Henderson void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
18908be545baSRichard Henderson                                     const IOMMUTLBEvent *event);
18918be545baSRichard Henderson 
18928be545baSRichard Henderson /**
18938be545baSRichard Henderson  * memory_region_unmap_iommu_notifier_range: notify a unmap for an IOMMU
18948be545baSRichard Henderson  *                                           translation that covers the
18958be545baSRichard Henderson  *                                           range of a notifier
18968be545baSRichard Henderson  *
18978be545baSRichard Henderson  * @notifier: the notifier to be notified
18988be545baSRichard Henderson  */
18998be545baSRichard Henderson void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier);
19008be545baSRichard Henderson 
19018be545baSRichard Henderson 
19028be545baSRichard Henderson /**
19038be545baSRichard Henderson  * memory_region_register_iommu_notifier: register a notifier for changes to
19048be545baSRichard Henderson  * IOMMU translation entries.
19058be545baSRichard Henderson  *
19068be545baSRichard Henderson  * Returns 0 on success, or a negative errno otherwise. In particular,
19078be545baSRichard Henderson  * -EINVAL indicates that at least one of the attributes of the notifier
19088be545baSRichard Henderson  * is not supported (flag/range) by the IOMMU memory region. In case of error
19098be545baSRichard Henderson  * the error object must be created.
19108be545baSRichard Henderson  *
19118be545baSRichard Henderson  * @mr: the memory region to observe
19128be545baSRichard Henderson  * @n: the IOMMUNotifier to be added; the notify callback receives a
19138be545baSRichard Henderson  *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
19148be545baSRichard Henderson  *     ceases to be valid on exit from the notifier.
19158be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
19168be545baSRichard Henderson  */
19178be545baSRichard Henderson int memory_region_register_iommu_notifier(MemoryRegion *mr,
19188be545baSRichard Henderson                                           IOMMUNotifier *n, Error **errp);
19198be545baSRichard Henderson 
19208be545baSRichard Henderson /**
19218be545baSRichard Henderson  * memory_region_iommu_replay: replay existing IOMMU translations to
19228be545baSRichard Henderson  * a notifier with the minimum page granularity returned by
19238be545baSRichard Henderson  * mr->iommu_ops->get_page_size().
19248be545baSRichard Henderson  *
19258be545baSRichard Henderson  * Note: this is not related to record-and-replay functionality.
19268be545baSRichard Henderson  *
19278be545baSRichard Henderson  * @iommu_mr: the memory region to observe
19288be545baSRichard Henderson  * @n: the notifier to which to replay iommu mappings
19298be545baSRichard Henderson  */
19308be545baSRichard Henderson void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
19318be545baSRichard Henderson 
19328be545baSRichard Henderson /**
19338be545baSRichard Henderson  * memory_region_unregister_iommu_notifier: unregister a notifier for
19348be545baSRichard Henderson  * changes to IOMMU translation entries.
19358be545baSRichard Henderson  *
19368be545baSRichard Henderson  * @mr: the memory region which was observed and for which notify_stopped()
19378be545baSRichard Henderson  *      needs to be called
19388be545baSRichard Henderson  * @n: the notifier to be removed.
19398be545baSRichard Henderson  */
19408be545baSRichard Henderson void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
19418be545baSRichard Henderson                                              IOMMUNotifier *n);
19428be545baSRichard Henderson 
19438be545baSRichard Henderson /**
19448be545baSRichard Henderson  * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
19458be545baSRichard Henderson  * defined on the IOMMU.
19468be545baSRichard Henderson  *
19478be545baSRichard Henderson  * Returns 0 on success, or a negative errno otherwise. In particular,
19488be545baSRichard Henderson  * -EINVAL indicates that the IOMMU does not support the requested
19498be545baSRichard Henderson  * attribute.
19508be545baSRichard Henderson  *
19518be545baSRichard Henderson  * @iommu_mr: the memory region
19528be545baSRichard Henderson  * @attr: the requested attribute
19538be545baSRichard Henderson  * @data: a pointer to the requested attribute data
19548be545baSRichard Henderson  */
19558be545baSRichard Henderson int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
19568be545baSRichard Henderson                                  enum IOMMUMemoryRegionAttr attr,
19578be545baSRichard Henderson                                  void *data);
19588be545baSRichard Henderson 
19598be545baSRichard Henderson /**
19608be545baSRichard Henderson  * memory_region_iommu_attrs_to_index: return the IOMMU index to
19618be545baSRichard Henderson  * use for translations with the given memory transaction attributes.
19628be545baSRichard Henderson  *
19638be545baSRichard Henderson  * @iommu_mr: the memory region
19648be545baSRichard Henderson  * @attrs: the memory transaction attributes
19658be545baSRichard Henderson  */
19668be545baSRichard Henderson int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
19678be545baSRichard Henderson                                        MemTxAttrs attrs);
19688be545baSRichard Henderson 
19698be545baSRichard Henderson /**
19708be545baSRichard Henderson  * memory_region_iommu_num_indexes: return the total number of IOMMU
19718be545baSRichard Henderson  * indexes that this IOMMU supports.
19728be545baSRichard Henderson  *
19738be545baSRichard Henderson  * @iommu_mr: the memory region
19748be545baSRichard Henderson  */
19758be545baSRichard Henderson int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
19768be545baSRichard Henderson 
19778be545baSRichard Henderson /**
19788be545baSRichard Henderson  * memory_region_name: get a memory region's name
19798be545baSRichard Henderson  *
19808be545baSRichard Henderson  * Returns the string that was used to initialize the memory region.
19818be545baSRichard Henderson  *
19828be545baSRichard Henderson  * @mr: the memory region being queried
19838be545baSRichard Henderson  */
19848be545baSRichard Henderson const char *memory_region_name(const MemoryRegion *mr);
19858be545baSRichard Henderson 
19868be545baSRichard Henderson /**
19878be545baSRichard Henderson  * memory_region_is_logging: return whether a memory region is logging writes
19888be545baSRichard Henderson  *
19898be545baSRichard Henderson  * Returns %true if the memory region is logging writes for the given client
19908be545baSRichard Henderson  *
19918be545baSRichard Henderson  * @mr: the memory region being queried
19928be545baSRichard Henderson  * @client: the client being queried
19938be545baSRichard Henderson  */
19948be545baSRichard Henderson bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
19958be545baSRichard Henderson 
19968be545baSRichard Henderson /**
19978be545baSRichard Henderson  * memory_region_get_dirty_log_mask: return the clients for which a
19988be545baSRichard Henderson  * memory region is logging writes.
19998be545baSRichard Henderson  *
20008be545baSRichard Henderson  * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
20018be545baSRichard Henderson  * are the bit indices.
20028be545baSRichard Henderson  *
20038be545baSRichard Henderson  * @mr: the memory region being queried
20048be545baSRichard Henderson  */
20058be545baSRichard Henderson uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
20068be545baSRichard Henderson 
20078be545baSRichard Henderson /**
20088be545baSRichard Henderson  * memory_region_is_rom: check whether a memory region is ROM
20098be545baSRichard Henderson  *
20108be545baSRichard Henderson  * Returns %true if a memory region is read-only memory.
20118be545baSRichard Henderson  *
20128be545baSRichard Henderson  * @mr: the memory region being queried
20138be545baSRichard Henderson  */
20148be545baSRichard Henderson static inline bool memory_region_is_rom(MemoryRegion *mr)
20158be545baSRichard Henderson {
20168be545baSRichard Henderson     return mr->ram && mr->readonly;
20178be545baSRichard Henderson }
20188be545baSRichard Henderson 
20198be545baSRichard Henderson /**
20208be545baSRichard Henderson  * memory_region_is_nonvolatile: check whether a memory region is non-volatile
20218be545baSRichard Henderson  *
20228be545baSRichard Henderson  * Returns %true is a memory region is non-volatile memory.
20238be545baSRichard Henderson  *
20248be545baSRichard Henderson  * @mr: the memory region being queried
20258be545baSRichard Henderson  */
20268be545baSRichard Henderson static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
20278be545baSRichard Henderson {
20288be545baSRichard Henderson     return mr->nonvolatile;
20298be545baSRichard Henderson }
20308be545baSRichard Henderson 
20318be545baSRichard Henderson /**
20328be545baSRichard Henderson  * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
20338be545baSRichard Henderson  *
20348be545baSRichard Henderson  * Returns a file descriptor backing a file-based RAM memory region,
20358be545baSRichard Henderson  * or -1 if the region is not a file-based RAM memory region.
20368be545baSRichard Henderson  *
20378be545baSRichard Henderson  * @mr: the RAM or alias memory region being queried.
20388be545baSRichard Henderson  */
20398be545baSRichard Henderson int memory_region_get_fd(MemoryRegion *mr);
20408be545baSRichard Henderson 
20418be545baSRichard Henderson /**
20428be545baSRichard Henderson  * memory_region_from_host: Convert a pointer into a RAM memory region
20438be545baSRichard Henderson  * and an offset within it.
20448be545baSRichard Henderson  *
20458be545baSRichard Henderson  * Given a host pointer inside a RAM memory region (created with
20468be545baSRichard Henderson  * memory_region_init_ram() or memory_region_init_ram_ptr()), return
20478be545baSRichard Henderson  * the MemoryRegion and the offset within it.
20488be545baSRichard Henderson  *
20498be545baSRichard Henderson  * Use with care; by the time this function returns, the returned pointer is
20508be545baSRichard Henderson  * not protected by RCU anymore.  If the caller is not within an RCU critical
20518be545baSRichard Henderson  * section and does not hold the BQL, it must have other means of
20528be545baSRichard Henderson  * protecting the pointer, such as a reference to the region that includes
20538be545baSRichard Henderson  * the incoming ram_addr_t.
20548be545baSRichard Henderson  *
20558be545baSRichard Henderson  * @ptr: the host pointer to be converted
20568be545baSRichard Henderson  * @offset: the offset within memory region
20578be545baSRichard Henderson  */
20588be545baSRichard Henderson MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
20598be545baSRichard Henderson 
20608be545baSRichard Henderson /**
20618be545baSRichard Henderson  * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
20628be545baSRichard Henderson  *
20638be545baSRichard Henderson  * Returns a host pointer to a RAM memory region (created with
20648be545baSRichard Henderson  * memory_region_init_ram() or memory_region_init_ram_ptr()).
20658be545baSRichard Henderson  *
20668be545baSRichard Henderson  * Use with care; by the time this function returns, the returned pointer is
20678be545baSRichard Henderson  * not protected by RCU anymore.  If the caller is not within an RCU critical
20688be545baSRichard Henderson  * section and does not hold the BQL, it must have other means of
20698be545baSRichard Henderson  * protecting the pointer, such as a reference to the region that includes
20708be545baSRichard Henderson  * the incoming ram_addr_t.
20718be545baSRichard Henderson  *
20728be545baSRichard Henderson  * @mr: the memory region being queried.
20738be545baSRichard Henderson  */
20748be545baSRichard Henderson void *memory_region_get_ram_ptr(MemoryRegion *mr);
20758be545baSRichard Henderson 
20768be545baSRichard Henderson /* memory_region_ram_resize: Resize a RAM region.
20778be545baSRichard Henderson  *
20788be545baSRichard Henderson  * Resizing RAM while migrating can result in the migration being canceled.
20798be545baSRichard Henderson  * Care has to be taken if the guest might have already detected the memory.
20808be545baSRichard Henderson  *
20818be545baSRichard Henderson  * @mr: a memory region created with @memory_region_init_resizeable_ram.
20828be545baSRichard Henderson  * @newsize: the new size of the region
20838be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
20848be545baSRichard Henderson  */
20858be545baSRichard Henderson void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
20868be545baSRichard Henderson                               Error **errp);
20878be545baSRichard Henderson 
20888be545baSRichard Henderson /**
20898be545baSRichard Henderson  * memory_region_msync: Synchronize selected address range of
20908be545baSRichard Henderson  * a memory mapped region
20918be545baSRichard Henderson  *
20928be545baSRichard Henderson  * @mr: the memory region to be msync
20938be545baSRichard Henderson  * @addr: the initial address of the range to be sync
20948be545baSRichard Henderson  * @size: the size of the range to be sync
20958be545baSRichard Henderson  */
20968be545baSRichard Henderson void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);
20978be545baSRichard Henderson 
20988be545baSRichard Henderson /**
20998be545baSRichard Henderson  * memory_region_writeback: Trigger cache writeback for
21008be545baSRichard Henderson  * selected address range
21018be545baSRichard Henderson  *
21028be545baSRichard Henderson  * @mr: the memory region to be updated
21038be545baSRichard Henderson  * @addr: the initial address of the range to be written back
21048be545baSRichard Henderson  * @size: the size of the range to be written back
21058be545baSRichard Henderson  */
21068be545baSRichard Henderson void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);
21078be545baSRichard Henderson 
21088be545baSRichard Henderson /**
21098be545baSRichard Henderson  * memory_region_set_log: Turn dirty logging on or off for a region.
21108be545baSRichard Henderson  *
21118be545baSRichard Henderson  * Turns dirty logging on or off for a specified client (display, migration).
21128be545baSRichard Henderson  * Only meaningful for RAM regions.
21138be545baSRichard Henderson  *
21148be545baSRichard Henderson  * @mr: the memory region being updated.
21158be545baSRichard Henderson  * @log: whether dirty logging is to be enabled or disabled.
21168be545baSRichard Henderson  * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
21178be545baSRichard Henderson  */
21188be545baSRichard Henderson void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
21198be545baSRichard Henderson 
21208be545baSRichard Henderson /**
21218be545baSRichard Henderson  * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
21228be545baSRichard Henderson  *
21238be545baSRichard Henderson  * Marks a range of bytes as dirty, after it has been dirtied outside
21248be545baSRichard Henderson  * guest code.
21258be545baSRichard Henderson  *
21268be545baSRichard Henderson  * @mr: the memory region being dirtied.
21278be545baSRichard Henderson  * @addr: the address (relative to the start of the region) being dirtied.
21288be545baSRichard Henderson  * @size: size of the range being dirtied.
21298be545baSRichard Henderson  */
21308be545baSRichard Henderson void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
21318be545baSRichard Henderson                              hwaddr size);
21328be545baSRichard Henderson 
21338be545baSRichard Henderson /**
21348be545baSRichard Henderson  * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
21358be545baSRichard Henderson  *
21368be545baSRichard Henderson  * This function is called when the caller wants to clear the remote
21378be545baSRichard Henderson  * dirty bitmap of a memory range within the memory region.  This can
21388be545baSRichard Henderson  * be used by e.g. KVM to manually clear dirty log when
21398be545baSRichard Henderson  * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is declared supported by the host
21408be545baSRichard Henderson  * kernel.
21418be545baSRichard Henderson  *
21428be545baSRichard Henderson  * @mr:     the memory region to clear the dirty log upon
21438be545baSRichard Henderson  * @start:  start address offset within the memory region
21448be545baSRichard Henderson  * @len:    length of the memory region to clear dirty bitmap
21458be545baSRichard Henderson  */
21468be545baSRichard Henderson void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
21478be545baSRichard Henderson                                       hwaddr len);
21488be545baSRichard Henderson 
21498be545baSRichard Henderson /**
21508be545baSRichard Henderson  * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
21518be545baSRichard Henderson  *                                         bitmap and clear it.
21528be545baSRichard Henderson  *
21538be545baSRichard Henderson  * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
21548be545baSRichard Henderson  * returns the snapshot.  The snapshot can then be used to query dirty
21558be545baSRichard Henderson  * status, using memory_region_snapshot_get_dirty.  Snapshotting allows
21568be545baSRichard Henderson  * querying the same page multiple times, which is especially useful for
21578be545baSRichard Henderson  * display updates where the scanlines often are not page aligned.
21588be545baSRichard Henderson  *
21598be545baSRichard Henderson  * The dirty bitmap region which gets copied into the snapshot (and
21608be545baSRichard Henderson  * cleared afterwards) can be larger than requested.  The boundaries
21618be545baSRichard Henderson  * are rounded up/down so complete bitmap longs (covering 64 pages on
21628be545baSRichard Henderson  * 64bit hosts) can be copied over into the bitmap snapshot.  Which
21638be545baSRichard Henderson  * isn't a problem for display updates as the extra pages are outside
21648be545baSRichard Henderson  * the visible area, and in case the visible area changes a full
21658be545baSRichard Henderson  * display redraw is due anyway.  Should other use cases for this
21668be545baSRichard Henderson  * function emerge we might have to revisit this implementation
21678be545baSRichard Henderson  * detail.
21688be545baSRichard Henderson  *
21698be545baSRichard Henderson  * Use g_free to release DirtyBitmapSnapshot.
21708be545baSRichard Henderson  *
21718be545baSRichard Henderson  * @mr: the memory region being queried.
21728be545baSRichard Henderson  * @addr: the address (relative to the start of the region) being queried.
21738be545baSRichard Henderson  * @size: the size of the range being queried.
21748be545baSRichard Henderson  * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
21758be545baSRichard Henderson  */
21768be545baSRichard Henderson DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
21778be545baSRichard Henderson                                                             hwaddr addr,
21788be545baSRichard Henderson                                                             hwaddr size,
21798be545baSRichard Henderson                                                             unsigned client);
21808be545baSRichard Henderson 
21818be545baSRichard Henderson /**
21828be545baSRichard Henderson  * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
21838be545baSRichard Henderson  *                                   in the specified dirty bitmap snapshot.
21848be545baSRichard Henderson  *
21858be545baSRichard Henderson  * @mr: the memory region being queried.
21868be545baSRichard Henderson  * @snap: the dirty bitmap snapshot
21878be545baSRichard Henderson  * @addr: the address (relative to the start of the region) being queried.
21888be545baSRichard Henderson  * @size: the size of the range being queried.
21898be545baSRichard Henderson  */
21908be545baSRichard Henderson bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
21918be545baSRichard Henderson                                       DirtyBitmapSnapshot *snap,
21928be545baSRichard Henderson                                       hwaddr addr, hwaddr size);
21938be545baSRichard Henderson 
21948be545baSRichard Henderson /**
21958be545baSRichard Henderson  * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
21968be545baSRichard Henderson  *                            client.
21978be545baSRichard Henderson  *
21988be545baSRichard Henderson  * Marks a range of pages as no longer dirty.
21998be545baSRichard Henderson  *
22008be545baSRichard Henderson  * @mr: the region being updated.
22018be545baSRichard Henderson  * @addr: the start of the subrange being cleaned.
22028be545baSRichard Henderson  * @size: the size of the subrange being cleaned.
22038be545baSRichard Henderson  * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
22048be545baSRichard Henderson  *          %DIRTY_MEMORY_VGA.
22058be545baSRichard Henderson  */
22068be545baSRichard Henderson void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
22078be545baSRichard Henderson                                hwaddr size, unsigned client);
22088be545baSRichard Henderson 
22098be545baSRichard Henderson /**
22108be545baSRichard Henderson  * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
22118be545baSRichard Henderson  *                                 TBs (for self-modifying code).
22128be545baSRichard Henderson  *
22138be545baSRichard Henderson  * The MemoryRegionOps->write() callback of a ROM device must use this function
22148be545baSRichard Henderson  * to mark byte ranges that have been modified internally, such as by directly
22158be545baSRichard Henderson  * accessing the memory returned by memory_region_get_ram_ptr().
22168be545baSRichard Henderson  *
22178be545baSRichard Henderson  * This function marks the range dirty and invalidates TBs so that TCG can
22188be545baSRichard Henderson  * detect self-modifying code.
22198be545baSRichard Henderson  *
22208be545baSRichard Henderson  * @mr: the region being flushed.
22218be545baSRichard Henderson  * @addr: the start, relative to the start of the region, of the range being
22228be545baSRichard Henderson  *        flushed.
22238be545baSRichard Henderson  * @size: the size, in bytes, of the range being flushed.
22248be545baSRichard Henderson  */
22258be545baSRichard Henderson void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
22268be545baSRichard Henderson 
22278be545baSRichard Henderson /**
22288be545baSRichard Henderson  * memory_region_set_readonly: Turn a memory region read-only (or read-write)
22298be545baSRichard Henderson  *
22308be545baSRichard Henderson  * Allows a memory region to be marked as read-only (turning it into a ROM).
22318be545baSRichard Henderson  * Only useful on RAM regions.
22328be545baSRichard Henderson  *
22338be545baSRichard Henderson  * @mr: the region being updated.
22348be545baSRichard Henderson  * @readonly: whether the region is to be ROM or RAM.
22358be545baSRichard Henderson  */
22368be545baSRichard Henderson void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
22378be545baSRichard Henderson 
22388be545baSRichard Henderson /**
22398be545baSRichard Henderson  * memory_region_set_nonvolatile: Turn a memory region non-volatile
22408be545baSRichard Henderson  *
22418be545baSRichard Henderson  * Allows a memory region to be marked as non-volatile.
22428be545baSRichard Henderson  * Only useful on RAM regions.
22438be545baSRichard Henderson  *
22448be545baSRichard Henderson  * @mr: the region being updated.
22458be545baSRichard Henderson  * @nonvolatile: whether the region is to be non-volatile.
22468be545baSRichard Henderson  */
22478be545baSRichard Henderson void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);
22488be545baSRichard Henderson 
22498be545baSRichard Henderson /**
22508be545baSRichard Henderson  * memory_region_rom_device_set_romd: enable/disable ROMD mode
22518be545baSRichard Henderson  *
22528be545baSRichard Henderson  * Allows a ROM device (initialized with memory_region_init_rom_device()) to
22538be545baSRichard Henderson  * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
22548be545baSRichard Henderson  * device is mapped to guest memory and satisfies read access directly.
22558be545baSRichard Henderson  * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
22568be545baSRichard Henderson  * Writes are always handled by the #MemoryRegion.write function.
22578be545baSRichard Henderson  *
22588be545baSRichard Henderson  * @mr: the memory region to be updated
22598be545baSRichard Henderson  * @romd_mode: %true to put the region into ROMD mode
22608be545baSRichard Henderson  */
22618be545baSRichard Henderson void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
22628be545baSRichard Henderson 
22638be545baSRichard Henderson /**
22648be545baSRichard Henderson  * memory_region_set_coalescing: Enable memory coalescing for the region.
22658be545baSRichard Henderson  *
22668be545baSRichard Henderson  * Enables writes to a region to be queued for later processing. MMIO ->write
22678be545baSRichard Henderson  * callbacks may be delayed until a non-coalesced MMIO is issued.
22688be545baSRichard Henderson  * Only useful for IO regions.  Roughly similar to write-combining hardware.
22698be545baSRichard Henderson  *
22708be545baSRichard Henderson  * @mr: the memory region to be write coalesced
22718be545baSRichard Henderson  */
22728be545baSRichard Henderson void memory_region_set_coalescing(MemoryRegion *mr);
22738be545baSRichard Henderson 
22748be545baSRichard Henderson /**
22758be545baSRichard Henderson  * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
22768be545baSRichard Henderson  *                               a region.
22778be545baSRichard Henderson  *
22788be545baSRichard Henderson  * Like memory_region_set_coalescing(), but works on a sub-range of a region.
22798be545baSRichard Henderson  * Multiple calls can be issued to coalesce disjoint ranges.
22808be545baSRichard Henderson  *
22818be545baSRichard Henderson  * @mr: the memory region to be updated.
22828be545baSRichard Henderson  * @offset: the start of the range within the region to be coalesced.
22838be545baSRichard Henderson  * @size: the size of the subrange to be coalesced.
22848be545baSRichard Henderson  */
22858be545baSRichard Henderson void memory_region_add_coalescing(MemoryRegion *mr,
22868be545baSRichard Henderson                                   hwaddr offset,
22878be545baSRichard Henderson                                   uint64_t size);
22888be545baSRichard Henderson 
22898be545baSRichard Henderson /**
22908be545baSRichard Henderson  * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
22918be545baSRichard Henderson  *
22928be545baSRichard Henderson  * Disables any coalescing caused by memory_region_set_coalescing() or
22938be545baSRichard Henderson  * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
22948be545baSRichard Henderson  * hardware.
22958be545baSRichard Henderson  *
22968be545baSRichard Henderson  * @mr: the memory region to be updated.
22978be545baSRichard Henderson  */
22988be545baSRichard Henderson void memory_region_clear_coalescing(MemoryRegion *mr);
22998be545baSRichard Henderson 
23008be545baSRichard Henderson /**
23018be545baSRichard Henderson  * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
23028be545baSRichard Henderson  *                                    accesses.
23038be545baSRichard Henderson  *
23048be545baSRichard Henderson  * Ensure that pending coalesced MMIO request are flushed before the memory
23058be545baSRichard Henderson  * region is accessed. This property is automatically enabled for all regions
23068be545baSRichard Henderson  * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
23078be545baSRichard Henderson  *
23088be545baSRichard Henderson  * @mr: the memory region to be updated.
23098be545baSRichard Henderson  */
23108be545baSRichard Henderson void memory_region_set_flush_coalesced(MemoryRegion *mr);
23118be545baSRichard Henderson 
23128be545baSRichard Henderson /**
23138be545baSRichard Henderson  * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
23148be545baSRichard Henderson  *                                      accesses.
23158be545baSRichard Henderson  *
23168be545baSRichard Henderson  * Clear the automatic coalesced MMIO flushing enabled via
23178be545baSRichard Henderson  * memory_region_set_flush_coalesced. Note that this service has no effect on
23188be545baSRichard Henderson  * memory regions that have MMIO coalescing enabled for themselves. For them,
23198be545baSRichard Henderson  * automatic flushing will stop once coalescing is disabled.
23208be545baSRichard Henderson  *
23218be545baSRichard Henderson  * @mr: the memory region to be updated.
23228be545baSRichard Henderson  */
23238be545baSRichard Henderson void memory_region_clear_flush_coalesced(MemoryRegion *mr);
23248be545baSRichard Henderson 
23258be545baSRichard Henderson /**
23268be545baSRichard Henderson  * memory_region_add_eventfd: Request an eventfd to be triggered when a word
23278be545baSRichard Henderson  *                            is written to a location.
23288be545baSRichard Henderson  *
23298be545baSRichard Henderson  * Marks a word in an IO region (initialized with memory_region_init_io())
23308be545baSRichard Henderson  * as a trigger for an eventfd event.  The I/O callback will not be called.
23318be545baSRichard Henderson  * The caller must be prepared to handle failure (that is, take the required
23328be545baSRichard Henderson  * action if the callback _is_ called).
23338be545baSRichard Henderson  *
23348be545baSRichard Henderson  * @mr: the memory region being updated.
23358be545baSRichard Henderson  * @addr: the address within @mr that is to be monitored
23368be545baSRichard Henderson  * @size: the size of the access to trigger the eventfd
23378be545baSRichard Henderson  * @match_data: whether to match against @data, instead of just @addr
23388be545baSRichard Henderson  * @data: the data to match against the guest write
23398be545baSRichard Henderson  * @e: event notifier to be triggered when @addr, @size, and @data all match.
23408be545baSRichard Henderson  **/
23418be545baSRichard Henderson void memory_region_add_eventfd(MemoryRegion *mr,
23428be545baSRichard Henderson                                hwaddr addr,
23438be545baSRichard Henderson                                unsigned size,
23448be545baSRichard Henderson                                bool match_data,
23458be545baSRichard Henderson                                uint64_t data,
23468be545baSRichard Henderson                                EventNotifier *e);
23478be545baSRichard Henderson 
23488be545baSRichard Henderson /**
23498be545baSRichard Henderson  * memory_region_del_eventfd: Cancel an eventfd.
23508be545baSRichard Henderson  *
23518be545baSRichard Henderson  * Cancels an eventfd trigger requested by a previous
23528be545baSRichard Henderson  * memory_region_add_eventfd() call.
23538be545baSRichard Henderson  *
23548be545baSRichard Henderson  * @mr: the memory region being updated.
23558be545baSRichard Henderson  * @addr: the address within @mr that is to be monitored
23568be545baSRichard Henderson  * @size: the size of the access to trigger the eventfd
23578be545baSRichard Henderson  * @match_data: whether to match against @data, instead of just @addr
23588be545baSRichard Henderson  * @data: the data to match against the guest write
23598be545baSRichard Henderson  * @e: event notifier to be triggered when @addr, @size, and @data all match.
23608be545baSRichard Henderson  */
23618be545baSRichard Henderson void memory_region_del_eventfd(MemoryRegion *mr,
23628be545baSRichard Henderson                                hwaddr addr,
23638be545baSRichard Henderson                                unsigned size,
23648be545baSRichard Henderson                                bool match_data,
23658be545baSRichard Henderson                                uint64_t data,
23668be545baSRichard Henderson                                EventNotifier *e);
23678be545baSRichard Henderson 
23688be545baSRichard Henderson /**
23698be545baSRichard Henderson  * memory_region_add_subregion: Add a subregion to a container.
23708be545baSRichard Henderson  *
23718be545baSRichard Henderson  * Adds a subregion at @offset.  The subregion may not overlap with other
23728be545baSRichard Henderson  * subregions (except for those explicitly marked as overlapping).  A region
23738be545baSRichard Henderson  * may only be added once as a subregion (unless removed with
23748be545baSRichard Henderson  * memory_region_del_subregion()); use memory_region_init_alias() if you
23758be545baSRichard Henderson  * want a region to be a subregion in multiple locations.
23768be545baSRichard Henderson  *
23778be545baSRichard Henderson  * @mr: the region to contain the new subregion; must be a container
23788be545baSRichard Henderson  *      initialized with memory_region_init().
23798be545baSRichard Henderson  * @offset: the offset relative to @mr where @subregion is added.
23808be545baSRichard Henderson  * @subregion: the subregion to be added.
23818be545baSRichard Henderson  */
23828be545baSRichard Henderson void memory_region_add_subregion(MemoryRegion *mr,
23838be545baSRichard Henderson                                  hwaddr offset,
23848be545baSRichard Henderson                                  MemoryRegion *subregion);
23858be545baSRichard Henderson /**
23868be545baSRichard Henderson  * memory_region_add_subregion_overlap: Add a subregion to a container
23878be545baSRichard Henderson  *                                      with overlap.
23888be545baSRichard Henderson  *
23898be545baSRichard Henderson  * Adds a subregion at @offset.  The subregion may overlap with other
23908be545baSRichard Henderson  * subregions.  Conflicts are resolved by having a higher @priority hide a
23918be545baSRichard Henderson  * lower @priority. Subregions without priority are taken as @priority 0.
23928be545baSRichard Henderson  * A region may only be added once as a subregion (unless removed with
23938be545baSRichard Henderson  * memory_region_del_subregion()); use memory_region_init_alias() if you
23948be545baSRichard Henderson  * want a region to be a subregion in multiple locations.
23958be545baSRichard Henderson  *
23968be545baSRichard Henderson  * @mr: the region to contain the new subregion; must be a container
23978be545baSRichard Henderson  *      initialized with memory_region_init().
23988be545baSRichard Henderson  * @offset: the offset relative to @mr where @subregion is added.
23998be545baSRichard Henderson  * @subregion: the subregion to be added.
24008be545baSRichard Henderson  * @priority: used for resolving overlaps; highest priority wins.
24018be545baSRichard Henderson  */
24028be545baSRichard Henderson void memory_region_add_subregion_overlap(MemoryRegion *mr,
24038be545baSRichard Henderson                                          hwaddr offset,
24048be545baSRichard Henderson                                          MemoryRegion *subregion,
24058be545baSRichard Henderson                                          int priority);
24068be545baSRichard Henderson 
24078be545baSRichard Henderson /**
24088be545baSRichard Henderson  * memory_region_get_ram_addr: Get the ram address associated with a memory
24098be545baSRichard Henderson  *                             region
24108be545baSRichard Henderson  *
24118be545baSRichard Henderson  * @mr: the region to be queried
24128be545baSRichard Henderson  */
24138be545baSRichard Henderson ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
24148be545baSRichard Henderson 
24158be545baSRichard Henderson uint64_t memory_region_get_alignment(const MemoryRegion *mr);
24168be545baSRichard Henderson /**
24178be545baSRichard Henderson  * memory_region_del_subregion: Remove a subregion.
24188be545baSRichard Henderson  *
24198be545baSRichard Henderson  * Removes a subregion from its container.
24208be545baSRichard Henderson  *
24218be545baSRichard Henderson  * @mr: the container to be updated.
24228be545baSRichard Henderson  * @subregion: the region being removed; must be a current subregion of @mr.
24238be545baSRichard Henderson  */
24248be545baSRichard Henderson void memory_region_del_subregion(MemoryRegion *mr,
24258be545baSRichard Henderson                                  MemoryRegion *subregion);
24268be545baSRichard Henderson 
24278be545baSRichard Henderson /*
24288be545baSRichard Henderson  * memory_region_set_enabled: dynamically enable or disable a region
24298be545baSRichard Henderson  *
24308be545baSRichard Henderson  * Enables or disables a memory region.  A disabled memory region
24318be545baSRichard Henderson  * ignores all accesses to itself and its subregions.  It does not
24328be545baSRichard Henderson  * obscure sibling subregions with lower priority - it simply behaves as
24338be545baSRichard Henderson  * if it was removed from the hierarchy.
24348be545baSRichard Henderson  *
24358be545baSRichard Henderson  * Regions default to being enabled.
24368be545baSRichard Henderson  *
24378be545baSRichard Henderson  * @mr: the region to be updated
24388be545baSRichard Henderson  * @enabled: whether to enable or disable the region
24398be545baSRichard Henderson  */
24408be545baSRichard Henderson void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
24418be545baSRichard Henderson 
24428be545baSRichard Henderson /*
24438be545baSRichard Henderson  * memory_region_set_address: dynamically update the address of a region
24448be545baSRichard Henderson  *
24458be545baSRichard Henderson  * Dynamically updates the address of a region, relative to its container.
24468be545baSRichard Henderson  * May be used on regions that are currently part of a memory hierarchy.
24478be545baSRichard Henderson  *
24488be545baSRichard Henderson  * @mr: the region to be updated
24498be545baSRichard Henderson  * @addr: new address, relative to container region
24508be545baSRichard Henderson  */
24518be545baSRichard Henderson void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
24528be545baSRichard Henderson 
24538be545baSRichard Henderson /*
24548be545baSRichard Henderson  * memory_region_set_size: dynamically update the size of a region.
24558be545baSRichard Henderson  *
24568be545baSRichard Henderson  * Dynamically updates the size of a region.
24578be545baSRichard Henderson  *
24588be545baSRichard Henderson  * @mr: the region to be updated
24598be545baSRichard Henderson  * @size: used size of the region.
24608be545baSRichard Henderson  */
24618be545baSRichard Henderson void memory_region_set_size(MemoryRegion *mr, uint64_t size);
24628be545baSRichard Henderson 
24638be545baSRichard Henderson /*
24648be545baSRichard Henderson  * memory_region_set_alias_offset: dynamically update a memory alias's offset
24658be545baSRichard Henderson  *
24668be545baSRichard Henderson  * Dynamically updates the offset into the target region that an alias points
24678be545baSRichard Henderson  * to, as if the fourth argument to memory_region_init_alias() has changed.
24688be545baSRichard Henderson  *
24698be545baSRichard Henderson  * @mr: the #MemoryRegion to be updated; should be an alias.
24708be545baSRichard Henderson  * @offset: the new offset into the target memory region
24718be545baSRichard Henderson  */
24728be545baSRichard Henderson void memory_region_set_alias_offset(MemoryRegion *mr,
24738be545baSRichard Henderson                                     hwaddr offset);
24748be545baSRichard Henderson 
24758be545baSRichard Henderson /*
24768be545baSRichard Henderson  * memory_region_set_unmergeable: Set a memory region unmergeable
24778be545baSRichard Henderson  *
24788be545baSRichard Henderson  * Mark a memory region unmergeable, resulting in the memory region (or
24798be545baSRichard Henderson  * everything contained in a memory region container) not getting merged when
24808be545baSRichard Henderson  * simplifying the address space and notifying memory listeners. Consequently,
24818be545baSRichard Henderson  * memory listeners will never get notified about ranges that are larger than
24828be545baSRichard Henderson  * the original memory regions.
24838be545baSRichard Henderson  *
24848be545baSRichard Henderson  * This is primarily useful when multiple aliases to a RAM memory region are
24858be545baSRichard Henderson  * mapped into a memory region container, and updates (e.g., enable/disable or
24868be545baSRichard Henderson  * map/unmap) of individual memory region aliases are not supposed to affect
24878be545baSRichard Henderson  * other memory regions in the same container.
24888be545baSRichard Henderson  *
24898be545baSRichard Henderson  * @mr: the #MemoryRegion to be updated
24908be545baSRichard Henderson  * @unmergeable: whether to mark the #MemoryRegion unmergeable
24918be545baSRichard Henderson  */
24928be545baSRichard Henderson void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable);
24938be545baSRichard Henderson 
24948be545baSRichard Henderson /**
24958be545baSRichard Henderson  * memory_region_present: checks if an address relative to a @container
24968be545baSRichard Henderson  * translates into #MemoryRegion within @container
24978be545baSRichard Henderson  *
24988be545baSRichard Henderson  * Answer whether a #MemoryRegion within @container covers the address
24998be545baSRichard Henderson  * @addr.
25008be545baSRichard Henderson  *
25018be545baSRichard Henderson  * @container: a #MemoryRegion within which @addr is a relative address
25028be545baSRichard Henderson  * @addr: the area within @container to be searched
25038be545baSRichard Henderson  */
25048be545baSRichard Henderson bool memory_region_present(MemoryRegion *container, hwaddr addr);
25058be545baSRichard Henderson 
25068be545baSRichard Henderson /**
25078be545baSRichard Henderson  * memory_region_is_mapped: returns true if #MemoryRegion is mapped
25088be545baSRichard Henderson  * into another memory region, which does not necessarily imply that it is
25098be545baSRichard Henderson  * mapped into an address space.
25108be545baSRichard Henderson  *
25118be545baSRichard Henderson  * @mr: a #MemoryRegion which should be checked if it's mapped
25128be545baSRichard Henderson  */
25138be545baSRichard Henderson bool memory_region_is_mapped(MemoryRegion *mr);
25148be545baSRichard Henderson 
25158be545baSRichard Henderson /**
25168be545baSRichard Henderson  * memory_region_get_ram_discard_manager: get the #RamDiscardManager for a
25178be545baSRichard Henderson  * #MemoryRegion
25188be545baSRichard Henderson  *
25198be545baSRichard Henderson  * The #RamDiscardManager cannot change while a memory region is mapped.
25208be545baSRichard Henderson  *
25218be545baSRichard Henderson  * @mr: the #MemoryRegion
25228be545baSRichard Henderson  */
25238be545baSRichard Henderson RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr);
25248be545baSRichard Henderson 
25258be545baSRichard Henderson /**
25268be545baSRichard Henderson  * memory_region_has_ram_discard_manager: check whether a #MemoryRegion has a
25278be545baSRichard Henderson  * #RamDiscardManager assigned
25288be545baSRichard Henderson  *
25298be545baSRichard Henderson  * @mr: the #MemoryRegion
25308be545baSRichard Henderson  */
25318be545baSRichard Henderson static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr)
25328be545baSRichard Henderson {
25338be545baSRichard Henderson     return !!memory_region_get_ram_discard_manager(mr);
25348be545baSRichard Henderson }
25358be545baSRichard Henderson 
25368be545baSRichard Henderson /**
25378be545baSRichard Henderson  * memory_region_set_ram_discard_manager: set the #RamDiscardManager for a
25388be545baSRichard Henderson  * #MemoryRegion
25398be545baSRichard Henderson  *
25408be545baSRichard Henderson  * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion
25418be545baSRichard Henderson  * that does not cover RAM, or a #MemoryRegion that already has a
2542ff121115SChenyi Qiang  * #RamDiscardManager assigned. Return 0 if the rdm is set successfully.
25438be545baSRichard Henderson  *
25448be545baSRichard Henderson  * @mr: the #MemoryRegion
25458be545baSRichard Henderson  * @rdm: #RamDiscardManager to set
25468be545baSRichard Henderson  */
2547ff121115SChenyi Qiang int memory_region_set_ram_discard_manager(MemoryRegion *mr,
25488be545baSRichard Henderson                                           RamDiscardManager *rdm);
25498be545baSRichard Henderson 
25508be545baSRichard Henderson /**
25518be545baSRichard Henderson  * memory_region_find: translate an address/size relative to a
25528be545baSRichard Henderson  * MemoryRegion into a #MemoryRegionSection.
25538be545baSRichard Henderson  *
25548be545baSRichard Henderson  * Locates the first #MemoryRegion within @mr that overlaps the range
25558be545baSRichard Henderson  * given by @addr and @size.
25568be545baSRichard Henderson  *
25578be545baSRichard Henderson  * Returns a #MemoryRegionSection that describes a contiguous overlap.
25588be545baSRichard Henderson  * It will have the following characteristics:
25598be545baSRichard Henderson  * - @size = 0 iff no overlap was found
25608be545baSRichard Henderson  * - @mr is non-%NULL iff an overlap was found
25618be545baSRichard Henderson  *
25628be545baSRichard Henderson  * Remember that in the return value the @offset_within_region is
25638be545baSRichard Henderson  * relative to the returned region (in the .@mr field), not to the
25648be545baSRichard Henderson  * @mr argument.
25658be545baSRichard Henderson  *
25668be545baSRichard Henderson  * Similarly, the .@offset_within_address_space is relative to the
25678be545baSRichard Henderson  * address space that contains both regions, the passed and the
25688be545baSRichard Henderson  * returned one.  However, in the special case where the @mr argument
25698be545baSRichard Henderson  * has no container (and thus is the root of the address space), the
25708be545baSRichard Henderson  * following will hold:
25718be545baSRichard Henderson  * - @offset_within_address_space >= @addr
25728be545baSRichard Henderson  * - @offset_within_address_space + .@size <= @addr + @size
25738be545baSRichard Henderson  *
25748be545baSRichard Henderson  * @mr: a MemoryRegion within which @addr is a relative address
25758be545baSRichard Henderson  * @addr: start of the area within @as to be searched
25768be545baSRichard Henderson  * @size: size of the area to be searched
25778be545baSRichard Henderson  */
25788be545baSRichard Henderson MemoryRegionSection memory_region_find(MemoryRegion *mr,
25798be545baSRichard Henderson                                        hwaddr addr, uint64_t size);
25808be545baSRichard Henderson 
25818be545baSRichard Henderson /**
25828be545baSRichard Henderson  * memory_global_dirty_log_sync: synchronize the dirty log for all memory
25838be545baSRichard Henderson  *
25848be545baSRichard Henderson  * Synchronizes the dirty page log for all address spaces.
25858be545baSRichard Henderson  *
25868be545baSRichard Henderson  * @last_stage: whether this is the last stage of live migration
25878be545baSRichard Henderson  */
25888be545baSRichard Henderson void memory_global_dirty_log_sync(bool last_stage);
25898be545baSRichard Henderson 
25908be545baSRichard Henderson /**
25918be545baSRichard Henderson  * memory_global_after_dirty_log_sync: synchronize the dirty log for all memory
25928be545baSRichard Henderson  *
25938be545baSRichard Henderson  * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
25948be545baSRichard Henderson  * This function must be called after the dirty log bitmap is cleared, and
25958be545baSRichard Henderson  * before dirty guest memory pages are read.  If you are using
25968be545baSRichard Henderson  * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
25978be545baSRichard Henderson  * care of doing this.
25988be545baSRichard Henderson  */
25998be545baSRichard Henderson void memory_global_after_dirty_log_sync(void);
26008be545baSRichard Henderson 
26018be545baSRichard Henderson /**
26028be545baSRichard Henderson  * memory_region_transaction_begin: Start a transaction.
26038be545baSRichard Henderson  *
26048be545baSRichard Henderson  * During a transaction, changes will be accumulated and made visible
26058be545baSRichard Henderson  * only when the transaction ends (is committed).
26068be545baSRichard Henderson  */
26078be545baSRichard Henderson void memory_region_transaction_begin(void);
26088be545baSRichard Henderson 
26098be545baSRichard Henderson /**
26108be545baSRichard Henderson  * memory_region_transaction_commit: Commit a transaction and make changes
26118be545baSRichard Henderson  *                                   visible to the guest.
26128be545baSRichard Henderson  */
26138be545baSRichard Henderson void memory_region_transaction_commit(void);
26148be545baSRichard Henderson 
26158be545baSRichard Henderson /**
26168be545baSRichard Henderson  * memory_listener_register: register callbacks to be called when memory
26178be545baSRichard Henderson  *                           sections are mapped or unmapped into an address
26188be545baSRichard Henderson  *                           space
26198be545baSRichard Henderson  *
26208be545baSRichard Henderson  * @listener: an object containing the callbacks to be called
26218be545baSRichard Henderson  * @filter: if non-%NULL, only regions in this address space will be observed
26228be545baSRichard Henderson  */
26238be545baSRichard Henderson void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
26248be545baSRichard Henderson 
26258be545baSRichard Henderson /**
26268be545baSRichard Henderson  * memory_listener_unregister: undo the effect of memory_listener_register()
26278be545baSRichard Henderson  *
26288be545baSRichard Henderson  * @listener: an object containing the callbacks to be removed
26298be545baSRichard Henderson  */
26308be545baSRichard Henderson void memory_listener_unregister(MemoryListener *listener);
26318be545baSRichard Henderson 
26328be545baSRichard Henderson /**
26338be545baSRichard Henderson  * memory_global_dirty_log_start: begin dirty logging for all regions
26348be545baSRichard Henderson  *
26358be545baSRichard Henderson  * @flags: purpose of starting dirty log, migration or dirty rate
26368be545baSRichard Henderson  * @errp: pointer to Error*, to store an error if it happens.
26378be545baSRichard Henderson  *
26388be545baSRichard Henderson  * Return: true on success, else false setting @errp with error.
26398be545baSRichard Henderson  */
26408be545baSRichard Henderson bool memory_global_dirty_log_start(unsigned int flags, Error **errp);
26418be545baSRichard Henderson 
26428be545baSRichard Henderson /**
26438be545baSRichard Henderson  * memory_global_dirty_log_stop: end dirty logging for all regions
26448be545baSRichard Henderson  *
26458be545baSRichard Henderson  * @flags: purpose of stopping dirty log, migration or dirty rate
26468be545baSRichard Henderson  */
26478be545baSRichard Henderson void memory_global_dirty_log_stop(unsigned int flags);
26488be545baSRichard Henderson 
26498be545baSRichard Henderson void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
26508be545baSRichard Henderson 
26518be545baSRichard Henderson bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
26528be545baSRichard Henderson                                 unsigned size, bool is_write,
26538be545baSRichard Henderson                                 MemTxAttrs attrs);
26548be545baSRichard Henderson 
26558be545baSRichard Henderson /**
26568be545baSRichard Henderson  * memory_region_dispatch_read: perform a read directly to the specified
26578be545baSRichard Henderson  * MemoryRegion.
26588be545baSRichard Henderson  *
26598be545baSRichard Henderson  * @mr: #MemoryRegion to access
26608be545baSRichard Henderson  * @addr: address within that region
26618be545baSRichard Henderson  * @pval: pointer to uint64_t which the data is written to
26628be545baSRichard Henderson  * @op: size, sign, and endianness of the memory operation
26638be545baSRichard Henderson  * @attrs: memory transaction attributes to use for the access
26648be545baSRichard Henderson  */
26658be545baSRichard Henderson MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
26668be545baSRichard Henderson                                         hwaddr addr,
26678be545baSRichard Henderson                                         uint64_t *pval,
26688be545baSRichard Henderson                                         MemOp op,
26698be545baSRichard Henderson                                         MemTxAttrs attrs);
26708be545baSRichard Henderson /**
26718be545baSRichard Henderson  * memory_region_dispatch_write: perform a write directly to the specified
26728be545baSRichard Henderson  * MemoryRegion.
26738be545baSRichard Henderson  *
26748be545baSRichard Henderson  * @mr: #MemoryRegion to access
26758be545baSRichard Henderson  * @addr: address within that region
26768be545baSRichard Henderson  * @data: data to write
26778be545baSRichard Henderson  * @op: size, sign, and endianness of the memory operation
26788be545baSRichard Henderson  * @attrs: memory transaction attributes to use for the access
26798be545baSRichard Henderson  */
26808be545baSRichard Henderson MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
26818be545baSRichard Henderson                                          hwaddr addr,
26828be545baSRichard Henderson                                          uint64_t data,
26838be545baSRichard Henderson                                          MemOp op,
26848be545baSRichard Henderson                                          MemTxAttrs attrs);
26858be545baSRichard Henderson 
26868be545baSRichard Henderson /**
26878be545baSRichard Henderson  * address_space_init: initializes an address space
26888be545baSRichard Henderson  *
26898be545baSRichard Henderson  * @as: an uninitialized #AddressSpace
26908be545baSRichard Henderson  * @root: a #MemoryRegion that routes addresses for the address space
26918be545baSRichard Henderson  * @name: an address space name.  The name is only used for debugging
26928be545baSRichard Henderson  *        output.
26938be545baSRichard Henderson  */
26948be545baSRichard Henderson void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
26958be545baSRichard Henderson 
26968be545baSRichard Henderson /**
26978be545baSRichard Henderson  * address_space_destroy: destroy an address space
26988be545baSRichard Henderson  *
26998be545baSRichard Henderson  * Releases all resources associated with an address space.  After an address space
27008be545baSRichard Henderson  * is destroyed, its root memory region (given by address_space_init()) may be destroyed
27018be545baSRichard Henderson  * as well.
27028be545baSRichard Henderson  *
27038be545baSRichard Henderson  * @as: address space to be destroyed
27048be545baSRichard Henderson  */
27058be545baSRichard Henderson void address_space_destroy(AddressSpace *as);
27068be545baSRichard Henderson 
27078be545baSRichard Henderson /**
27088be545baSRichard Henderson  * address_space_remove_listeners: unregister all listeners of an address space
27098be545baSRichard Henderson  *
27108be545baSRichard Henderson  * Removes all callbacks previously registered with memory_listener_register()
27118be545baSRichard Henderson  * for @as.
27128be545baSRichard Henderson  *
27138be545baSRichard Henderson  * @as: an initialized #AddressSpace
27148be545baSRichard Henderson  */
27158be545baSRichard Henderson void address_space_remove_listeners(AddressSpace *as);
27168be545baSRichard Henderson 
27178be545baSRichard Henderson /**
27188be545baSRichard Henderson  * address_space_rw: read from or write to an address space.
27198be545baSRichard Henderson  *
27208be545baSRichard Henderson  * Return a MemTxResult indicating whether the operation succeeded
27218be545baSRichard Henderson  * or failed (eg unassigned memory, device rejected the transaction,
27228be545baSRichard Henderson  * IOMMU fault).
27238be545baSRichard Henderson  *
27248be545baSRichard Henderson  * @as: #AddressSpace to be accessed
27258be545baSRichard Henderson  * @addr: address within that address space
27268be545baSRichard Henderson  * @attrs: memory transaction attributes
27278be545baSRichard Henderson  * @buf: buffer with the data transferred
27288be545baSRichard Henderson  * @len: the number of bytes to read or write
27298be545baSRichard Henderson  * @is_write: indicates the transfer direction
27308be545baSRichard Henderson  */
27318be545baSRichard Henderson MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
27328be545baSRichard Henderson                              MemTxAttrs attrs, void *buf,
27338be545baSRichard Henderson                              hwaddr len, bool is_write);
27348be545baSRichard Henderson 
27358be545baSRichard Henderson /**
27368be545baSRichard Henderson  * address_space_write: write to address space.
27378be545baSRichard Henderson  *
27388be545baSRichard Henderson  * Return a MemTxResult indicating whether the operation succeeded
27398be545baSRichard Henderson  * or failed (eg unassigned memory, device rejected the transaction,
27408be545baSRichard Henderson  * IOMMU fault).
27418be545baSRichard Henderson  *
27428be545baSRichard Henderson  * @as: #AddressSpace to be accessed
27438be545baSRichard Henderson  * @addr: address within that address space
27448be545baSRichard Henderson  * @attrs: memory transaction attributes
27458be545baSRichard Henderson  * @buf: buffer with the data transferred
27468be545baSRichard Henderson  * @len: the number of bytes to write
27478be545baSRichard Henderson  */
27488be545baSRichard Henderson MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
27498be545baSRichard Henderson                                 MemTxAttrs attrs,
27508be545baSRichard Henderson                                 const void *buf, hwaddr len);
27518be545baSRichard Henderson 
27528be545baSRichard Henderson /**
27538be545baSRichard Henderson  * address_space_write_rom: write to address space, including ROM.
27548be545baSRichard Henderson  *
27558be545baSRichard Henderson  * This function writes to the specified address space, but will
27568be545baSRichard Henderson  * write data to both ROM and RAM. This is used for non-guest
27578be545baSRichard Henderson  * writes like writes from the gdb debug stub or initial loading
27588be545baSRichard Henderson  * of ROM contents.
27598be545baSRichard Henderson  *
27608be545baSRichard Henderson  * Note that portions of the write which attempt to write data to
27618be545baSRichard Henderson  * a device will be silently ignored -- only real RAM and ROM will
27628be545baSRichard Henderson  * be written to.
27638be545baSRichard Henderson  *
27648be545baSRichard Henderson  * Return a MemTxResult indicating whether the operation succeeded
27658be545baSRichard Henderson  * or failed (eg unassigned memory, device rejected the transaction,
27668be545baSRichard Henderson  * IOMMU fault).
27678be545baSRichard Henderson  *
27688be545baSRichard Henderson  * @as: #AddressSpace to be accessed
27698be545baSRichard Henderson  * @addr: address within that address space
27708be545baSRichard Henderson  * @attrs: memory transaction attributes
27718be545baSRichard Henderson  * @buf: buffer with the data transferred
27728be545baSRichard Henderson  * @len: the number of bytes to write
27738be545baSRichard Henderson  */
27748be545baSRichard Henderson MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
27758be545baSRichard Henderson                                     MemTxAttrs attrs,
27768be545baSRichard Henderson                                     const void *buf, hwaddr len);
27778be545baSRichard Henderson 
27788be545baSRichard Henderson /* address_space_ld*: load from an address space
27798be545baSRichard Henderson  * address_space_st*: store to an address space
27808be545baSRichard Henderson  *
27818be545baSRichard Henderson  * These functions perform a load or store of the byte, word,
27828be545baSRichard Henderson  * longword or quad to the specified address within the AddressSpace.
27838be545baSRichard Henderson  * The _le suffixed functions treat the data as little endian;
27848be545baSRichard Henderson  * _be indicates big endian; no suffix indicates "same endianness
27858be545baSRichard Henderson  * as guest CPU".
27868be545baSRichard Henderson  *
27878be545baSRichard Henderson  * The "guest CPU endianness" accessors are deprecated for use outside
27888be545baSRichard Henderson  * target-* code; devices should be CPU-agnostic and use either the LE
27898be545baSRichard Henderson  * or the BE accessors.
27908be545baSRichard Henderson  *
27918be545baSRichard Henderson  * @as: #AddressSpace to be accessed
27928be545baSRichard Henderson  * @addr: address within that address space
27938be545baSRichard Henderson  * @val: data value, for stores
27948be545baSRichard Henderson  * @attrs: memory transaction attributes
27958be545baSRichard Henderson  * @result: location to write the success/failure of the transaction;
27968be545baSRichard Henderson  *   if NULL, this information is discarded
27978be545baSRichard Henderson  */
27988be545baSRichard Henderson 
27998be545baSRichard Henderson #define SUFFIX
28008be545baSRichard Henderson #define ARG1         as
28018be545baSRichard Henderson #define ARG1_DECL    AddressSpace *as
28028be545baSRichard Henderson #include "exec/memory_ldst.h.inc"
28038be545baSRichard Henderson 
28048be545baSRichard Henderson static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
28058be545baSRichard Henderson {
28068be545baSRichard Henderson     address_space_stl_notdirty(as, addr, val,
28078be545baSRichard Henderson                                MEMTXATTRS_UNSPECIFIED, NULL);
28088be545baSRichard Henderson }
28098be545baSRichard Henderson 
28108be545baSRichard Henderson #define SUFFIX
28118be545baSRichard Henderson #define ARG1         as
28128be545baSRichard Henderson #define ARG1_DECL    AddressSpace *as
28138be545baSRichard Henderson #include "exec/memory_ldst_phys.h.inc"
28148be545baSRichard Henderson 
struct MemoryRegionCache {
    /*
     * Direct host pointer used by the inline fast-path accessors
     * (see address_space_ldub_cached() below); when NULL, accesses
     * fall back to the *_cached_slow() dispatch functions.
     */
    uint8_t *ptr;
    /* NOTE(review): presumably the translated base within the region's
     * RAM block, cached by address_space_cache_init() — confirm there. */
    hwaddr xlat;
    /* Length of the cached range; fast-path accessors assert offsets
     * are strictly below this. */
    hwaddr len;
    /* FlatView the cache was resolved against — assumed pinned for the
     * cache's lifetime; TODO confirm against address_space_cache_init(). */
    FlatView *fv;
    /* Section describing the cached region within the address space. */
    MemoryRegionSection mrs;
    /* Whether the cache was set up for writes — NOTE(review): verify
     * semantics against address_space_cache_init()'s is_write argument. */
    bool is_write;
};
28238be545baSRichard Henderson 
28248be545baSRichard Henderson /* address_space_ld*_cached: load from a cached #MemoryRegion
28258be545baSRichard Henderson  * address_space_st*_cached: store into a cached #MemoryRegion
28268be545baSRichard Henderson  *
28278be545baSRichard Henderson  * These functions perform a load or store of the byte, word,
28288be545baSRichard Henderson  * longword or quad to the specified address.  The address is
28298be545baSRichard Henderson  * a physical address in the AddressSpace, but it must lie within
28308be545baSRichard Henderson  * a #MemoryRegion that was mapped with address_space_cache_init.
28318be545baSRichard Henderson  *
28328be545baSRichard Henderson  * The _le suffixed functions treat the data as little endian;
28338be545baSRichard Henderson  * _be indicates big endian; no suffix indicates "same endianness
28348be545baSRichard Henderson  * as guest CPU".
28358be545baSRichard Henderson  *
28368be545baSRichard Henderson  * The "guest CPU endianness" accessors are deprecated for use outside
28378be545baSRichard Henderson  * target-* code; devices should be CPU-agnostic and use either the LE
28388be545baSRichard Henderson  * or the BE accessors.
28398be545baSRichard Henderson  *
28408be545baSRichard Henderson  * @cache: previously initialized #MemoryRegionCache to be accessed
28418be545baSRichard Henderson  * @addr: address within the address space
28428be545baSRichard Henderson  * @val: data value, for stores
28438be545baSRichard Henderson  * @attrs: memory transaction attributes
28448be545baSRichard Henderson  * @result: location to write the success/failure of the transaction;
28458be545baSRichard Henderson  *   if NULL, this information is discarded
28468be545baSRichard Henderson  */
28478be545baSRichard Henderson 
28488be545baSRichard Henderson #define SUFFIX       _cached_slow
28498be545baSRichard Henderson #define ARG1         cache
28508be545baSRichard Henderson #define ARG1_DECL    MemoryRegionCache *cache
28518be545baSRichard Henderson #include "exec/memory_ldst.h.inc"
28528be545baSRichard Henderson 
28538be545baSRichard Henderson /* Inline fast path for direct RAM access.  */
28548be545baSRichard Henderson static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
28558be545baSRichard Henderson     hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
28568be545baSRichard Henderson {
28578be545baSRichard Henderson     assert(addr < cache->len);
28588be545baSRichard Henderson     if (likely(cache->ptr)) {
28598be545baSRichard Henderson         return ldub_p(cache->ptr + addr);
28608be545baSRichard Henderson     } else {
28618be545baSRichard Henderson         return address_space_ldub_cached_slow(cache, addr, attrs, result);
28628be545baSRichard Henderson     }
28638be545baSRichard Henderson }
28648be545baSRichard Henderson 
28658be545baSRichard Henderson static inline void address_space_stb_cached(MemoryRegionCache *cache,
28668be545baSRichard Henderson     hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
28678be545baSRichard Henderson {
28688be545baSRichard Henderson     assert(addr < cache->len);
28698be545baSRichard Henderson     if (likely(cache->ptr)) {
28708be545baSRichard Henderson         stb_p(cache->ptr + addr, val);
28718be545baSRichard Henderson     } else {
28728be545baSRichard Henderson         address_space_stb_cached_slow(cache, addr, val, attrs, result);
28738be545baSRichard Henderson     }
28748be545baSRichard Henderson }
28758be545baSRichard Henderson 
28768be545baSRichard Henderson #define ENDIANNESS
28778be545baSRichard Henderson #include "exec/memory_ldst_cached.h.inc"
28788be545baSRichard Henderson 
28798be545baSRichard Henderson #define ENDIANNESS   _le
28808be545baSRichard Henderson #include "exec/memory_ldst_cached.h.inc"
28818be545baSRichard Henderson 
28828be545baSRichard Henderson #define ENDIANNESS   _be
28838be545baSRichard Henderson #include "exec/memory_ldst_cached.h.inc"
28848be545baSRichard Henderson 
28858be545baSRichard Henderson #define SUFFIX       _cached
28868be545baSRichard Henderson #define ARG1         cache
28878be545baSRichard Henderson #define ARG1_DECL    MemoryRegionCache *cache
28888be545baSRichard Henderson #include "exec/memory_ldst_phys.h.inc"
28898be545baSRichard Henderson 
28908be545baSRichard Henderson /* address_space_cache_init: prepare for repeated access to a physical
28918be545baSRichard Henderson  * memory region
28928be545baSRichard Henderson  *
28938be545baSRichard Henderson  * @cache: #MemoryRegionCache to be filled
28948be545baSRichard Henderson  * @as: #AddressSpace to be accessed
28958be545baSRichard Henderson  * @addr: address within that address space
28968be545baSRichard Henderson  * @len: length of buffer
28978be545baSRichard Henderson  * @is_write: indicates the transfer direction
28988be545baSRichard Henderson  *
28998be545baSRichard Henderson  * Will only work with RAM, and may map a subset of the requested range by
29008be545baSRichard Henderson  * returning a value that is less than @len.  On failure, return a negative
29018be545baSRichard Henderson  * errno value.
29028be545baSRichard Henderson  *
29038be545baSRichard Henderson  * Because it only works with RAM, this function can be used for
29048be545baSRichard Henderson  * read-modify-write operations.  In this case, is_write should be %true.
29058be545baSRichard Henderson  *
29068be545baSRichard Henderson  * Note that addresses passed to the address_space_*_cached functions
29078be545baSRichard Henderson  * are relative to @addr.
29088be545baSRichard Henderson  */
29098be545baSRichard Henderson int64_t address_space_cache_init(MemoryRegionCache *cache,
29108be545baSRichard Henderson                                  AddressSpace *as,
29118be545baSRichard Henderson                                  hwaddr addr,
29128be545baSRichard Henderson                                  hwaddr len,
29138be545baSRichard Henderson                                  bool is_write);
29148be545baSRichard Henderson 
29158be545baSRichard Henderson /**
29168be545baSRichard Henderson  * address_space_cache_init_empty: Initialize empty #MemoryRegionCache
29178be545baSRichard Henderson  *
29188be545baSRichard Henderson  * @cache: The #MemoryRegionCache to operate on.
29198be545baSRichard Henderson  *
29208be545baSRichard Henderson  * Initializes #MemoryRegionCache structure without memory region attached.
29218be545baSRichard Henderson  * Cache initialized this way can only be safely destroyed, but not used.
29228be545baSRichard Henderson  */
29238be545baSRichard Henderson static inline void address_space_cache_init_empty(MemoryRegionCache *cache)
29248be545baSRichard Henderson {
29258be545baSRichard Henderson     cache->mrs.mr = NULL;
29268be545baSRichard Henderson     /* There is no real need to initialize fv, but it makes Coverity happy. */
29278be545baSRichard Henderson     cache->fv = NULL;
29288be545baSRichard Henderson }
29298be545baSRichard Henderson 
29308be545baSRichard Henderson /**
29318be545baSRichard Henderson  * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
29328be545baSRichard Henderson  *
29338be545baSRichard Henderson  * @cache: The #MemoryRegionCache to operate on.
29348be545baSRichard Henderson  * @addr: The first physical address that was written, relative to the
29358be545baSRichard Henderson  * address that was passed to @address_space_cache_init.
29368be545baSRichard Henderson  * @access_len: The number of bytes that were written starting at @addr.
29378be545baSRichard Henderson  */
29388be545baSRichard Henderson void address_space_cache_invalidate(MemoryRegionCache *cache,
29398be545baSRichard Henderson                                     hwaddr addr,
29408be545baSRichard Henderson                                     hwaddr access_len);
29418be545baSRichard Henderson 
29428be545baSRichard Henderson /**
29438be545baSRichard Henderson  * address_space_cache_destroy: free a #MemoryRegionCache
29448be545baSRichard Henderson  *
29458be545baSRichard Henderson  * @cache: The #MemoryRegionCache whose memory should be released.
29468be545baSRichard Henderson  */
29478be545baSRichard Henderson void address_space_cache_destroy(MemoryRegionCache *cache);
29488be545baSRichard Henderson 
29498be545baSRichard Henderson /* address_space_get_iotlb_entry: translate an address into an IOTLB
29508be545baSRichard Henderson  * entry. Should be called from an RCU critical section.
29518be545baSRichard Henderson  */
29528be545baSRichard Henderson IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
29538be545baSRichard Henderson                                             bool is_write, MemTxAttrs attrs);
29548be545baSRichard Henderson 
/* address_space_translate: translate an address range within an address space
 * into a MemoryRegion and an address range within that region.  Should be
29578be545baSRichard Henderson  * called from an RCU critical section, to avoid that the last reference
29588be545baSRichard Henderson  * to the returned region disappears after address_space_translate returns.
29598be545baSRichard Henderson  *
29608be545baSRichard Henderson  * @fv: #FlatView to be accessed
29618be545baSRichard Henderson  * @addr: address within that address space
29628be545baSRichard Henderson  * @xlat: pointer to address within the returned memory region section's
29638be545baSRichard Henderson  * #MemoryRegion.
29648be545baSRichard Henderson  * @len: pointer to length
29658be545baSRichard Henderson  * @is_write: indicates the transfer direction
29668be545baSRichard Henderson  * @attrs: memory attributes
29678be545baSRichard Henderson  */
29688be545baSRichard Henderson MemoryRegion *flatview_translate(FlatView *fv,
29698be545baSRichard Henderson                                  hwaddr addr, hwaddr *xlat,
29708be545baSRichard Henderson                                  hwaddr *len, bool is_write,
29718be545baSRichard Henderson                                  MemTxAttrs attrs);
29728be545baSRichard Henderson 
29738be545baSRichard Henderson static inline MemoryRegion *address_space_translate(AddressSpace *as,
29748be545baSRichard Henderson                                                     hwaddr addr, hwaddr *xlat,
29758be545baSRichard Henderson                                                     hwaddr *len, bool is_write,
29768be545baSRichard Henderson                                                     MemTxAttrs attrs)
29778be545baSRichard Henderson {
29788be545baSRichard Henderson     return flatview_translate(address_space_to_flatview(as),
29798be545baSRichard Henderson                               addr, xlat, len, is_write, attrs);
29808be545baSRichard Henderson }
29818be545baSRichard Henderson 
29828be545baSRichard Henderson /* address_space_access_valid: check for validity of accessing an address
29838be545baSRichard Henderson  * space range
29848be545baSRichard Henderson  *
29858be545baSRichard Henderson  * Check whether memory is assigned to the given address space range, and
29868be545baSRichard Henderson  * access is permitted by any IOMMU regions that are active for the address
29878be545baSRichard Henderson  * space.
29888be545baSRichard Henderson  *
29898be545baSRichard Henderson  * For now, addr and len should be aligned to a page size.  This limitation
29908be545baSRichard Henderson  * will be lifted in the future.
29918be545baSRichard Henderson  *
29928be545baSRichard Henderson  * @as: #AddressSpace to be accessed
29938be545baSRichard Henderson  * @addr: address within that address space
29948be545baSRichard Henderson  * @len: length of the area to be checked
29958be545baSRichard Henderson  * @is_write: indicates the transfer direction
29968be545baSRichard Henderson  * @attrs: memory attributes
29978be545baSRichard Henderson  */
29988be545baSRichard Henderson bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
29998be545baSRichard Henderson                                 bool is_write, MemTxAttrs attrs);
30008be545baSRichard Henderson 
30018be545baSRichard Henderson /* address_space_map: map a physical memory region into a host virtual address
30028be545baSRichard Henderson  *
30038be545baSRichard Henderson  * May map a subset of the requested range, given by and returned in @plen.
30048be545baSRichard Henderson  * May return %NULL and set *@plen to zero(0), if resources needed to perform
30058be545baSRichard Henderson  * the mapping are exhausted.
30068be545baSRichard Henderson  * Use only for reads OR writes - not for read-modify-write operations.
30078be545baSRichard Henderson  * Use address_space_register_map_client() to know when retrying the map
30088be545baSRichard Henderson  * operation is likely to succeed.
30098be545baSRichard Henderson  *
30108be545baSRichard Henderson  * @as: #AddressSpace to be accessed
30118be545baSRichard Henderson  * @addr: address within that address space
30128be545baSRichard Henderson  * @plen: pointer to length of buffer; updated on return
30138be545baSRichard Henderson  * @is_write: indicates the transfer direction
30148be545baSRichard Henderson  * @attrs: memory attributes
30158be545baSRichard Henderson  */
30168be545baSRichard Henderson void *address_space_map(AddressSpace *as, hwaddr addr,
30178be545baSRichard Henderson                         hwaddr *plen, bool is_write, MemTxAttrs attrs);
30188be545baSRichard Henderson 
30198be545baSRichard Henderson /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
30208be545baSRichard Henderson  *
30218be545baSRichard Henderson  * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
30228be545baSRichard Henderson  * the amount of memory that was actually read or written by the caller.
30238be545baSRichard Henderson  *
30248be545baSRichard Henderson  * @as: #AddressSpace used
30258be545baSRichard Henderson  * @buffer: host pointer as returned by address_space_map()
30268be545baSRichard Henderson  * @len: buffer length as returned by address_space_map()
30278be545baSRichard Henderson  * @access_len: amount of data actually transferred
30288be545baSRichard Henderson  * @is_write: indicates the transfer direction
30298be545baSRichard Henderson  */
30308be545baSRichard Henderson void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
30318be545baSRichard Henderson                          bool is_write, hwaddr access_len);
30328be545baSRichard Henderson 
30338be545baSRichard Henderson /*
30348be545baSRichard Henderson  * address_space_register_map_client: Register a callback to invoke when
30358be545baSRichard Henderson  * resources for address_space_map() are available again.
30368be545baSRichard Henderson  *
30378be545baSRichard Henderson  * address_space_map may fail when there are not enough resources available,
30388be545baSRichard Henderson  * such as when bounce buffer memory would exceed the limit. The callback can
30398be545baSRichard Henderson  * be used to retry the address_space_map operation. Note that the callback
30408be545baSRichard Henderson  * gets automatically removed after firing.
30418be545baSRichard Henderson  *
30428be545baSRichard Henderson  * @as: #AddressSpace to be accessed
30438be545baSRichard Henderson  * @bh: callback to invoke when address_space_map() retry is appropriate
30448be545baSRichard Henderson  */
30458be545baSRichard Henderson void address_space_register_map_client(AddressSpace *as, QEMUBH *bh);
30468be545baSRichard Henderson 
30478be545baSRichard Henderson /*
30488be545baSRichard Henderson  * address_space_unregister_map_client: Unregister a callback that has
30498be545baSRichard Henderson  * previously been registered and not fired yet.
30508be545baSRichard Henderson  *
30518be545baSRichard Henderson  * @as: #AddressSpace to be accessed
30528be545baSRichard Henderson  * @bh: callback to unregister
30538be545baSRichard Henderson  */
30548be545baSRichard Henderson void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh);
30558be545baSRichard Henderson 
30568be545baSRichard Henderson /* Internal functions, part of the implementation of address_space_read.  */
30578be545baSRichard Henderson MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
30588be545baSRichard Henderson                                     MemTxAttrs attrs, void *buf, hwaddr len);
30598be545baSRichard Henderson MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
30608be545baSRichard Henderson                                    MemTxAttrs attrs, void *buf,
30618be545baSRichard Henderson                                    hwaddr len, hwaddr addr1, hwaddr l,
30628be545baSRichard Henderson                                    MemoryRegion *mr);
30638be545baSRichard Henderson void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
30648be545baSRichard Henderson 
30658be545baSRichard Henderson /* Internal functions, part of the implementation of address_space_read_cached
30668be545baSRichard Henderson  * and address_space_write_cached.  */
30678be545baSRichard Henderson MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
30688be545baSRichard Henderson                                            hwaddr addr, void *buf, hwaddr len);
30698be545baSRichard Henderson MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
30708be545baSRichard Henderson                                             hwaddr addr, const void *buf,
30718be545baSRichard Henderson                                             hwaddr len);
30728be545baSRichard Henderson 
30738be545baSRichard Henderson int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr);
30748be545baSRichard Henderson bool prepare_mmio_access(MemoryRegion *mr);
30758be545baSRichard Henderson 
30768be545baSRichard Henderson static inline bool memory_region_supports_direct_access(MemoryRegion *mr)
30778be545baSRichard Henderson {
30788be545baSRichard Henderson     /* ROM DEVICE regions only allow direct access if in ROMD mode. */
30798be545baSRichard Henderson     if (memory_region_is_romd(mr)) {
30808be545baSRichard Henderson         return true;
30818be545baSRichard Henderson     }
30828be545baSRichard Henderson     if (!memory_region_is_ram(mr)) {
30838be545baSRichard Henderson         return false;
30848be545baSRichard Henderson     }
30858be545baSRichard Henderson     /*
30868be545baSRichard Henderson      * RAM DEVICE regions can be accessed directly using memcpy, but it might
30878be545baSRichard Henderson      * be MMIO and access using mempy can be wrong (e.g., using instructions not
30888be545baSRichard Henderson      * intended for MMIO access). So we treat this as IO.
30898be545baSRichard Henderson      */
30908be545baSRichard Henderson     return !memory_region_is_ram_device(mr);
30918be545baSRichard Henderson }
30928be545baSRichard Henderson 
30938be545baSRichard Henderson static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write,
30948be545baSRichard Henderson                                            MemTxAttrs attrs)
30958be545baSRichard Henderson {
30968be545baSRichard Henderson     if (!memory_region_supports_direct_access(mr)) {
30978be545baSRichard Henderson         return false;
30988be545baSRichard Henderson     }
30998be545baSRichard Henderson     /* Debug access can write to ROM. */
31008be545baSRichard Henderson     if (is_write && !attrs.debug) {
31018be545baSRichard Henderson         return !mr->readonly && !mr->rom_device;
31028be545baSRichard Henderson     }
31038be545baSRichard Henderson     return true;
31048be545baSRichard Henderson }
31058be545baSRichard Henderson 
31068be545baSRichard Henderson /**
31078be545baSRichard Henderson  * address_space_read: read from an address space.
31088be545baSRichard Henderson  *
31098be545baSRichard Henderson  * Return a MemTxResult indicating whether the operation succeeded
31108be545baSRichard Henderson  * or failed (eg unassigned memory, device rejected the transaction,
31118be545baSRichard Henderson  * IOMMU fault).  Called within RCU critical section.
31128be545baSRichard Henderson  *
31138be545baSRichard Henderson  * @as: #AddressSpace to be accessed
31148be545baSRichard Henderson  * @addr: address within that address space
31158be545baSRichard Henderson  * @attrs: memory transaction attributes
31168be545baSRichard Henderson  * @buf: buffer with the data transferred
31178be545baSRichard Henderson  * @len: length of the data transferred
31188be545baSRichard Henderson  */
static inline __attribute__((__always_inline__))
MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, void *buf,
                               hwaddr len)
{
    MemTxResult result = MEMTX_OK;
    hwaddr l, addr1;
    void *ptr;
    MemoryRegion *mr;
    FlatView *fv;

    /*
     * Only attempt the inline fast path when @len is a compile-time
     * constant; for a runtime length, the out-of-line full implementation
     * handles everything.
     */
    if (__builtin_constant_p(len)) {
        if (len) {
            /* Hold the RCU read lock across translation and access. */
            RCU_READ_LOCK_GUARD();
            fv = address_space_to_flatview(as);
            l = len;
            mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
            /*
             * memcpy directly only if the whole request fits in a single
             * directly-accessible region (l was not shortened below len).
             */
            if (len == l && memory_access_is_direct(mr, false, attrs)) {
                ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
                memcpy(buf, ptr, len);
            } else {
                /* Partial/indirect: continue with the translated fragment. */
                result = flatview_read_continue(fv, addr, attrs, buf, len,
                                                addr1, l, mr);
            }
        }
    } else {
        result = address_space_read_full(as, addr, attrs, buf, len);
    }
    return result;
}
31498be545baSRichard Henderson 
31508be545baSRichard Henderson /**
31518be545baSRichard Henderson  * address_space_read_cached: read from a cached RAM region
31528be545baSRichard Henderson  *
31538be545baSRichard Henderson  * @cache: Cached region to be addressed
31548be545baSRichard Henderson  * @addr: address relative to the base of the RAM region
31558be545baSRichard Henderson  * @buf: buffer with the data transferred
31568be545baSRichard Henderson  * @len: length of the data transferred
31578be545baSRichard Henderson  */
31588be545baSRichard Henderson static inline MemTxResult
31598be545baSRichard Henderson address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
31608be545baSRichard Henderson                           void *buf, hwaddr len)
31618be545baSRichard Henderson {
31628be545baSRichard Henderson     assert(addr < cache->len && len <= cache->len - addr);
31638be545baSRichard Henderson     fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
31648be545baSRichard Henderson     if (likely(cache->ptr)) {
31658be545baSRichard Henderson         memcpy(buf, cache->ptr + addr, len);
31668be545baSRichard Henderson         return MEMTX_OK;
31678be545baSRichard Henderson     } else {
31688be545baSRichard Henderson         return address_space_read_cached_slow(cache, addr, buf, len);
31698be545baSRichard Henderson     }
31708be545baSRichard Henderson }
31718be545baSRichard Henderson 
31728be545baSRichard Henderson /**
31738be545baSRichard Henderson  * address_space_write_cached: write to a cached RAM region
31748be545baSRichard Henderson  *
31758be545baSRichard Henderson  * @cache: Cached region to be addressed
31768be545baSRichard Henderson  * @addr: address relative to the base of the RAM region
31778be545baSRichard Henderson  * @buf: buffer with the data transferred
31788be545baSRichard Henderson  * @len: length of the data transferred
31798be545baSRichard Henderson  */
31808be545baSRichard Henderson static inline MemTxResult
31818be545baSRichard Henderson address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
31828be545baSRichard Henderson                            const void *buf, hwaddr len)
31838be545baSRichard Henderson {
31848be545baSRichard Henderson     assert(addr < cache->len && len <= cache->len - addr);
31858be545baSRichard Henderson     if (likely(cache->ptr)) {
31868be545baSRichard Henderson         memcpy(cache->ptr + addr, buf, len);
31878be545baSRichard Henderson         return MEMTX_OK;
31888be545baSRichard Henderson     } else {
31898be545baSRichard Henderson         return address_space_write_cached_slow(cache, addr, buf, len);
31908be545baSRichard Henderson     }
31918be545baSRichard Henderson }
31928be545baSRichard Henderson 
31938be545baSRichard Henderson /**
31948be545baSRichard Henderson  * address_space_set: Fill address space with a constant byte.
31958be545baSRichard Henderson  *
31968be545baSRichard Henderson  * Return a MemTxResult indicating whether the operation succeeded
31978be545baSRichard Henderson  * or failed (eg unassigned memory, device rejected the transaction,
31988be545baSRichard Henderson  * IOMMU fault).
31998be545baSRichard Henderson  *
32008be545baSRichard Henderson  * @as: #AddressSpace to be accessed
32018be545baSRichard Henderson  * @addr: address within that address space
32028be545baSRichard Henderson  * @c: constant byte to fill the memory
32038be545baSRichard Henderson  * @len: the number of bytes to fill with the constant byte
32048be545baSRichard Henderson  * @attrs: memory transaction attributes
32058be545baSRichard Henderson  */
32068be545baSRichard Henderson MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
32078be545baSRichard Henderson                               uint8_t c, hwaddr len, MemTxAttrs attrs);
32088be545baSRichard Henderson 
32098be545baSRichard Henderson /*
32108be545baSRichard Henderson  * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
32118be545baSRichard Henderson  * to manage the actual amount of memory consumed by the VM (then, the memory
32128be545baSRichard Henderson  * provided by RAM blocks might be bigger than the desired memory consumption).
32138be545baSRichard Henderson  * This *must* be set if:
 * - Discarding parts of a RAM block does not result in the change being
 *   reflected in the VM and the pages getting freed.
 * - All memory in RAM blocks is pinned or duplicated, invalidating any previous
 *   discards blindly.
 * - Discarding parts of a RAM block will result in integrity issues (e.g.,
32198be545baSRichard Henderson  *   encrypted VMs).
32208be545baSRichard Henderson  * Technologies that only temporarily pin the current working set of a
32218be545baSRichard Henderson  * driver are fine, because we don't expect such pages to be discarded
32228be545baSRichard Henderson  * (esp. based on guest action like balloon inflation).
32238be545baSRichard Henderson  *
32248be545baSRichard Henderson  * This is *not* to be used to protect from concurrent discards (esp.,
32258be545baSRichard Henderson  * postcopy).
32268be545baSRichard Henderson  *
32278be545baSRichard Henderson  * Returns 0 if successful. Returns -EBUSY if a technology that relies on
32288be545baSRichard Henderson  * discards to work reliably is active.
32298be545baSRichard Henderson  */
32308be545baSRichard Henderson int ram_block_discard_disable(bool state);
32318be545baSRichard Henderson 
32328be545baSRichard Henderson /*
32338be545baSRichard Henderson  * See ram_block_discard_disable(): only disable uncoordinated discards,
32348be545baSRichard Henderson  * keeping coordinated discards (via the RamDiscardManager) enabled.
32358be545baSRichard Henderson  */
32368be545baSRichard Henderson int ram_block_uncoordinated_discard_disable(bool state);
32378be545baSRichard Henderson 
32388be545baSRichard Henderson /*
32398be545baSRichard Henderson  * Inhibit technologies that disable discarding of pages in RAM blocks.
32408be545baSRichard Henderson  *
32418be545baSRichard Henderson  * Returns 0 if successful. Returns -EBUSY if discards are already set to
32428be545baSRichard Henderson  * broken.
32438be545baSRichard Henderson  */
32448be545baSRichard Henderson int ram_block_discard_require(bool state);
32458be545baSRichard Henderson 
32468be545baSRichard Henderson /*
32478be545baSRichard Henderson  * See ram_block_discard_require(): only inhibit technologies that disable
32488be545baSRichard Henderson  * uncoordinated discarding of pages in RAM blocks, allowing co-existence with
32498be545baSRichard Henderson  * technologies that only inhibit uncoordinated discards (via the
32508be545baSRichard Henderson  * RamDiscardManager).
32518be545baSRichard Henderson  */
32528be545baSRichard Henderson int ram_block_coordinated_discard_require(bool state);
32538be545baSRichard Henderson 
32548be545baSRichard Henderson /*
32558be545baSRichard Henderson  * Test if any discarding of memory in ram blocks is disabled.
32568be545baSRichard Henderson  */
32578be545baSRichard Henderson bool ram_block_discard_is_disabled(void);
32588be545baSRichard Henderson 
32598be545baSRichard Henderson /*
32608be545baSRichard Henderson  * Test if any discarding of memory in ram blocks is required to work reliably.
32618be545baSRichard Henderson  */
32628be545baSRichard Henderson bool ram_block_discard_is_required(void);
32638be545baSRichard Henderson 
32648be545baSRichard Henderson void ram_block_add_cpr_blocker(RAMBlock *rb, Error **errp);
32658be545baSRichard Henderson void ram_block_del_cpr_blocker(RAMBlock *rb);
32668be545baSRichard Henderson 
32678be545baSRichard Henderson #endif
3268