18be545baSRichard Henderson /*
28be545baSRichard Henderson * Physical memory management API
38be545baSRichard Henderson *
48be545baSRichard Henderson * Copyright 2011 Red Hat, Inc. and/or its affiliates
58be545baSRichard Henderson *
68be545baSRichard Henderson * Authors:
78be545baSRichard Henderson * Avi Kivity <avi@redhat.com>
88be545baSRichard Henderson *
98be545baSRichard Henderson * This work is licensed under the terms of the GNU GPL, version 2. See
108be545baSRichard Henderson * the COPYING file in the top-level directory.
118be545baSRichard Henderson *
128be545baSRichard Henderson */
138be545baSRichard Henderson
148be545baSRichard Henderson #ifndef SYSTEM_MEMORY_H
158be545baSRichard Henderson #define SYSTEM_MEMORY_H
168be545baSRichard Henderson
178be545baSRichard Henderson #include "exec/cpu-common.h"
188be545baSRichard Henderson #include "exec/hwaddr.h"
198be545baSRichard Henderson #include "exec/memattrs.h"
208be545baSRichard Henderson #include "exec/memop.h"
218be545baSRichard Henderson #include "exec/ramlist.h"
228be545baSRichard Henderson #include "qemu/bswap.h"
238be545baSRichard Henderson #include "qemu/queue.h"
248be545baSRichard Henderson #include "qemu/int128.h"
258be545baSRichard Henderson #include "qemu/range.h"
268be545baSRichard Henderson #include "qemu/notify.h"
278be545baSRichard Henderson #include "qom/object.h"
288be545baSRichard Henderson #include "qemu/rcu.h"
298be545baSRichard Henderson
308be545baSRichard Henderson #define RAM_ADDR_INVALID (~(ram_addr_t)0)
318be545baSRichard Henderson
328be545baSRichard Henderson #define MAX_PHYS_ADDR_SPACE_BITS 62
338be545baSRichard Henderson #define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
348be545baSRichard Henderson
358be545baSRichard Henderson #define TYPE_MEMORY_REGION "memory-region"
368be545baSRichard Henderson DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
378be545baSRichard Henderson TYPE_MEMORY_REGION)
388be545baSRichard Henderson
398be545baSRichard Henderson #define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
408be545baSRichard Henderson typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
418be545baSRichard Henderson DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
428be545baSRichard Henderson IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)
438be545baSRichard Henderson
448be545baSRichard Henderson #define TYPE_RAM_DISCARD_MANAGER "ram-discard-manager"
458be545baSRichard Henderson typedef struct RamDiscardManagerClass RamDiscardManagerClass;
468be545baSRichard Henderson typedef struct RamDiscardManager RamDiscardManager;
478be545baSRichard Henderson DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
488be545baSRichard Henderson RAM_DISCARD_MANAGER, TYPE_RAM_DISCARD_MANAGER);
498be545baSRichard Henderson
/*
 * Hook invoked on DMA reads so the fuzzer can observe which guest
 * addresses a device accesses (presumably to guide input generation --
 * see the CONFIG_FUZZ implementation; TODO confirm).  When fuzzing
 * support is not built in it compiles to an empty inline stub.
 */
#ifdef CONFIG_FUZZ
void fuzz_dma_read_cb(size_t addr,
                      size_t len,
                      MemoryRegion *mr);
#else
static inline void fuzz_dma_read_cb(size_t addr,
                                    size_t len,
                                    MemoryRegion *mr)
{
    /* Do Nothing */
}
#endif
628be545baSRichard Henderson
638be545baSRichard Henderson /* Possible bits for global_dirty_log_{start|stop} */
648be545baSRichard Henderson
658be545baSRichard Henderson /* Dirty tracking enabled because migration is running */
668be545baSRichard Henderson #define GLOBAL_DIRTY_MIGRATION (1U << 0)
678be545baSRichard Henderson
688be545baSRichard Henderson /* Dirty tracking enabled because measuring dirty rate */
698be545baSRichard Henderson #define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)
708be545baSRichard Henderson
718be545baSRichard Henderson /* Dirty tracking enabled because dirty limit */
728be545baSRichard Henderson #define GLOBAL_DIRTY_LIMIT (1U << 2)
738be545baSRichard Henderson
748be545baSRichard Henderson #define GLOBAL_DIRTY_MASK (0x7)
758be545baSRichard Henderson
768be545baSRichard Henderson extern unsigned int global_dirty_tracking;
778be545baSRichard Henderson
788be545baSRichard Henderson typedef struct MemoryRegionOps MemoryRegionOps;
798be545baSRichard Henderson
/**
 * struct ReservedRegion: describes a reserved range of addresses
 *
 * @range: the covered address range
 * @type: kind of the reservation; semantics are defined by the producer
 *        of the region (NOTE(review): e.g. IOMMU reserved-region types --
 *        confirm against the users of this struct)
 */
struct ReservedRegion {
    Range range;
    unsigned type;
};
848be545baSRichard Henderson
/**
 * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 *        (an Int128, so a section can cover the entire 2^64 address space)
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 * @nonvolatile: this section is non-volatile
 * @unmergeable: this section should not get merged with adjacent sections
 */
struct MemoryRegionSection {
    Int128 size;
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    hwaddr offset_within_address_space;
    bool readonly;
    bool nonvolatile;
    bool unmergeable;
};
1088be545baSRichard Henderson
1098be545baSRichard Henderson typedef struct IOMMUTLBEntry IOMMUTLBEntry;
1108be545baSRichard Henderson
1114b05c720SCLEMENT MATHIEU--DRIF /*
1124b05c720SCLEMENT MATHIEU--DRIF * See address_space_translate:
1134b05c720SCLEMENT MATHIEU--DRIF * - bit 0 : read
1144b05c720SCLEMENT MATHIEU--DRIF * - bit 1 : write
1154b05c720SCLEMENT MATHIEU--DRIF * - bit 2 : exec
1164b05c720SCLEMENT MATHIEU--DRIF * - bit 3 : priv
1174b05c720SCLEMENT MATHIEU--DRIF * - bit 4 : global
1184b05c720SCLEMENT MATHIEU--DRIF * - bit 5 : untranslated only
1194b05c720SCLEMENT MATHIEU--DRIF */
typedef enum {
    IOMMU_NONE = 0,                  /* no access permitted */
    IOMMU_RO = 1,                    /* bit 0: read */
    IOMMU_WO = 2,                    /* bit 1: write */
    IOMMU_RW = 3,                    /* read + write */
    IOMMU_EXEC = 4,                  /* bit 2: exec */
    IOMMU_PRIV = 8,                  /* bit 3: priv */
    IOMMU_GLOBAL = 16,               /* bit 4: global */
    IOMMU_UNTRANSLATED_ONLY = 32,    /* bit 5: untranslated only */
} IOMMUAccessFlags;
1308be545baSRichard Henderson
/* Build an IOMMUAccessFlags value from read/write permission booleans. */
#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | \
                                 ((w) ? IOMMU_WO : 0))
/*
 * As above, but additionally taking the exec/priv/global/untranslated-only
 * booleans (see the bit layout documented before IOMMUAccessFlags).
 */
#define IOMMU_ACCESS_FLAG_FULL(r, w, x, p, g, uo) \
    (IOMMU_ACCESS_FLAG(r, w) | \
     ((x) ? IOMMU_EXEC : 0) | \
     ((p) ? IOMMU_PRIV : 0) | \
     ((g) ? IOMMU_GLOBAL : 0) | \
     ((uo) ? IOMMU_UNTRANSLATED_ONLY : 0))
1398be545baSRichard Henderson
/**
 * struct IOMMUTLBEntry: the result of an IOMMU translation
 *
 * @target_as: the address space the translated address belongs to
 * @iova: the input (I/O virtual) address of the translation
 * @translated_addr: the output address, within @target_as
 * @addr_mask: mask covering the translation granule
 * @perm: access flags permitted by this mapping
 * @pasid: process address space ID associated with the translation
 */
struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
    uint32_t         pasid;
};
1488be545baSRichard Henderson
1498be545baSRichard Henderson /*
1508be545baSRichard Henderson * Bitmap for different IOMMUNotifier capabilities. Each notifier can
1518be545baSRichard Henderson * register with one or multiple IOMMU Notifier capability bit(s).
1528be545baSRichard Henderson *
 * Normally there are two use cases for the notifiers:
1548be545baSRichard Henderson *
1558be545baSRichard Henderson * (1) When the device needs accurate synchronizations of the vIOMMU page
1568be545baSRichard Henderson * tables, it needs to register with both MAP|UNMAP notifies (which
1578be545baSRichard Henderson * is defined as IOMMU_NOTIFIER_IOTLB_EVENTS below).
1588be545baSRichard Henderson *
1598be545baSRichard Henderson * Regarding to accurate synchronization, it's when the notified
1608be545baSRichard Henderson * device maintains a shadow page table and must be notified on each
1618be545baSRichard Henderson * guest MAP (page table entry creation) and UNMAP (invalidation)
1628be545baSRichard Henderson * events (e.g. VFIO). Both notifications must be accurate so that
1638be545baSRichard Henderson * the shadow page table is fully in sync with the guest view.
1648be545baSRichard Henderson *
1658be545baSRichard Henderson * (2) When the device doesn't need accurate synchronizations of the
1668be545baSRichard Henderson * vIOMMU page tables, it needs to register only with UNMAP or
1678be545baSRichard Henderson * DEVIOTLB_UNMAP notifies.
1688be545baSRichard Henderson *
1698be545baSRichard Henderson * It's when the device maintains a cache of IOMMU translations
1708be545baSRichard Henderson * (IOTLB) and is able to fill that cache by requesting translations
1718be545baSRichard Henderson * from the vIOMMU through a protocol similar to ATS (Address
1728be545baSRichard Henderson * Translation Service).
1738be545baSRichard Henderson *
1748be545baSRichard Henderson * Note that in this mode the vIOMMU will not maintain a shadowed
1758be545baSRichard Henderson * page table for the address space, and the UNMAP messages can cover
1768be545baSRichard Henderson * more than the pages that used to get mapped. The IOMMU notifiee
1778be545baSRichard Henderson * should be able to take care of over-sized invalidations.
1788be545baSRichard Henderson */
/*
 * IOMMUNotifierFlag: bitmap of event kinds an IOMMUNotifier subscribes
 * to; see the discussion above for the two typical usage patterns.
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
    /* Notify changes on device IOTLB entries */
    IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
} IOMMUNotifierFlag;
1888be545baSRichard Henderson
1898be545baSRichard Henderson #define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
1908be545baSRichard Henderson #define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
1918be545baSRichard Henderson #define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
1928be545baSRichard Henderson IOMMU_NOTIFIER_DEVIOTLB_EVENTS)
1938be545baSRichard Henderson
1948be545baSRichard Henderson struct IOMMUNotifier;
1958be545baSRichard Henderson typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
1968be545baSRichard Henderson IOMMUTLBEntry *data);
1978be545baSRichard Henderson
struct IOMMUNotifier {
    IOMMUNotify notify;               /* callback invoked on subscribed events */
    IOMMUNotifierFlag notifier_flags; /* bitmap of events to be notified about */
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;                    /* IOMMU index the notifier applies to */
    void *opaque;                     /* user data; NOTE: not set by
                                       * iommu_notifier_init() */
    QLIST_ENTRY(IOMMUNotifier) node;  /* list linkage */
};
typedef struct IOMMUNotifier IOMMUNotifier;
2088be545baSRichard Henderson typedef struct IOMMUNotifier IOMMUNotifier;
2098be545baSRichard Henderson
/*
 * IOMMUTLBEvent: one IOMMU notification; @type says which event kind
 * (MAP/UNMAP/DEVIOTLB_UNMAP) the @entry translation describes.
 */
typedef struct IOMMUTLBEvent {
    IOMMUNotifierFlag type;
    IOMMUTLBEntry entry;
} IOMMUTLBEvent;
2148be545baSRichard Henderson
2158be545baSRichard Henderson /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
2168be545baSRichard Henderson #define RAM_PREALLOC (1 << 0)
2178be545baSRichard Henderson
2188be545baSRichard Henderson /* RAM is mmap-ed with MAP_SHARED */
2198be545baSRichard Henderson #define RAM_SHARED (1 << 1)
2208be545baSRichard Henderson
2218be545baSRichard Henderson /* Only a portion of RAM (used_length) is actually used, and migrated.
2228be545baSRichard Henderson * Resizing RAM while migrating can result in the migration being canceled.
2238be545baSRichard Henderson */
2248be545baSRichard Henderson #define RAM_RESIZEABLE (1 << 2)
2258be545baSRichard Henderson
2268be545baSRichard Henderson /* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
2278be545baSRichard Henderson * zero the page and wake waiting processes.
2288be545baSRichard Henderson * (Set during postcopy)
2298be545baSRichard Henderson */
2308be545baSRichard Henderson #define RAM_UF_ZEROPAGE (1 << 3)
2318be545baSRichard Henderson
2328be545baSRichard Henderson /* RAM can be migrated */
2338be545baSRichard Henderson #define RAM_MIGRATABLE (1 << 4)
2348be545baSRichard Henderson
2358be545baSRichard Henderson /* RAM is a persistent kind memory */
2368be545baSRichard Henderson #define RAM_PMEM (1 << 5)
2378be545baSRichard Henderson
2388be545baSRichard Henderson
2398be545baSRichard Henderson /*
2408be545baSRichard Henderson * UFFDIO_WRITEPROTECT is used on this RAMBlock to
2418be545baSRichard Henderson * support 'write-tracking' migration type.
2428be545baSRichard Henderson * Implies ram_state->ram_wt_enabled.
2438be545baSRichard Henderson */
2448be545baSRichard Henderson #define RAM_UF_WRITEPROTECT (1 << 6)
2458be545baSRichard Henderson
2468be545baSRichard Henderson /*
2478be545baSRichard Henderson * RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or huge
2488be545baSRichard Henderson * pages if applicable) is skipped: will bail out if not supported. When not
2498be545baSRichard Henderson * set, the OS will do the reservation, if supported for the memory type.
2508be545baSRichard Henderson */
2518be545baSRichard Henderson #define RAM_NORESERVE (1 << 7)
2528be545baSRichard Henderson
2538be545baSRichard Henderson /* RAM that isn't accessible through normal means. */
2548be545baSRichard Henderson #define RAM_PROTECTED (1 << 8)
2558be545baSRichard Henderson
2568be545baSRichard Henderson /* RAM is an mmap-ed named file */
2578be545baSRichard Henderson #define RAM_NAMED_FILE (1 << 9)
2588be545baSRichard Henderson
2598be545baSRichard Henderson /* RAM is mmap-ed read-only */
2608be545baSRichard Henderson #define RAM_READONLY (1 << 10)
2618be545baSRichard Henderson
2628be545baSRichard Henderson /* RAM FD is opened read-only */
2638be545baSRichard Henderson #define RAM_READONLY_FD (1 << 11)
2648be545baSRichard Henderson
2658be545baSRichard Henderson /* RAM can be private that has kvm guest memfd backend */
2668be545baSRichard Henderson #define RAM_GUEST_MEMFD (1 << 12)
2678be545baSRichard Henderson
/*
 * In RAMBlock creation functions, if RAM_SHARED is 0 in the flags parameter,
 * the implementation may still create a shared mapping if other conditions
 * require it. Callers who specifically want a private mapping, eg objects
 * specified by the user, must pass RAM_PRIVATE.
 * After RAMBlock creation, RAM_SHARED in the block's flags indicates whether
 * the block is shared or private, and RAM_PRIVATE is omitted.
 */
2768be545baSRichard Henderson #define RAM_PRIVATE (1 << 13)
2778be545baSRichard Henderson
/**
 * iommu_notifier_init: fill in the caller-supplied fields of a notifier.
 *
 * @n: the notifier to initialize
 * @fn: callback to invoke when a subscribed event fires
 * @flags: bitmap of IOMMUNotifierFlag events to subscribe to
 * @start: first address of the range of interest (inclusive)
 * @end: last address of the range of interest (inclusive)
 * @iommu_idx: IOMMU index the notifier applies to
 *
 * Note: @n->opaque and @n->node are left untouched.
 */
static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end,
                                       int iommu_idx)
{
    n->iommu_idx = iommu_idx;
    n->notifier_flags = flags;
    n->notify = fn;
    n->start = start;
    n->end = end;
}
2898be545baSRichard Henderson
/*
 * Memory region callbacks: device-supplied hooks used to service guest
 * accesses to an MMIO region, plus the guest-visible (@valid) and
 * internal (@impl) access-size constraints applied around them.
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    /* Variants of @read/@write that additionally carry the memory
     * transaction attributes and can report failure via the MemTxResult
     * return value (presumably a region provides either the plain or the
     * _with_attrs form -- confirm against the dispatch code). */
    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    /* Byte order of the device registers; see enum device_endian. */
    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};
3548be545baSRichard Henderson
/*
 * MemoryRegionClass: class data for TYPE_MEMORY_REGION; it carries no
 * methods of its own beyond the embedded QOM ObjectClass.
 */
typedef struct MemoryRegionClass {
    /* private */
    ObjectClass parent_class;
} MemoryRegionClass;
3598be545baSRichard Henderson
3608be545baSRichard Henderson
/*
 * Implementation-specific attributes that can be queried through
 * memory_region_iommu_get_attr(); handled by the @get_attr method of
 * IOMMUMemoryRegionClass.
 */
enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD    /* sPAPR TCE table file descriptor */
};
3648be545baSRichard Henderson
3658be545baSRichard Henderson /*
3668be545baSRichard Henderson * IOMMUMemoryRegionClass:
3678be545baSRichard Henderson *
3688be545baSRichard Henderson * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
3698be545baSRichard Henderson * and provide an implementation of at least the @translate method here
3708be545baSRichard Henderson * to handle requests to the memory region. Other methods are optional.
3718be545baSRichard Henderson *
3728be545baSRichard Henderson * The IOMMU implementation must use the IOMMU notifier infrastructure
3738be545baSRichard Henderson * to report whenever mappings are changed, by calling
3748be545baSRichard Henderson * memory_region_notify_iommu() (or, if necessary, by calling
3758be545baSRichard Henderson * memory_region_notify_iommu_one() for each registered notifier).
3768be545baSRichard Henderson *
3778be545baSRichard Henderson * Conceptually an IOMMU provides a mapping from input address
3788be545baSRichard Henderson * to an output TLB entry. If the IOMMU is aware of memory transaction
3798be545baSRichard Henderson * attributes and the output TLB entry depends on the transaction
3808be545baSRichard Henderson * attributes, we represent this using IOMMU indexes. Each index
3818be545baSRichard Henderson * selects a particular translation table that the IOMMU has:
3828be545baSRichard Henderson *
3838be545baSRichard Henderson * @attrs_to_index returns the IOMMU index for a set of transaction attributes
3848be545baSRichard Henderson *
3858be545baSRichard Henderson * @translate takes an input address and an IOMMU index
3868be545baSRichard Henderson *
3878be545baSRichard Henderson * and the mapping returned can only depend on the input address and the
3888be545baSRichard Henderson * IOMMU index.
3898be545baSRichard Henderson *
3908be545baSRichard Henderson * Most IOMMUs don't care about the transaction attributes and support
3918be545baSRichard Henderson * only a single IOMMU index. A more complex IOMMU might have one index
3928be545baSRichard Henderson * for secure transactions and one for non-secure transactions.
3938be545baSRichard Henderson */
struct IOMMUMemoryRegionClass {
    /* private: */
    MemoryRegionClass parent_class;

    /* public: */
    /**
     * @translate:
     *
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @addr: address to be translated within the memory region
     *
     * @flag: requested access permission
     *
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);
    /**
     * @get_min_page_size:
     *
     * Returns minimum supported page size in bytes.
     *
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /**
     * @notify_flag_changed:
     *
     * Called when IOMMU Notifier flag changes (ie when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @old_flags: events which previously needed to be notified
     *
     * @new_flags: events which now need to be notified
     *
     * @errp: pointer to the error object to fill in on failure
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL if the new flag bitmap is not supported by the
     * IOMMU memory region. In case of failure, the error object
     * must be created (via @errp)
     */
    int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                               IOMMUNotifierFlag old_flags,
                               IOMMUNotifierFlag new_flags,
                               Error **errp);
    /**
     * @replay:
     *
     * Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /**
     * @get_attr:
     *
     * Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @attr: attribute being queried
     *
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /**
     * @attrs_to_index:
     *
     * Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /**
     * @num_indexes:
     *
     * Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);
};
5378be545baSRichard Henderson
typedef struct RamDiscardListener RamDiscardListener;

/* Callback invoked before previously discarded memory gets populated. */
typedef int (*NotifyRamPopulate)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);
/* Callback invoked after previously populated memory got discarded. */
typedef void (*NotifyRamDiscard)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);

struct RamDiscardListener {
    /*
     * @notify_populate:
     *
     * Notification that previously discarded memory is about to get populated.
     * Listeners are able to object. If any listener objects, already
     * successfully notified listeners are notified about a discard again.
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get populated. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     *
     * Returns 0 on success. If the notification is rejected by the listener,
     * an error is returned.
     */
    NotifyRamPopulate notify_populate;

    /*
     * @notify_discard:
     *
     * Notification that previously populated memory was discarded successfully
     * and listeners should drop all references to such memory and prevent
     * new population (e.g., unmap).
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get discarded. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     */
    NotifyRamDiscard notify_discard;

    /*
     * @double_discard_supported:
     *
     * The listener supports getting @notify_discard notifications that span
     * already discarded parts.
     */
    bool double_discard_supported;

    /* The #MemoryRegionSection this listener was registered for. */
    MemoryRegionSection *section;
    /* Linkage for the RamDiscardManager's listener list. */
    QLIST_ENTRY(RamDiscardListener) next;
};
5878be545baSRichard Henderson
/**
 * ram_discard_listener_init: Initialize a #RamDiscardListener.
 *
 * Fills in the listener callbacks; the @section field and list linkage are
 * set up later, when the listener gets registered with a #RamDiscardManager.
 *
 * @rdl: the #RamDiscardListener to initialize
 * @populate_fn: callback invoked before discarded memory gets populated
 * @discard_fn: callback invoked after populated memory got discarded
 * @double_discard_supported: whether the listener supports @notify_discard
 *     notifications that span already discarded parts
 */
static inline void ram_discard_listener_init(RamDiscardListener *rdl,
                                             NotifyRamPopulate populate_fn,
                                             NotifyRamDiscard discard_fn,
                                             bool double_discard_supported)
{
    rdl->notify_populate = populate_fn;
    rdl->notify_discard = discard_fn;
    rdl->double_discard_supported = double_discard_supported;
}
5978be545baSRichard Henderson
/**
 * typedef ReplayRamDiscardState:
 *
 * The callback handler for #RamDiscardManagerClass.replay_populated/
 * #RamDiscardManagerClass.replay_discarded to invoke on populated/discarded
 * parts.
 *
 * @section: the #MemoryRegionSection of the populated/discarded part
 * @opaque: pointer to forward to the callback
 *
 * Returns 0 on success, or a negative error on failure.
 */
typedef int (*ReplayRamDiscardState)(MemoryRegionSection *section,
                                     void *opaque);
6128be545baSRichard Henderson
/**
 * RamDiscardManagerClass:
 *
 * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
 * regions are currently populated to be used/accessed by the VM, notifying
 * after parts were discarded (freeing up memory) and before parts will be
 * populated (consuming memory), to be used/accessed by the VM.
 *
 * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
 * #MemoryRegion isn't mapped into an address space yet (either directly
 * or via an alias); it cannot change while the #MemoryRegion is
 * mapped into an address space.
 *
 * The #RamDiscardManager is intended to be used by technologies that are
 * incompatible with discarding of RAM (e.g., VFIO, which may pin all
 * memory inside a #MemoryRegion), and require proper coordination to only
 * map the currently populated parts, to hinder parts that are expected to
 * remain discarded from silently getting populated and consuming memory.
 * Technologies that support discarding of RAM don't have to bother and can
 * simply map the whole #MemoryRegion.
 *
 * An example #RamDiscardManager is virtio-mem, which logically (un)plugs
 * memory within an assigned RAM #MemoryRegion, coordinated with the VM.
 * Logically unplugging memory consists of discarding RAM. The VM agreed to not
 * access unplugged (discarded) memory - especially via DMA. virtio-mem will
 * properly coordinate with listeners before memory is plugged (populated),
 * and after memory is unplugged (discarded).
 *
 * Listeners are called in multiples of the minimum granularity (unless it
 * would exceed the registered range) and changes are aligned to the minimum
 * granularity within the #MemoryRegion. Listeners have to prepare for memory
 * becoming discarded in a different granularity than it was populated and the
 * other way around.
 */
struct RamDiscardManagerClass {
    /* private */
    InterfaceClass parent_class;

    /* public */

    /**
     * @get_min_granularity:
     *
     * Get the minimum granularity in which listeners will get notified
     * about changes within the #MemoryRegion via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @mr: the #MemoryRegion
     *
     * Returns the minimum granularity.
     */
    uint64_t (*get_min_granularity)(const RamDiscardManager *rdm,
                                    const MemoryRegion *mr);

    /**
     * @is_populated:
     *
     * Check whether the given #MemoryRegionSection is completely populated
     * (i.e., no parts are currently discarded) via the #RamDiscardManager.
     * There are no alignment requirements.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     *
     * Returns whether the given range is completely populated.
     */
    bool (*is_populated)(const RamDiscardManager *rdm,
                         const MemoryRegionSection *section);

    /**
     * @replay_populated:
     *
     * Call the #ReplayRamDiscardState callback for all populated parts within
     * the #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any call fails, no further calls are made.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamDiscardState callback
     * @opaque: pointer to forward to the callback
     *
     * Returns 0 on success, or a negative error if any notification failed.
     */
    int (*replay_populated)(const RamDiscardManager *rdm,
                            MemoryRegionSection *section,
                            ReplayRamDiscardState replay_fn, void *opaque);

    /**
     * @replay_discarded:
     *
     * Call the #ReplayRamDiscardState callback for all discarded parts within
     * the #MemoryRegionSection via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamDiscardState callback
     * @opaque: pointer to forward to the callback
     *
     * Returns 0 on success, or a negative error if any notification failed.
     */
    int (*replay_discarded)(const RamDiscardManager *rdm,
                            MemoryRegionSection *section,
                            ReplayRamDiscardState replay_fn, void *opaque);

    /**
     * @register_listener:
     *
     * Register a #RamDiscardListener for the given #MemoryRegionSection and
     * immediately notify the #RamDiscardListener about all populated parts
     * within the #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any notification fails, no further notifications are triggered
     * and an error is logged.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     * @section: the #MemoryRegionSection
     */
    void (*register_listener)(RamDiscardManager *rdm,
                              RamDiscardListener *rdl,
                              MemoryRegionSection *section);

    /**
     * @unregister_listener:
     *
     * Unregister a previously registered #RamDiscardListener via the
     * #RamDiscardManager after notifying the #RamDiscardListener about all
     * populated parts becoming unpopulated within the registered
     * #MemoryRegionSection.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     */
    void (*unregister_listener)(RamDiscardManager *rdm,
                                RamDiscardListener *rdl);
};
7508be545baSRichard Henderson
/**
 * ram_discard_manager_get_min_granularity:
 *
 * A wrapper to call the #RamDiscardManagerClass.get_min_granularity callback
 * of the #RamDiscardManager.
 *
 * @rdm: the #RamDiscardManager
 * @mr: the #MemoryRegion
 *
 * Returns the minimum granularity.
 */
uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
                                                 const MemoryRegion *mr);

/**
 * ram_discard_manager_is_populated:
 *
 * A wrapper to call the #RamDiscardManagerClass.is_populated callback
 * of the #RamDiscardManager.
 *
 * @rdm: the #RamDiscardManager
 * @section: the #MemoryRegionSection
 *
 * Returns whether the given range is completely populated.
 */
bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
                                      const MemoryRegionSection *section);

/**
 * ram_discard_manager_replay_populated:
 *
 * A wrapper to call the #RamDiscardManagerClass.replay_populated callback
 * of the #RamDiscardManager.
 *
 * @rdm: the #RamDiscardManager
 * @section: the #MemoryRegionSection
 * @replay_fn: the #ReplayRamDiscardState callback
 * @opaque: pointer to forward to the callback
 *
 * Returns 0 on success, or a negative error if any notification failed.
 */
int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamDiscardState replay_fn,
                                         void *opaque);

/**
 * ram_discard_manager_replay_discarded:
 *
 * A wrapper to call the #RamDiscardManagerClass.replay_discarded callback
 * of the #RamDiscardManager.
 *
 * @rdm: the #RamDiscardManager
 * @section: the #MemoryRegionSection
 * @replay_fn: the #ReplayRamDiscardState callback
 * @opaque: pointer to forward to the callback
 *
 * Returns 0 on success, or a negative error if any notification failed.
 */
int ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamDiscardState replay_fn,
                                         void *opaque);

/**
 * ram_discard_manager_register_listener:
 *
 * A wrapper to call the #RamDiscardManagerClass.register_listener callback
 * of the #RamDiscardManager.
 *
 * @rdm: the #RamDiscardManager
 * @rdl: the #RamDiscardListener
 * @section: the #MemoryRegionSection
 */
void ram_discard_manager_register_listener(RamDiscardManager *rdm,
                                           RamDiscardListener *rdl,
                                           MemoryRegionSection *section);

/**
 * ram_discard_manager_unregister_listener:
 *
 * A wrapper to call the #RamDiscardManagerClass.unregister_listener callback
 * of the #RamDiscardManager.
 *
 * @rdm: the #RamDiscardManager
 * @rdl: the #RamDiscardListener
 */
void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                             RamDiscardListener *rdl);

/**
 * memory_translate_iotlb: Extract addresses from a TLB entry.
 * Called with rcu_read_lock held.
 *
 * @iotlb: pointer to an #IOMMUTLBEntry
 * @xlat_p: return the offset of the entry from the start of the returned
 *          MemoryRegion.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: On success, return the MemoryRegion containing the @iotlb translated
 *         addr. The MemoryRegion must not be accessed after rcu_read_unlock.
 *         On failure, return NULL, setting @errp with error.
 */
MemoryRegion *memory_translate_iotlb(IOMMUTLBEntry *iotlb, hwaddr *xlat_p,
                                     Error **errp);
8158be545baSRichard Henderson
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

/** MemoryRegion:
 *
 * A struct representing a memory region.
 *
 * All fields below the "private:" marker are internal to the memory API;
 * they are not meant to be touched directly by users of the struct.
 */
struct MemoryRegion {
    Object parent_obj;

    /* private: */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool nonvolatile;
    bool rom_device;
    bool flush_coalesced_mmio;
    bool unmergeable;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;
    /* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */
    DeviceState *dev;

    /* Callbacks and opaque handle for MMIO-style accesses to this region */
    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    int mapped_via_alias; /* Mapped via an alias, container might be NULL */
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    uint8_t vga_logging_count;
    /* If non-NULL, this region aliases @alias starting at @alias_offset */
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    RamDiscardManager *rdm; /* Only for RAM */

    /* For devices designed to perform re-entrant IO into their own IO MRs */
    bool disable_reentrancy_guard;
};
8708be545baSRichard Henderson
/* An IOMMU memory region: a MemoryRegion that translates addresses. */
struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    /* Registered IOMMU notifiers; iterate with IOMMU_NOTIFIER_FOREACH() */
    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

/* Iterate over all IOMMUNotifiers registered on IOMMU memory region @mr */
#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

/* Well-known values for a MemoryListener's priority */
#define MEMORY_LISTENER_PRIORITY_MIN 0
#define MEMORY_LISTENER_PRIORITY_ACCEL 10
#define MEMORY_LISTENER_PRIORITY_DEV_BACKEND 10
8848be545baSRichard Henderson
8858be545baSRichard Henderson /**
8868be545baSRichard Henderson * struct MemoryListener: callbacks structure for updates to the physical memory map
8878be545baSRichard Henderson *
8888be545baSRichard Henderson * Allows a component to adjust to changes in the guest-visible memory map.
8898be545baSRichard Henderson * Use with memory_listener_register() and memory_listener_unregister().
8908be545baSRichard Henderson */
8918be545baSRichard Henderson struct MemoryListener {
8928be545baSRichard Henderson /**
8938be545baSRichard Henderson * @begin:
8948be545baSRichard Henderson *
8958be545baSRichard Henderson * Called at the beginning of an address space update transaction.
8968be545baSRichard Henderson * Followed by calls to #MemoryListener.region_add(),
8978be545baSRichard Henderson * #MemoryListener.region_del(), #MemoryListener.region_nop(),
8988be545baSRichard Henderson * #MemoryListener.log_start() and #MemoryListener.log_stop() in
8998be545baSRichard Henderson * increasing address order.
9008be545baSRichard Henderson *
9018be545baSRichard Henderson * @listener: The #MemoryListener.
9028be545baSRichard Henderson */
9038be545baSRichard Henderson void (*begin)(MemoryListener *listener);
9048be545baSRichard Henderson
9058be545baSRichard Henderson /**
9068be545baSRichard Henderson * @commit:
9078be545baSRichard Henderson *
9088be545baSRichard Henderson * Called at the end of an address space update transaction,
9098be545baSRichard Henderson * after the last call to #MemoryListener.region_add(),
9108be545baSRichard Henderson * #MemoryListener.region_del() or #MemoryListener.region_nop(),
9118be545baSRichard Henderson * #MemoryListener.log_start() and #MemoryListener.log_stop().
9128be545baSRichard Henderson *
9138be545baSRichard Henderson * @listener: The #MemoryListener.
9148be545baSRichard Henderson */
9158be545baSRichard Henderson void (*commit)(MemoryListener *listener);
9168be545baSRichard Henderson
9178be545baSRichard Henderson /**
9188be545baSRichard Henderson * @region_add:
9198be545baSRichard Henderson *
9208be545baSRichard Henderson * Called during an address space update transaction,
9218be545baSRichard Henderson * for a section of the address space that is new in this address space
9228be545baSRichard Henderson * space since the last transaction.
9238be545baSRichard Henderson *
9248be545baSRichard Henderson * @listener: The #MemoryListener.
9258be545baSRichard Henderson * @section: The new #MemoryRegionSection.
9268be545baSRichard Henderson */
9278be545baSRichard Henderson void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
9288be545baSRichard Henderson
9298be545baSRichard Henderson /**
9308be545baSRichard Henderson * @region_del:
9318be545baSRichard Henderson *
9328be545baSRichard Henderson * Called during an address space update transaction,
9338be545baSRichard Henderson * for a section of the address space that has disappeared in the address
9348be545baSRichard Henderson * space since the last transaction.
9358be545baSRichard Henderson *
9368be545baSRichard Henderson * @listener: The #MemoryListener.
9378be545baSRichard Henderson * @section: The old #MemoryRegionSection.
9388be545baSRichard Henderson */
9398be545baSRichard Henderson void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
9408be545baSRichard Henderson
9418be545baSRichard Henderson /**
9428be545baSRichard Henderson * @region_nop:
9438be545baSRichard Henderson *
9448be545baSRichard Henderson * Called during an address space update transaction,
9458be545baSRichard Henderson * for a section of the address space that is in the same place in the address
9468be545baSRichard Henderson * space as in the last transaction.
9478be545baSRichard Henderson *
9488be545baSRichard Henderson * @listener: The #MemoryListener.
9498be545baSRichard Henderson * @section: The #MemoryRegionSection.
9508be545baSRichard Henderson */
9518be545baSRichard Henderson void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
9528be545baSRichard Henderson
9538be545baSRichard Henderson /**
9548be545baSRichard Henderson * @log_start:
9558be545baSRichard Henderson *
9568be545baSRichard Henderson * Called during an address space update transaction, after
9578be545baSRichard Henderson * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
9588be545baSRichard Henderson * #MemoryListener.region_nop(), if dirty memory logging clients have
9598be545baSRichard Henderson * become active since the last transaction.
9608be545baSRichard Henderson *
9618be545baSRichard Henderson * @listener: The #MemoryListener.
9628be545baSRichard Henderson * @section: The #MemoryRegionSection.
9638be545baSRichard Henderson * @old: A bitmap of dirty memory logging clients that were active in
9648be545baSRichard Henderson * the previous transaction.
9658be545baSRichard Henderson * @new: A bitmap of dirty memory logging clients that are active in
9668be545baSRichard Henderson * the current transaction.
9678be545baSRichard Henderson */
9688be545baSRichard Henderson void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
9698be545baSRichard Henderson int old_val, int new_val);
9708be545baSRichard Henderson
9718be545baSRichard Henderson /**
9728be545baSRichard Henderson * @log_stop:
9738be545baSRichard Henderson *
9748be545baSRichard Henderson * Called during an address space update transaction, after
9758be545baSRichard Henderson * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
9768be545baSRichard Henderson * #MemoryListener.region_nop() and possibly after
9778be545baSRichard Henderson * #MemoryListener.log_start(), if dirty memory logging clients have
9788be545baSRichard Henderson * become inactive since the last transaction.
9798be545baSRichard Henderson *
9808be545baSRichard Henderson * @listener: The #MemoryListener.
9818be545baSRichard Henderson * @section: The #MemoryRegionSection.
9828be545baSRichard Henderson * @old: A bitmap of dirty memory logging clients that were active in
9838be545baSRichard Henderson * the previous transaction.
9848be545baSRichard Henderson * @new: A bitmap of dirty memory logging clients that are active in
9858be545baSRichard Henderson * the current transaction.
9868be545baSRichard Henderson */
9878be545baSRichard Henderson void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
9888be545baSRichard Henderson int old_val, int new_val);
9898be545baSRichard Henderson
9908be545baSRichard Henderson /**
9918be545baSRichard Henderson * @log_sync:
9928be545baSRichard Henderson *
9938be545baSRichard Henderson * Called by memory_region_snapshot_and_clear_dirty() and
9948be545baSRichard Henderson * memory_global_dirty_log_sync(), before accessing QEMU's "official"
9958be545baSRichard Henderson * copy of the dirty memory bitmap for a #MemoryRegionSection.
9968be545baSRichard Henderson *
9978be545baSRichard Henderson * @listener: The #MemoryListener.
9988be545baSRichard Henderson * @section: The #MemoryRegionSection.
9998be545baSRichard Henderson */
10008be545baSRichard Henderson void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
10018be545baSRichard Henderson
10028be545baSRichard Henderson /**
10038be545baSRichard Henderson * @log_sync_global:
10048be545baSRichard Henderson *
10058be545baSRichard Henderson * This is the global version of @log_sync when the listener does
10068be545baSRichard Henderson * not have a way to synchronize the log with finer granularity.
10078be545baSRichard Henderson * When the listener registers with @log_sync_global defined, then
10088be545baSRichard Henderson * its @log_sync must be NULL. Vice versa.
10098be545baSRichard Henderson *
10108be545baSRichard Henderson * @listener: The #MemoryListener.
10118be545baSRichard Henderson * @last_stage: The last stage to synchronize the log during migration.
10128be545baSRichard Henderson * The caller should guarantee that the synchronization with true for
10138be545baSRichard Henderson * @last_stage is triggered for once after all VCPUs have been stopped.
10148be545baSRichard Henderson */
10158be545baSRichard Henderson void (*log_sync_global)(MemoryListener *listener, bool last_stage);
10168be545baSRichard Henderson
10178be545baSRichard Henderson /**
10188be545baSRichard Henderson * @log_clear:
10198be545baSRichard Henderson *
10208be545baSRichard Henderson * Called before reading the dirty memory bitmap for a
10218be545baSRichard Henderson * #MemoryRegionSection.
10228be545baSRichard Henderson *
10238be545baSRichard Henderson * @listener: The #MemoryListener.
10248be545baSRichard Henderson * @section: The #MemoryRegionSection.
10258be545baSRichard Henderson */
10268be545baSRichard Henderson void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);
10278be545baSRichard Henderson
10288be545baSRichard Henderson /**
10298be545baSRichard Henderson * @log_global_start:
10308be545baSRichard Henderson *
10318be545baSRichard Henderson * Called by memory_global_dirty_log_start(), which
10328be545baSRichard Henderson * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
10338be545baSRichard Henderson * the address space. #MemoryListener.log_global_start() is also
10348be545baSRichard Henderson * called when a #MemoryListener is added, if global dirty logging is
10358be545baSRichard Henderson * active at that time.
10368be545baSRichard Henderson *
10378be545baSRichard Henderson * @listener: The #MemoryListener.
10388be545baSRichard Henderson * @errp: pointer to Error*, to store an error if it happens.
10398be545baSRichard Henderson *
10408be545baSRichard Henderson * Return: true on success, else false setting @errp with error.
10418be545baSRichard Henderson */
10428be545baSRichard Henderson bool (*log_global_start)(MemoryListener *listener, Error **errp);
10438be545baSRichard Henderson
10448be545baSRichard Henderson /**
10458be545baSRichard Henderson * @log_global_stop:
10468be545baSRichard Henderson *
10478be545baSRichard Henderson * Called by memory_global_dirty_log_stop(), which
10488be545baSRichard Henderson * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
10498be545baSRichard Henderson * the address space.
10508be545baSRichard Henderson *
10518be545baSRichard Henderson * @listener: The #MemoryListener.
10528be545baSRichard Henderson */
10538be545baSRichard Henderson void (*log_global_stop)(MemoryListener *listener);
10548be545baSRichard Henderson
10558be545baSRichard Henderson /**
10568be545baSRichard Henderson * @log_global_after_sync:
10578be545baSRichard Henderson *
10588be545baSRichard Henderson * Called after reading the dirty memory bitmap
10598be545baSRichard Henderson * for any #MemoryRegionSection.
10608be545baSRichard Henderson *
10618be545baSRichard Henderson * @listener: The #MemoryListener.
10628be545baSRichard Henderson */
10638be545baSRichard Henderson void (*log_global_after_sync)(MemoryListener *listener);
10648be545baSRichard Henderson
10658be545baSRichard Henderson /**
10668be545baSRichard Henderson * @eventfd_add:
10678be545baSRichard Henderson *
10688be545baSRichard Henderson * Called during an address space update transaction,
10698be545baSRichard Henderson * for a section of the address space that has had a new ioeventfd
10708be545baSRichard Henderson * registration since the last transaction.
10718be545baSRichard Henderson *
10728be545baSRichard Henderson * @listener: The #MemoryListener.
10738be545baSRichard Henderson * @section: The new #MemoryRegionSection.
10748be545baSRichard Henderson * @match_data: The @match_data parameter for the new ioeventfd.
10758be545baSRichard Henderson * @data: The @data parameter for the new ioeventfd.
10768be545baSRichard Henderson * @e: The #EventNotifier parameter for the new ioeventfd.
10778be545baSRichard Henderson */
10788be545baSRichard Henderson void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
10798be545baSRichard Henderson bool match_data, uint64_t data, EventNotifier *e);
10808be545baSRichard Henderson
10818be545baSRichard Henderson /**
10828be545baSRichard Henderson * @eventfd_del:
10838be545baSRichard Henderson *
10848be545baSRichard Henderson * Called during an address space update transaction,
10858be545baSRichard Henderson * for a section of the address space that has dropped an ioeventfd
10868be545baSRichard Henderson * registration since the last transaction.
10878be545baSRichard Henderson *
10888be545baSRichard Henderson * @listener: The #MemoryListener.
10898be545baSRichard Henderson * @section: The new #MemoryRegionSection.
10908be545baSRichard Henderson * @match_data: The @match_data parameter for the dropped ioeventfd.
10918be545baSRichard Henderson * @data: The @data parameter for the dropped ioeventfd.
10928be545baSRichard Henderson * @e: The #EventNotifier parameter for the dropped ioeventfd.
10938be545baSRichard Henderson */
10948be545baSRichard Henderson void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
10958be545baSRichard Henderson bool match_data, uint64_t data, EventNotifier *e);
10968be545baSRichard Henderson
10978be545baSRichard Henderson /**
10988be545baSRichard Henderson * @coalesced_io_add:
10998be545baSRichard Henderson *
11008be545baSRichard Henderson * Called during an address space update transaction,
11018be545baSRichard Henderson * for a section of the address space that has had a new coalesced
11028be545baSRichard Henderson * MMIO range registration since the last transaction.
11038be545baSRichard Henderson *
11048be545baSRichard Henderson * @listener: The #MemoryListener.
11058be545baSRichard Henderson * @section: The new #MemoryRegionSection.
11068be545baSRichard Henderson * @addr: The starting address for the coalesced MMIO range.
11078be545baSRichard Henderson * @len: The length of the coalesced MMIO range.
11088be545baSRichard Henderson */
11098be545baSRichard Henderson void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
11108be545baSRichard Henderson hwaddr addr, hwaddr len);
11118be545baSRichard Henderson
11128be545baSRichard Henderson /**
11138be545baSRichard Henderson * @coalesced_io_del:
11148be545baSRichard Henderson *
11158be545baSRichard Henderson * Called during an address space update transaction,
11168be545baSRichard Henderson * for a section of the address space that has dropped a coalesced
11178be545baSRichard Henderson * MMIO range since the last transaction.
11188be545baSRichard Henderson *
11198be545baSRichard Henderson * @listener: The #MemoryListener.
11208be545baSRichard Henderson * @section: The new #MemoryRegionSection.
11218be545baSRichard Henderson * @addr: The starting address for the coalesced MMIO range.
11228be545baSRichard Henderson * @len: The length of the coalesced MMIO range.
11238be545baSRichard Henderson */
11248be545baSRichard Henderson void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
11258be545baSRichard Henderson hwaddr addr, hwaddr len);
11268be545baSRichard Henderson /**
11278be545baSRichard Henderson * @priority:
11288be545baSRichard Henderson *
11298be545baSRichard Henderson * Govern the order in which memory listeners are invoked. Lower priorities
11308be545baSRichard Henderson * are invoked earlier for "add" or "start" callbacks, and later for "delete"
11318be545baSRichard Henderson * or "stop" callbacks.
11328be545baSRichard Henderson */
11338be545baSRichard Henderson unsigned priority;
11348be545baSRichard Henderson
11358be545baSRichard Henderson /**
11368be545baSRichard Henderson * @name:
11378be545baSRichard Henderson *
11388be545baSRichard Henderson * Name of the listener. It can be used in contexts where we'd like to
11398be545baSRichard Henderson * identify one memory listener with the rest.
11408be545baSRichard Henderson */
11418be545baSRichard Henderson const char *name;
11428be545baSRichard Henderson
11438be545baSRichard Henderson /* private: */
11448be545baSRichard Henderson AddressSpace *address_space;
11458be545baSRichard Henderson QTAILQ_ENTRY(MemoryListener) link;
11468be545baSRichard Henderson QTAILQ_ENTRY(MemoryListener) link_as;
11478be545baSRichard Henderson };
11488be545baSRichard Henderson
/*
 * A client waiting for bounce-buffer space in an #AddressSpace to free up.
 * Entries live on the address space's map_client_list; @bh is the bottom
 * half scheduled to retry the mapping once buffer space becomes available.
 */
typedef struct AddressSpaceMapClient {
    QEMUBH *bh;
    QLIST_ENTRY(AddressSpaceMapClient) link;
} AddressSpaceMapClient;

/* Default per-AddressSpace cap on total DMA bounce-buffer allocations */
#define DEFAULT_MAX_BOUNCE_BUFFER_SIZE (4096)
11558be545baSRichard Henderson
/**
 * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* private: */
    struct rcu_head rcu;    /* defers reclamation until an RCU grace period */
    char *name;             /* human-readable name, for debugging */
    MemoryRegion *root;     /* root of the region hierarchy this AS views */

    /* Accessed via RCU. */
    struct FlatView *current_map;   /* flattened view of the current topology */

    /* ioeventfd registrations for this address space */
    int ioeventfd_nb;
    int ioeventfd_notifiers;
    struct MemoryRegionIoeventfd *ioeventfds;
    /* Listeners attached to this address space (see also the global list) */
    QTAILQ_HEAD(, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;

    /*
     * Maximum DMA bounce buffer size used for indirect memory map requests.
     * This limits the total size of bounce buffer allocations made for
     * DMA requests to indirect memory regions within this AddressSpace. DMA
     * requests that exceed the limit (e.g. due to overly large requested size
     * or concurrent DMA requests having claimed too much buffer space) will be
     * rejected and left to the caller to handle.
     */
    size_t max_bounce_buffer_size;
    /* Total size of bounce buffers currently allocated, atomically accessed */
    size_t bounce_buffer_size;
    /* List of callbacks to invoke when buffers free up */
    QemuMutex map_client_list_lock;
    QLIST_HEAD(, AddressSpaceMapClient) map_client_list;
};
11898be545baSRichard Henderson
typedef struct AddressSpaceDispatch AddressSpaceDispatch;
typedef struct FlatRange FlatRange;

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;        /* defers reclamation until an RCU grace period */
    unsigned ref;               /* reference count */
    FlatRange *ranges;          /* array of @nr non-overlapping ranges */
    unsigned nr;                /* number of valid entries in @ranges */
    unsigned nr_allocated;      /* allocated capacity of the @ranges array */
    struct AddressSpaceDispatch *dispatch;  /* lookup structure for this view */
    MemoryRegion *root;         /* root region this view was flattened from */
};
12058be545baSRichard Henderson
address_space_to_flatview(AddressSpace * as)12068be545baSRichard Henderson static inline FlatView *address_space_to_flatview(AddressSpace *as)
12078be545baSRichard Henderson {
12088be545baSRichard Henderson return qatomic_rcu_read(&as->current_map);
12098be545baSRichard Henderson }
12108be545baSRichard Henderson
/**
 * typedef flatview_cb: callback for flatview_for_each_range()
 *
 * @start: start address of the range within the FlatView
 * @len: length of the range in bytes
 * @mr: MemoryRegion covering this range
 * @offset_in_region: offset of the first byte of the range within @mr
 * @opaque: data pointer passed to flatview_for_each_range()
 *
 * Returns: true to stop the iteration, false to keep going.
 */
typedef bool (*flatview_cb)(Int128 start,
                            Int128 len,
                            const MemoryRegion *mr,
                            hwaddr offset_in_region,
                            void *opaque);

/**
 * flatview_for_each_range: Iterate through a FlatView
 * @fv: the FlatView to iterate through
 * @cb: function to call for each range
 * @opaque: opaque data pointer to pass to @cb
 *
 * A FlatView is made up of a list of non-overlapping ranges, each of
 * which is a slice of a MemoryRegion. This function iterates through
 * each range in @fv, calling @cb. The callback function can terminate
 * iteration early by returning 'true'.
 */
void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);
12408be545baSRichard Henderson
MemoryRegionSection_eq(MemoryRegionSection * a,MemoryRegionSection * b)12418be545baSRichard Henderson static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
12428be545baSRichard Henderson MemoryRegionSection *b)
12438be545baSRichard Henderson {
12448be545baSRichard Henderson return a->mr == b->mr &&
12458be545baSRichard Henderson a->fv == b->fv &&
12468be545baSRichard Henderson a->offset_within_region == b->offset_within_region &&
12478be545baSRichard Henderson a->offset_within_address_space == b->offset_within_address_space &&
12488be545baSRichard Henderson int128_eq(a->size, b->size) &&
12498be545baSRichard Henderson a->readonly == b->readonly &&
12508be545baSRichard Henderson a->nonvolatile == b->nonvolatile;
12518be545baSRichard Henderson }
12528be545baSRichard Henderson
/**
 * memory_region_section_new_copy: Copy a memory region section
 *
 * Allocate memory for a new copy, copy the memory region section, and
 * properly take a reference on all relevant members.
 *
 * @s: the #MemoryRegionSection to copy
 */
MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s);

/**
 * memory_region_section_free_copy: Free a copied memory region section
 *
 * Free a copy of a memory section created via memory_region_section_new_copy(),
 * properly dropping references on all relevant members.
 *
 * @s: the #MemoryRegionSection to free
 */
void memory_region_section_free_copy(MemoryRegionSection *s);
12728be545baSRichard Henderson
12738be545baSRichard Henderson /**
1274f47a672aSChenyi Qiang * memory_region_section_intersect_range: Adjust the memory section to cover
1275f47a672aSChenyi Qiang * the intersection with the given range.
1276f47a672aSChenyi Qiang *
1277f47a672aSChenyi Qiang * @s: the #MemoryRegionSection to be adjusted
1278f47a672aSChenyi Qiang * @offset: the offset of the given range in the memory region
1279f47a672aSChenyi Qiang * @size: the size of the given range
1280f47a672aSChenyi Qiang *
1281f47a672aSChenyi Qiang * Returns false if the intersection is empty, otherwise returns true.
1282f47a672aSChenyi Qiang */
memory_region_section_intersect_range(MemoryRegionSection * s,uint64_t offset,uint64_t size)1283f47a672aSChenyi Qiang static inline bool memory_region_section_intersect_range(MemoryRegionSection *s,
1284f47a672aSChenyi Qiang uint64_t offset,
1285f47a672aSChenyi Qiang uint64_t size)
1286f47a672aSChenyi Qiang {
1287f47a672aSChenyi Qiang uint64_t start = MAX(s->offset_within_region, offset);
1288f47a672aSChenyi Qiang Int128 end = int128_min(int128_add(int128_make64(s->offset_within_region),
1289f47a672aSChenyi Qiang s->size),
1290f47a672aSChenyi Qiang int128_add(int128_make64(offset),
1291f47a672aSChenyi Qiang int128_make64(size)));
1292f47a672aSChenyi Qiang
1293f47a672aSChenyi Qiang if (int128_le(end, int128_make64(start))) {
1294f47a672aSChenyi Qiang return false;
1295f47a672aSChenyi Qiang }
1296f47a672aSChenyi Qiang
1297f47a672aSChenyi Qiang s->offset_within_address_space += start - s->offset_within_region;
1298f47a672aSChenyi Qiang s->offset_within_region = start;
1299f47a672aSChenyi Qiang s->size = int128_sub(end, int128_make64(start));
1300f47a672aSChenyi Qiang return true;
1301f47a672aSChenyi Qiang }
1302f47a672aSChenyi Qiang
1303f47a672aSChenyi Qiang /**
13048be545baSRichard Henderson * memory_region_init: Initialize a memory region
13058be545baSRichard Henderson *
13068be545baSRichard Henderson * The region typically acts as a container for other memory regions. Use
13078be545baSRichard Henderson * memory_region_add_subregion() to add subregions.
13088be545baSRichard Henderson *
13098be545baSRichard Henderson * @mr: the #MemoryRegion to be initialized
13108be545baSRichard Henderson * @owner: the object that tracks the region's reference count
13118be545baSRichard Henderson * @name: used for debugging; not visible to the user or ABI
13128be545baSRichard Henderson * @size: size of the region; any subregions beyond this size will be clipped
13138be545baSRichard Henderson */
13148be545baSRichard Henderson void memory_region_init(MemoryRegion *mr,
13158be545baSRichard Henderson Object *owner,
13168be545baSRichard Henderson const char *name,
13178be545baSRichard Henderson uint64_t size);
13188be545baSRichard Henderson
13198be545baSRichard Henderson /**
13208be545baSRichard Henderson * memory_region_ref: Add 1 to a memory region's reference count
13218be545baSRichard Henderson *
13228be545baSRichard Henderson * Whenever memory regions are accessed outside the BQL, they need to be
13238be545baSRichard Henderson * preserved against hot-unplug. MemoryRegions actually do not have their
13248be545baSRichard Henderson * own reference count; they piggyback on a QOM object, their "owner".
13258be545baSRichard Henderson * This function adds a reference to the owner.
13268be545baSRichard Henderson *
13278be545baSRichard Henderson * All MemoryRegions must have an owner if they can disappear, even if the
13288be545baSRichard Henderson * device they belong to operates exclusively under the BQL. This is because
13298be545baSRichard Henderson * the region could be returned at any time by memory_region_find, and this
13308be545baSRichard Henderson * is usually under guest control.
13318be545baSRichard Henderson *
13328be545baSRichard Henderson * @mr: the #MemoryRegion
13338be545baSRichard Henderson */
13348be545baSRichard Henderson void memory_region_ref(MemoryRegion *mr);
13358be545baSRichard Henderson
13368be545baSRichard Henderson /**
13378be545baSRichard Henderson * memory_region_unref: Remove 1 to a memory region's reference count
13388be545baSRichard Henderson *
13398be545baSRichard Henderson * Whenever memory regions are accessed outside the BQL, they need to be
13408be545baSRichard Henderson * preserved against hot-unplug. MemoryRegions actually do not have their
13418be545baSRichard Henderson * own reference count; they piggyback on a QOM object, their "owner".
13428be545baSRichard Henderson * This function removes a reference to the owner and possibly destroys it.
13438be545baSRichard Henderson *
13448be545baSRichard Henderson * @mr: the #MemoryRegion
13458be545baSRichard Henderson */
13468be545baSRichard Henderson void memory_region_unref(MemoryRegion *mr);
13478be545baSRichard Henderson
/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);
13688be545baSRichard Henderson
/**
 * memory_region_init_ram_nomigrate: Initialize RAM memory region.  Accesses
 *                                   into the region will modify memory
 *                                   directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_ram_flags_nomigrate: Initialize RAM memory region.
 *                                         Accesses into the region will
 *                                         modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE,
 *             RAM_GUEST_MEMFD.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
                                            Object *owner,
                                            const char *name,
                                            uint64_t size,
                                            uint32_t ram_flags,
                                            Error **errp);
14178be545baSRichard Henderson
/**
 * memory_region_init_resizeable_ram: Initialize memory region with resizable
 *                                    RAM.  Accesses into the region will
 *                                    modify memory directly.  Only an initial
 *                                    portion of this RAM is actually used.
 *                                    Changing the size while migrating
 *                                    can result in the migration being
 *                                    canceled.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
14508be545baSRichard Henderson #ifdef CONFIG_POSIX
14518be545baSRichard Henderson
/**
 * memory_region_init_ram_from_file: Initialize RAM memory region with a
 *                                   mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *             RAM_READONLY_FD, RAM_GUEST_MEMFD
 * @path: the path in which to allocate the RAM.
 * @offset: offset within the file referenced by path
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_from_file(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      ram_addr_t offset,
                                      Error **errp);
14848be545baSRichard Henderson
/**
 * memory_region_init_ram_from_fd: Initialize RAM memory region with a
 *                                 mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *             RAM_READONLY_FD, RAM_GUEST_MEMFD
 * @fd: the fd to mmap.
 * @offset: offset within the file referenced by fd
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    uint32_t ram_flags,
                                    int fd,
                                    ram_addr_t offset,
                                    Error **errp);
15138be545baSRichard Henderson #endif
15148be545baSRichard Henderson
/**
 * memory_region_init_ram_ptr: Initialize RAM memory region from a
 *                             user-provided pointer.  Accesses into the
 *                             region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);
15358be545baSRichard Henderson
/**
 * memory_region_init_ram_device_ptr: Initialize RAM device memory region from
 *                                    a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly.  However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided.  Replaces
 * skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);
15638be545baSRichard Henderson
15648be545baSRichard Henderson /**
15658be545baSRichard Henderson * memory_region_init_alias: Initialize a memory region that aliases all or a
15668be545baSRichard Henderson * part of another memory region.
15678be545baSRichard Henderson *
15688be545baSRichard Henderson * @mr: the #MemoryRegion to be initialized.
15698be545baSRichard Henderson * @owner: the object that tracks the region's reference count
15708be545baSRichard Henderson * @name: used for debugging; not visible to the user or ABI
15718be545baSRichard Henderson * @orig: the region to be referenced; @mr will be equivalent to
15728be545baSRichard Henderson * @orig between @offset and @offset + @size - 1.
15738be545baSRichard Henderson * @offset: start of the section in @orig to be referenced.
15748be545baSRichard Henderson * @size: size of the region.
15758be545baSRichard Henderson */
15768be545baSRichard Henderson void memory_region_init_alias(MemoryRegion *mr,
15778be545baSRichard Henderson Object *owner,
15788be545baSRichard Henderson const char *name,
15798be545baSRichard Henderson MemoryRegion *orig,
15808be545baSRichard Henderson hwaddr offset,
15818be545baSRichard Henderson uint64_t size);
15828be545baSRichard Henderson
15838be545baSRichard Henderson /**
15848be545baSRichard Henderson * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
15858be545baSRichard Henderson *
15868be545baSRichard Henderson * This has the same effect as calling memory_region_init_ram_nomigrate()
15878be545baSRichard Henderson * and then marking the resulting region read-only with
15888be545baSRichard Henderson * memory_region_set_readonly().
15898be545baSRichard Henderson *
15908be545baSRichard Henderson * Note that this function does not do anything to cause the data in the
15918be545baSRichard Henderson * RAM side of the memory region to be migrated; that is the responsibility
15928be545baSRichard Henderson * of the caller.
15938be545baSRichard Henderson *
15948be545baSRichard Henderson * @mr: the #MemoryRegion to be initialized.
15958be545baSRichard Henderson * @owner: the object that tracks the region's reference count
15968be545baSRichard Henderson * @name: Region name, becomes part of RAMBlock name used in migration stream
15978be545baSRichard Henderson * must be unique within any device
15988be545baSRichard Henderson * @size: size of the region.
15998be545baSRichard Henderson * @errp: pointer to Error*, to store an error if it happens.
16008be545baSRichard Henderson *
16018be545baSRichard Henderson * Return: true on success, else false setting @errp with error.
16028be545baSRichard Henderson */
16038be545baSRichard Henderson bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
16048be545baSRichard Henderson Object *owner,
16058be545baSRichard Henderson const char *name,
16068be545baSRichard Henderson uint64_t size,
16078be545baSRichard Henderson Error **errp);
16088be545baSRichard Henderson
16098be545baSRichard Henderson /**
16108be545baSRichard Henderson * memory_region_init_rom_device_nomigrate: Initialize a ROM memory region.
16118be545baSRichard Henderson * Writes are handled via callbacks.
16128be545baSRichard Henderson *
16138be545baSRichard Henderson * Note that this function does not do anything to cause the data in the
16148be545baSRichard Henderson * RAM side of the memory region to be migrated; that is the responsibility
16158be545baSRichard Henderson * of the caller.
16168be545baSRichard Henderson *
16178be545baSRichard Henderson * @mr: the #MemoryRegion to be initialized.
16188be545baSRichard Henderson * @owner: the object that tracks the region's reference count
16198be545baSRichard Henderson * @ops: callbacks for write access handling (must not be NULL).
16208be545baSRichard Henderson * @opaque: passed to the read and write callbacks of the @ops structure.
16218be545baSRichard Henderson * @name: Region name, becomes part of RAMBlock name used in migration stream
16228be545baSRichard Henderson * must be unique within any device
16238be545baSRichard Henderson * @size: size of the region.
16248be545baSRichard Henderson * @errp: pointer to Error*, to store an error if it happens.
16258be545baSRichard Henderson *
16268be545baSRichard Henderson * Return: true on success, else false setting @errp with error.
16278be545baSRichard Henderson */
16288be545baSRichard Henderson bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
16298be545baSRichard Henderson Object *owner,
16308be545baSRichard Henderson const MemoryRegionOps *ops,
16318be545baSRichard Henderson void *opaque,
16328be545baSRichard Henderson const char *name,
16338be545baSRichard Henderson uint64_t size,
16348be545baSRichard Henderson Error **errp);
16358be545baSRichard Henderson
16368be545baSRichard Henderson /**
16378be545baSRichard Henderson * memory_region_init_iommu: Initialize a memory region of a custom type
16388be545baSRichard Henderson * that translates addresses
16398be545baSRichard Henderson *
16408be545baSRichard Henderson * An IOMMU region translates addresses and forwards accesses to a target
16418be545baSRichard Henderson * memory region.
16428be545baSRichard Henderson *
16438be545baSRichard Henderson * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
16448be545baSRichard Henderson * @_iommu_mr should be a pointer to enough memory for an instance of
16458be545baSRichard Henderson * that subclass, @instance_size is the size of that subclass, and
16468be545baSRichard Henderson * @mrtypename is its name. This function will initialize @_iommu_mr as an
16478be545baSRichard Henderson * instance of the subclass, and its methods will then be called to handle
16488be545baSRichard Henderson * accesses to the memory region. See the documentation of
16498be545baSRichard Henderson * #IOMMUMemoryRegionClass for further details.
16508be545baSRichard Henderson *
16518be545baSRichard Henderson * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
16528be545baSRichard Henderson * @instance_size: the IOMMUMemoryRegion subclass instance size
16538be545baSRichard Henderson * @mrtypename: the type name of the #IOMMUMemoryRegion
16548be545baSRichard Henderson * @owner: the object that tracks the region's reference count
16558be545baSRichard Henderson * @name: used for debugging; not visible to the user or ABI
16568be545baSRichard Henderson * @size: size of the region.
16578be545baSRichard Henderson */
16588be545baSRichard Henderson void memory_region_init_iommu(void *_iommu_mr,
16598be545baSRichard Henderson size_t instance_size,
16608be545baSRichard Henderson const char *mrtypename,
16618be545baSRichard Henderson Object *owner,
16628be545baSRichard Henderson const char *name,
16638be545baSRichard Henderson uint64_t size);
16648be545baSRichard Henderson
16658be545baSRichard Henderson /**
16668be545baSRichard Henderson * memory_region_init_ram - Initialize RAM memory region. Accesses into the
16678be545baSRichard Henderson * region will modify memory directly.
16688be545baSRichard Henderson *
16698be545baSRichard Henderson * @mr: the #MemoryRegion to be initialized
16708be545baSRichard Henderson * @owner: the object that tracks the region's reference count (must be
16718be545baSRichard Henderson * TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
16728be545baSRichard Henderson * @name: name of the memory region
16738be545baSRichard Henderson * @size: size of the region in bytes
16748be545baSRichard Henderson * @errp: pointer to Error*, to store an error if it happens.
16758be545baSRichard Henderson *
16768be545baSRichard Henderson * This function allocates RAM for a board model or device, and
16778be545baSRichard Henderson * arranges for it to be migrated (by calling vmstate_register_ram()
16788be545baSRichard Henderson * if @owner is a DeviceState, or vmstate_register_ram_global() if
16798be545baSRichard Henderson * @owner is NULL).
16808be545baSRichard Henderson *
16818be545baSRichard Henderson * TODO: Currently we restrict @owner to being either NULL (for
16828be545baSRichard Henderson * global RAM regions with no owner) or devices, so that we can
16838be545baSRichard Henderson * give the RAM block a unique name for migration purposes.
16848be545baSRichard Henderson * We should lift this restriction and allow arbitrary Objects.
16858be545baSRichard Henderson * If you pass a non-NULL non-device @owner then we will assert.
16868be545baSRichard Henderson *
16878be545baSRichard Henderson * Return: true on success, else false setting @errp with error.
16888be545baSRichard Henderson */
16898be545baSRichard Henderson bool memory_region_init_ram(MemoryRegion *mr,
16908be545baSRichard Henderson Object *owner,
16918be545baSRichard Henderson const char *name,
16928be545baSRichard Henderson uint64_t size,
16938be545baSRichard Henderson Error **errp);
16948be545baSRichard Henderson
16958be545baSRichard Henderson bool memory_region_init_ram_guest_memfd(MemoryRegion *mr,
16968be545baSRichard Henderson Object *owner,
16978be545baSRichard Henderson const char *name,
16988be545baSRichard Henderson uint64_t size,
16998be545baSRichard Henderson Error **errp);
17008be545baSRichard Henderson
17018be545baSRichard Henderson /**
17028be545baSRichard Henderson * memory_region_init_rom: Initialize a ROM memory region.
17038be545baSRichard Henderson *
17048be545baSRichard Henderson * This has the same effect as calling memory_region_init_ram()
17058be545baSRichard Henderson * and then marking the resulting region read-only with
17068be545baSRichard Henderson * memory_region_set_readonly(). This includes arranging for the
17078be545baSRichard Henderson * contents to be migrated.
17088be545baSRichard Henderson *
17098be545baSRichard Henderson * TODO: Currently we restrict @owner to being either NULL (for
17108be545baSRichard Henderson * global RAM regions with no owner) or devices, so that we can
17118be545baSRichard Henderson * give the RAM block a unique name for migration purposes.
17128be545baSRichard Henderson * We should lift this restriction and allow arbitrary Objects.
17138be545baSRichard Henderson * If you pass a non-NULL non-device @owner then we will assert.
17148be545baSRichard Henderson *
17158be545baSRichard Henderson * @mr: the #MemoryRegion to be initialized.
17168be545baSRichard Henderson * @owner: the object that tracks the region's reference count
17178be545baSRichard Henderson * @name: Region name, becomes part of RAMBlock name used in migration stream
17188be545baSRichard Henderson * must be unique within any device
17198be545baSRichard Henderson * @size: size of the region.
17208be545baSRichard Henderson * @errp: pointer to Error*, to store an error if it happens.
17218be545baSRichard Henderson *
17228be545baSRichard Henderson * Return: true on success, else false setting @errp with error.
17238be545baSRichard Henderson */
17248be545baSRichard Henderson bool memory_region_init_rom(MemoryRegion *mr,
17258be545baSRichard Henderson Object *owner,
17268be545baSRichard Henderson const char *name,
17278be545baSRichard Henderson uint64_t size,
17288be545baSRichard Henderson Error **errp);
17298be545baSRichard Henderson
17308be545baSRichard Henderson /**
17318be545baSRichard Henderson * memory_region_init_rom_device: Initialize a ROM memory region.
17328be545baSRichard Henderson * Writes are handled via callbacks.
17338be545baSRichard Henderson *
17348be545baSRichard Henderson * This function initializes a memory region backed by RAM for reads
17358be545baSRichard Henderson * and callbacks for writes, and arranges for the RAM backing to
17368be545baSRichard Henderson * be migrated (by calling vmstate_register_ram()
17378be545baSRichard Henderson * if @owner is a DeviceState, or vmstate_register_ram_global() if
17388be545baSRichard Henderson * @owner is NULL).
17398be545baSRichard Henderson *
17408be545baSRichard Henderson * TODO: Currently we restrict @owner to being either NULL (for
17418be545baSRichard Henderson * global RAM regions with no owner) or devices, so that we can
17428be545baSRichard Henderson * give the RAM block a unique name for migration purposes.
17438be545baSRichard Henderson * We should lift this restriction and allow arbitrary Objects.
17448be545baSRichard Henderson * If you pass a non-NULL non-device @owner then we will assert.
17458be545baSRichard Henderson *
17468be545baSRichard Henderson * @mr: the #MemoryRegion to be initialized.
17478be545baSRichard Henderson * @owner: the object that tracks the region's reference count
17488be545baSRichard Henderson * @ops: callbacks for write access handling (must not be NULL).
17498be545baSRichard Henderson * @opaque: passed to the read and write callbacks of the @ops structure.
17508be545baSRichard Henderson * @name: Region name, becomes part of RAMBlock name used in migration stream
17518be545baSRichard Henderson * must be unique within any device
17528be545baSRichard Henderson * @size: size of the region.
17538be545baSRichard Henderson * @errp: pointer to Error*, to store an error if it happens.
17548be545baSRichard Henderson *
17558be545baSRichard Henderson * Return: true on success, else false setting @errp with error.
17568be545baSRichard Henderson */
17578be545baSRichard Henderson bool memory_region_init_rom_device(MemoryRegion *mr,
17588be545baSRichard Henderson Object *owner,
17598be545baSRichard Henderson const MemoryRegionOps *ops,
17608be545baSRichard Henderson void *opaque,
17618be545baSRichard Henderson const char *name,
17628be545baSRichard Henderson uint64_t size,
17638be545baSRichard Henderson Error **errp);
17648be545baSRichard Henderson
17658be545baSRichard Henderson
17668be545baSRichard Henderson /**
17678be545baSRichard Henderson * memory_region_owner: get a memory region's owner.
17688be545baSRichard Henderson *
17698be545baSRichard Henderson * @mr: the memory region being queried.
17708be545baSRichard Henderson */
17718be545baSRichard Henderson Object *memory_region_owner(MemoryRegion *mr);
17728be545baSRichard Henderson
17738be545baSRichard Henderson /**
17748be545baSRichard Henderson * memory_region_size: get a memory region's size.
17758be545baSRichard Henderson *
17768be545baSRichard Henderson * @mr: the memory region being queried.
17778be545baSRichard Henderson */
17788be545baSRichard Henderson uint64_t memory_region_size(MemoryRegion *mr);
17798be545baSRichard Henderson
17808be545baSRichard Henderson /**
17818be545baSRichard Henderson * memory_region_is_ram: check whether a memory region is random access
17828be545baSRichard Henderson *
17838be545baSRichard Henderson * Returns %true if a memory region is random access.
17848be545baSRichard Henderson *
17858be545baSRichard Henderson * @mr: the memory region being queried
17868be545baSRichard Henderson */
memory_region_is_ram(MemoryRegion * mr)17878be545baSRichard Henderson static inline bool memory_region_is_ram(MemoryRegion *mr)
17888be545baSRichard Henderson {
17898be545baSRichard Henderson return mr->ram;
17908be545baSRichard Henderson }
17918be545baSRichard Henderson
17928be545baSRichard Henderson /**
17938be545baSRichard Henderson * memory_region_is_ram_device: check whether a memory region is a ram device
17948be545baSRichard Henderson *
17958be545baSRichard Henderson * Returns %true if a memory region is a device backed ram region
17968be545baSRichard Henderson *
17978be545baSRichard Henderson * @mr: the memory region being queried
17988be545baSRichard Henderson */
17998be545baSRichard Henderson bool memory_region_is_ram_device(MemoryRegion *mr);
18008be545baSRichard Henderson
18018be545baSRichard Henderson /**
18028be545baSRichard Henderson * memory_region_is_romd: check whether a memory region is in ROMD mode
18038be545baSRichard Henderson *
18048be545baSRichard Henderson * Returns %true if a memory region is a ROM device and currently set to allow
18058be545baSRichard Henderson * direct reads.
18068be545baSRichard Henderson *
18078be545baSRichard Henderson * @mr: the memory region being queried
18088be545baSRichard Henderson */
memory_region_is_romd(MemoryRegion * mr)18098be545baSRichard Henderson static inline bool memory_region_is_romd(MemoryRegion *mr)
18108be545baSRichard Henderson {
18118be545baSRichard Henderson return mr->rom_device && mr->romd_mode;
18128be545baSRichard Henderson }
18138be545baSRichard Henderson
18148be545baSRichard Henderson /**
18158be545baSRichard Henderson * memory_region_is_protected: check whether a memory region is protected
18168be545baSRichard Henderson *
18178be545baSRichard Henderson * Returns %true if a memory region is protected RAM and cannot be accessed
18188be545baSRichard Henderson * via standard mechanisms, e.g. DMA.
18198be545baSRichard Henderson *
18208be545baSRichard Henderson * @mr: the memory region being queried
18218be545baSRichard Henderson */
18228be545baSRichard Henderson bool memory_region_is_protected(MemoryRegion *mr);
18238be545baSRichard Henderson
18248be545baSRichard Henderson /**
18258be545baSRichard Henderson * memory_region_has_guest_memfd: check whether a memory region has guest_memfd
18268be545baSRichard Henderson * associated
18278be545baSRichard Henderson *
18288be545baSRichard Henderson * Returns %true if a memory region's ram_block has valid guest_memfd assigned.
18298be545baSRichard Henderson *
18308be545baSRichard Henderson * @mr: the memory region being queried
18318be545baSRichard Henderson */
18328be545baSRichard Henderson bool memory_region_has_guest_memfd(MemoryRegion *mr);
18338be545baSRichard Henderson
18348be545baSRichard Henderson /**
18358be545baSRichard Henderson * memory_region_get_iommu: check whether a memory region is an iommu
18368be545baSRichard Henderson *
18378be545baSRichard Henderson * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
18388be545baSRichard Henderson * otherwise NULL.
18398be545baSRichard Henderson *
18408be545baSRichard Henderson * @mr: the memory region being queried
18418be545baSRichard Henderson */
memory_region_get_iommu(MemoryRegion * mr)18428be545baSRichard Henderson static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
18438be545baSRichard Henderson {
18448be545baSRichard Henderson if (mr->alias) {
18458be545baSRichard Henderson return memory_region_get_iommu(mr->alias);
18468be545baSRichard Henderson }
18478be545baSRichard Henderson if (mr->is_iommu) {
18488be545baSRichard Henderson return (IOMMUMemoryRegion *) mr;
18498be545baSRichard Henderson }
18508be545baSRichard Henderson return NULL;
18518be545baSRichard Henderson }
18528be545baSRichard Henderson
18538be545baSRichard Henderson /**
18548be545baSRichard Henderson * memory_region_get_iommu_class_nocheck: returns iommu memory region class
18558be545baSRichard Henderson * if an iommu or NULL if not
18568be545baSRichard Henderson *
18578be545baSRichard Henderson * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
18588be545baSRichard Henderson * otherwise NULL. This is fast path avoiding QOM checking, use with caution.
18598be545baSRichard Henderson *
18608be545baSRichard Henderson * @iommu_mr: the memory region being queried
18618be545baSRichard Henderson */
memory_region_get_iommu_class_nocheck(IOMMUMemoryRegion * iommu_mr)18628be545baSRichard Henderson static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
18638be545baSRichard Henderson IOMMUMemoryRegion *iommu_mr)
18648be545baSRichard Henderson {
18658be545baSRichard Henderson return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
18668be545baSRichard Henderson }
18678be545baSRichard Henderson
18688be545baSRichard Henderson #define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
18698be545baSRichard Henderson
18708be545baSRichard Henderson /**
18718be545baSRichard Henderson * memory_region_iommu_get_min_page_size: get minimum supported page size
18728be545baSRichard Henderson * for an iommu
18738be545baSRichard Henderson *
18748be545baSRichard Henderson * Returns minimum supported page size for an iommu.
18758be545baSRichard Henderson *
18768be545baSRichard Henderson * @iommu_mr: the memory region being queried
18778be545baSRichard Henderson */
18788be545baSRichard Henderson uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
18798be545baSRichard Henderson
18808be545baSRichard Henderson /**
18818be545baSRichard Henderson * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
18828be545baSRichard Henderson *
18838be545baSRichard Henderson * Note: for any IOMMU implementation, an in-place mapping change
18848be545baSRichard Henderson * should be notified with an UNMAP followed by a MAP.
18858be545baSRichard Henderson *
18868be545baSRichard Henderson * @iommu_mr: the memory region that was changed
18878be545baSRichard Henderson * @iommu_idx: the IOMMU index for the translation table which has changed
18888be545baSRichard Henderson * @event: TLB event with the new entry in the IOMMU translation table.
18898be545baSRichard Henderson * The entry replaces all old entries for the same virtual I/O address
18908be545baSRichard Henderson * range.
18918be545baSRichard Henderson */
18928be545baSRichard Henderson void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
18938be545baSRichard Henderson int iommu_idx,
18948be545baSRichard Henderson const IOMMUTLBEvent event);
18958be545baSRichard Henderson
18968be545baSRichard Henderson /**
18978be545baSRichard Henderson * memory_region_notify_iommu_one: notify a change in an IOMMU translation
18988be545baSRichard Henderson * entry to a single notifier
18998be545baSRichard Henderson *
19008be545baSRichard Henderson * This works just like memory_region_notify_iommu(), but it only
19018be545baSRichard Henderson * notifies a specific notifier, not all of them.
19028be545baSRichard Henderson *
19038be545baSRichard Henderson * @notifier: the notifier to be notified
19048be545baSRichard Henderson * @event: TLB event with the new entry in the IOMMU translation table.
19058be545baSRichard Henderson * The entry replaces all old entries for the same virtual I/O address
19068be545baSRichard Henderson * range.
19078be545baSRichard Henderson */
19088be545baSRichard Henderson void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
19098be545baSRichard Henderson const IOMMUTLBEvent *event);
19108be545baSRichard Henderson
19118be545baSRichard Henderson /**
 * memory_region_unmap_iommu_notifier_range: notify an unmap for an IOMMU
19138be545baSRichard Henderson * translation that covers the
19148be545baSRichard Henderson * range of a notifier
19158be545baSRichard Henderson *
19168be545baSRichard Henderson * @notifier: the notifier to be notified
19178be545baSRichard Henderson */
19188be545baSRichard Henderson void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier);
19198be545baSRichard Henderson
19208be545baSRichard Henderson
19218be545baSRichard Henderson /**
19228be545baSRichard Henderson * memory_region_register_iommu_notifier: register a notifier for changes to
19238be545baSRichard Henderson * IOMMU translation entries.
19248be545baSRichard Henderson *
19258be545baSRichard Henderson * Returns 0 on success, or a negative errno otherwise. In particular,
19268be545baSRichard Henderson * -EINVAL indicates that at least one of the attributes of the notifier
19278be545baSRichard Henderson * is not supported (flag/range) by the IOMMU memory region. In case of error
19288be545baSRichard Henderson * the error object must be created.
19298be545baSRichard Henderson *
19308be545baSRichard Henderson * @mr: the memory region to observe
19318be545baSRichard Henderson * @n: the IOMMUNotifier to be added; the notify callback receives a
19328be545baSRichard Henderson * pointer to an #IOMMUTLBEntry as the opaque value; the pointer
19338be545baSRichard Henderson * ceases to be valid on exit from the notifier.
19348be545baSRichard Henderson * @errp: pointer to Error*, to store an error if it happens.
19358be545baSRichard Henderson */
19368be545baSRichard Henderson int memory_region_register_iommu_notifier(MemoryRegion *mr,
19378be545baSRichard Henderson IOMMUNotifier *n, Error **errp);
19388be545baSRichard Henderson
19398be545baSRichard Henderson /**
19408be545baSRichard Henderson * memory_region_iommu_replay: replay existing IOMMU translations to
19418be545baSRichard Henderson * a notifier with the minimum page granularity returned by
19428be545baSRichard Henderson * mr->iommu_ops->get_page_size().
19438be545baSRichard Henderson *
19448be545baSRichard Henderson * Note: this is not related to record-and-replay functionality.
19458be545baSRichard Henderson *
19468be545baSRichard Henderson * @iommu_mr: the memory region to observe
19478be545baSRichard Henderson * @n: the notifier to which to replay iommu mappings
19488be545baSRichard Henderson */
19498be545baSRichard Henderson void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
19508be545baSRichard Henderson
19518be545baSRichard Henderson /**
19528be545baSRichard Henderson * memory_region_unregister_iommu_notifier: unregister a notifier for
19538be545baSRichard Henderson * changes to IOMMU translation entries.
19548be545baSRichard Henderson *
19558be545baSRichard Henderson * @mr: the memory region which was observed and for which notify_stopped()
19568be545baSRichard Henderson * needs to be called
19578be545baSRichard Henderson * @n: the notifier to be removed.
19588be545baSRichard Henderson */
19598be545baSRichard Henderson void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
19608be545baSRichard Henderson IOMMUNotifier *n);
19618be545baSRichard Henderson
19628be545baSRichard Henderson /**
19638be545baSRichard Henderson * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
19648be545baSRichard Henderson * defined on the IOMMU.
19658be545baSRichard Henderson *
19668be545baSRichard Henderson * Returns 0 on success, or a negative errno otherwise. In particular,
19678be545baSRichard Henderson * -EINVAL indicates that the IOMMU does not support the requested
19688be545baSRichard Henderson * attribute.
19698be545baSRichard Henderson *
19708be545baSRichard Henderson * @iommu_mr: the memory region
19718be545baSRichard Henderson * @attr: the requested attribute
19728be545baSRichard Henderson * @data: a pointer to the requested attribute data
19738be545baSRichard Henderson */
19748be545baSRichard Henderson int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
19758be545baSRichard Henderson enum IOMMUMemoryRegionAttr attr,
19768be545baSRichard Henderson void *data);
19778be545baSRichard Henderson
19788be545baSRichard Henderson /**
19798be545baSRichard Henderson * memory_region_iommu_attrs_to_index: return the IOMMU index to
19808be545baSRichard Henderson * use for translations with the given memory transaction attributes.
19818be545baSRichard Henderson *
19828be545baSRichard Henderson * @iommu_mr: the memory region
19838be545baSRichard Henderson * @attrs: the memory transaction attributes
19848be545baSRichard Henderson */
19858be545baSRichard Henderson int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
19868be545baSRichard Henderson MemTxAttrs attrs);
19878be545baSRichard Henderson
19888be545baSRichard Henderson /**
19898be545baSRichard Henderson * memory_region_iommu_num_indexes: return the total number of IOMMU
19908be545baSRichard Henderson * indexes that this IOMMU supports.
19918be545baSRichard Henderson *
19928be545baSRichard Henderson * @iommu_mr: the memory region
19938be545baSRichard Henderson */
19948be545baSRichard Henderson int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
19958be545baSRichard Henderson
19968be545baSRichard Henderson /**
19978be545baSRichard Henderson * memory_region_name: get a memory region's name
19988be545baSRichard Henderson *
19998be545baSRichard Henderson * Returns the string that was used to initialize the memory region.
20008be545baSRichard Henderson *
20018be545baSRichard Henderson * @mr: the memory region being queried
20028be545baSRichard Henderson */
20038be545baSRichard Henderson const char *memory_region_name(const MemoryRegion *mr);
20048be545baSRichard Henderson
20058be545baSRichard Henderson /**
20068be545baSRichard Henderson * memory_region_is_logging: return whether a memory region is logging writes
20078be545baSRichard Henderson *
20088be545baSRichard Henderson * Returns %true if the memory region is logging writes for the given client
20098be545baSRichard Henderson *
20108be545baSRichard Henderson * @mr: the memory region being queried
20118be545baSRichard Henderson * @client: the client being queried
20128be545baSRichard Henderson */
20138be545baSRichard Henderson bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
20148be545baSRichard Henderson
20158be545baSRichard Henderson /**
20168be545baSRichard Henderson * memory_region_get_dirty_log_mask: return the clients for which a
20178be545baSRichard Henderson * memory region is logging writes.
20188be545baSRichard Henderson *
20198be545baSRichard Henderson * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
20208be545baSRichard Henderson * are the bit indices.
20218be545baSRichard Henderson *
20228be545baSRichard Henderson * @mr: the memory region being queried
20238be545baSRichard Henderson */
20248be545baSRichard Henderson uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
20258be545baSRichard Henderson
20268be545baSRichard Henderson /**
20278be545baSRichard Henderson * memory_region_is_rom: check whether a memory region is ROM
20288be545baSRichard Henderson *
20298be545baSRichard Henderson * Returns %true if a memory region is read-only memory.
20308be545baSRichard Henderson *
20318be545baSRichard Henderson * @mr: the memory region being queried
20328be545baSRichard Henderson */
memory_region_is_rom(MemoryRegion * mr)20338be545baSRichard Henderson static inline bool memory_region_is_rom(MemoryRegion *mr)
20348be545baSRichard Henderson {
20358be545baSRichard Henderson return mr->ram && mr->readonly;
20368be545baSRichard Henderson }
20378be545baSRichard Henderson
20388be545baSRichard Henderson /**
20398be545baSRichard Henderson * memory_region_is_nonvolatile: check whether a memory region is non-volatile
20408be545baSRichard Henderson *
 * Returns %true if a memory region is non-volatile memory.
20428be545baSRichard Henderson *
20438be545baSRichard Henderson * @mr: the memory region being queried
20448be545baSRichard Henderson */
memory_region_is_nonvolatile(MemoryRegion * mr)20458be545baSRichard Henderson static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
20468be545baSRichard Henderson {
20478be545baSRichard Henderson return mr->nonvolatile;
20488be545baSRichard Henderson }
20498be545baSRichard Henderson
20508be545baSRichard Henderson /**
20518be545baSRichard Henderson * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
20528be545baSRichard Henderson *
20538be545baSRichard Henderson * Returns a file descriptor backing a file-based RAM memory region,
20548be545baSRichard Henderson * or -1 if the region is not a file-based RAM memory region.
20558be545baSRichard Henderson *
20568be545baSRichard Henderson * @mr: the RAM or alias memory region being queried.
20578be545baSRichard Henderson */
20588be545baSRichard Henderson int memory_region_get_fd(MemoryRegion *mr);
20598be545baSRichard Henderson
20608be545baSRichard Henderson /**
20618be545baSRichard Henderson * memory_region_from_host: Convert a pointer into a RAM memory region
20628be545baSRichard Henderson * and an offset within it.
20638be545baSRichard Henderson *
20648be545baSRichard Henderson * Given a host pointer inside a RAM memory region (created with
20658be545baSRichard Henderson * memory_region_init_ram() or memory_region_init_ram_ptr()), return
20668be545baSRichard Henderson * the MemoryRegion and the offset within it.
20678be545baSRichard Henderson *
20688be545baSRichard Henderson * Use with care; by the time this function returns, the returned pointer is
20698be545baSRichard Henderson * not protected by RCU anymore. If the caller is not within an RCU critical
20708be545baSRichard Henderson * section and does not hold the BQL, it must have other means of
20718be545baSRichard Henderson * protecting the pointer, such as a reference to the region that includes
20728be545baSRichard Henderson * the incoming ram_addr_t.
20738be545baSRichard Henderson *
20748be545baSRichard Henderson * @ptr: the host pointer to be converted
20758be545baSRichard Henderson * @offset: the offset within memory region
20768be545baSRichard Henderson */
20778be545baSRichard Henderson MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
20788be545baSRichard Henderson
20798be545baSRichard Henderson /**
20808be545baSRichard Henderson * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
20818be545baSRichard Henderson *
20828be545baSRichard Henderson * Returns a host pointer to a RAM memory region (created with
20838be545baSRichard Henderson * memory_region_init_ram() or memory_region_init_ram_ptr()).
20848be545baSRichard Henderson *
20858be545baSRichard Henderson * Use with care; by the time this function returns, the returned pointer is
20868be545baSRichard Henderson * not protected by RCU anymore. If the caller is not within an RCU critical
20878be545baSRichard Henderson * section and does not hold the BQL, it must have other means of
20888be545baSRichard Henderson * protecting the pointer, such as a reference to the region that includes
20898be545baSRichard Henderson * the incoming ram_addr_t.
20908be545baSRichard Henderson *
20918be545baSRichard Henderson * @mr: the memory region being queried.
20928be545baSRichard Henderson */
20938be545baSRichard Henderson void *memory_region_get_ram_ptr(MemoryRegion *mr);
20948be545baSRichard Henderson
20958be545baSRichard Henderson /* memory_region_ram_resize: Resize a RAM region.
20968be545baSRichard Henderson *
20978be545baSRichard Henderson * Resizing RAM while migrating can result in the migration being canceled.
20988be545baSRichard Henderson * Care has to be taken if the guest might have already detected the memory.
20998be545baSRichard Henderson *
21008be545baSRichard Henderson * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size of the region
21028be545baSRichard Henderson * @errp: pointer to Error*, to store an error if it happens.
21038be545baSRichard Henderson */
21048be545baSRichard Henderson void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
21058be545baSRichard Henderson Error **errp);
21068be545baSRichard Henderson
21078be545baSRichard Henderson /**
21088be545baSRichard Henderson * memory_region_msync: Synchronize selected address range of
21098be545baSRichard Henderson * a memory mapped region
21108be545baSRichard Henderson *
21118be545baSRichard Henderson * @mr: the memory region to be msync
21128be545baSRichard Henderson * @addr: the initial address of the range to be sync
21138be545baSRichard Henderson * @size: the size of the range to be sync
21148be545baSRichard Henderson */
21158be545baSRichard Henderson void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);
21168be545baSRichard Henderson
21178be545baSRichard Henderson /**
21188be545baSRichard Henderson * memory_region_writeback: Trigger cache writeback for
21198be545baSRichard Henderson * selected address range
21208be545baSRichard Henderson *
21218be545baSRichard Henderson * @mr: the memory region to be updated
21228be545baSRichard Henderson * @addr: the initial address of the range to be written back
21238be545baSRichard Henderson * @size: the size of the range to be written back
21248be545baSRichard Henderson */
21258be545baSRichard Henderson void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);
21268be545baSRichard Henderson
21278be545baSRichard Henderson /**
21288be545baSRichard Henderson * memory_region_set_log: Turn dirty logging on or off for a region.
21298be545baSRichard Henderson *
21308be545baSRichard Henderson * Turns dirty logging on or off for a specified client (display, migration).
21318be545baSRichard Henderson * Only meaningful for RAM regions.
21328be545baSRichard Henderson *
21338be545baSRichard Henderson * @mr: the memory region being updated.
21348be545baSRichard Henderson * @log: whether dirty logging is to be enabled or disabled.
21358be545baSRichard Henderson * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
21368be545baSRichard Henderson */
21378be545baSRichard Henderson void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
21388be545baSRichard Henderson
21398be545baSRichard Henderson /**
21408be545baSRichard Henderson * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
21418be545baSRichard Henderson *
21428be545baSRichard Henderson * Marks a range of bytes as dirty, after it has been dirtied outside
21438be545baSRichard Henderson * guest code.
21448be545baSRichard Henderson *
21458be545baSRichard Henderson * @mr: the memory region being dirtied.
21468be545baSRichard Henderson * @addr: the address (relative to the start of the region) being dirtied.
21478be545baSRichard Henderson * @size: size of the range being dirtied.
21488be545baSRichard Henderson */
21498be545baSRichard Henderson void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
21508be545baSRichard Henderson hwaddr size);
21518be545baSRichard Henderson
21528be545baSRichard Henderson /**
21538be545baSRichard Henderson * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
21548be545baSRichard Henderson *
21558be545baSRichard Henderson * This function is called when the caller wants to clear the remote
21568be545baSRichard Henderson * dirty bitmap of a memory range within the memory region. This can
21578be545baSRichard Henderson * be used by e.g. KVM to manually clear dirty log when
 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is declared supported by the host
21598be545baSRichard Henderson * kernel.
21608be545baSRichard Henderson *
21618be545baSRichard Henderson * @mr: the memory region to clear the dirty log upon
21628be545baSRichard Henderson * @start: start address offset within the memory region
21638be545baSRichard Henderson * @len: length of the memory region to clear dirty bitmap
21648be545baSRichard Henderson */
21658be545baSRichard Henderson void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
21668be545baSRichard Henderson hwaddr len);
21678be545baSRichard Henderson
21688be545baSRichard Henderson /**
21698be545baSRichard Henderson * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
21708be545baSRichard Henderson * bitmap and clear it.
21718be545baSRichard Henderson *
21728be545baSRichard Henderson * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
21738be545baSRichard Henderson * returns the snapshot. The snapshot can then be used to query dirty
21748be545baSRichard Henderson * status, using memory_region_snapshot_get_dirty. Snapshotting allows
21758be545baSRichard Henderson * querying the same page multiple times, which is especially useful for
21768be545baSRichard Henderson * display updates where the scanlines often are not page aligned.
21778be545baSRichard Henderson *
21788be545baSRichard Henderson * The dirty bitmap region which gets copied into the snapshot (and
21798be545baSRichard Henderson * cleared afterwards) can be larger than requested. The boundaries
21808be545baSRichard Henderson * are rounded up/down so complete bitmap longs (covering 64 pages on
21818be545baSRichard Henderson * 64bit hosts) can be copied over into the bitmap snapshot. Which
21828be545baSRichard Henderson * isn't a problem for display updates as the extra pages are outside
21838be545baSRichard Henderson * the visible area, and in case the visible area changes a full
21848be545baSRichard Henderson * display redraw is due anyway. Should other use cases for this
21858be545baSRichard Henderson * function emerge we might have to revisit this implementation
21868be545baSRichard Henderson * detail.
21878be545baSRichard Henderson *
21888be545baSRichard Henderson * Use g_free to release DirtyBitmapSnapshot.
21898be545baSRichard Henderson *
21908be545baSRichard Henderson * @mr: the memory region being queried.
21918be545baSRichard Henderson * @addr: the address (relative to the start of the region) being queried.
21928be545baSRichard Henderson * @size: the size of the range being queried.
21938be545baSRichard Henderson * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
21948be545baSRichard Henderson */
21958be545baSRichard Henderson DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
21968be545baSRichard Henderson hwaddr addr,
21978be545baSRichard Henderson hwaddr size,
21988be545baSRichard Henderson unsigned client);
21998be545baSRichard Henderson
22008be545baSRichard Henderson /**
22018be545baSRichard Henderson * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
22028be545baSRichard Henderson * in the specified dirty bitmap snapshot.
22038be545baSRichard Henderson *
22048be545baSRichard Henderson * @mr: the memory region being queried.
22058be545baSRichard Henderson * @snap: the dirty bitmap snapshot
22068be545baSRichard Henderson * @addr: the address (relative to the start of the region) being queried.
22078be545baSRichard Henderson * @size: the size of the range being queried.
22088be545baSRichard Henderson */
22098be545baSRichard Henderson bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
22108be545baSRichard Henderson DirtyBitmapSnapshot *snap,
22118be545baSRichard Henderson hwaddr addr, hwaddr size);
22128be545baSRichard Henderson
22138be545baSRichard Henderson /**
22148be545baSRichard Henderson * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
22158be545baSRichard Henderson * client.
22168be545baSRichard Henderson *
22178be545baSRichard Henderson * Marks a range of pages as no longer dirty.
22188be545baSRichard Henderson *
22198be545baSRichard Henderson * @mr: the region being updated.
22208be545baSRichard Henderson * @addr: the start of the subrange being cleaned.
22218be545baSRichard Henderson * @size: the size of the subrange being cleaned.
22228be545baSRichard Henderson * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
22238be545baSRichard Henderson * %DIRTY_MEMORY_VGA.
22248be545baSRichard Henderson */
22258be545baSRichard Henderson void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
22268be545baSRichard Henderson hwaddr size, unsigned client);
22278be545baSRichard Henderson
22288be545baSRichard Henderson /**
22298be545baSRichard Henderson * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
22308be545baSRichard Henderson * TBs (for self-modifying code).
22318be545baSRichard Henderson *
22328be545baSRichard Henderson * The MemoryRegionOps->write() callback of a ROM device must use this function
22338be545baSRichard Henderson * to mark byte ranges that have been modified internally, such as by directly
22348be545baSRichard Henderson * accessing the memory returned by memory_region_get_ram_ptr().
22358be545baSRichard Henderson *
22368be545baSRichard Henderson * This function marks the range dirty and invalidates TBs so that TCG can
22378be545baSRichard Henderson * detect self-modifying code.
22388be545baSRichard Henderson *
22398be545baSRichard Henderson * @mr: the region being flushed.
22408be545baSRichard Henderson * @addr: the start, relative to the start of the region, of the range being
22418be545baSRichard Henderson * flushed.
22428be545baSRichard Henderson * @size: the size, in bytes, of the range being flushed.
22438be545baSRichard Henderson */
22448be545baSRichard Henderson void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
22458be545baSRichard Henderson
22468be545baSRichard Henderson /**
22478be545baSRichard Henderson * memory_region_set_readonly: Turn a memory region read-only (or read-write)
22488be545baSRichard Henderson *
22498be545baSRichard Henderson * Allows a memory region to be marked as read-only (turning it into a ROM).
22508be545baSRichard Henderson * only useful on RAM regions.
22518be545baSRichard Henderson *
22528be545baSRichard Henderson * @mr: the region being updated.
22538be545baSRichard Henderson * @readonly: whether the region is to be ROM or RAM.
22548be545baSRichard Henderson */
22558be545baSRichard Henderson void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
22568be545baSRichard Henderson
22578be545baSRichard Henderson /**
22588be545baSRichard Henderson * memory_region_set_nonvolatile: Turn a memory region non-volatile
22598be545baSRichard Henderson *
22608be545baSRichard Henderson * Allows a memory region to be marked as non-volatile.
22618be545baSRichard Henderson * only useful on RAM regions.
22628be545baSRichard Henderson *
22638be545baSRichard Henderson * @mr: the region being updated.
22648be545baSRichard Henderson * @nonvolatile: whether the region is to be non-volatile.
22658be545baSRichard Henderson */
22668be545baSRichard Henderson void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);
22678be545baSRichard Henderson
22688be545baSRichard Henderson /**
22698be545baSRichard Henderson * memory_region_rom_device_set_romd: enable/disable ROMD mode
22708be545baSRichard Henderson *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to be
 * set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the
22738be545baSRichard Henderson * device is mapped to guest memory and satisfies read access directly.
22748be545baSRichard Henderson * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
22758be545baSRichard Henderson * Writes are always handled by the #MemoryRegion.write function.
22768be545baSRichard Henderson *
22778be545baSRichard Henderson * @mr: the memory region to be updated
22788be545baSRichard Henderson * @romd_mode: %true to put the region into ROMD mode
22798be545baSRichard Henderson */
22808be545baSRichard Henderson void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
22818be545baSRichard Henderson
22828be545baSRichard Henderson /**
22838be545baSRichard Henderson * memory_region_set_coalescing: Enable memory coalescing for the region.
22848be545baSRichard Henderson *
 * Enables writes to a region to be queued for later processing. MMIO ->write
22868be545baSRichard Henderson * callbacks may be delayed until a non-coalesced MMIO is issued.
22878be545baSRichard Henderson * Only useful for IO regions. Roughly similar to write-combining hardware.
22888be545baSRichard Henderson *
22898be545baSRichard Henderson * @mr: the memory region to be write coalesced
22908be545baSRichard Henderson */
22918be545baSRichard Henderson void memory_region_set_coalescing(MemoryRegion *mr);
22928be545baSRichard Henderson
22938be545baSRichard Henderson /**
22948be545baSRichard Henderson * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
22958be545baSRichard Henderson * a region.
22968be545baSRichard Henderson *
22978be545baSRichard Henderson * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
22998be545baSRichard Henderson *
23008be545baSRichard Henderson * @mr: the memory region to be updated.
23018be545baSRichard Henderson * @offset: the start of the range within the region to be coalesced.
23028be545baSRichard Henderson * @size: the size of the subrange to be coalesced.
23038be545baSRichard Henderson */
23048be545baSRichard Henderson void memory_region_add_coalescing(MemoryRegion *mr,
23058be545baSRichard Henderson hwaddr offset,
23068be545baSRichard Henderson uint64_t size);
23078be545baSRichard Henderson
23088be545baSRichard Henderson /**
23098be545baSRichard Henderson * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
23108be545baSRichard Henderson *
23118be545baSRichard Henderson * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
23138be545baSRichard Henderson * hardware.
23148be545baSRichard Henderson *
23158be545baSRichard Henderson * @mr: the memory region to be updated.
23168be545baSRichard Henderson */
23178be545baSRichard Henderson void memory_region_clear_coalescing(MemoryRegion *mr);
23188be545baSRichard Henderson
23198be545baSRichard Henderson /**
23208be545baSRichard Henderson * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
23218be545baSRichard Henderson * accesses.
23228be545baSRichard Henderson *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
23248be545baSRichard Henderson * region is accessed. This property is automatically enabled for all regions
23258be545baSRichard Henderson * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
23268be545baSRichard Henderson *
23278be545baSRichard Henderson * @mr: the memory region to be updated.
23288be545baSRichard Henderson */
23298be545baSRichard Henderson void memory_region_set_flush_coalesced(MemoryRegion *mr);
23308be545baSRichard Henderson
23318be545baSRichard Henderson /**
23328be545baSRichard Henderson * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
23338be545baSRichard Henderson * accesses.
23348be545baSRichard Henderson *
23358be545baSRichard Henderson * Clear the automatic coalesced MMIO flushing enabled via
23368be545baSRichard Henderson * memory_region_set_flush_coalesced. Note that this service has no effect on
23378be545baSRichard Henderson * memory regions that have MMIO coalescing enabled for themselves. For them,
23388be545baSRichard Henderson * automatic flushing will stop once coalescing is disabled.
23398be545baSRichard Henderson *
23408be545baSRichard Henderson * @mr: the memory region to be updated.
23418be545baSRichard Henderson */
23428be545baSRichard Henderson void memory_region_clear_flush_coalesced(MemoryRegion *mr);
23438be545baSRichard Henderson
23448be545baSRichard Henderson /**
23458be545baSRichard Henderson * memory_region_add_eventfd: Request an eventfd to be triggered when a word
23468be545baSRichard Henderson * is written to a location.
23478be545baSRichard Henderson *
23488be545baSRichard Henderson * Marks a word in an IO region (initialized with memory_region_init_io())
23498be545baSRichard Henderson * as a trigger for an eventfd event. The I/O callback will not be called.
23508be545baSRichard Henderson * The caller must be prepared to handle failure (that is, take the required
23518be545baSRichard Henderson * action if the callback _is_ called).
23528be545baSRichard Henderson *
23538be545baSRichard Henderson * @mr: the memory region being updated.
23548be545baSRichard Henderson * @addr: the address within @mr that is to be monitored
23558be545baSRichard Henderson * @size: the size of the access to trigger the eventfd
23568be545baSRichard Henderson * @match_data: whether to match against @data, instead of just @addr
23578be545baSRichard Henderson * @data: the data to match against the guest write
23588be545baSRichard Henderson * @e: event notifier to be triggered when @addr, @size, and @data all match.
23598be545baSRichard Henderson **/
23608be545baSRichard Henderson void memory_region_add_eventfd(MemoryRegion *mr,
23618be545baSRichard Henderson hwaddr addr,
23628be545baSRichard Henderson unsigned size,
23638be545baSRichard Henderson bool match_data,
23648be545baSRichard Henderson uint64_t data,
23658be545baSRichard Henderson EventNotifier *e);
23668be545baSRichard Henderson
23678be545baSRichard Henderson /**
23688be545baSRichard Henderson * memory_region_del_eventfd: Cancel an eventfd.
23698be545baSRichard Henderson *
23708be545baSRichard Henderson * Cancels an eventfd trigger requested by a previous
23718be545baSRichard Henderson * memory_region_add_eventfd() call.
23728be545baSRichard Henderson *
23738be545baSRichard Henderson * @mr: the memory region being updated.
23748be545baSRichard Henderson * @addr: the address within @mr that is to be monitored
23758be545baSRichard Henderson * @size: the size of the access to trigger the eventfd
23768be545baSRichard Henderson * @match_data: whether to match against @data, instead of just @addr
23778be545baSRichard Henderson * @data: the data to match against the guest write
23788be545baSRichard Henderson * @e: event notifier to be triggered when @addr, @size, and @data all match.
23798be545baSRichard Henderson */
23808be545baSRichard Henderson void memory_region_del_eventfd(MemoryRegion *mr,
23818be545baSRichard Henderson hwaddr addr,
23828be545baSRichard Henderson unsigned size,
23838be545baSRichard Henderson bool match_data,
23848be545baSRichard Henderson uint64_t data,
23858be545baSRichard Henderson EventNotifier *e);
23868be545baSRichard Henderson
23878be545baSRichard Henderson /**
23888be545baSRichard Henderson * memory_region_add_subregion: Add a subregion to a container.
23898be545baSRichard Henderson *
23908be545baSRichard Henderson * Adds a subregion at @offset. The subregion may not overlap with other
23918be545baSRichard Henderson * subregions (except for those explicitly marked as overlapping). A region
23928be545baSRichard Henderson * may only be added once as a subregion (unless removed with
23938be545baSRichard Henderson * memory_region_del_subregion()); use memory_region_init_alias() if you
23948be545baSRichard Henderson * want a region to be a subregion in multiple locations.
23958be545baSRichard Henderson *
23968be545baSRichard Henderson * @mr: the region to contain the new subregion; must be a container
23978be545baSRichard Henderson * initialized with memory_region_init().
23988be545baSRichard Henderson * @offset: the offset relative to @mr where @subregion is added.
23998be545baSRichard Henderson * @subregion: the subregion to be added.
24008be545baSRichard Henderson */
24018be545baSRichard Henderson void memory_region_add_subregion(MemoryRegion *mr,
24028be545baSRichard Henderson hwaddr offset,
24038be545baSRichard Henderson MemoryRegion *subregion);
24048be545baSRichard Henderson /**
24058be545baSRichard Henderson * memory_region_add_subregion_overlap: Add a subregion to a container
24068be545baSRichard Henderson * with overlap.
24078be545baSRichard Henderson *
24088be545baSRichard Henderson * Adds a subregion at @offset. The subregion may overlap with other
24098be545baSRichard Henderson * subregions. Conflicts are resolved by having a higher @priority hide a
24108be545baSRichard Henderson * lower @priority. Subregions without priority are taken as @priority 0.
24118be545baSRichard Henderson * A region may only be added once as a subregion (unless removed with
24128be545baSRichard Henderson * memory_region_del_subregion()); use memory_region_init_alias() if you
24138be545baSRichard Henderson * want a region to be a subregion in multiple locations.
24148be545baSRichard Henderson *
24158be545baSRichard Henderson * @mr: the region to contain the new subregion; must be a container
24168be545baSRichard Henderson * initialized with memory_region_init().
24178be545baSRichard Henderson * @offset: the offset relative to @mr where @subregion is added.
24188be545baSRichard Henderson * @subregion: the subregion to be added.
24198be545baSRichard Henderson * @priority: used for resolving overlaps; highest priority wins.
24208be545baSRichard Henderson */
24218be545baSRichard Henderson void memory_region_add_subregion_overlap(MemoryRegion *mr,
24228be545baSRichard Henderson hwaddr offset,
24238be545baSRichard Henderson MemoryRegion *subregion,
24248be545baSRichard Henderson int priority);
24258be545baSRichard Henderson
24268be545baSRichard Henderson /**
24278be545baSRichard Henderson * memory_region_get_ram_addr: Get the ram address associated with a memory
24288be545baSRichard Henderson * region
24298be545baSRichard Henderson *
24308be545baSRichard Henderson * @mr: the region to be queried
24318be545baSRichard Henderson */
24328be545baSRichard Henderson ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
24338be545baSRichard Henderson
24348be545baSRichard Henderson uint64_t memory_region_get_alignment(const MemoryRegion *mr);
24358be545baSRichard Henderson /**
24368be545baSRichard Henderson * memory_region_del_subregion: Remove a subregion.
24378be545baSRichard Henderson *
24388be545baSRichard Henderson * Removes a subregion from its container.
24398be545baSRichard Henderson *
24408be545baSRichard Henderson * @mr: the container to be updated.
24418be545baSRichard Henderson * @subregion: the region being removed; must be a current subregion of @mr.
24428be545baSRichard Henderson */
24438be545baSRichard Henderson void memory_region_del_subregion(MemoryRegion *mr,
24448be545baSRichard Henderson MemoryRegion *subregion);
24458be545baSRichard Henderson
24468be545baSRichard Henderson /*
24478be545baSRichard Henderson * memory_region_set_enabled: dynamically enable or disable a region
24488be545baSRichard Henderson *
24498be545baSRichard Henderson * Enables or disables a memory region. A disabled memory region
24508be545baSRichard Henderson * ignores all accesses to itself and its subregions. It does not
24518be545baSRichard Henderson * obscure sibling subregions with lower priority - it simply behaves as
24528be545baSRichard Henderson * if it was removed from the hierarchy.
24538be545baSRichard Henderson *
24548be545baSRichard Henderson * Regions default to being enabled.
24558be545baSRichard Henderson *
24568be545baSRichard Henderson * @mr: the region to be updated
24578be545baSRichard Henderson * @enabled: whether to enable or disable the region
24588be545baSRichard Henderson */
24598be545baSRichard Henderson void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
24608be545baSRichard Henderson
24618be545baSRichard Henderson /*
24628be545baSRichard Henderson * memory_region_set_address: dynamically update the address of a region
24638be545baSRichard Henderson *
24648be545baSRichard Henderson * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
24668be545baSRichard Henderson *
24678be545baSRichard Henderson * @mr: the region to be updated
24688be545baSRichard Henderson * @addr: new address, relative to container region
24698be545baSRichard Henderson */
24708be545baSRichard Henderson void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
24718be545baSRichard Henderson
24728be545baSRichard Henderson /*
24738be545baSRichard Henderson * memory_region_set_size: dynamically update the size of a region.
24748be545baSRichard Henderson *
24758be545baSRichard Henderson * Dynamically updates the size of a region.
24768be545baSRichard Henderson *
24778be545baSRichard Henderson * @mr: the region to be updated
24788be545baSRichard Henderson * @size: used size of the region.
24798be545baSRichard Henderson */
24808be545baSRichard Henderson void memory_region_set_size(MemoryRegion *mr, uint64_t size);
24818be545baSRichard Henderson
24828be545baSRichard Henderson /*
24838be545baSRichard Henderson * memory_region_set_alias_offset: dynamically update a memory alias's offset
24848be545baSRichard Henderson *
24858be545baSRichard Henderson * Dynamically updates the offset into the target region that an alias points
24868be545baSRichard Henderson * to, as if the fourth argument to memory_region_init_alias() has changed.
24878be545baSRichard Henderson *
24888be545baSRichard Henderson * @mr: the #MemoryRegion to be updated; should be an alias.
24898be545baSRichard Henderson * @offset: the new offset into the target memory region
24908be545baSRichard Henderson */
24918be545baSRichard Henderson void memory_region_set_alias_offset(MemoryRegion *mr,
24928be545baSRichard Henderson hwaddr offset);
24938be545baSRichard Henderson
24948be545baSRichard Henderson /*
24958be545baSRichard Henderson * memory_region_set_unmergeable: Set a memory region unmergeable
24968be545baSRichard Henderson *
24978be545baSRichard Henderson * Mark a memory region unmergeable, resulting in the memory region (or
24988be545baSRichard Henderson * everything contained in a memory region container) not getting merged when
24998be545baSRichard Henderson * simplifying the address space and notifying memory listeners. Consequently,
25008be545baSRichard Henderson * memory listeners will never get notified about ranges that are larger than
25018be545baSRichard Henderson * the original memory regions.
25028be545baSRichard Henderson *
25038be545baSRichard Henderson * This is primarily useful when multiple aliases to a RAM memory region are
25048be545baSRichard Henderson * mapped into a memory region container, and updates (e.g., enable/disable or
25058be545baSRichard Henderson * map/unmap) of individual memory region aliases are not supposed to affect
25068be545baSRichard Henderson * other memory regions in the same container.
25078be545baSRichard Henderson *
25088be545baSRichard Henderson * @mr: the #MemoryRegion to be updated
25098be545baSRichard Henderson * @unmergeable: whether to mark the #MemoryRegion unmergeable
25108be545baSRichard Henderson */
25118be545baSRichard Henderson void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable);
25128be545baSRichard Henderson
25138be545baSRichard Henderson /**
25148be545baSRichard Henderson * memory_region_present: checks if an address relative to a @container
25158be545baSRichard Henderson * translates into #MemoryRegion within @container
25168be545baSRichard Henderson *
25178be545baSRichard Henderson * Answer whether a #MemoryRegion within @container covers the address
25188be545baSRichard Henderson * @addr.
25198be545baSRichard Henderson *
25208be545baSRichard Henderson * @container: a #MemoryRegion within which @addr is a relative address
25218be545baSRichard Henderson * @addr: the area within @container to be searched
25228be545baSRichard Henderson */
25238be545baSRichard Henderson bool memory_region_present(MemoryRegion *container, hwaddr addr);
25248be545baSRichard Henderson
25258be545baSRichard Henderson /**
25268be545baSRichard Henderson * memory_region_is_mapped: returns true if #MemoryRegion is mapped
25278be545baSRichard Henderson * into another memory region, which does not necessarily imply that it is
25288be545baSRichard Henderson * mapped into an address space.
25298be545baSRichard Henderson *
25308be545baSRichard Henderson * @mr: a #MemoryRegion which should be checked if it's mapped
25318be545baSRichard Henderson */
25328be545baSRichard Henderson bool memory_region_is_mapped(MemoryRegion *mr);
25338be545baSRichard Henderson
25348be545baSRichard Henderson /**
25358be545baSRichard Henderson * memory_region_get_ram_discard_manager: get the #RamDiscardManager for a
25368be545baSRichard Henderson * #MemoryRegion
25378be545baSRichard Henderson *
25388be545baSRichard Henderson * The #RamDiscardManager cannot change while a memory region is mapped.
25398be545baSRichard Henderson *
25408be545baSRichard Henderson * @mr: the #MemoryRegion
25418be545baSRichard Henderson */
25428be545baSRichard Henderson RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr);
25438be545baSRichard Henderson
25448be545baSRichard Henderson /**
25458be545baSRichard Henderson * memory_region_has_ram_discard_manager: check whether a #MemoryRegion has a
25468be545baSRichard Henderson * #RamDiscardManager assigned
25478be545baSRichard Henderson *
25488be545baSRichard Henderson * @mr: the #MemoryRegion
25498be545baSRichard Henderson */
static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr)
{
    /* A region has a RamDiscardManager iff one is currently registered. */
    return memory_region_get_ram_discard_manager(mr) != NULL;
}
25548be545baSRichard Henderson
25558be545baSRichard Henderson /**
25568be545baSRichard Henderson * memory_region_set_ram_discard_manager: set the #RamDiscardManager for a
25578be545baSRichard Henderson * #MemoryRegion
25588be545baSRichard Henderson *
25598be545baSRichard Henderson * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion
25608be545baSRichard Henderson * that does not cover RAM, or a #MemoryRegion that already has a
2561ff121115SChenyi Qiang * #RamDiscardManager assigned. Return 0 if the rdm is set successfully.
25628be545baSRichard Henderson *
25638be545baSRichard Henderson * @mr: the #MemoryRegion
25648be545baSRichard Henderson * @rdm: #RamDiscardManager to set
25658be545baSRichard Henderson */
2566ff121115SChenyi Qiang int memory_region_set_ram_discard_manager(MemoryRegion *mr,
25678be545baSRichard Henderson RamDiscardManager *rdm);
25688be545baSRichard Henderson
25698be545baSRichard Henderson /**
25708be545baSRichard Henderson * memory_region_find: translate an address/size relative to a
25718be545baSRichard Henderson * MemoryRegion into a #MemoryRegionSection.
25728be545baSRichard Henderson *
25738be545baSRichard Henderson * Locates the first #MemoryRegion within @mr that overlaps the range
25748be545baSRichard Henderson * given by @addr and @size.
25758be545baSRichard Henderson *
25768be545baSRichard Henderson * Returns a #MemoryRegionSection that describes a contiguous overlap.
25778be545baSRichard Henderson * It will have the following characteristics:
25788be545baSRichard Henderson * - @size = 0 iff no overlap was found
25798be545baSRichard Henderson * - @mr is non-%NULL iff an overlap was found
25808be545baSRichard Henderson *
25818be545baSRichard Henderson * Remember that in the return value the @offset_within_region is
25828be545baSRichard Henderson * relative to the returned region (in the .@mr field), not to the
25838be545baSRichard Henderson * @mr argument.
25848be545baSRichard Henderson *
25858be545baSRichard Henderson * Similarly, the .@offset_within_address_space is relative to the
25868be545baSRichard Henderson * address space that contains both regions, the passed and the
25878be545baSRichard Henderson * returned one. However, in the special case where the @mr argument
25888be545baSRichard Henderson * has no container (and thus is the root of the address space), the
25898be545baSRichard Henderson * following will hold:
25908be545baSRichard Henderson * - @offset_within_address_space >= @addr
25918be545baSRichard Henderson * - @offset_within_address_space + .@size <= @addr + @size
25928be545baSRichard Henderson *
25938be545baSRichard Henderson * @mr: a MemoryRegion within which @addr is a relative address
25948be545baSRichard Henderson * @addr: start of the area within @as to be searched
25958be545baSRichard Henderson * @size: size of the area to be searched
25968be545baSRichard Henderson */
25978be545baSRichard Henderson MemoryRegionSection memory_region_find(MemoryRegion *mr,
25988be545baSRichard Henderson hwaddr addr, uint64_t size);
25998be545baSRichard Henderson
26008be545baSRichard Henderson /**
26018be545baSRichard Henderson * memory_global_dirty_log_sync: synchronize the dirty log for all memory
26028be545baSRichard Henderson *
26038be545baSRichard Henderson * Synchronizes the dirty page log for all address spaces.
26048be545baSRichard Henderson *
26058be545baSRichard Henderson * @last_stage: whether this is the last stage of live migration
26068be545baSRichard Henderson */
26078be545baSRichard Henderson void memory_global_dirty_log_sync(bool last_stage);
26088be545baSRichard Henderson
26098be545baSRichard Henderson /**
26108be545baSRichard Henderson * memory_global_after_dirty_log_sync: synchronize the dirty log for all memory
26118be545baSRichard Henderson *
26128be545baSRichard Henderson * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
26138be545baSRichard Henderson * This function must be called after the dirty log bitmap is cleared, and
26148be545baSRichard Henderson * before dirty guest memory pages are read. If you are using
26158be545baSRichard Henderson * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
26168be545baSRichard Henderson * care of doing this.
26178be545baSRichard Henderson */
26188be545baSRichard Henderson void memory_global_after_dirty_log_sync(void);
26198be545baSRichard Henderson
26208be545baSRichard Henderson /**
26218be545baSRichard Henderson * memory_region_transaction_begin: Start a transaction.
26228be545baSRichard Henderson *
26238be545baSRichard Henderson * During a transaction, changes will be accumulated and made visible
26248be545baSRichard Henderson * only when the transaction ends (is committed).
26258be545baSRichard Henderson */
26268be545baSRichard Henderson void memory_region_transaction_begin(void);
26278be545baSRichard Henderson
26288be545baSRichard Henderson /**
26298be545baSRichard Henderson * memory_region_transaction_commit: Commit a transaction and make changes
26308be545baSRichard Henderson * visible to the guest.
26318be545baSRichard Henderson */
26328be545baSRichard Henderson void memory_region_transaction_commit(void);
26338be545baSRichard Henderson
26348be545baSRichard Henderson /**
26358be545baSRichard Henderson * memory_listener_register: register callbacks to be called when memory
26368be545baSRichard Henderson * sections are mapped or unmapped into an address
26378be545baSRichard Henderson * space
26388be545baSRichard Henderson *
26398be545baSRichard Henderson * @listener: an object containing the callbacks to be called
26408be545baSRichard Henderson * @filter: if non-%NULL, only regions in this address space will be observed
26418be545baSRichard Henderson */
26428be545baSRichard Henderson void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
26438be545baSRichard Henderson
26448be545baSRichard Henderson /**
26458be545baSRichard Henderson * memory_listener_unregister: undo the effect of memory_listener_register()
26468be545baSRichard Henderson *
26478be545baSRichard Henderson * @listener: an object containing the callbacks to be removed
26488be545baSRichard Henderson */
26498be545baSRichard Henderson void memory_listener_unregister(MemoryListener *listener);
26508be545baSRichard Henderson
26518be545baSRichard Henderson /**
26528be545baSRichard Henderson * memory_global_dirty_log_start: begin dirty logging for all regions
26538be545baSRichard Henderson *
26548be545baSRichard Henderson * @flags: purpose of starting dirty log, migration or dirty rate
26558be545baSRichard Henderson * @errp: pointer to Error*, to store an error if it happens.
26568be545baSRichard Henderson *
26578be545baSRichard Henderson * Return: true on success, else false setting @errp with error.
26588be545baSRichard Henderson */
26598be545baSRichard Henderson bool memory_global_dirty_log_start(unsigned int flags, Error **errp);
26608be545baSRichard Henderson
26618be545baSRichard Henderson /**
26628be545baSRichard Henderson * memory_global_dirty_log_stop: end dirty logging for all regions
26638be545baSRichard Henderson *
26648be545baSRichard Henderson * @flags: purpose of stopping dirty log, migration or dirty rate
26658be545baSRichard Henderson */
26668be545baSRichard Henderson void memory_global_dirty_log_stop(unsigned int flags);
26678be545baSRichard Henderson
26688be545baSRichard Henderson void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
26698be545baSRichard Henderson
26708be545baSRichard Henderson bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
26718be545baSRichard Henderson unsigned size, bool is_write,
26728be545baSRichard Henderson MemTxAttrs attrs);
26738be545baSRichard Henderson
26748be545baSRichard Henderson /**
26758be545baSRichard Henderson * memory_region_dispatch_read: perform a read directly to the specified
26768be545baSRichard Henderson * MemoryRegion.
26778be545baSRichard Henderson *
26788be545baSRichard Henderson * @mr: #MemoryRegion to access
26798be545baSRichard Henderson * @addr: address within that region
26808be545baSRichard Henderson * @pval: pointer to uint64_t which the data is written to
26818be545baSRichard Henderson * @op: size, sign, and endianness of the memory operation
26828be545baSRichard Henderson * @attrs: memory transaction attributes to use for the access
26838be545baSRichard Henderson */
26848be545baSRichard Henderson MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
26858be545baSRichard Henderson hwaddr addr,
26868be545baSRichard Henderson uint64_t *pval,
26878be545baSRichard Henderson MemOp op,
26888be545baSRichard Henderson MemTxAttrs attrs);
26898be545baSRichard Henderson /**
26908be545baSRichard Henderson * memory_region_dispatch_write: perform a write directly to the specified
26918be545baSRichard Henderson * MemoryRegion.
26928be545baSRichard Henderson *
26938be545baSRichard Henderson * @mr: #MemoryRegion to access
26948be545baSRichard Henderson * @addr: address within that region
26958be545baSRichard Henderson * @data: data to write
26968be545baSRichard Henderson * @op: size, sign, and endianness of the memory operation
26978be545baSRichard Henderson * @attrs: memory transaction attributes to use for the access
26988be545baSRichard Henderson */
26998be545baSRichard Henderson MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
27008be545baSRichard Henderson hwaddr addr,
27018be545baSRichard Henderson uint64_t data,
27028be545baSRichard Henderson MemOp op,
27038be545baSRichard Henderson MemTxAttrs attrs);
27048be545baSRichard Henderson
27058be545baSRichard Henderson /**
27068be545baSRichard Henderson * address_space_init: initializes an address space
27078be545baSRichard Henderson *
27088be545baSRichard Henderson * @as: an uninitialized #AddressSpace
27098be545baSRichard Henderson * @root: a #MemoryRegion that routes addresses for the address space
27108be545baSRichard Henderson * @name: an address space name. The name is only used for debugging
27118be545baSRichard Henderson * output.
27128be545baSRichard Henderson */
27138be545baSRichard Henderson void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
27148be545baSRichard Henderson
27158be545baSRichard Henderson /**
27168be545baSRichard Henderson * address_space_destroy: destroy an address space
27178be545baSRichard Henderson *
27188be545baSRichard Henderson * Releases all resources associated with an address space. After an address space
27198be545baSRichard Henderson * is destroyed, its root memory region (given by address_space_init()) may be destroyed
27208be545baSRichard Henderson * as well.
27218be545baSRichard Henderson *
27228be545baSRichard Henderson * @as: address space to be destroyed
27238be545baSRichard Henderson */
27248be545baSRichard Henderson void address_space_destroy(AddressSpace *as);
27258be545baSRichard Henderson
27268be545baSRichard Henderson /**
27278be545baSRichard Henderson * address_space_remove_listeners: unregister all listeners of an address space
27288be545baSRichard Henderson *
27298be545baSRichard Henderson * Removes all callbacks previously registered with memory_listener_register()
27308be545baSRichard Henderson * for @as.
27318be545baSRichard Henderson *
27328be545baSRichard Henderson * @as: an initialized #AddressSpace
27338be545baSRichard Henderson */
27348be545baSRichard Henderson void address_space_remove_listeners(AddressSpace *as);
27358be545baSRichard Henderson
27368be545baSRichard Henderson /**
27378be545baSRichard Henderson * address_space_rw: read from or write to an address space.
27388be545baSRichard Henderson *
27398be545baSRichard Henderson * Return a MemTxResult indicating whether the operation succeeded
27408be545baSRichard Henderson * or failed (eg unassigned memory, device rejected the transaction,
27418be545baSRichard Henderson * IOMMU fault).
27428be545baSRichard Henderson *
27438be545baSRichard Henderson * @as: #AddressSpace to be accessed
27448be545baSRichard Henderson * @addr: address within that address space
27458be545baSRichard Henderson * @attrs: memory transaction attributes
27468be545baSRichard Henderson * @buf: buffer with the data transferred
27478be545baSRichard Henderson * @len: the number of bytes to read or write
27488be545baSRichard Henderson * @is_write: indicates the transfer direction
27498be545baSRichard Henderson */
27508be545baSRichard Henderson MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
27518be545baSRichard Henderson MemTxAttrs attrs, void *buf,
27528be545baSRichard Henderson hwaddr len, bool is_write);
27538be545baSRichard Henderson
27548be545baSRichard Henderson /**
27558be545baSRichard Henderson * address_space_write: write to address space.
27568be545baSRichard Henderson *
27578be545baSRichard Henderson * Return a MemTxResult indicating whether the operation succeeded
27588be545baSRichard Henderson * or failed (eg unassigned memory, device rejected the transaction,
27598be545baSRichard Henderson * IOMMU fault).
27608be545baSRichard Henderson *
27618be545baSRichard Henderson * @as: #AddressSpace to be accessed
27628be545baSRichard Henderson * @addr: address within that address space
27638be545baSRichard Henderson * @attrs: memory transaction attributes
27648be545baSRichard Henderson * @buf: buffer with the data transferred
27658be545baSRichard Henderson * @len: the number of bytes to write
27668be545baSRichard Henderson */
27678be545baSRichard Henderson MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
27688be545baSRichard Henderson MemTxAttrs attrs,
27698be545baSRichard Henderson const void *buf, hwaddr len);
27708be545baSRichard Henderson
27718be545baSRichard Henderson /**
27728be545baSRichard Henderson * address_space_write_rom: write to address space, including ROM.
27738be545baSRichard Henderson *
27748be545baSRichard Henderson * This function writes to the specified address space, but will
27758be545baSRichard Henderson * write data to both ROM and RAM. This is used for non-guest
27768be545baSRichard Henderson * writes like writes from the gdb debug stub or initial loading
27778be545baSRichard Henderson * of ROM contents.
27788be545baSRichard Henderson *
27798be545baSRichard Henderson * Note that portions of the write which attempt to write data to
27808be545baSRichard Henderson * a device will be silently ignored -- only real RAM and ROM will
27818be545baSRichard Henderson * be written to.
27828be545baSRichard Henderson *
27838be545baSRichard Henderson * Return a MemTxResult indicating whether the operation succeeded
27848be545baSRichard Henderson * or failed (eg unassigned memory, device rejected the transaction,
27858be545baSRichard Henderson * IOMMU fault).
27868be545baSRichard Henderson *
27878be545baSRichard Henderson * @as: #AddressSpace to be accessed
27888be545baSRichard Henderson * @addr: address within that address space
27898be545baSRichard Henderson * @attrs: memory transaction attributes
27908be545baSRichard Henderson * @buf: buffer with the data transferred
27918be545baSRichard Henderson * @len: the number of bytes to write
27928be545baSRichard Henderson */
27938be545baSRichard Henderson MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
27948be545baSRichard Henderson MemTxAttrs attrs,
27958be545baSRichard Henderson const void *buf, hwaddr len);
27968be545baSRichard Henderson
27978be545baSRichard Henderson /* address_space_ld*: load from an address space
27988be545baSRichard Henderson * address_space_st*: store to an address space
27998be545baSRichard Henderson *
28008be545baSRichard Henderson * These functions perform a load or store of the byte, word,
28018be545baSRichard Henderson * longword or quad to the specified address within the AddressSpace.
28028be545baSRichard Henderson * The _le suffixed functions treat the data as little endian;
28038be545baSRichard Henderson * _be indicates big endian; no suffix indicates "same endianness
28048be545baSRichard Henderson * as guest CPU".
28058be545baSRichard Henderson *
28068be545baSRichard Henderson * The "guest CPU endianness" accessors are deprecated for use outside
28078be545baSRichard Henderson * target-* code; devices should be CPU-agnostic and use either the LE
28088be545baSRichard Henderson * or the BE accessors.
28098be545baSRichard Henderson *
 * @as: #AddressSpace to be accessed
28118be545baSRichard Henderson * @addr: address within that address space
28128be545baSRichard Henderson * @val: data value, for stores
28138be545baSRichard Henderson * @attrs: memory transaction attributes
28148be545baSRichard Henderson * @result: location to write the success/failure of the transaction;
28158be545baSRichard Henderson * if NULL, this information is discarded
28168be545baSRichard Henderson */
28178be545baSRichard Henderson
28188be545baSRichard Henderson #define SUFFIX
28198be545baSRichard Henderson #define ARG1 as
28208be545baSRichard Henderson #define ARG1_DECL AddressSpace *as
28218be545baSRichard Henderson #include "exec/memory_ldst.h.inc"
28228be545baSRichard Henderson
/* Convenience wrapper: 32-bit store that skips dirty tracking, using
 * unspecified transaction attributes and discarding the MemTxResult.
 */
static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
28288be545baSRichard Henderson
28298be545baSRichard Henderson #define SUFFIX
28308be545baSRichard Henderson #define ARG1 as
28318be545baSRichard Henderson #define ARG1_DECL AddressSpace *as
28328be545baSRichard Henderson #include "exec/memory_ldst_phys.h.inc"
28338be545baSRichard Henderson
/*
 * State cached by address_space_cache_init() for repeated accesses to one
 * physical memory range; consumed by the address_space_*_cached accessors.
 */
struct MemoryRegionCache {
    uint8_t *ptr;            /* host pointer for direct access; NULL forces
                              * the *_cached_slow fallback path */
    hwaddr xlat;             /* translated offset — presumably within the
                              * cached region's #MemoryRegion; confirm */
    hwaddr len;              /* length of the cached range; accessor offsets
                              * are asserted to be < len */
    FlatView *fv;            /* flat view the cache was resolved against */
    MemoryRegionSection mrs; /* section covering the cached range */
    bool is_write;           /* transfer direction the cache was set up for */
};
28428be545baSRichard Henderson
28438be545baSRichard Henderson /* address_space_ld*_cached: load from a cached #MemoryRegion
28448be545baSRichard Henderson * address_space_st*_cached: store into a cached #MemoryRegion
28458be545baSRichard Henderson *
28468be545baSRichard Henderson * These functions perform a load or store of the byte, word,
28478be545baSRichard Henderson * longword or quad to the specified address. The address is
28488be545baSRichard Henderson * a physical address in the AddressSpace, but it must lie within
28498be545baSRichard Henderson * a #MemoryRegion that was mapped with address_space_cache_init.
28508be545baSRichard Henderson *
28518be545baSRichard Henderson * The _le suffixed functions treat the data as little endian;
28528be545baSRichard Henderson * _be indicates big endian; no suffix indicates "same endianness
28538be545baSRichard Henderson * as guest CPU".
28548be545baSRichard Henderson *
28558be545baSRichard Henderson * The "guest CPU endianness" accessors are deprecated for use outside
28568be545baSRichard Henderson * target-* code; devices should be CPU-agnostic and use either the LE
28578be545baSRichard Henderson * or the BE accessors.
28588be545baSRichard Henderson *
28598be545baSRichard Henderson * @cache: previously initialized #MemoryRegionCache to be accessed
28608be545baSRichard Henderson * @addr: address within the address space
28618be545baSRichard Henderson * @val: data value, for stores
28628be545baSRichard Henderson * @attrs: memory transaction attributes
28638be545baSRichard Henderson * @result: location to write the success/failure of the transaction;
28648be545baSRichard Henderson * if NULL, this information is discarded
28658be545baSRichard Henderson */
28668be545baSRichard Henderson
28678be545baSRichard Henderson #define SUFFIX _cached_slow
28688be545baSRichard Henderson #define ARG1 cache
28698be545baSRichard Henderson #define ARG1_DECL MemoryRegionCache *cache
28708be545baSRichard Henderson #include "exec/memory_ldst.h.inc"
28718be545baSRichard Henderson
28728be545baSRichard Henderson /* Inline fast path for direct RAM access. */
static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    /* @addr is relative to the cached range and must lie inside it. */
    assert(addr < cache->len);

    if (unlikely(!cache->ptr)) {
        /* No direct host pointer: take the slow helper instead. */
        return address_space_ldub_cached_slow(cache, addr, attrs, result);
    }
    return ldub_p(cache->ptr + addr);
}
28838be545baSRichard Henderson
static inline void address_space_stb_cached(MemoryRegionCache *cache,
    hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
{
    /* @addr is relative to the cached range and must lie inside it. */
    assert(addr < cache->len);

    if (unlikely(!cache->ptr)) {
        /* No direct host pointer: take the slow helper instead. */
        address_space_stb_cached_slow(cache, addr, val, attrs, result);
        return;
    }
    stb_p(cache->ptr + addr, val);
}
28948be545baSRichard Henderson
28958be545baSRichard Henderson #define ENDIANNESS
28968be545baSRichard Henderson #include "exec/memory_ldst_cached.h.inc"
28978be545baSRichard Henderson
28988be545baSRichard Henderson #define ENDIANNESS _le
28998be545baSRichard Henderson #include "exec/memory_ldst_cached.h.inc"
29008be545baSRichard Henderson
29018be545baSRichard Henderson #define ENDIANNESS _be
29028be545baSRichard Henderson #include "exec/memory_ldst_cached.h.inc"
29038be545baSRichard Henderson
29048be545baSRichard Henderson #define SUFFIX _cached
29058be545baSRichard Henderson #define ARG1 cache
29068be545baSRichard Henderson #define ARG1_DECL MemoryRegionCache *cache
29078be545baSRichard Henderson #include "exec/memory_ldst_phys.h.inc"
29088be545baSRichard Henderson
29098be545baSRichard Henderson /* address_space_cache_init: prepare for repeated access to a physical
29108be545baSRichard Henderson * memory region
29118be545baSRichard Henderson *
29128be545baSRichard Henderson * @cache: #MemoryRegionCache to be filled
29138be545baSRichard Henderson * @as: #AddressSpace to be accessed
29148be545baSRichard Henderson * @addr: address within that address space
29158be545baSRichard Henderson * @len: length of buffer
29168be545baSRichard Henderson * @is_write: indicates the transfer direction
29178be545baSRichard Henderson *
29188be545baSRichard Henderson * Will only work with RAM, and may map a subset of the requested range by
29198be545baSRichard Henderson * returning a value that is less than @len. On failure, return a negative
29208be545baSRichard Henderson * errno value.
29218be545baSRichard Henderson *
29228be545baSRichard Henderson * Because it only works with RAM, this function can be used for
29238be545baSRichard Henderson * read-modify-write operations. In this case, is_write should be %true.
29248be545baSRichard Henderson *
29258be545baSRichard Henderson * Note that addresses passed to the address_space_*_cached functions
29268be545baSRichard Henderson * are relative to @addr.
29278be545baSRichard Henderson */
29288be545baSRichard Henderson int64_t address_space_cache_init(MemoryRegionCache *cache,
29298be545baSRichard Henderson AddressSpace *as,
29308be545baSRichard Henderson hwaddr addr,
29318be545baSRichard Henderson hwaddr len,
29328be545baSRichard Henderson bool is_write);
29338be545baSRichard Henderson
29348be545baSRichard Henderson /**
29358be545baSRichard Henderson * address_space_cache_init_empty: Initialize empty #MemoryRegionCache
29368be545baSRichard Henderson *
29378be545baSRichard Henderson * @cache: The #MemoryRegionCache to operate on.
29388be545baSRichard Henderson *
29398be545baSRichard Henderson * Initializes #MemoryRegionCache structure without memory region attached.
29408be545baSRichard Henderson * Cache initialized this way can only be safely destroyed, but not used.
29418be545baSRichard Henderson */
static inline void address_space_cache_init_empty(MemoryRegionCache *cache)
{
    /*
     * Mark the cache as holding no region.  A cache initialized this way
     * may only be destroyed; it must never be used for accesses.
     */
    cache->fv = NULL;     /* not strictly required; keeps Coverity happy */
    cache->mrs.mr = NULL;
}
29488be545baSRichard Henderson
29498be545baSRichard Henderson /**
29508be545baSRichard Henderson * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
29518be545baSRichard Henderson *
29528be545baSRichard Henderson * @cache: The #MemoryRegionCache to operate on.
29538be545baSRichard Henderson * @addr: The first physical address that was written, relative to the
29548be545baSRichard Henderson * address that was passed to @address_space_cache_init.
29558be545baSRichard Henderson * @access_len: The number of bytes that were written starting at @addr.
29568be545baSRichard Henderson */
29578be545baSRichard Henderson void address_space_cache_invalidate(MemoryRegionCache *cache,
29588be545baSRichard Henderson hwaddr addr,
29598be545baSRichard Henderson hwaddr access_len);
29608be545baSRichard Henderson
29618be545baSRichard Henderson /**
29628be545baSRichard Henderson * address_space_cache_destroy: free a #MemoryRegionCache
29638be545baSRichard Henderson *
29648be545baSRichard Henderson * @cache: The #MemoryRegionCache whose memory should be released.
29658be545baSRichard Henderson */
29668be545baSRichard Henderson void address_space_cache_destroy(MemoryRegionCache *cache);
29678be545baSRichard Henderson
29688be545baSRichard Henderson /* address_space_get_iotlb_entry: translate an address into an IOTLB
29698be545baSRichard Henderson * entry. Should be called from an RCU critical section.
29708be545baSRichard Henderson */
29718be545baSRichard Henderson IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
29728be545baSRichard Henderson bool is_write, MemTxAttrs attrs);
29738be545baSRichard Henderson
/* address_space_translate: translate an address range within an address space
 * into a MemoryRegion and an address range within that region.  Should be
29768be545baSRichard Henderson * called from an RCU critical section, to avoid that the last reference
29778be545baSRichard Henderson * to the returned region disappears after address_space_translate returns.
29788be545baSRichard Henderson *
29798be545baSRichard Henderson * @fv: #FlatView to be accessed
29808be545baSRichard Henderson * @addr: address within that address space
29818be545baSRichard Henderson * @xlat: pointer to address within the returned memory region section's
29828be545baSRichard Henderson * #MemoryRegion.
29838be545baSRichard Henderson * @len: pointer to length
29848be545baSRichard Henderson * @is_write: indicates the transfer direction
29858be545baSRichard Henderson * @attrs: memory attributes
29868be545baSRichard Henderson */
29878be545baSRichard Henderson MemoryRegion *flatview_translate(FlatView *fv,
29888be545baSRichard Henderson hwaddr addr, hwaddr *xlat,
29898be545baSRichard Henderson hwaddr *len, bool is_write,
29908be545baSRichard Henderson MemTxAttrs attrs);
29918be545baSRichard Henderson
address_space_translate(AddressSpace * as,hwaddr addr,hwaddr * xlat,hwaddr * len,bool is_write,MemTxAttrs attrs)29928be545baSRichard Henderson static inline MemoryRegion *address_space_translate(AddressSpace *as,
29938be545baSRichard Henderson hwaddr addr, hwaddr *xlat,
29948be545baSRichard Henderson hwaddr *len, bool is_write,
29958be545baSRichard Henderson MemTxAttrs attrs)
29968be545baSRichard Henderson {
29978be545baSRichard Henderson return flatview_translate(address_space_to_flatview(as),
29988be545baSRichard Henderson addr, xlat, len, is_write, attrs);
29998be545baSRichard Henderson }
30008be545baSRichard Henderson
30018be545baSRichard Henderson /* address_space_access_valid: check for validity of accessing an address
30028be545baSRichard Henderson * space range
30038be545baSRichard Henderson *
30048be545baSRichard Henderson * Check whether memory is assigned to the given address space range, and
30058be545baSRichard Henderson * access is permitted by any IOMMU regions that are active for the address
30068be545baSRichard Henderson * space.
30078be545baSRichard Henderson *
30088be545baSRichard Henderson * For now, addr and len should be aligned to a page size. This limitation
30098be545baSRichard Henderson * will be lifted in the future.
30108be545baSRichard Henderson *
30118be545baSRichard Henderson * @as: #AddressSpace to be accessed
30128be545baSRichard Henderson * @addr: address within that address space
30138be545baSRichard Henderson * @len: length of the area to be checked
30148be545baSRichard Henderson * @is_write: indicates the transfer direction
30158be545baSRichard Henderson * @attrs: memory attributes
30168be545baSRichard Henderson */
30178be545baSRichard Henderson bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
30188be545baSRichard Henderson bool is_write, MemTxAttrs attrs);
30198be545baSRichard Henderson
30208be545baSRichard Henderson /* address_space_map: map a physical memory region into a host virtual address
30218be545baSRichard Henderson *
30228be545baSRichard Henderson * May map a subset of the requested range, given by and returned in @plen.
30238be545baSRichard Henderson * May return %NULL and set *@plen to zero(0), if resources needed to perform
30248be545baSRichard Henderson * the mapping are exhausted.
30258be545baSRichard Henderson * Use only for reads OR writes - not for read-modify-write operations.
30268be545baSRichard Henderson * Use address_space_register_map_client() to know when retrying the map
30278be545baSRichard Henderson * operation is likely to succeed.
30288be545baSRichard Henderson *
30298be545baSRichard Henderson * @as: #AddressSpace to be accessed
30308be545baSRichard Henderson * @addr: address within that address space
30318be545baSRichard Henderson * @plen: pointer to length of buffer; updated on return
30328be545baSRichard Henderson * @is_write: indicates the transfer direction
30338be545baSRichard Henderson * @attrs: memory attributes
30348be545baSRichard Henderson */
30358be545baSRichard Henderson void *address_space_map(AddressSpace *as, hwaddr addr,
30368be545baSRichard Henderson hwaddr *plen, bool is_write, MemTxAttrs attrs);
30378be545baSRichard Henderson
30388be545baSRichard Henderson /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
30398be545baSRichard Henderson *
30408be545baSRichard Henderson * Will also mark the memory as dirty if @is_write == %true. @access_len gives
30418be545baSRichard Henderson * the amount of memory that was actually read or written by the caller.
30428be545baSRichard Henderson *
30438be545baSRichard Henderson * @as: #AddressSpace used
30448be545baSRichard Henderson * @buffer: host pointer as returned by address_space_map()
30458be545baSRichard Henderson * @len: buffer length as returned by address_space_map()
30468be545baSRichard Henderson * @access_len: amount of data actually transferred
30478be545baSRichard Henderson * @is_write: indicates the transfer direction
30488be545baSRichard Henderson */
30498be545baSRichard Henderson void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
30508be545baSRichard Henderson bool is_write, hwaddr access_len);
30518be545baSRichard Henderson
30528be545baSRichard Henderson /*
30538be545baSRichard Henderson * address_space_register_map_client: Register a callback to invoke when
30548be545baSRichard Henderson * resources for address_space_map() are available again.
30558be545baSRichard Henderson *
30568be545baSRichard Henderson * address_space_map may fail when there are not enough resources available,
30578be545baSRichard Henderson * such as when bounce buffer memory would exceed the limit. The callback can
30588be545baSRichard Henderson * be used to retry the address_space_map operation. Note that the callback
30598be545baSRichard Henderson * gets automatically removed after firing.
30608be545baSRichard Henderson *
30618be545baSRichard Henderson * @as: #AddressSpace to be accessed
30628be545baSRichard Henderson * @bh: callback to invoke when address_space_map() retry is appropriate
30638be545baSRichard Henderson */
30648be545baSRichard Henderson void address_space_register_map_client(AddressSpace *as, QEMUBH *bh);
30658be545baSRichard Henderson
30668be545baSRichard Henderson /*
30678be545baSRichard Henderson * address_space_unregister_map_client: Unregister a callback that has
30688be545baSRichard Henderson * previously been registered and not fired yet.
30698be545baSRichard Henderson *
30708be545baSRichard Henderson * @as: #AddressSpace to be accessed
30718be545baSRichard Henderson * @bh: callback to unregister
30728be545baSRichard Henderson */
30738be545baSRichard Henderson void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh);
30748be545baSRichard Henderson
30758be545baSRichard Henderson /* Internal functions, part of the implementation of address_space_read. */
30768be545baSRichard Henderson MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
30778be545baSRichard Henderson MemTxAttrs attrs, void *buf, hwaddr len);
30788be545baSRichard Henderson MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
30798be545baSRichard Henderson MemTxAttrs attrs, void *buf,
30808be545baSRichard Henderson hwaddr len, hwaddr addr1, hwaddr l,
30818be545baSRichard Henderson MemoryRegion *mr);
30828be545baSRichard Henderson void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
30838be545baSRichard Henderson
30848be545baSRichard Henderson /* Internal functions, part of the implementation of address_space_read_cached
30858be545baSRichard Henderson * and address_space_write_cached. */
30868be545baSRichard Henderson MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
30878be545baSRichard Henderson hwaddr addr, void *buf, hwaddr len);
30888be545baSRichard Henderson MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
30898be545baSRichard Henderson hwaddr addr, const void *buf,
30908be545baSRichard Henderson hwaddr len);
30918be545baSRichard Henderson
30928be545baSRichard Henderson int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr);
30938be545baSRichard Henderson bool prepare_mmio_access(MemoryRegion *mr);
30948be545baSRichard Henderson
/*
 * memory_region_supports_direct_access: check whether a region's contents
 * may be accessed via host pointer + memcpy instead of MMIO dispatch.
 *
 * @mr: the #MemoryRegion to test
 *
 * Returns %true for ROM devices currently in ROMD mode and for plain RAM
 * regions; returns %false for non-RAM regions and for RAM DEVICE regions.
 */
static inline bool memory_region_supports_direct_access(MemoryRegion *mr)
{
    /* ROM DEVICE regions only allow direct access if in ROMD mode. */
    if (memory_region_is_romd(mr)) {
        return true;
    }
    if (!memory_region_is_ram(mr)) {
        return false;
    }
    /*
     * RAM DEVICE regions can be accessed directly using memcpy, but it might
     * be MMIO and access using memcpy can be wrong (e.g., using instructions
     * not intended for MMIO access). So we treat this as IO.
     */
    return !memory_region_is_ram_device(mr);
}
31118be545baSRichard Henderson
/*
 * memory_access_is_direct: check whether a specific access (direction plus
 * transaction attributes) may bypass MMIO dispatch and touch host memory
 * directly.
 *
 * @mr: the #MemoryRegion being accessed
 * @is_write: %true for a write access, %false for a read
 * @attrs: transaction attributes of the access
 */
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write,
                                           MemTxAttrs attrs)
{
    if (!memory_region_supports_direct_access(mr)) {
        return false;
    }
    /* Reads are always fine; so are debug writes, which may patch ROM. */
    if (!is_write || attrs.debug) {
        return true;
    }
    /* Ordinary writes must not target read-only RAM or ROM devices. */
    return !mr->readonly && !mr->rom_device;
}
31248be545baSRichard Henderson
31258be545baSRichard Henderson /**
31268be545baSRichard Henderson * address_space_read: read from an address space.
31278be545baSRichard Henderson *
31288be545baSRichard Henderson * Return a MemTxResult indicating whether the operation succeeded
31298be545baSRichard Henderson * or failed (eg unassigned memory, device rejected the transaction,
31308be545baSRichard Henderson * IOMMU fault). Called within RCU critical section.
31318be545baSRichard Henderson *
31328be545baSRichard Henderson * @as: #AddressSpace to be accessed
31338be545baSRichard Henderson * @addr: address within that address space
31348be545baSRichard Henderson * @attrs: memory transaction attributes
31358be545baSRichard Henderson * @buf: buffer with the data transferred
31368be545baSRichard Henderson * @len: length of the data transferred
31378be545baSRichard Henderson */
static inline __attribute__((__always_inline__))
MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, void *buf,
                               hwaddr len)
{
    MemTxResult result = MEMTX_OK;
    hwaddr l, addr1;
    void *ptr;
    MemoryRegion *mr;
    FlatView *fv;

    /*
     * Fast path: when @len is a compile-time constant, inline the
     * translate-and-memcpy sequence here so short fixed-size reads avoid a
     * call into the out-of-line implementation.
     */
    if (__builtin_constant_p(len)) {
        if (len) {
            /* Hold the RCU read lock across the flatview lookup and copy. */
            RCU_READ_LOCK_GUARD();
            fv = address_space_to_flatview(as);
            l = len;
            mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
            /*
             * Only copy directly if the whole request fits in one region
             * (translate did not shrink @l) and the region allows direct
             * host access; otherwise fall back to the generic loop.
             */
            if (len == l && memory_access_is_direct(mr, false, attrs)) {
                ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
                memcpy(buf, ptr, len);
            } else {
                result = flatview_read_continue(fv, addr, attrs, buf, len,
                                                addr1, l, mr);
            }
        }
        /* A zero-length constant read is a no-op and returns MEMTX_OK. */
    } else {
        /* Variable-length reads go through the full out-of-line path. */
        result = address_space_read_full(as, addr, attrs, buf, len);
    }
    return result;
}
31688be545baSRichard Henderson
31698be545baSRichard Henderson /**
31708be545baSRichard Henderson * address_space_read_cached: read from a cached RAM region
31718be545baSRichard Henderson *
31728be545baSRichard Henderson * @cache: Cached region to be addressed
31738be545baSRichard Henderson * @addr: address relative to the base of the RAM region
31748be545baSRichard Henderson * @buf: buffer with the data transferred
31758be545baSRichard Henderson * @len: length of the data transferred
31768be545baSRichard Henderson */
31778be545baSRichard Henderson static inline MemTxResult
address_space_read_cached(MemoryRegionCache * cache,hwaddr addr,void * buf,hwaddr len)31788be545baSRichard Henderson address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
31798be545baSRichard Henderson void *buf, hwaddr len)
31808be545baSRichard Henderson {
31818be545baSRichard Henderson assert(addr < cache->len && len <= cache->len - addr);
31828be545baSRichard Henderson fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
31838be545baSRichard Henderson if (likely(cache->ptr)) {
31848be545baSRichard Henderson memcpy(buf, cache->ptr + addr, len);
31858be545baSRichard Henderson return MEMTX_OK;
31868be545baSRichard Henderson } else {
31878be545baSRichard Henderson return address_space_read_cached_slow(cache, addr, buf, len);
31888be545baSRichard Henderson }
31898be545baSRichard Henderson }
31908be545baSRichard Henderson
31918be545baSRichard Henderson /**
31928be545baSRichard Henderson * address_space_write_cached: write to a cached RAM region
31938be545baSRichard Henderson *
31948be545baSRichard Henderson * @cache: Cached region to be addressed
31958be545baSRichard Henderson * @addr: address relative to the base of the RAM region
31968be545baSRichard Henderson * @buf: buffer with the data transferred
31978be545baSRichard Henderson * @len: length of the data transferred
31988be545baSRichard Henderson */
31998be545baSRichard Henderson static inline MemTxResult
address_space_write_cached(MemoryRegionCache * cache,hwaddr addr,const void * buf,hwaddr len)32008be545baSRichard Henderson address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
32018be545baSRichard Henderson const void *buf, hwaddr len)
32028be545baSRichard Henderson {
32038be545baSRichard Henderson assert(addr < cache->len && len <= cache->len - addr);
32048be545baSRichard Henderson if (likely(cache->ptr)) {
32058be545baSRichard Henderson memcpy(cache->ptr + addr, buf, len);
32068be545baSRichard Henderson return MEMTX_OK;
32078be545baSRichard Henderson } else {
32088be545baSRichard Henderson return address_space_write_cached_slow(cache, addr, buf, len);
32098be545baSRichard Henderson }
32108be545baSRichard Henderson }
32118be545baSRichard Henderson
32128be545baSRichard Henderson /**
32138be545baSRichard Henderson * address_space_set: Fill address space with a constant byte.
32148be545baSRichard Henderson *
32158be545baSRichard Henderson * Return a MemTxResult indicating whether the operation succeeded
32168be545baSRichard Henderson * or failed (eg unassigned memory, device rejected the transaction,
32178be545baSRichard Henderson * IOMMU fault).
32188be545baSRichard Henderson *
32198be545baSRichard Henderson * @as: #AddressSpace to be accessed
32208be545baSRichard Henderson * @addr: address within that address space
32218be545baSRichard Henderson * @c: constant byte to fill the memory
32228be545baSRichard Henderson * @len: the number of bytes to fill with the constant byte
32238be545baSRichard Henderson * @attrs: memory transaction attributes
32248be545baSRichard Henderson */
32258be545baSRichard Henderson MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
32268be545baSRichard Henderson uint8_t c, hwaddr len, MemTxAttrs attrs);
32278be545baSRichard Henderson
32288be545baSRichard Henderson /*
32298be545baSRichard Henderson * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
32308be545baSRichard Henderson * to manage the actual amount of memory consumed by the VM (then, the memory
32318be545baSRichard Henderson * provided by RAM blocks might be bigger than the desired memory consumption).
32328be545baSRichard Henderson * This *must* be set if:
 * - Discarding parts of a RAM block does not result in the change being
 *   reflected in the VM and the pages getting freed.
 * - All memory in RAM blocks is pinned or duplicated, invalidating any previous
 *   discards blindly.
 * - Discarding parts of a RAM block will result in integrity issues (e.g.,
 *   encrypted VMs).
32398be545baSRichard Henderson * Technologies that only temporarily pin the current working set of a
32408be545baSRichard Henderson * driver are fine, because we don't expect such pages to be discarded
32418be545baSRichard Henderson * (esp. based on guest action like balloon inflation).
32428be545baSRichard Henderson *
32438be545baSRichard Henderson * This is *not* to be used to protect from concurrent discards (esp.,
32448be545baSRichard Henderson * postcopy).
32458be545baSRichard Henderson *
32468be545baSRichard Henderson * Returns 0 if successful. Returns -EBUSY if a technology that relies on
32478be545baSRichard Henderson * discards to work reliably is active.
32488be545baSRichard Henderson */
32498be545baSRichard Henderson int ram_block_discard_disable(bool state);
32508be545baSRichard Henderson
32518be545baSRichard Henderson /*
32528be545baSRichard Henderson * See ram_block_discard_disable(): only disable uncoordinated discards,
32538be545baSRichard Henderson * keeping coordinated discards (via the RamDiscardManager) enabled.
32548be545baSRichard Henderson */
32558be545baSRichard Henderson int ram_block_uncoordinated_discard_disable(bool state);
32568be545baSRichard Henderson
32578be545baSRichard Henderson /*
32588be545baSRichard Henderson * Inhibit technologies that disable discarding of pages in RAM blocks.
32598be545baSRichard Henderson *
32608be545baSRichard Henderson * Returns 0 if successful. Returns -EBUSY if discards are already set to
32618be545baSRichard Henderson * broken.
32628be545baSRichard Henderson */
32638be545baSRichard Henderson int ram_block_discard_require(bool state);
32648be545baSRichard Henderson
32658be545baSRichard Henderson /*
32668be545baSRichard Henderson * See ram_block_discard_require(): only inhibit technologies that disable
32678be545baSRichard Henderson * uncoordinated discarding of pages in RAM blocks, allowing co-existence with
32688be545baSRichard Henderson * technologies that only inhibit uncoordinated discards (via the
32698be545baSRichard Henderson * RamDiscardManager).
32708be545baSRichard Henderson */
32718be545baSRichard Henderson int ram_block_coordinated_discard_require(bool state);
32728be545baSRichard Henderson
32738be545baSRichard Henderson /*
32748be545baSRichard Henderson * Test if any discarding of memory in ram blocks is disabled.
32758be545baSRichard Henderson */
32768be545baSRichard Henderson bool ram_block_discard_is_disabled(void);
32778be545baSRichard Henderson
32788be545baSRichard Henderson /*
32798be545baSRichard Henderson * Test if any discarding of memory in ram blocks is required to work reliably.
32808be545baSRichard Henderson */
32818be545baSRichard Henderson bool ram_block_discard_is_required(void);
32828be545baSRichard Henderson
32838be545baSRichard Henderson void ram_block_add_cpr_blocker(RAMBlock *rb, Error **errp);
32848be545baSRichard Henderson void ram_block_del_cpr_blocker(RAMBlock *rb);
32858be545baSRichard Henderson
32868be545baSRichard Henderson #endif
3287