/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/memop.h"
#include "exec/ramlist.h"
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/range.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "memory-region"
DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
                         TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
                     IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)

#define TYPE_RAM_DISCARD_MANAGER "ram-discard-manager"
typedef struct RamDiscardManagerClass RamDiscardManagerClass;
typedef struct RamDiscardManager RamDiscardManager;
DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
                     RAM_DISCARD_MANAGER, TYPE_RAM_DISCARD_MANAGER);

#ifdef CONFIG_FUZZ
void fuzz_dma_read_cb(size_t addr,
                      size_t len,
                      MemoryRegion *mr);
#else
static inline void fuzz_dma_read_cb(size_t addr,
                                    size_t len,
                                    MemoryRegion *mr)
{
    /* Do Nothing */
}
#endif

/* Possible bits for global_dirty_log_{start|stop} */

/* Dirty tracking enabled because migration is running */
#define GLOBAL_DIRTY_MIGRATION  (1U << 0)

/* Dirty tracking enabled because the dirty rate is being measured */
#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)

/* Dirty tracking enabled because of the dirty limit */
#define GLOBAL_DIRTY_LIMIT      (1U << 2)

#define GLOBAL_DIRTY_MASK  (0x7)

extern unsigned int global_dirty_tracking;

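/*
 * Illustrative sketch (not part of this header): a caller can test why
 * dirty tracking is currently enabled by masking the bitmap, e.g.:
 *
 *     if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
 *         ... migration is consuming the dirty log ...
 *     }
 */
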
typedef struct MemoryRegionOps MemoryRegionOps;

struct ReservedRegion {
    Range range;
    unsigned type;
};

/**
 * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 * @nonvolatile: this section is non-volatile
 * @unmergeable: this section should not get merged with adjacent sections
 */
struct MemoryRegionSection {
    Int128 size;
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    hwaddr offset_within_address_space;
    bool readonly;
    bool nonvolatile;
    bool unmergeable;
};

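/*
 * Illustrative sketch (an assumption, not an API in this header): the
 * absolute guest-physical range covered by a section follows from
 * @offset_within_address_space and @size:
 *
 *     static inline hwaddr section_last_byte(const MemoryRegionSection *s)
 *     {
 *         // Assumes the section size fits in 64 bits; callers that can
 *         // see larger sections must keep the Int128 arithmetic.
 *         return s->offset_within_address_space +
 *                int128_get64(s->size) - 1;
 *     }
 */
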
typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};

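/*
 * Illustrative sketch: a translate implementation might fill in an entry
 * for a read-write 4 KiB page like this (iova/pa are hypothetical values):
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as = &address_space_memory,
 *         .iova = iova & ~(hwaddr)0xfff,
 *         .translated_addr = pa & ~(hwaddr)0xfff,
 *         .addr_mask = 0xfff,                       // 4k translation
 *         .perm = IOMMU_ACCESS_FLAG(true, true),    // == IOMMU_RW
 *     };
 */
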
/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
 *
 * Normally there are two use cases for the notifiers:
 *
 *   (1) When the device needs accurate synchronization of the vIOMMU page
 *       tables, it needs to register with both MAP|UNMAP notifies (which
 *       is defined as IOMMU_NOTIFIER_IOTLB_EVENTS below).
 *
 *       Accurate synchronization means that the notified device
 *       maintains a shadow page table and must be notified on each
 *       guest MAP (page table entry creation) and UNMAP (invalidation)
 *       event (e.g. VFIO). Both notifications must be accurate so that
 *       the shadow page table is fully in sync with the guest view.
 *
 *   (2) When the device doesn't need accurate synchronization of the
 *       vIOMMU page tables, it needs to register only with UNMAP or
 *       DEVIOTLB_UNMAP notifies.
 *
 *       This is the case when the device maintains a cache of IOMMU
 *       translations (IOTLB) and is able to fill that cache by requesting
 *       translations from the vIOMMU through a protocol similar to ATS
 *       (Address Translation Service).
 *
 *       Note that in this mode the vIOMMU will not maintain a shadowed
 *       page table for the address space, and the UNMAP messages can cover
 *       more than the pages that used to get mapped.  The notified device
 *       should be able to handle over-sized invalidations.
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
    /* Notify changes on device IOTLB entries */
    IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
#define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
                            IOMMU_NOTIFIER_DEVIOTLB_EVENTS)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

typedef struct IOMMUTLBEvent {
    IOMMUNotifierFlag type;
    IOMMUTLBEntry entry;
} IOMMUTLBEvent;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * Resizing RAM while migrating can result in the migration being canceled.
 */
#define RAM_RESIZEABLE (1 << 2)

/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
 * zero the page and wake waiting processes.
 * (Set during postcopy)
 */
#define RAM_UF_ZEROPAGE (1 << 3)

/* RAM can be migrated */
#define RAM_MIGRATABLE (1 << 4)

/* RAM is a persistent kind of memory */
#define RAM_PMEM (1 << 5)

/*
 * UFFDIO_WRITEPROTECT is used on this RAMBlock to
 * support 'write-tracking' migration type.
 * Implies ram_state->ram_wt_enabled.
 */
#define RAM_UF_WRITEPROTECT (1 << 6)

/*
 * RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or huge
 * pages if applicable) is skipped: will bail out if not supported. When not
 * set, the OS will do the reservation, if supported for the memory type.
 */
#define RAM_NORESERVE (1 << 7)

/* RAM that isn't accessible through normal means. */
#define RAM_PROTECTED (1 << 8)

/* RAM is an mmap-ed named file */
#define RAM_NAMED_FILE (1 << 9)

/* RAM is mmap-ed read-only */
#define RAM_READONLY (1 << 10)

/* RAM FD is opened read-only */
#define RAM_READONLY_FD (1 << 11)

/* RAM can be private, backed by a KVM guest memfd */
#define RAM_GUEST_MEMFD   (1 << 12)

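/*
 * Illustrative sketch: these RAM_* values are combined into a uint32_t
 * bitmask when a RAMBlock is created; e.g. a shared, file-backed mapping
 * without swap reservation might pass (hypothetical call site):
 *
 *     uint32_t ram_flags = RAM_SHARED | RAM_NORESERVE;
 *
 * Individual properties can later be tested with e.g. (flags & RAM_PMEM).
 */
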
static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end,
                                       int iommu_idx)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
    n->iommu_idx = iommu_idx;
}

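/*
 * Illustrative sketch: a device that only caches translations (use case
 * (2) above) would register an UNMAP-only notifier over the whole range;
 * my_unmap_notify is a hypothetical callback:
 *
 *     static void my_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
 *     {
 *         // Drop cached translations overlapping
 *         // [entry->iova, entry->iova + entry->addr_mask].
 *     }
 *
 *     IOMMUNotifier n;
 *
 *     iommu_notifier_init(&n, my_unmap_notify, IOMMU_NOTIFIER_UNMAP,
 *                         0, HWADDR_MAX, 0);
 */
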
/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specifies bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};

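/*
 * Illustrative sketch: a minimal MemoryRegionOps for a hypothetical device
 * exposing 32-bit registers; my_dev_read/my_dev_write are assumptions, not
 * part of this header:
 *
 *     static const MemoryRegionOps my_dev_ops = {
 *         .read = my_dev_read,
 *         .write = my_dev_write,
 *         .endianness = DEVICE_LITTLE_ENDIAN,
 *         .valid = {
 *             .min_access_size = 4,
 *             .max_access_size = 4,
 *         },
 *     };
 *
 * Such ops are typically attached to a region with memory_region_init_io().
 */
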
typedef struct MemoryRegionClass {
    /* private */
    ObjectClass parent_class;
} MemoryRegionClass;


enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};

/*
 * IOMMUMemoryRegionClass:
 *
 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 * and provide an implementation of at least the @translate method here
 * to handle requests to the memory region. Other methods are optional.
 *
 * The IOMMU implementation must use the IOMMU notifier infrastructure
 * to report whenever mappings are changed, by calling
 * memory_region_notify_iommu() (or, if necessary, by calling
 * memory_region_notify_iommu_one() for each registered notifier).
 *
 * Conceptually an IOMMU provides a mapping from input address
 * to an output TLB entry. If the IOMMU is aware of memory transaction
 * attributes and the output TLB entry depends on the transaction
 * attributes, we represent this using IOMMU indexes. Each index
 * selects a particular translation table that the IOMMU has:
 *
 *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
 *
 *   @translate takes an input address and an IOMMU index
 *
 * and the mapping returned can only depend on the input address and the
 * IOMMU index.
 *
 * Most IOMMUs don't care about the transaction attributes and support
 * only a single IOMMU index. A more complex IOMMU might have one index
 * for secure transactions and one for non-secure transactions.
 */
struct IOMMUMemoryRegionClass {
    /* private: */
    MemoryRegionClass parent_class;

    /* public: */
    /**
     * @translate:
     *
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @addr: address to be translated within the memory region
     *
     * @flag: requested access permission
     *
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);
    /**
     * @get_min_page_size:
     *
     * Returns minimum supported page size in bytes.
     *
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /**
     * @notify_flag_changed:
     *
     * Called when the IOMMU Notifier flag changes (i.e. when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @old_flags: events which previously needed to be notified
     *
     * @new_flags: events which now need to be notified
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL if the new flag bitmap is not supported by the
     * IOMMU memory region. In case of failure, the error object
     * must be created and set in @errp.
     */
    int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                               IOMMUNotifierFlag old_flags,
                               IOMMUNotifierFlag new_flags,
                               Error **errp);
    /**
     * @replay:
     *
     * Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /**
     * @get_attr:
     *
     * Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @attr: attribute being queried
     *
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /**
     * @attrs_to_index:
     *
     * Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /**
     * @num_indexes:
     *
     * Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);
};

typedef struct RamDiscardListener RamDiscardListener;
typedef int (*NotifyRamPopulate)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);
typedef void (*NotifyRamDiscard)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);

struct RamDiscardListener {
    /*
     * @notify_populate:
     *
     * Notification that previously discarded memory is about to get populated.
     * Listeners are able to object. If any listener objects, already
     * successfully notified listeners are notified about a discard again.
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get populated. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     *
     * Returns 0 on success. If the notification is rejected by the listener,
     * an error is returned.
     */
    NotifyRamPopulate notify_populate;

    /*
     * @notify_discard:
     *
     * Notification that previously populated memory was discarded successfully
     * and listeners should drop all references to such memory and prevent
     * new population (e.g., unmap).
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection that was discarded. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     */
    NotifyRamDiscard notify_discard;

    /*
     * @double_discard_supported:
     *
     * The listener supports getting @notify_discard notifications that span
     * already discarded parts.
     */
    bool double_discard_supported;

    MemoryRegionSection *section;
    QLIST_ENTRY(RamDiscardListener) next;
};

static inline void ram_discard_listener_init(RamDiscardListener *rdl,
                                             NotifyRamPopulate populate_fn,
                                             NotifyRamDiscard discard_fn,
                                             bool double_discard_supported)
{
    rdl->notify_populate = populate_fn;
    rdl->notify_discard = discard_fn;
    rdl->double_discard_supported = double_discard_supported;
}

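/*
 * Illustrative sketch: a VFIO-like consumer that must pin populated memory
 * could wire up a listener as follows (my_populate/my_discard are
 * hypothetical callbacks matching the typedefs above):
 *
 *     RamDiscardListener rdl;
 *
 *     ram_discard_listener_init(&rdl, my_populate, my_discard, false);
 *     ram_discard_manager_register_listener(rdm, &rdl, section);
 *     ...
 *     ram_discard_manager_unregister_listener(rdm, &rdl);
 */
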
typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque);
typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque);

/*
 * RamDiscardManagerClass:
 *
 * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
 * regions are currently populated to be used/accessed by the VM, notifying
 * after parts were discarded (freeing up memory) and before parts will be
 * populated (consuming memory).
 *
 * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
 * #MemoryRegion isn't mapped into an address space yet (either directly
 * or via an alias); it cannot change while the #MemoryRegion is
 * mapped into an address space.
 *
 * The #RamDiscardManager is intended to be used by technologies that are
 * incompatible with discarding of RAM (e.g., VFIO, which may pin all
 * memory inside a #MemoryRegion), and require proper coordination to only
 * map the currently populated parts, to hinder parts that are expected to
 * remain discarded from silently getting populated and consuming memory.
 * Technologies that support discarding of RAM don't have to bother and can
 * simply map the whole #MemoryRegion.
 *
 * An example #RamDiscardManager is virtio-mem, which logically (un)plugs
 * memory within an assigned RAM #MemoryRegion, coordinated with the VM.
 * Logically unplugging memory consists of discarding RAM. The VM agrees
 * not to access unplugged (discarded) memory, especially via DMA.
 * virtio-mem will properly coordinate with listeners before memory is
 * plugged (populated), and after memory is unplugged (discarded).
 *
 * Listeners are called in multiples of the minimum granularity (unless it
 * would exceed the registered range) and changes are aligned to the minimum
 * granularity within the #MemoryRegion. Listeners have to prepare for memory
 * becoming discarded at a different granularity than it was populated, and
 * the other way around.
 */
struct RamDiscardManagerClass {
    /* private */
    InterfaceClass parent_class;

    /* public */

    /**
     * @get_min_granularity:
     *
     * Get the minimum granularity in which listeners will get notified
     * about changes within the #MemoryRegion via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @mr: the #MemoryRegion
     *
     * Returns the minimum granularity.
     */
    uint64_t (*get_min_granularity)(const RamDiscardManager *rdm,
                                    const MemoryRegion *mr);

    /**
     * @is_populated:
     *
     * Check whether the given #MemoryRegionSection is completely populated
     * (i.e., no parts are currently discarded) via the #RamDiscardManager.
     * There are no alignment requirements.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     *
     * Returns whether the given range is completely populated.
     */
    bool (*is_populated)(const RamDiscardManager *rdm,
                         const MemoryRegionSection *section);

    /**
     * @replay_populated:
     *
     * Call the #ReplayRamPopulate callback for all populated parts within the
     * #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any call fails, no further calls are made.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamPopulate callback
     * @opaque: pointer to forward to the callback
     *
     * Returns 0 on success, or a negative error if any notification failed.
     */
    int (*replay_populated)(const RamDiscardManager *rdm,
                            MemoryRegionSection *section,
                            ReplayRamPopulate replay_fn, void *opaque);

    /**
     * @replay_discarded:
     *
     * Call the #ReplayRamDiscard callback for all discarded parts within the
     * #MemoryRegionSection via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamDiscard callback
     * @opaque: pointer to forward to the callback
     */
    void (*replay_discarded)(const RamDiscardManager *rdm,
                             MemoryRegionSection *section,
                             ReplayRamDiscard replay_fn, void *opaque);

    /**
     * @register_listener:
     *
     * Register a #RamDiscardListener for the given #MemoryRegionSection and
     * immediately notify the #RamDiscardListener about all populated parts
     * within the #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any notification fails, no further notifications are triggered
     * and an error is logged.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     * @section: the #MemoryRegionSection
     */
    void (*register_listener)(RamDiscardManager *rdm,
                              RamDiscardListener *rdl,
                              MemoryRegionSection *section);

    /**
     * @unregister_listener:
     *
     * Unregister a previously registered #RamDiscardListener via the
     * #RamDiscardManager after notifying the #RamDiscardListener about all
     * populated parts becoming unpopulated within the registered
     * #MemoryRegionSection.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     */
    void (*unregister_listener)(RamDiscardManager *rdm,
                                RamDiscardListener *rdl);
};

uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
                                                 const MemoryRegion *mr);

bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
                                      const MemoryRegionSection *section);

int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamPopulate replay_fn,
                                         void *opaque);

void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
                                          MemoryRegionSection *section,
                                          ReplayRamDiscard replay_fn,
                                          void *opaque);

void ram_discard_manager_register_listener(RamDiscardManager *rdm,
                                           RamDiscardListener *rdl,
                                           MemoryRegionSection *section);

void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                             RamDiscardListener *rdl);

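/*
 * Illustrative sketch: summing up all currently populated bytes of a
 * section with a replay callback (count_populated is hypothetical):
 *
 *     static int count_populated(MemoryRegionSection *s, void *opaque)
 *     {
 *         uint64_t *bytes = opaque;
 *
 *         *bytes += int128_get64(s->size);
 *         return 0;
 *     }
 *
 *     uint64_t bytes = 0;
 *
 *     ram_discard_manager_replay_populated(rdm, section, count_populated,
 *                                          &bytes);
 */
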
/**
 * memory_get_xlat_addr: Extract addresses from a TLB entry
 *
 * @iotlb: pointer to an #IOMMUTLBEntry
 * @vaddr: virtual address
 * @ram_addr: RAM address
 * @read_only: indicates if writes are allowed
 * @mr_has_discard_manager: indicates memory is controlled by a
 *                          RamDiscardManager
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false with @errp set.
 */
bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                          ram_addr_t *ram_addr, bool *read_only,
                          bool *mr_has_discard_manager, Error **errp);

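/*
 * Illustrative sketch of a call site, e.g. from an IOMMU notifier
 * (variable names are assumptions):
 *
 *     void *vaddr;
 *     ram_addr_t ram_addr;
 *     bool read_only, has_discard_manager;
 *     Error *local_err = NULL;
 *
 *     if (!memory_get_xlat_addr(iotlb, &vaddr, &ram_addr, &read_only,
 *                               &has_discard_manager, &local_err)) {
 *         error_report_err(local_err);
 *     }
 */
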
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

/** MemoryRegion:
 *
 * A struct representing a memory region.
 */
struct MemoryRegion {
    Object parent_obj;

    /* private: */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool nonvolatile;
    bool rom_device;
    bool flush_coalesced_mmio;
    bool unmergeable;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;
    /* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */
    DeviceState *dev;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    int mapped_via_alias; /* Mapped via an alias, container might be NULL */
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    RamDiscardManager *rdm; /* Only for RAM */

    /* For devices designed to perform re-entrant IO into their own IO MRs */
    bool disable_reentrancy_guard;
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

#define MEMORY_LISTENER_PRIORITY_MIN            0
#define MEMORY_LISTENER_PRIORITY_ACCEL          10
#define MEMORY_LISTENER_PRIORITY_DEV_BACKEND    10

/**
 * struct MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    /**
     * @begin:
     *
     * Called at the beginning of an address space update transaction.
     * Followed by calls to #MemoryListener.region_add(),
     * #MemoryListener.region_del(), #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop() in
     * increasing address order.
     *
     * @listener: The #MemoryListener.
     */
    void (*begin)(MemoryListener *listener);

    /**
     * @commit:
     *
     * Called at the end of an address space update transaction,
     * after the last call to #MemoryListener.region_add(),
     * #MemoryListener.region_del() or #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop().
     *
     * @listener: The #MemoryListener.
     */
    void (*commit)(MemoryListener *listener);

    /**
     * @region_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is new in this address
     * space since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     */
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has disappeared in the address
     * space since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The old #MemoryRegionSection.
     */
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_nop:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is in the same place in the
     * address space as in the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_start:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop(), if dirty memory logging clients have
     * become active since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old_val: A bitmap of dirty memory logging clients that were active in
     * the previous transaction.
     * @new_val: A bitmap of dirty memory logging clients that are active in
     * the current transaction.
     */
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old_val, int new_val);
9035d248213SPaolo Bonzini 
9045d248213SPaolo Bonzini     /**
9055d248213SPaolo Bonzini      * @log_stop:
9065d248213SPaolo Bonzini      *
9075d248213SPaolo Bonzini      * Called during an address space update transaction, after
9085d248213SPaolo Bonzini      * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
9095d248213SPaolo Bonzini      * #MemoryListener.region_nop() and possibly after
9105d248213SPaolo Bonzini      * #MemoryListener.log_start(), if dirty memory logging clients have
9115d248213SPaolo Bonzini      * become inactive since the last transaction.
9125d248213SPaolo Bonzini      *
9135d248213SPaolo Bonzini      * @listener: The #MemoryListener.
9145d248213SPaolo Bonzini      * @section: The #MemoryRegionSection.
9155d248213SPaolo Bonzini      * @old_val: A bitmap of dirty memory logging clients that were active
9165d248213SPaolo Bonzini      * in the previous transaction.
9175d248213SPaolo Bonzini      * @new_val: A bitmap of dirty memory logging clients that are active
9185d248213SPaolo Bonzini      * in the current transaction.
9195d248213SPaolo Bonzini      */
920b2dfd71cSPaolo Bonzini     void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
92117c7df80SRoman Kiryanov                      int old_val, int new_val);
9225d248213SPaolo Bonzini 
9235d248213SPaolo Bonzini     /**
9245d248213SPaolo Bonzini      * @log_sync:
9255d248213SPaolo Bonzini      *
9265d248213SPaolo Bonzini      * Called by memory_region_snapshot_and_clear_dirty() and
9275d248213SPaolo Bonzini      * memory_global_dirty_log_sync(), before accessing QEMU's "official"
9285d248213SPaolo Bonzini      * copy of the dirty memory bitmap for a #MemoryRegionSection.
9295d248213SPaolo Bonzini      *
9305d248213SPaolo Bonzini      * @listener: The #MemoryListener.
9315d248213SPaolo Bonzini      * @section: The #MemoryRegionSection.
9325d248213SPaolo Bonzini      */
933c2fc83e8SPaolo Bonzini     void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
9345d248213SPaolo Bonzini 
9355d248213SPaolo Bonzini     /**
936b87eaa9bSPeter Xu      * @log_sync_global:
937b87eaa9bSPeter Xu      *
938b87eaa9bSPeter Xu      * This is the global version of @log_sync when the listener does
939b87eaa9bSPeter Xu      * not have a way to synchronize the log with finer granularity.
940b87eaa9bSPeter Xu      * If a listener registers with @log_sync_global defined, its
941b87eaa9bSPeter Xu      * @log_sync must be NULL, and vice versa.
942b87eaa9bSPeter Xu      *
943b87eaa9bSPeter Xu      * @listener: The #MemoryListener.
9441e493be5SGavin Shan      * @last_stage: The last stage to synchronize the log during migration.
945313e1629SStefan Weil      * The caller should guarantee that synchronization with @last_stage
9461e493be5SGavin Shan      * set to true is triggered only once, after all VCPUs have been stopped.
947b87eaa9bSPeter Xu      */
9481e493be5SGavin Shan     void (*log_sync_global)(MemoryListener *listener, bool last_stage);
949b87eaa9bSPeter Xu 
950b87eaa9bSPeter Xu     /**
9515d248213SPaolo Bonzini      * @log_clear:
9525d248213SPaolo Bonzini      *
9535d248213SPaolo Bonzini      * Called before reading the dirty memory bitmap for a
9545d248213SPaolo Bonzini      * #MemoryRegionSection.
9555d248213SPaolo Bonzini      *
9565d248213SPaolo Bonzini      * @listener: The #MemoryListener.
9575d248213SPaolo Bonzini      * @section: The #MemoryRegionSection.
9585d248213SPaolo Bonzini      */
959077874e0SPeter Xu     void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);
9605d248213SPaolo Bonzini 
9615d248213SPaolo Bonzini     /**
9625d248213SPaolo Bonzini      * @log_global_start:
9635d248213SPaolo Bonzini      *
9645d248213SPaolo Bonzini      * Called by memory_global_dirty_log_start(), which
9655d248213SPaolo Bonzini      * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
9665d248213SPaolo Bonzini      * the address space.  #MemoryListener.log_global_start() is also
9675d248213SPaolo Bonzini      * called when a #MemoryListener is added, if global dirty logging is
9685d248213SPaolo Bonzini      * active at that time.
9695d248213SPaolo Bonzini      *
9705d248213SPaolo Bonzini      * @listener: The #MemoryListener.
9713688fec8SCédric Le Goater      * @errp: pointer to Error*, to store an error if it happens.
9723688fec8SCédric Le Goater      *
9733688fec8SCédric Le Goater      * Return: true on success, else false setting @errp with error.
9745d248213SPaolo Bonzini      */
9753688fec8SCédric Le Goater     bool (*log_global_start)(MemoryListener *listener, Error **errp);
9765d248213SPaolo Bonzini 
9775d248213SPaolo Bonzini     /**
9785d248213SPaolo Bonzini      * @log_global_stop:
9795d248213SPaolo Bonzini      *
9805d248213SPaolo Bonzini      * Called by memory_global_dirty_log_stop(), which
9815d248213SPaolo Bonzini      * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
9825d248213SPaolo Bonzini      * the address space.
9835d248213SPaolo Bonzini      *
9845d248213SPaolo Bonzini      * @listener: The #MemoryListener.
9855d248213SPaolo Bonzini      */
986c2fc83e8SPaolo Bonzini     void (*log_global_stop)(MemoryListener *listener);
9875d248213SPaolo Bonzini 
9885d248213SPaolo Bonzini     /**
9895d248213SPaolo Bonzini      * @log_global_after_sync:
9905d248213SPaolo Bonzini      *
9915d248213SPaolo Bonzini      * Called after reading the dirty memory bitmap
9925d248213SPaolo Bonzini      * for any #MemoryRegionSection.
9935d248213SPaolo Bonzini      *
9945d248213SPaolo Bonzini      * @listener: The #MemoryListener.
9955d248213SPaolo Bonzini      */
9969458a9a1SPaolo Bonzini     void (*log_global_after_sync)(MemoryListener *listener);
9975d248213SPaolo Bonzini 
9985d248213SPaolo Bonzini     /**
9995d248213SPaolo Bonzini      * @eventfd_add:
10005d248213SPaolo Bonzini      *
10015d248213SPaolo Bonzini      * Called during an address space update transaction,
10025d248213SPaolo Bonzini      * for a section of the address space that has had a new ioeventfd
10035d248213SPaolo Bonzini      * registration since the last transaction.
10045d248213SPaolo Bonzini      *
10055d248213SPaolo Bonzini      * @listener: The #MemoryListener.
10065d248213SPaolo Bonzini      * @section: The new #MemoryRegionSection.
10075d248213SPaolo Bonzini      * @match_data: The @match_data parameter for the new ioeventfd.
10085d248213SPaolo Bonzini      * @data: The @data parameter for the new ioeventfd.
10095d248213SPaolo Bonzini      * @e: The #EventNotifier parameter for the new ioeventfd.
10105d248213SPaolo Bonzini      */
1011c2fc83e8SPaolo Bonzini     void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
1012c2fc83e8SPaolo Bonzini                         bool match_data, uint64_t data, EventNotifier *e);
10135d248213SPaolo Bonzini 
10145d248213SPaolo Bonzini     /**
10155d248213SPaolo Bonzini      * @eventfd_del:
10165d248213SPaolo Bonzini      *
10175d248213SPaolo Bonzini      * Called during an address space update transaction,
10185d248213SPaolo Bonzini      * for a section of the address space that has dropped an ioeventfd
10195d248213SPaolo Bonzini      * registration since the last transaction.
10205d248213SPaolo Bonzini      *
10215d248213SPaolo Bonzini      * @listener: The #MemoryListener.
10225d248213SPaolo Bonzini      * @section: The #MemoryRegionSection for the dropped ioeventfd.
10235d248213SPaolo Bonzini      * @match_data: The @match_data parameter for the dropped ioeventfd.
10245d248213SPaolo Bonzini      * @data: The @data parameter for the dropped ioeventfd.
10255d248213SPaolo Bonzini      * @e: The #EventNotifier parameter for the dropped ioeventfd.
10265d248213SPaolo Bonzini      */
1027c2fc83e8SPaolo Bonzini     void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
1028c2fc83e8SPaolo Bonzini                         bool match_data, uint64_t data, EventNotifier *e);
10295d248213SPaolo Bonzini 
10305d248213SPaolo Bonzini     /**
10315d248213SPaolo Bonzini      * @coalesced_io_add:
10325d248213SPaolo Bonzini      *
10335d248213SPaolo Bonzini      * Called during an address space update transaction,
10345d248213SPaolo Bonzini      * for a section of the address space that has had a new coalesced
10355d248213SPaolo Bonzini      * MMIO range registration since the last transaction.
10365d248213SPaolo Bonzini      *
10375d248213SPaolo Bonzini      * @listener: The #MemoryListener.
10385d248213SPaolo Bonzini      * @section: The new #MemoryRegionSection.
10395d248213SPaolo Bonzini      * @addr: The starting address for the coalesced MMIO range.
10405d248213SPaolo Bonzini      * @len: The length of the coalesced MMIO range.
10415d248213SPaolo Bonzini      */
1042e6d34aeeSPeng Hao     void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
1043c2fc83e8SPaolo Bonzini                                hwaddr addr, hwaddr len);
10445d248213SPaolo Bonzini 
10455d248213SPaolo Bonzini     /**
10465d248213SPaolo Bonzini      * @coalesced_io_del:
10475d248213SPaolo Bonzini      *
10485d248213SPaolo Bonzini      * Called during an address space update transaction,
10495d248213SPaolo Bonzini      * for a section of the address space that has dropped a coalesced
10505d248213SPaolo Bonzini      * MMIO range since the last transaction.
10515d248213SPaolo Bonzini      *
10525d248213SPaolo Bonzini      * @listener: The #MemoryListener.
10535d248213SPaolo Bonzini      * @section: The #MemoryRegionSection for the dropped coalesced MMIO range.
10545d248213SPaolo Bonzini      * @addr: The starting address for the coalesced MMIO range.
10555d248213SPaolo Bonzini      * @len: The length of the coalesced MMIO range.
10565d248213SPaolo Bonzini      */
1057e6d34aeeSPeng Hao     void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
1058c2fc83e8SPaolo Bonzini                                hwaddr addr, hwaddr len);
10595d248213SPaolo Bonzini     /**
10605d248213SPaolo Bonzini      * @priority:
10615d248213SPaolo Bonzini      *
10625d248213SPaolo Bonzini      * Governs the order in which memory listeners are invoked. Lower priorities
10635d248213SPaolo Bonzini      * are invoked earlier for "add" or "start" callbacks, and later for "delete"
10645d248213SPaolo Bonzini      * or "stop" callbacks.
10655d248213SPaolo Bonzini      */
1066c2fc83e8SPaolo Bonzini     unsigned priority;
10675d248213SPaolo Bonzini 
1068142518bdSPeter Xu     /**
1069142518bdSPeter Xu      * @name:
1070142518bdSPeter Xu      *
1071142518bdSPeter Xu      * Name of the listener.  It can be used in contexts where we'd like to
1072142518bdSPeter Xu      * distinguish one memory listener from the others.
1073142518bdSPeter Xu      */
1074142518bdSPeter Xu     const char *name;
1075142518bdSPeter Xu 
10765d248213SPaolo Bonzini     /* private: */
1077d45fa784SPaolo Bonzini     AddressSpace *address_space;
1078c2fc83e8SPaolo Bonzini     QTAILQ_ENTRY(MemoryListener) link;
10799a54635dSPaolo Bonzini     QTAILQ_ENTRY(MemoryListener) link_as;
1080c2fc83e8SPaolo Bonzini };
1081c2fc83e8SPaolo Bonzini 
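/*
 * Example (editorial sketch, not part of the upstream header): a minimal
 * listener that reacts to new sections.  The callback and variable names
 * are hypothetical; memory_listener_register() is declared later in this
 * file and address_space_memory is the global system AddressSpace.
 *
 *   static void my_region_add(MemoryListener *listener,
 *                             MemoryRegionSection *section)
 *   {
 *       // React to a section that appeared since the last transaction.
 *   }
 *
 *   static MemoryListener my_listener = {
 *       .name = "my-listener",
 *       .region_add = my_region_add,
 *       .priority = 10,
 *   };
 *
 *   memory_listener_register(&my_listener, &address_space_memory);
 */
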
108269e78f1bSMattias Nissler typedef struct AddressSpaceMapClient {
108369e78f1bSMattias Nissler     QEMUBH *bh;
108469e78f1bSMattias Nissler     QLIST_ENTRY(AddressSpaceMapClient) link;
108569e78f1bSMattias Nissler } AddressSpaceMapClient;
108669e78f1bSMattias Nissler 
1087637b0aa1SMattias Nissler #define DEFAULT_MAX_BOUNCE_BUFFER_SIZE (4096)
108869e78f1bSMattias Nissler 
1089022c62cbSPaolo Bonzini /**
1090301302f0SEduardo Habkost  * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
1091022c62cbSPaolo Bonzini  */
1092022c62cbSPaolo Bonzini struct AddressSpace {
109308226b44SPaolo Bonzini     /* private: */
1094374f2981SPaolo Bonzini     struct rcu_head rcu;
10957dca8043SAlexey Kardashevskiy     char *name;
1096022c62cbSPaolo Bonzini     MemoryRegion *root;
1097374f2981SPaolo Bonzini 
1098374f2981SPaolo Bonzini     /* Accessed via RCU.  */
1099022c62cbSPaolo Bonzini     struct FlatView *current_map;
1100374f2981SPaolo Bonzini 
1101022c62cbSPaolo Bonzini     int ioeventfd_nb;
1102544cff46Shongmianquan     int ioeventfd_notifiers;
1103022c62cbSPaolo Bonzini     struct MemoryRegionIoeventfd *ioeventfds;
1104eae3eb3eSPaolo Bonzini     QTAILQ_HEAD(, MemoryListener) listeners;
1105022c62cbSPaolo Bonzini     QTAILQ_ENTRY(AddressSpace) address_spaces_link;
110669e78f1bSMattias Nissler 
1107*c3ec57e4SMattias Nissler     /*
1108*c3ec57e4SMattias Nissler      * Maximum DMA bounce buffer size used for indirect memory map requests.
1109*c3ec57e4SMattias Nissler      * This limits the total size of bounce buffer allocations made for
1110*c3ec57e4SMattias Nissler      * DMA requests to indirect memory regions within this AddressSpace. DMA
1111*c3ec57e4SMattias Nissler      * requests that exceed the limit (e.g. due to overly large requested size
1112*c3ec57e4SMattias Nissler      * or concurrent DMA requests having claimed too much buffer space) will be
1113*c3ec57e4SMattias Nissler      * rejected and left to the caller to handle.
1114*c3ec57e4SMattias Nissler      */
1115637b0aa1SMattias Nissler     size_t max_bounce_buffer_size;
1116637b0aa1SMattias Nissler     /* Total size of bounce buffers currently allocated, atomically accessed */
1117637b0aa1SMattias Nissler     size_t bounce_buffer_size;
111869e78f1bSMattias Nissler     /* List of callbacks to invoke when buffers free up */
111969e78f1bSMattias Nissler     QemuMutex map_client_list_lock;
112069e78f1bSMattias Nissler     QLIST_HEAD(, AddressSpaceMapClient) map_client_list;
1121022c62cbSPaolo Bonzini };
1122022c62cbSPaolo Bonzini 
1123785a507eSPaolo Bonzini typedef struct AddressSpaceDispatch AddressSpaceDispatch;
1124785a507eSPaolo Bonzini typedef struct FlatRange FlatRange;
1125785a507eSPaolo Bonzini 
1126785a507eSPaolo Bonzini /* Flattened global view of current active memory hierarchy.  Kept in sorted
1127785a507eSPaolo Bonzini  * order.
1128785a507eSPaolo Bonzini  */
1129785a507eSPaolo Bonzini struct FlatView {
1130785a507eSPaolo Bonzini     struct rcu_head rcu;
1131785a507eSPaolo Bonzini     unsigned ref;
1132785a507eSPaolo Bonzini     FlatRange *ranges;
1133785a507eSPaolo Bonzini     unsigned nr;
1134785a507eSPaolo Bonzini     unsigned nr_allocated;
1135785a507eSPaolo Bonzini     struct AddressSpaceDispatch *dispatch;
1136785a507eSPaolo Bonzini     MemoryRegion *root;
1137785a507eSPaolo Bonzini };
1138785a507eSPaolo Bonzini 
1139785a507eSPaolo Bonzini static inline FlatView *address_space_to_flatview(AddressSpace *as)
1140785a507eSPaolo Bonzini {
1141d73415a3SStefan Hajnoczi     return qatomic_rcu_read(&as->current_map);
1142785a507eSPaolo Bonzini }
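
/*
 * Usage note (editorial sketch): the returned FlatView is only stable
 * inside an RCU read-side critical section, so callers typically do:
 *
 *   RCU_READ_LOCK_GUARD();
 *   FlatView *fv = address_space_to_flatview(as);
 *   // ... use fv; it may be reclaimed once the RCU read section ends.
 */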
1143785a507eSPaolo Bonzini 
1144a5e32ec1SPeter Maydell /**
1145a5e32ec1SPeter Maydell  * typedef flatview_cb: callback for flatview_for_each_range()
1146a5e32ec1SPeter Maydell  *
1147a5e32ec1SPeter Maydell  * @start: start address of the range within the FlatView
1148a5e32ec1SPeter Maydell  * @len: length of the range in bytes
1149a5e32ec1SPeter Maydell  * @mr: MemoryRegion covering this range
1150b3566001SPeter Maydell  * @offset_in_region: offset of the first byte of the range within @mr
1151a5e32ec1SPeter Maydell  * @opaque: data pointer passed to flatview_for_each_range()
1152a5e32ec1SPeter Maydell  *
1153a5e32ec1SPeter Maydell  * Returns: true to stop the iteration, false to keep going.
1154a5e32ec1SPeter Maydell  */
1155d1e8cf77SPeter Maydell typedef bool (*flatview_cb)(Int128 start,
1156fb5ef4eeSAlexander Bulekov                             Int128 len,
1157a5e32ec1SPeter Maydell                             const MemoryRegion *mr,
1158b3566001SPeter Maydell                             hwaddr offset_in_region,
1159a5e32ec1SPeter Maydell                             void *opaque);
1160fb5ef4eeSAlexander Bulekov 
1161a5e32ec1SPeter Maydell /**
1162a5e32ec1SPeter Maydell  * flatview_for_each_range: Iterate through a FlatView
1163a5e32ec1SPeter Maydell  * @fv: the FlatView to iterate through
1164a5e32ec1SPeter Maydell  * @cb: function to call for each range
1165a5e32ec1SPeter Maydell  * @opaque: opaque data pointer to pass to @cb
1166a5e32ec1SPeter Maydell  *
1167a5e32ec1SPeter Maydell  * A FlatView is made up of a list of non-overlapping ranges, each of
1168a5e32ec1SPeter Maydell  * which is a slice of a MemoryRegion. This function iterates through
1169a5e32ec1SPeter Maydell  * each range in @fv, calling @cb. The callback function can terminate
1170a5e32ec1SPeter Maydell  * iteration early by returning 'true'.
1171a5e32ec1SPeter Maydell  */
1172fb5ef4eeSAlexander Bulekov void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);
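
/*
 * Example (editorial sketch): a flatview_cb that counts how many ranges
 * are backed by RAM.  The callback name and counter are hypothetical.
 *
 *   static bool count_ram_ranges(Int128 start, Int128 len,
 *                                const MemoryRegion *mr,
 *                                hwaddr offset_in_region, void *opaque)
 *   {
 *       unsigned *count = opaque;
 *
 *       if (memory_region_is_ram((MemoryRegion *)mr)) {
 *           (*count)++;
 *       }
 *       return false;  // false: keep iterating over the remaining ranges
 *   }
 *
 *   unsigned count = 0;
 *   flatview_for_each_range(fv, count_ram_ranges, &count);
 */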
117316620684SAlexey Kardashevskiy 
11749366cf02SDr. David Alan Gilbert static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
11759366cf02SDr. David Alan Gilbert                                           MemoryRegionSection *b)
11769366cf02SDr. David Alan Gilbert {
11779366cf02SDr. David Alan Gilbert     return a->mr == b->mr &&
11789366cf02SDr. David Alan Gilbert            a->fv == b->fv &&
11799366cf02SDr. David Alan Gilbert            a->offset_within_region == b->offset_within_region &&
11809366cf02SDr. David Alan Gilbert            a->offset_within_address_space == b->offset_within_address_space &&
11819366cf02SDr. David Alan Gilbert            int128_eq(a->size, b->size) &&
11829366cf02SDr. David Alan Gilbert            a->readonly == b->readonly &&
11839366cf02SDr. David Alan Gilbert            a->nonvolatile == b->nonvolatile;
11849366cf02SDr. David Alan Gilbert }
11859366cf02SDr. David Alan Gilbert 
1186022c62cbSPaolo Bonzini /**
118722843838SDavid Hildenbrand  * memory_region_section_new_copy: Copy a memory region section
118822843838SDavid Hildenbrand  *
118922843838SDavid Hildenbrand  * Allocate memory for a new copy, copy the memory region section, and
119022843838SDavid Hildenbrand  * properly take a reference on all relevant members.
119122843838SDavid Hildenbrand  *
119222843838SDavid Hildenbrand  * @s: the #MemoryRegionSection to copy
119322843838SDavid Hildenbrand  */
119422843838SDavid Hildenbrand MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s);
119522843838SDavid Hildenbrand 
119622843838SDavid Hildenbrand /**
118722843838SDavid Hildenbrand  * memory_region_section_free_copy: Free a copied memory region section
119822843838SDavid Hildenbrand  *
118922843838SDavid Hildenbrand  * Free a copy of a memory section created via memory_region_section_new_copy(),
119022843838SDavid Hildenbrand  * properly dropping references on all relevant members.
120122843838SDavid Hildenbrand  *
119222843838SDavid Hildenbrand  * @s: the #MemoryRegionSection to free
120322843838SDavid Hildenbrand  */
120422843838SDavid Hildenbrand void memory_region_section_free_copy(MemoryRegionSection *s);
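
/*
 * Usage note (editorial sketch): the two helpers above are intended to be
 * used as a pair, e.g. to keep a section usable beyond the scope in which
 * it was looked up:
 *
 *   MemoryRegionSection *copy = memory_region_section_new_copy(section);
 *   // ... use *copy, possibly after the original section is gone ...
 *   memory_region_section_free_copy(copy);
 */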
120522843838SDavid Hildenbrand 
120622843838SDavid Hildenbrand /**
1207022c62cbSPaolo Bonzini  * memory_region_init: Initialize a memory region
1208022c62cbSPaolo Bonzini  *
1209022c62cbSPaolo Bonzini  * The region typically acts as a container for other memory regions.  Use
1210022c62cbSPaolo Bonzini  * memory_region_add_subregion() to add subregions.
1211022c62cbSPaolo Bonzini  *
1212022c62cbSPaolo Bonzini  * @mr: the #MemoryRegion to be initialized
12132c9b15caSPaolo Bonzini  * @owner: the object that tracks the region's reference count
1214022c62cbSPaolo Bonzini  * @name: used for debugging; not visible to the user or ABI
1215022c62cbSPaolo Bonzini  * @size: size of the region; any subregions beyond this size will be clipped
1216022c62cbSPaolo Bonzini  */
1217022c62cbSPaolo Bonzini void memory_region_init(MemoryRegion *mr,
1218d32335e8SPhilippe Mathieu-Daudé                         Object *owner,
1219022c62cbSPaolo Bonzini                         const char *name,
1220022c62cbSPaolo Bonzini                         uint64_t size);
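
/*
 * Example (editorial sketch): a container region holding a RAM subregion.
 * "owner" is a hypothetical QOM object; memory_region_init_ram() and
 * memory_region_add_subregion() are declared later in this file, and
 * error_fatal comes from "qapi/error.h".
 *
 *   MemoryRegion container, ram;
 *
 *   memory_region_init(&container, owner, "container", UINT64_MAX);
 *   memory_region_init_ram(&ram, owner, "container.ram", 0x10000,
 *                          &error_fatal);
 *   memory_region_add_subregion(&container, 0x1000, &ram);
 */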
122146637be2SPaolo Bonzini 
122246637be2SPaolo Bonzini /**
122346637be2SPaolo Bonzini  * memory_region_ref: Add 1 to a memory region's reference count
122446637be2SPaolo Bonzini  *
122546637be2SPaolo Bonzini  * Whenever memory regions are accessed outside the BQL, they need to be
122646637be2SPaolo Bonzini  * preserved against hot-unplug.  MemoryRegions actually do not have their
122746637be2SPaolo Bonzini  * own reference count; they piggyback on a QOM object, their "owner".
122846637be2SPaolo Bonzini  * This function adds a reference to the owner.
122946637be2SPaolo Bonzini  *
123046637be2SPaolo Bonzini  * All MemoryRegions must have an owner if they can disappear, even if the
123146637be2SPaolo Bonzini  * device they belong to operates exclusively under the BQL.  This is because
123246637be2SPaolo Bonzini  * the region could be returned at any time by memory_region_find, and this
123346637be2SPaolo Bonzini  * is usually under guest control.
123446637be2SPaolo Bonzini  *
123546637be2SPaolo Bonzini  * @mr: the #MemoryRegion
123646637be2SPaolo Bonzini  */
123746637be2SPaolo Bonzini void memory_region_ref(MemoryRegion *mr);
123846637be2SPaolo Bonzini 
123946637be2SPaolo Bonzini /**
124046637be2SPaolo Bonzini  * memory_region_unref: Remove 1 from a memory region's reference count
124146637be2SPaolo Bonzini  *
124246637be2SPaolo Bonzini  * Whenever memory regions are accessed outside the BQL, they need to be
124346637be2SPaolo Bonzini  * preserved against hot-unplug.  MemoryRegions actually do not have their
124446637be2SPaolo Bonzini  * own reference count; they piggyback on a QOM object, their "owner".
124546637be2SPaolo Bonzini  * This function removes a reference to the owner and possibly destroys it.
124646637be2SPaolo Bonzini  *
124746637be2SPaolo Bonzini  * @mr: the #MemoryRegion
124846637be2SPaolo Bonzini  */
124946637be2SPaolo Bonzini void memory_region_unref(MemoryRegion *mr);
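
/*
 * Usage note (editorial sketch): pinning a region around an access made
 * outside the BQL.  The lookup helper shown is hypothetical.
 *
 *   MemoryRegion *mr = lookup_region_somehow();
 *
 *   memory_region_ref(mr);
 *   // ... access mr without holding the BQL ...
 *   memory_region_unref(mr);
 */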
125046637be2SPaolo Bonzini 
1251022c62cbSPaolo Bonzini /**
1252022c62cbSPaolo Bonzini  * memory_region_init_io: Initialize an I/O memory region.
1253022c62cbSPaolo Bonzini  *
1254022c62cbSPaolo Bonzini  * Accesses into the region will cause the callbacks in @ops to be called.
1255022c62cbSPaolo Bonzini  * If @size is nonzero, subregions will be clipped to @size.
1256022c62cbSPaolo Bonzini  *
1257022c62cbSPaolo Bonzini  * @mr: the #MemoryRegion to be initialized.
12582c9b15caSPaolo Bonzini  * @owner: the object that tracks the region's reference count
1259022c62cbSPaolo Bonzini  * @ops: a structure containing read and write callbacks to be used when
1260022c62cbSPaolo Bonzini  *       I/O is performed on the region.
1261b6af0975SDaniel P. Berrange  * @opaque: passed to the read and write callbacks of the @ops structure.
1262022c62cbSPaolo Bonzini  * @name: used for debugging; not visible to the user or ABI
1263022c62cbSPaolo Bonzini  * @size: size of the region.
1264022c62cbSPaolo Bonzini  */
1265022c62cbSPaolo Bonzini void memory_region_init_io(MemoryRegion *mr,
1266d32335e8SPhilippe Mathieu-Daudé                            Object *owner,
1267022c62cbSPaolo Bonzini                            const MemoryRegionOps *ops,
1268022c62cbSPaolo Bonzini                            void *opaque,
1269022c62cbSPaolo Bonzini                            const char *name,
1270022c62cbSPaolo Bonzini                            uint64_t size);
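
/*
 * Example (editorial sketch): a register bank exposed as MMIO.  The device
 * state type, callbacks and sizes are hypothetical; the MemoryRegionOps
 * fields shown are the real ones.
 *
 *   static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       return s->regs[addr >> 2];   // 32-bit registers, addr is an offset
 *   }
 *
 *   static void mydev_write(void *opaque, hwaddr addr,
 *                           uint64_t data, unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       s->regs[addr >> 2] = data;
 *   }
 *
 *   static const MemoryRegionOps mydev_ops = {
 *       .read = mydev_read,
 *       .write = mydev_write,
 *       .endianness = DEVICE_NATIVE_ENDIAN,
 *       .valid.min_access_size = 4,
 *       .valid.max_access_size = 4,
 *   };
 *
 *   memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                         "mydev-mmio", 0x1000);
 */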
1271022c62cbSPaolo Bonzini 
1272022c62cbSPaolo Bonzini /**
12731cfe48c1SPeter Maydell  * memory_region_init_ram_nomigrate:  Initialize RAM memory region.  Accesses
12741cfe48c1SPeter Maydell  *                                    into the region will modify memory
12751cfe48c1SPeter Maydell  *                                    directly.
1276022c62cbSPaolo Bonzini  *
1277022c62cbSPaolo Bonzini  * @mr: the #MemoryRegion to be initialized.
12782c9b15caSPaolo Bonzini  * @owner: the object that tracks the region's reference count
1279e8f5fe2dSDr. David Alan Gilbert  * @name: Region name, becomes part of RAMBlock name used in migration stream;
1280e8f5fe2dSDr. David Alan Gilbert  *        must be unique within any device
1281022c62cbSPaolo Bonzini  * @size: size of the region.
128249946538SHu Tao  * @errp: pointer to Error*, to store an error if it happens.
1283a5c0234bSPeter Maydell  *
1284a5c0234bSPeter Maydell  * Note that this function does not do anything to cause the data in the
1285a5c0234bSPeter Maydell  * RAM memory region to be migrated; that is the responsibility of the caller.
128662c19b72SPhilippe Mathieu-Daudé  *
128762c19b72SPhilippe Mathieu-Daudé  * Return: true on success, else false setting @errp with error.
1288022c62cbSPaolo Bonzini  */
128962c19b72SPhilippe Mathieu-Daudé bool memory_region_init_ram_nomigrate(MemoryRegion *mr,
1290d32335e8SPhilippe Mathieu-Daudé                                       Object *owner,
1291022c62cbSPaolo Bonzini                                       const char *name,
129249946538SHu Tao                                       uint64_t size,
129349946538SHu Tao                                       Error **errp);
1294022c62cbSPaolo Bonzini 
129560786ef3SMichael S. Tsirkin /**
12967f863cbaSDavid Hildenbrand  * memory_region_init_ram_flags_nomigrate:  Initialize RAM memory region.
129706329cceSMarcel Apfelbaum  *                                          Accesses into the region will
129806329cceSMarcel Apfelbaum  *                                          modify memory directly.
129906329cceSMarcel Apfelbaum  *
130006329cceSMarcel Apfelbaum  * @mr: the #MemoryRegion to be initialized.
130106329cceSMarcel Apfelbaum  * @owner: the object that tracks the region's reference count
130206329cceSMarcel Apfelbaum  * @name: Region name, becomes part of RAMBlock name used in migration stream;
130306329cceSMarcel Apfelbaum  *        must be unique within any device
130406329cceSMarcel Apfelbaum  * @size: size of the region.
130515f7a80cSXiaoyao Li  * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE,
130615f7a80cSXiaoyao Li  *             RAM_GUEST_MEMFD.
130706329cceSMarcel Apfelbaum  * @errp: pointer to Error*, to store an error if it happens.
130806329cceSMarcel Apfelbaum  *
13097f863cbaSDavid Hildenbrand  * Note that this function does not do anything to cause the data in the
13107f863cbaSDavid Hildenbrand  * RAM memory region to be migrated; that is the responsibility of the caller.
1311cbbc4340SPhilippe Mathieu-Daudé  *
1312cbbc4340SPhilippe Mathieu-Daudé  * Return: true on success, else false setting @errp with error.
131306329cceSMarcel Apfelbaum  */
1314cbbc4340SPhilippe Mathieu-Daudé bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
1315d32335e8SPhilippe Mathieu-Daudé                                             Object *owner,
131606329cceSMarcel Apfelbaum                                             const char *name,
131706329cceSMarcel Apfelbaum                                             uint64_t size,
13187f863cbaSDavid Hildenbrand                                             uint32_t ram_flags,
131906329cceSMarcel Apfelbaum                                             Error **errp);
132006329cceSMarcel Apfelbaum 
132106329cceSMarcel Apfelbaum /**
13222cb40d44SStefan Weil  * memory_region_init_resizeable_ram:  Initialize memory region with resizable
132360786ef3SMichael S. Tsirkin  *                                     RAM.  Accesses into the region will
132460786ef3SMichael S. Tsirkin  *                                     modify memory directly.  Only an initial
132560786ef3SMichael S. Tsirkin  *                                     portion of this RAM is actually used.
1326c7c0e724SDavid Hildenbrand  *                                     Changing the size while migrating
1327c7c0e724SDavid Hildenbrand  *                                     can result in the migration being
1328c7c0e724SDavid Hildenbrand  *                                     canceled.
132960786ef3SMichael S. Tsirkin  *
133060786ef3SMichael S. Tsirkin  * @mr: the #MemoryRegion to be initialized.
133160786ef3SMichael S. Tsirkin  * @owner: the object that tracks the region's reference count
1332e8f5fe2dSDr. David Alan Gilbert  * @name: Region name, becomes part of RAMBlock name used in migration stream;
1333e8f5fe2dSDr. David Alan Gilbert  *        must be unique within any device
133460786ef3SMichael S. Tsirkin  * @size: used size of the region.
133560786ef3SMichael S. Tsirkin  * @max_size: max size of the region.
133660786ef3SMichael S. Tsirkin  * @resized: callback to notify owner about used size change.
133760786ef3SMichael S. Tsirkin  * @errp: pointer to Error*, to store an error if it happens.
1338a5c0234bSPeter Maydell  *
1339a5c0234bSPeter Maydell  * Note that this function does not do anything to cause the data in the
1340a5c0234bSPeter Maydell  * RAM memory region to be migrated; that is the responsibility of the caller.
1341f25a9fbbSPhilippe Mathieu-Daudé  *
1342f25a9fbbSPhilippe Mathieu-Daudé  * Return: true on success, else false setting @errp with error.
134360786ef3SMichael S. Tsirkin  */
1344f25a9fbbSPhilippe Mathieu-Daudé bool memory_region_init_resizeable_ram(MemoryRegion *mr,
1345d32335e8SPhilippe Mathieu-Daudé                                        Object *owner,
134660786ef3SMichael S. Tsirkin                                        const char *name,
134760786ef3SMichael S. Tsirkin                                        uint64_t size,
134860786ef3SMichael S. Tsirkin                                        uint64_t max_size,
134960786ef3SMichael S. Tsirkin                                        void (*resized)(const char*,
135060786ef3SMichael S. Tsirkin                                                        uint64_t length,
135160786ef3SMichael S. Tsirkin                                                        void *host),
135260786ef3SMichael S. Tsirkin                                        Error **errp);
1353d5dbde46SHikaru Nishida #ifdef CONFIG_POSIX
1354cbfc0171SJunyan He 
13550b183fc8SPaolo Bonzini /**
13560b183fc8SPaolo Bonzini  * memory_region_init_ram_from_file:  Initialize RAM memory region with a
13570b183fc8SPaolo Bonzini  *                                    mmap-ed backend.
13580b183fc8SPaolo Bonzini  *
13590b183fc8SPaolo Bonzini  * @mr: the #MemoryRegion to be initialized.
13600b183fc8SPaolo Bonzini  * @owner: the object that tracks the region's reference count
1361e8f5fe2dSDr. David Alan Gilbert  * @name: Region name, becomes part of RAMBlock name used in migration stream;
1362e8f5fe2dSDr. David Alan Gilbert  *        must be unique within any device
13630b183fc8SPaolo Bonzini  * @size: size of the region.
136498376843SHaozhong Zhang  * @align: alignment of the region base address; if 0, the default alignment
136598376843SHaozhong Zhang  *         (getpagesize()) will be used.
13668dbe22c6SDavid Hildenbrand  * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
13675c52a219SDavid Hildenbrand  *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
136815f7a80cSXiaoyao Li  *             RAM_READONLY_FD, RAM_GUEST_MEMFD
13690b183fc8SPaolo Bonzini  * @path: the path in which to allocate the RAM.
13704b870dc4SAlexander Graf  * @offset: offset within the file referenced by path
13717f56e740SPaolo Bonzini  * @errp: pointer to Error*, to store an error if it happens.
1372a5c0234bSPeter Maydell  *
1373a5c0234bSPeter Maydell  * Note that this function does not do anything to cause the data in the
1374a5c0234bSPeter Maydell  * RAM memory region to be migrated; that is the responsibility of the caller.
13759b9d11acSPhilippe Mathieu-Daudé  *
13769b9d11acSPhilippe Mathieu-Daudé  * Return: true on success, else false setting @errp with error.
13770b183fc8SPaolo Bonzini  */
13789b9d11acSPhilippe Mathieu-Daudé bool memory_region_init_ram_from_file(MemoryRegion *mr,
1379d32335e8SPhilippe Mathieu-Daudé                                       Object *owner,
13800b183fc8SPaolo Bonzini                                       const char *name,
13810b183fc8SPaolo Bonzini                                       uint64_t size,
138298376843SHaozhong Zhang                                       uint64_t align,
1383cbfc0171SJunyan He                                       uint32_t ram_flags,
13847f56e740SPaolo Bonzini                                       const char *path,
13854b870dc4SAlexander Graf                                       ram_addr_t offset,
13867f56e740SPaolo Bonzini                                       Error **errp);
1387fea617c5SMarc-André Lureau 
1388fea617c5SMarc-André Lureau /**
1389fea617c5SMarc-André Lureau  * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
1390fea617c5SMarc-André Lureau  *                                  mmap-ed backend.
1391fea617c5SMarc-André Lureau  *
1392fea617c5SMarc-André Lureau  * @mr: the #MemoryRegion to be initialized.
1393fea617c5SMarc-André Lureau  * @owner: the object that tracks the region's reference count
1394fea617c5SMarc-André Lureau  * @name: the name of the region.
1395fea617c5SMarc-André Lureau  * @size: size of the region.
13968dbe22c6SDavid Hildenbrand  * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
13975c52a219SDavid Hildenbrand  *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
139815f7a80cSXiaoyao Li  *             RAM_READONLY_FD, RAM_GUEST_MEMFD
1399fea617c5SMarc-André Lureau  * @fd: the fd to mmap.
140044a4ff31SJagannathan Raman  * @offset: offset within the file referenced by fd
1401fea617c5SMarc-André Lureau  * @errp: pointer to Error*, to store an error if it happens.
1402a5c0234bSPeter Maydell  *
1403a5c0234bSPeter Maydell  * Note that this function does not do anything to cause the data in the
1404a5c0234bSPeter Maydell  * RAM memory region to be migrated; that is the responsibility of the caller.
14059583a905SPhilippe Mathieu-Daudé  *
14069583a905SPhilippe Mathieu-Daudé  * Return: true on success, else false setting @errp with error.
1407fea617c5SMarc-André Lureau  */
14089583a905SPhilippe Mathieu-Daudé bool memory_region_init_ram_from_fd(MemoryRegion *mr,
1409d32335e8SPhilippe Mathieu-Daudé                                     Object *owner,
1410fea617c5SMarc-André Lureau                                     const char *name,
1411fea617c5SMarc-André Lureau                                     uint64_t size,
1412d5015b80SDavid Hildenbrand                                     uint32_t ram_flags,
1413fea617c5SMarc-André Lureau                                     int fd,
141444a4ff31SJagannathan Raman                                     ram_addr_t offset,
1415fea617c5SMarc-André Lureau                                     Error **errp);
14160b183fc8SPaolo Bonzini #endif
14170b183fc8SPaolo Bonzini 
1418022c62cbSPaolo Bonzini /**
1419022c62cbSPaolo Bonzini  * memory_region_init_ram_ptr:  Initialize RAM memory region from a
1420022c62cbSPaolo Bonzini  *                              user-provided pointer.  Accesses into the
1421022c62cbSPaolo Bonzini  *                              region will modify memory directly.
1422022c62cbSPaolo Bonzini  *
1423022c62cbSPaolo Bonzini  * @mr: the #MemoryRegion to be initialized.
14242c9b15caSPaolo Bonzini  * @owner: the object that tracks the region's reference count
1425e8f5fe2dSDr. David Alan Gilbert  * @name: Region name, becomes part of RAMBlock name used in migration stream;
1426e8f5fe2dSDr. David Alan Gilbert  *        must be unique within any device
1427022c62cbSPaolo Bonzini  * @size: size of the region.
1428022c62cbSPaolo Bonzini  * @ptr: memory to be mapped; must contain at least @size bytes.
1429a5c0234bSPeter Maydell  *
1430a5c0234bSPeter Maydell  * Note that this function does not do anything to cause the data in the
1431a5c0234bSPeter Maydell  * RAM memory region to be migrated; that is the responsibility of the caller.
1432022c62cbSPaolo Bonzini  */
1433022c62cbSPaolo Bonzini void memory_region_init_ram_ptr(MemoryRegion *mr,
1434d32335e8SPhilippe Mathieu-Daudé                                 Object *owner,
1435022c62cbSPaolo Bonzini                                 const char *name,
1436022c62cbSPaolo Bonzini                                 uint64_t size,
1437022c62cbSPaolo Bonzini                                 void *ptr);
1438022c62cbSPaolo Bonzini 
1439022c62cbSPaolo Bonzini /**
144021e00fa5SAlex Williamson  * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
144121e00fa5SAlex Williamson  *                                     a user-provided pointer.
144221e00fa5SAlex Williamson  *
144321e00fa5SAlex Williamson  * A RAM device represents a mapping to a physical device, such as to a PCI
144421e00fa5SAlex Williamson  * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
144521e00fa5SAlex Williamson  * into the VM address space and access to the region will modify memory
144621e00fa5SAlex Williamson  * directly.  However, the memory region should not be included in a memory
144721e00fa5SAlex Williamson  * dump (device may not be enabled/mapped at the time of the dump), and
144821e00fa5SAlex Williamson  * operations incompatible with manipulating MMIO should be avoided.  Replaces
144921e00fa5SAlex Williamson  * skip_dump flag.
145021e00fa5SAlex Williamson  *
145121e00fa5SAlex Williamson  * @mr: the #MemoryRegion to be initialized.
145221e00fa5SAlex Williamson  * @owner: the object that tracks the region's reference count
145321e00fa5SAlex Williamson  * @name: the name of the region.
145421e00fa5SAlex Williamson  * @size: size of the region.
145521e00fa5SAlex Williamson  * @ptr: memory to be mapped; must contain at least @size bytes.
1456a5c0234bSPeter Maydell  *
1457a5c0234bSPeter Maydell  * Note that this function does not do anything to cause the data in the
1458a5c0234bSPeter Maydell  * RAM memory region to be migrated; that is the responsibility of the caller.
1459a5c0234bSPeter Maydell  * (For RAM device memory regions, migrating the contents rarely makes sense.)
146021e00fa5SAlex Williamson  */
146121e00fa5SAlex Williamson void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1462d32335e8SPhilippe Mathieu-Daudé                                        Object *owner,
146321e00fa5SAlex Williamson                                        const char *name,
146421e00fa5SAlex Williamson                                        uint64_t size,
146521e00fa5SAlex Williamson                                        void *ptr);
146621e00fa5SAlex Williamson 
146721e00fa5SAlex Williamson /**
1468022c62cbSPaolo Bonzini  * memory_region_init_alias: Initialize a memory region that aliases all or a
1469022c62cbSPaolo Bonzini  *                           part of another memory region.
1470022c62cbSPaolo Bonzini  *
1471022c62cbSPaolo Bonzini  * @mr: the #MemoryRegion to be initialized.
14722c9b15caSPaolo Bonzini  * @owner: the object that tracks the region's reference count
1473022c62cbSPaolo Bonzini  * @name: used for debugging; not visible to the user or ABI
1474022c62cbSPaolo Bonzini  * @orig: the region to be referenced; @mr will be equivalent to
1475022c62cbSPaolo Bonzini  *        @orig between @offset and @offset + @size - 1.
1476022c62cbSPaolo Bonzini  * @offset: start of the section in @orig to be referenced.
1477022c62cbSPaolo Bonzini  * @size: size of the region.
1478022c62cbSPaolo Bonzini  */
1479022c62cbSPaolo Bonzini void memory_region_init_alias(MemoryRegion *mr,
1480d32335e8SPhilippe Mathieu-Daudé                               Object *owner,
1481022c62cbSPaolo Bonzini                               const char *name,
1482022c62cbSPaolo Bonzini                               MemoryRegion *orig,
1483022c62cbSPaolo Bonzini                               hwaddr offset,
1484022c62cbSPaolo Bonzini                               uint64_t size);
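
/*
 * Example (editorial sketch): mirroring the first megabyte of a RAM region
 * so it can also be mapped at a second guest-physical address.  "owner" is
 * hypothetical and MiB comes from "qemu/units.h".
 *
 *   MemoryRegion ram, ram_alias;
 *
 *   memory_region_init_ram(&ram, owner, "ram", 128 * MiB, &error_fatal);
 *   memory_region_init_alias(&ram_alias, owner, "ram-alias",
 *                            &ram, 0, 1 * MiB);
 *   // ram_alias can now be added as a subregion elsewhere; accesses to it
 *   // are equivalent to accesses to the first MiB of "ram".
 */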
1485022c62cbSPaolo Bonzini 
1486022c62cbSPaolo Bonzini /**
1487b59821a9SPeter Maydell  * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
1488a1777f7fSPeter Maydell  *
1489b59821a9SPeter Maydell  * This has the same effect as calling memory_region_init_ram_nomigrate()
1490a1777f7fSPeter Maydell  * and then marking the resulting region read-only with
1491a1777f7fSPeter Maydell  * memory_region_set_readonly().
1492a1777f7fSPeter Maydell  *
1493b59821a9SPeter Maydell  * Note that this function does not do anything to cause the data in the
1494b59821a9SPeter Maydell  * RAM side of the memory region to be migrated; that is the responsibility
1495b59821a9SPeter Maydell  * of the caller.
1496b59821a9SPeter Maydell  *
1497a1777f7fSPeter Maydell  * @mr: the #MemoryRegion to be initialized.
1498a1777f7fSPeter Maydell  * @owner: the object that tracks the region's reference count
1499e8f5fe2dSDr. David Alan Gilbert  * @name: Region name, becomes part of RAMBlock name used in migration stream;
1500e8f5fe2dSDr. David Alan Gilbert  *        must be unique within any device
1501a1777f7fSPeter Maydell  * @size: size of the region.
1502a1777f7fSPeter Maydell  * @errp: pointer to Error*, to store an error if it happens.
1503197faa70SPhilippe Mathieu-Daudé  *
1504197faa70SPhilippe Mathieu-Daudé  * Return: true on success, else false setting @errp with error.
1505a1777f7fSPeter Maydell  */
1506197faa70SPhilippe Mathieu-Daudé bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
1507d32335e8SPhilippe Mathieu-Daudé                                       Object *owner,
1508a1777f7fSPeter Maydell                                       const char *name,
1509a1777f7fSPeter Maydell                                       uint64_t size,
1510a1777f7fSPeter Maydell                                       Error **errp);
1511a1777f7fSPeter Maydell 
1512a1777f7fSPeter Maydell /**
1513b59821a9SPeter Maydell  * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
1514b59821a9SPeter Maydell  *                                 Writes are handled via callbacks.
1515b59821a9SPeter Maydell  *
1516b59821a9SPeter Maydell  * Note that this function does not do anything to cause the data in the
1517b59821a9SPeter Maydell  * RAM side of the memory region to be migrated; that is the responsibility
1518b59821a9SPeter Maydell  * of the caller.
1519022c62cbSPaolo Bonzini  *
1520022c62cbSPaolo Bonzini  * @mr: the #MemoryRegion to be initialized.
15212c9b15caSPaolo Bonzini  * @owner: the object that tracks the region's reference count
152239e0b03dSPeter Maydell  * @ops: callbacks for write access handling (must not be NULL).
152357914ecbSJay Zhou  * @opaque: passed to the read and write callbacks of the @ops structure.
1524e8f5fe2dSDr. David Alan Gilbert  * @name: Region name, becomes part of RAMBlock name used in migration stream;
1525e8f5fe2dSDr. David Alan Gilbert  *        must be unique within any device
1526022c62cbSPaolo Bonzini  * @size: size of the region.
152733e0eb52SHu Tao  * @errp: pointer to Error*, to store an error if it happens.
1528ae076b6cSPhilippe Mathieu-Daudé  *
1529ae076b6cSPhilippe Mathieu-Daudé  * Return: true on success, else false setting @errp with error.
1530022c62cbSPaolo Bonzini  */
1531ae076b6cSPhilippe Mathieu-Daudé bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1532d32335e8SPhilippe Mathieu-Daudé                                              Object *owner,
1533022c62cbSPaolo Bonzini                                              const MemoryRegionOps *ops,
1534022c62cbSPaolo Bonzini                                              void *opaque,
1535022c62cbSPaolo Bonzini                                              const char *name,
153633e0eb52SHu Tao                                              uint64_t size,
153733e0eb52SHu Tao                                              Error **errp);
1538022c62cbSPaolo Bonzini 
1539022c62cbSPaolo Bonzini /**
15401221a474SAlexey Kardashevskiy  * memory_region_init_iommu: Initialize a memory region of a custom type
15411221a474SAlexey Kardashevskiy  * that translates addresses
154230951157SAvi Kivity  *
154330951157SAvi Kivity  * An IOMMU region translates addresses and forwards accesses to a target
154430951157SAvi Kivity  * memory region.
154530951157SAvi Kivity  *
15462ce931d0SPeter Maydell  * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
15472ce931d0SPeter Maydell  * @_iommu_mr should be a pointer to enough memory for an instance of
15482ce931d0SPeter Maydell  * that subclass, @instance_size is the size of that subclass, and
15492ce931d0SPeter Maydell  * @mrtypename is its name. This function will initialize @_iommu_mr as an
15502ce931d0SPeter Maydell  * instance of the subclass, and its methods will then be called to handle
15512ce931d0SPeter Maydell  * accesses to the memory region. See the documentation of
15522ce931d0SPeter Maydell  * #IOMMUMemoryRegionClass for further details.
15532ce931d0SPeter Maydell  *
15541221a474SAlexey Kardashevskiy  * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
15551221a474SAlexey Kardashevskiy  * @instance_size: the IOMMUMemoryRegion subclass instance size
155657914ecbSJay Zhou  * @mrtypename: the type name of the #IOMMUMemoryRegion
15572c9b15caSPaolo Bonzini  * @owner: the object that tracks the region's reference count
155830951157SAvi Kivity  * @name: used for debugging; not visible to the user or ABI
155930951157SAvi Kivity  * @size: size of the region.
156030951157SAvi Kivity  */
15611221a474SAlexey Kardashevskiy void memory_region_init_iommu(void *_iommu_mr,
15621221a474SAlexey Kardashevskiy                               size_t instance_size,
15631221a474SAlexey Kardashevskiy                               const char *mrtypename,
15641221a474SAlexey Kardashevskiy                               Object *owner,
156530951157SAvi Kivity                               const char *name,
156630951157SAvi Kivity                               uint64_t size);
156730951157SAvi Kivity 
1568022c62cbSPaolo Bonzini /**
1569b08199c6SPeter Maydell  * memory_region_init_ram - Initialize RAM memory region.  Accesses into the
1570b08199c6SPeter Maydell  *                          region will modify memory directly.
1571b08199c6SPeter Maydell  *
1572b08199c6SPeter Maydell  * @mr: the #MemoryRegion to be initialized
1573b08199c6SPeter Maydell  * @owner: the object that tracks the region's reference count (must be
1574b08199c6SPeter Maydell  *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
1575b08199c6SPeter Maydell  * @name: name of the memory region
1576b08199c6SPeter Maydell  * @size: size of the region in bytes
1577b08199c6SPeter Maydell  * @errp: pointer to Error*, to store an error if it happens.
1578b08199c6SPeter Maydell  *
1579b08199c6SPeter Maydell  * This function allocates RAM for a board model or device, and
1580b08199c6SPeter Maydell  * arranges for it to be migrated (by calling vmstate_register_ram()
1581b08199c6SPeter Maydell  * if @owner is a DeviceState, or vmstate_register_ram_global() if
1582b08199c6SPeter Maydell  * @owner is NULL).
1583b08199c6SPeter Maydell  *
1584b08199c6SPeter Maydell  * TODO: Currently we restrict @owner to being either NULL (for
1585b08199c6SPeter Maydell  * global RAM regions with no owner) or devices, so that we can
1586b08199c6SPeter Maydell  * give the RAM block a unique name for migration purposes.
1587b08199c6SPeter Maydell  * We should lift this restriction and allow arbitrary Objects.
1588b08199c6SPeter Maydell  * If you pass a non-NULL non-device @owner then we will assert.
1589fe5f33d6SPhilippe Mathieu-Daudé  *
1590fe5f33d6SPhilippe Mathieu-Daudé  * Return: true on success, else false setting @errp with error.
1591b08199c6SPeter Maydell  */
1592fe5f33d6SPhilippe Mathieu-Daudé bool memory_region_init_ram(MemoryRegion *mr,
1593d32335e8SPhilippe Mathieu-Daudé                             Object *owner,
1594b08199c6SPeter Maydell                             const char *name,
1595b08199c6SPeter Maydell                             uint64_t size,
1596b08199c6SPeter Maydell                             Error **errp);
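
/*
 * Example (editorial sketch): allocating migratable board RAM.  "ms" is a
 * hypothetical MachineState pointer; get_system_memory() is declared in
 * "exec/address-spaces.h" and error_fatal in "qapi/error.h".
 *
 *   MemoryRegion *ram = g_new(MemoryRegion, 1);
 *
 *   memory_region_init_ram(ram, NULL, "board.ram", ms->ram_size,
 *                          &error_fatal);
 *   memory_region_add_subregion(get_system_memory(), 0, ram);
 */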
1597b08199c6SPeter Maydell 
1598a0aa6db7SXiaoyao Li bool memory_region_init_ram_guest_memfd(MemoryRegion *mr,
1599a0aa6db7SXiaoyao Li                                         Object *owner,
1600a0aa6db7SXiaoyao Li                                         const char *name,
1601a0aa6db7SXiaoyao Li                                         uint64_t size,
1602a0aa6db7SXiaoyao Li                                         Error **errp);
1603a0aa6db7SXiaoyao Li 
1604b08199c6SPeter Maydell /**
1605b08199c6SPeter Maydell  * memory_region_init_rom: Initialize a ROM memory region.
1606b08199c6SPeter Maydell  *
1607b08199c6SPeter Maydell  * This has the same effect as calling memory_region_init_ram()
1608b08199c6SPeter Maydell  * and then marking the resulting region read-only with
1609b08199c6SPeter Maydell  * memory_region_set_readonly(). This includes arranging for the
1610b08199c6SPeter Maydell  * contents to be migrated.
1611b08199c6SPeter Maydell  *
1612b08199c6SPeter Maydell  * TODO: Currently we restrict @owner to being either NULL (for
1613b08199c6SPeter Maydell  * global RAM regions with no owner) or devices, so that we can
1614b08199c6SPeter Maydell  * give the RAM block a unique name for migration purposes.
1615b08199c6SPeter Maydell  * We should lift this restriction and allow arbitrary Objects.
1616b08199c6SPeter Maydell  * If you pass a non-NULL non-device @owner then we will assert.
1617b08199c6SPeter Maydell  *
1618b08199c6SPeter Maydell  * @mr: the #MemoryRegion to be initialized.
1619b08199c6SPeter Maydell  * @owner: the object that tracks the region's reference count
1620b08199c6SPeter Maydell  * @name: Region name, becomes part of RAMBlock name used in migration stream;
1621b08199c6SPeter Maydell  *        must be unique within any device
1622b08199c6SPeter Maydell  * @size: size of the region.
1623b08199c6SPeter Maydell  * @errp: pointer to Error*, to store an error if it happens.
1624b9159451SPhilippe Mathieu-Daudé  *
1625b9159451SPhilippe Mathieu-Daudé  * Return: true on success, else false setting @errp with error.
1626b08199c6SPeter Maydell  */
1627b9159451SPhilippe Mathieu-Daudé bool memory_region_init_rom(MemoryRegion *mr,
1628d32335e8SPhilippe Mathieu-Daudé                             Object *owner,
1629b08199c6SPeter Maydell                             const char *name,
1630b08199c6SPeter Maydell                             uint64_t size,
1631b08199c6SPeter Maydell                             Error **errp);
1632b08199c6SPeter Maydell 
1633b08199c6SPeter Maydell /**
1634b08199c6SPeter Maydell  * memory_region_init_rom_device:  Initialize a ROM memory region.
1635b08199c6SPeter Maydell  *                                 Writes are handled via callbacks.
1636b08199c6SPeter Maydell  *
1637b08199c6SPeter Maydell  * This function initializes a memory region backed by RAM for reads
1638b08199c6SPeter Maydell  * and callbacks for writes, and arranges for the RAM backing to
1639b08199c6SPeter Maydell  * be migrated (by calling vmstate_register_ram()
1640b08199c6SPeter Maydell  * if @owner is a DeviceState, or vmstate_register_ram_global() if
1641b08199c6SPeter Maydell  * @owner is NULL).
1642b08199c6SPeter Maydell  *
1643b08199c6SPeter Maydell  * TODO: Currently we restrict @owner to being either NULL (for
1644b08199c6SPeter Maydell  * global RAM regions with no owner) or devices, so that we can
1645b08199c6SPeter Maydell  * give the RAM block a unique name for migration purposes.
1646b08199c6SPeter Maydell  * We should lift this restriction and allow arbitrary Objects.
1647b08199c6SPeter Maydell  * If you pass a non-NULL non-device @owner then we will assert.
1648b08199c6SPeter Maydell  *
1649b08199c6SPeter Maydell  * @mr: the #MemoryRegion to be initialized.
1650b08199c6SPeter Maydell  * @owner: the object that tracks the region's reference count
1651b08199c6SPeter Maydell  * @ops: callbacks for write access handling (must not be NULL).
16525d248213SPaolo Bonzini  * @opaque: passed to the read and write callbacks of the @ops structure.
1653b08199c6SPeter Maydell  * @name: Region name, becomes part of RAMBlock name used in migration stream;
1654b08199c6SPeter Maydell  *        must be unique within any device
1655b08199c6SPeter Maydell  * @size: size of the region.
1656b08199c6SPeter Maydell  * @errp: pointer to Error*, to store an error if it happens.
165762f5c1b2SPhilippe Mathieu-Daudé  *
165862f5c1b2SPhilippe Mathieu-Daudé  * Return: true on success, else false setting @errp with error.
1659b08199c6SPeter Maydell  */
166062f5c1b2SPhilippe Mathieu-Daudé bool memory_region_init_rom_device(MemoryRegion *mr,
1661d32335e8SPhilippe Mathieu-Daudé                                    Object *owner,
1662b08199c6SPeter Maydell                                    const MemoryRegionOps *ops,
1663b08199c6SPeter Maydell                                    void *opaque,
1664b08199c6SPeter Maydell                                    const char *name,
1665b08199c6SPeter Maydell                                    uint64_t size,
1666b08199c6SPeter Maydell                                    Error **errp);
1667b08199c6SPeter Maydell 
1668b08199c6SPeter Maydell 
1669b08199c6SPeter Maydell /**
1670803c0816SPaolo Bonzini  * memory_region_owner: get a memory region's owner.
1671803c0816SPaolo Bonzini  *
1672803c0816SPaolo Bonzini  * @mr: the memory region being queried.
1673803c0816SPaolo Bonzini  */
1674d32335e8SPhilippe Mathieu-Daudé Object *memory_region_owner(MemoryRegion *mr);
1675803c0816SPaolo Bonzini 
1676803c0816SPaolo Bonzini /**
1677022c62cbSPaolo Bonzini  * memory_region_size: get a memory region's size.
1678022c62cbSPaolo Bonzini  *
1679022c62cbSPaolo Bonzini  * @mr: the memory region being queried.
1680022c62cbSPaolo Bonzini  */
1681022c62cbSPaolo Bonzini uint64_t memory_region_size(MemoryRegion *mr);
1682022c62cbSPaolo Bonzini 
1683022c62cbSPaolo Bonzini /**
1684022c62cbSPaolo Bonzini  * memory_region_is_ram: check whether a memory region is random access
1685022c62cbSPaolo Bonzini  *
1686847b31f0SLi Qiang  * Returns %true if a memory region is random access.
1687022c62cbSPaolo Bonzini  *
1688022c62cbSPaolo Bonzini  * @mr: the memory region being queried
1689022c62cbSPaolo Bonzini  */
16901619d1feSPaolo Bonzini static inline bool memory_region_is_ram(MemoryRegion *mr)
16911619d1feSPaolo Bonzini {
16921619d1feSPaolo Bonzini     return mr->ram;
16931619d1feSPaolo Bonzini }
1694022c62cbSPaolo Bonzini 
1695022c62cbSPaolo Bonzini /**
169621e00fa5SAlex Williamson  * memory_region_is_ram_device: check whether a memory region is a ram device
1697e4dc3f59SNikunj A Dadhania  *
1698847b31f0SLi Qiang  * Returns %true if a memory region is a device-backed RAM region
1699e4dc3f59SNikunj A Dadhania  *
1700e4dc3f59SNikunj A Dadhania  * @mr: the memory region being queried
1701e4dc3f59SNikunj A Dadhania  */
170221e00fa5SAlex Williamson bool memory_region_is_ram_device(MemoryRegion *mr);
1703e4dc3f59SNikunj A Dadhania 
1704e4dc3f59SNikunj A Dadhania /**
17055f9a5ea1SJan Kiszka  * memory_region_is_romd: check whether a memory region is in ROMD mode
1706022c62cbSPaolo Bonzini  *
17075f9a5ea1SJan Kiszka  * Returns %true if a memory region is a ROM device and currently set to allow
1708022c62cbSPaolo Bonzini  * direct reads.
1709022c62cbSPaolo Bonzini  *
1710022c62cbSPaolo Bonzini  * @mr: the memory region being queried
1711022c62cbSPaolo Bonzini  */
1712022c62cbSPaolo Bonzini static inline bool memory_region_is_romd(MemoryRegion *mr)
1713022c62cbSPaolo Bonzini {
17145f9a5ea1SJan Kiszka     return mr->rom_device && mr->romd_mode;
1715022c62cbSPaolo Bonzini }
1716022c62cbSPaolo Bonzini 
1717022c62cbSPaolo Bonzini /**
171856918a12SSean Christopherson  * memory_region_is_protected: check whether a memory region is protected
171956918a12SSean Christopherson  *
172056918a12SSean Christopherson  * Returns %true if a memory region is protected RAM and cannot be accessed
172156918a12SSean Christopherson  * via standard mechanisms, e.g. DMA.
172256918a12SSean Christopherson  *
172356918a12SSean Christopherson  * @mr: the memory region being queried
172456918a12SSean Christopherson  */
172556918a12SSean Christopherson bool memory_region_is_protected(MemoryRegion *mr);
172656918a12SSean Christopherson 
172756918a12SSean Christopherson /**
172815f7a80cSXiaoyao Li  * memory_region_has_guest_memfd: check whether a memory region has guest_memfd
172915f7a80cSXiaoyao Li  *     associated
173015f7a80cSXiaoyao Li  *
173115f7a80cSXiaoyao Li  * Returns %true if a memory region's ram_block has valid guest_memfd assigned.
173215f7a80cSXiaoyao Li  *
173315f7a80cSXiaoyao Li  * @mr: the memory region being queried
173415f7a80cSXiaoyao Li  */
173515f7a80cSXiaoyao Li bool memory_region_has_guest_memfd(MemoryRegion *mr);
173615f7a80cSXiaoyao Li 
173715f7a80cSXiaoyao Li /**
17383df9d748SAlexey Kardashevskiy  * memory_region_get_iommu: check whether a memory region is an iommu
173930951157SAvi Kivity  *
17403df9d748SAlexey Kardashevskiy  * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
17413df9d748SAlexey Kardashevskiy  * otherwise NULL.
174230951157SAvi Kivity  *
174330951157SAvi Kivity  * @mr: the memory region being queried
174430951157SAvi Kivity  */
memory_region_get_iommu(MemoryRegion * mr)17453df9d748SAlexey Kardashevskiy static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
17461619d1feSPaolo Bonzini {
174712d37882SJason Wang     if (mr->alias) {
17483df9d748SAlexey Kardashevskiy         return memory_region_get_iommu(mr->alias);
174912d37882SJason Wang     }
17503df9d748SAlexey Kardashevskiy     if (mr->is_iommu) {
17513df9d748SAlexey Kardashevskiy         return (IOMMUMemoryRegion *) mr;
17523df9d748SAlexey Kardashevskiy     }
17533df9d748SAlexey Kardashevskiy     return NULL;
17541619d1feSPaolo Bonzini }
17551619d1feSPaolo Bonzini 
17561221a474SAlexey Kardashevskiy /**
17571221a474SAlexey Kardashevskiy  * memory_region_get_iommu_class_nocheck: returns iommu memory region class
17581221a474SAlexey Kardashevskiy  *   if an iommu or NULL if not
17591221a474SAlexey Kardashevskiy  *
176057914ecbSJay Zhou  * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
176157914ecbSJay Zhou  * otherwise NULL. This is a fast path that avoids QOM checks; use with caution.
17621221a474SAlexey Kardashevskiy  *
17635d248213SPaolo Bonzini  * @iommu_mr: the memory region being queried
17641221a474SAlexey Kardashevskiy  */
memory_region_get_iommu_class_nocheck(IOMMUMemoryRegion * iommu_mr)17651221a474SAlexey Kardashevskiy static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
17661221a474SAlexey Kardashevskiy         IOMMUMemoryRegion *iommu_mr)
17671221a474SAlexey Kardashevskiy {
17681221a474SAlexey Kardashevskiy     return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
17691221a474SAlexey Kardashevskiy }
17701221a474SAlexey Kardashevskiy 
17713df9d748SAlexey Kardashevskiy #define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
177230951157SAvi Kivity 
177330951157SAvi Kivity /**
1774f682e9c2SAlexey Kardashevskiy  * memory_region_iommu_get_min_page_size: get minimum supported page size
1775f682e9c2SAlexey Kardashevskiy  * for an iommu
1776f682e9c2SAlexey Kardashevskiy  *
1777f682e9c2SAlexey Kardashevskiy  * Returns minimum supported page size for an iommu.
1778f682e9c2SAlexey Kardashevskiy  *
17793df9d748SAlexey Kardashevskiy  * @iommu_mr: the memory region being queried
1780f682e9c2SAlexey Kardashevskiy  */
17813df9d748SAlexey Kardashevskiy uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
1782f682e9c2SAlexey Kardashevskiy 
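/*
 * Example (illustrative sketch, not part of the API): probing a region
 * for IOMMU support before querying its minimum page size.  "mr" is
 * assumed to be a #MemoryRegion obtained elsewhere.
 *
 *     IOMMUMemoryRegion *iommu_mr = memory_region_get_iommu(mr);
 *     if (iommu_mr) {
 *         uint64_t min_page =
 *             memory_region_iommu_get_min_page_size(iommu_mr);
 *         ...
 *     }
 */
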
1783f682e9c2SAlexey Kardashevskiy /**
178406866575SDavid Gibson  * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
178506866575SDavid Gibson  *
1786cdb30812SPeter Xu  * Note: for any IOMMU implementation, an in-place mapping change
1787cdb30812SPeter Xu  * should be notified with an UNMAP followed by a MAP.
1788cdb30812SPeter Xu  *
17893df9d748SAlexey Kardashevskiy  * @iommu_mr: the memory region that was changed
1790cb1efcf4SPeter Maydell  * @iommu_idx: the IOMMU index for the translation table which has changed
17915039caf3SEugenio Pérez  * @event: TLB event with the new entry in the IOMMU translation table.
17925039caf3SEugenio Pérez  *         The entry replaces all old entries for the same virtual I/O address
17935039caf3SEugenio Pérez  *         range.
179406866575SDavid Gibson  */
17953df9d748SAlexey Kardashevskiy void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
1796cb1efcf4SPeter Maydell                                 int iommu_idx,
1797eb5b2896SPhilippe Mathieu-Daudé                                 const IOMMUTLBEvent event);
179806866575SDavid Gibson 
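/*
 * Example (illustrative sketch): replacing a mapping in place.  Per the
 * note above, the change is signalled as an UNMAP of the old entry
 * followed by a MAP of the new one.  "as", "iova", "mask" and "new_pa"
 * are assumed to be supplied by the caller; IOMMU index 0 is assumed.
 *
 *     IOMMUTLBEvent event = {
 *         .type = IOMMU_NOTIFIER_UNMAP,
 *         .entry = {
 *             .target_as = as,
 *             .iova = iova,
 *             .addr_mask = mask,
 *             .perm = IOMMU_NONE,
 *         },
 *     };
 *     memory_region_notify_iommu(iommu_mr, 0, event);
 *
 *     event.type = IOMMU_NOTIFIER_MAP;
 *     event.entry.translated_addr = new_pa;
 *     event.entry.perm = IOMMU_RW;
 *     memory_region_notify_iommu(iommu_mr, 0, event);
 */
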
179906866575SDavid Gibson /**
18003b5ebf85SEugenio Pérez  * memory_region_notify_iommu_one: notify a change in an IOMMU translation
1801bd2bfa4cSPeter Xu  *                           entry to a single notifier
1802bd2bfa4cSPeter Xu  *
1803bd2bfa4cSPeter Xu  * This works just like memory_region_notify_iommu(), but it only
1804bd2bfa4cSPeter Xu  * notifies a specific notifier, not all of them.
1805bd2bfa4cSPeter Xu  *
1806bd2bfa4cSPeter Xu  * @notifier: the notifier to be notified
18075039caf3SEugenio Pérez  * @event: TLB event with the new entry in the IOMMU translation table.
18085039caf3SEugenio Pérez  *         The entry replaces all old entries for the same virtual I/O address
18095039caf3SEugenio Pérez  *         range.
1810bd2bfa4cSPeter Xu  */
18113b5ebf85SEugenio Pérez void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
1812ec40be99SPhilippe Mathieu-Daudé                                     const IOMMUTLBEvent *event);
1813bd2bfa4cSPeter Xu 
1814bd2bfa4cSPeter Xu /**
18157caebbf9SJason Wang  * memory_region_unmap_iommu_notifier_range: notify an unmap for an IOMMU
18167caebbf9SJason Wang  *                                           translation that covers the
18177caebbf9SJason Wang  *                                           range of a notifier
18187caebbf9SJason Wang  *
18197caebbf9SJason Wang  * @notifier: the notifier to be notified
18207caebbf9SJason Wang  */
1821afa55c6eSBernhard Beschow void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier);
18227caebbf9SJason Wang 
18237caebbf9SJason Wang 
18247caebbf9SJason Wang /**
182506866575SDavid Gibson  * memory_region_register_iommu_notifier: register a notifier for changes to
182606866575SDavid Gibson  * IOMMU translation entries.
182706866575SDavid Gibson  *
1828549d4005SEric Auger  * Returns 0 on success, or a negative errno otherwise. In particular,
1829549d4005SEric Auger  * -EINVAL indicates that at least one of the attributes of the notifier
1830549d4005SEric Auger  * is not supported (flag/range) by the IOMMU memory region. In case of
1831549d4005SEric Auger  * error, the error object must be created.
1832549d4005SEric Auger  *
183306866575SDavid Gibson  * @mr: the memory region to observe
1834cdb30812SPeter Xu  * @n: the IOMMUNotifier to be added; the notify callback receives a
1835cdb30812SPeter Xu  *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
1836cdb30812SPeter Xu  *     ceases to be valid on exit from the notifier.
18375d248213SPaolo Bonzini  * @errp: pointer to Error*, to store an error if it happens.
183806866575SDavid Gibson  */
1839549d4005SEric Auger int memory_region_register_iommu_notifier(MemoryRegion *mr,
1840549d4005SEric Auger                                           IOMMUNotifier *n, Error **errp);
184106866575SDavid Gibson 
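/*
 * Example (illustrative sketch): registering a notifier for MAP and
 * UNMAP events on IOMMU index 0.  "my_notify" and "vdev" are
 * hypothetical; iommu_notifier_init() and IOMMU_NOTIFIER_ALL are
 * assumed to be the helpers declared earlier in this header.
 *
 *     static void my_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
 *     {
 *         // react to the changed translation
 *     }
 *
 *     Error *err = NULL;
 *     iommu_notifier_init(&vdev->n, my_notify, IOMMU_NOTIFIER_ALL,
 *                         0, HWADDR_MAX, 0);
 *     if (memory_region_register_iommu_notifier(mr, &vdev->n, &err)) {
 *         error_report_err(err);
 *     }
 */
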
184206866575SDavid Gibson /**
1843a788f227SDavid Gibson  * memory_region_iommu_replay: replay existing IOMMU translations to
1844f682e9c2SAlexey Kardashevskiy  * a notifier with the minimum page granularity returned by
1845f682e9c2SAlexey Kardashevskiy  * memory_region_iommu_get_min_page_size().
1846a788f227SDavid Gibson  *
18472ce931d0SPeter Maydell  * Note: this is not related to record-and-replay functionality.
18482ce931d0SPeter Maydell  *
18493df9d748SAlexey Kardashevskiy  * @iommu_mr: the memory region to observe
1850a788f227SDavid Gibson  * @n: the notifier to which to replay iommu mappings
1851a788f227SDavid Gibson  */
18523df9d748SAlexey Kardashevskiy void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
1853a788f227SDavid Gibson 
1854a788f227SDavid Gibson /**
185506866575SDavid Gibson  * memory_region_unregister_iommu_notifier: unregister a notifier for
185606866575SDavid Gibson  * changes to IOMMU translation entries.
185706866575SDavid Gibson  *
185824c32ed3SStefan Weil  * @mr: the memory region which was observed and for which notify_stopped()
1859d22d8956SAlexey Kardashevskiy  *      needs to be called
186006866575SDavid Gibson  * @n: the notifier to be removed.
186106866575SDavid Gibson  */
1862cdb30812SPeter Xu void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1863cdb30812SPeter Xu                                              IOMMUNotifier *n);
186406866575SDavid Gibson 
186506866575SDavid Gibson /**
1866f1334de6SAlexey Kardashevskiy  * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
1867f1334de6SAlexey Kardashevskiy  * defined on the IOMMU.
1868f1334de6SAlexey Kardashevskiy  *
18692ce931d0SPeter Maydell  * Returns 0 on success, or a negative errno otherwise. In particular,
18702ce931d0SPeter Maydell  * -EINVAL indicates that the IOMMU does not support the requested
18712ce931d0SPeter Maydell  * attribute.
1872f1334de6SAlexey Kardashevskiy  *
1873f1334de6SAlexey Kardashevskiy  * @iommu_mr: the memory region
1874f1334de6SAlexey Kardashevskiy  * @attr: the requested attribute
1875f1334de6SAlexey Kardashevskiy  * @data: a pointer to the requested attribute data
1876f1334de6SAlexey Kardashevskiy  */
1877f1334de6SAlexey Kardashevskiy int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1878f1334de6SAlexey Kardashevskiy                                  enum IOMMUMemoryRegionAttr attr,
1879f1334de6SAlexey Kardashevskiy                                  void *data);
1880f1334de6SAlexey Kardashevskiy 
1881f1334de6SAlexey Kardashevskiy /**
188221f40209SPeter Maydell  * memory_region_iommu_attrs_to_index: return the IOMMU index to
188321f40209SPeter Maydell  * use for translations with the given memory transaction attributes.
188421f40209SPeter Maydell  *
188521f40209SPeter Maydell  * @iommu_mr: the memory region
188621f40209SPeter Maydell  * @attrs: the memory transaction attributes
188721f40209SPeter Maydell  */
188821f40209SPeter Maydell int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
188921f40209SPeter Maydell                                        MemTxAttrs attrs);
189021f40209SPeter Maydell 
189121f40209SPeter Maydell /**
189221f40209SPeter Maydell  * memory_region_iommu_num_indexes: return the total number of IOMMU
189321f40209SPeter Maydell  * indexes that this IOMMU supports.
189421f40209SPeter Maydell  *
189521f40209SPeter Maydell  * @iommu_mr: the memory region
189621f40209SPeter Maydell  */
189721f40209SPeter Maydell int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
189821f40209SPeter Maydell 
189921f40209SPeter Maydell /**
1900022c62cbSPaolo Bonzini  * memory_region_name: get a memory region's name
1901022c62cbSPaolo Bonzini  *
1902022c62cbSPaolo Bonzini  * Returns the string that was used to initialize the memory region.
1903022c62cbSPaolo Bonzini  *
1904022c62cbSPaolo Bonzini  * @mr: the memory region being queried
1905022c62cbSPaolo Bonzini  */
19065d546d4bSPeter Crosthwaite const char *memory_region_name(const MemoryRegion *mr);
1907022c62cbSPaolo Bonzini 
1908022c62cbSPaolo Bonzini /**
1909022c62cbSPaolo Bonzini  * memory_region_is_logging: return whether a memory region is logging writes
1910022c62cbSPaolo Bonzini  *
19112d1a35beSPaolo Bonzini  * Returns %true if the memory region is logging writes for the given client
19122d1a35beSPaolo Bonzini  *
19132d1a35beSPaolo Bonzini  * @mr: the memory region being queried
19142d1a35beSPaolo Bonzini  * @client: the client being queried
19152d1a35beSPaolo Bonzini  */
19162d1a35beSPaolo Bonzini bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
19172d1a35beSPaolo Bonzini 
19182d1a35beSPaolo Bonzini /**
19192d1a35beSPaolo Bonzini  * memory_region_get_dirty_log_mask: return the clients for which a
19202d1a35beSPaolo Bonzini  * memory region is logging writes.
19212d1a35beSPaolo Bonzini  *
1922677e7805SPaolo Bonzini  * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
1923677e7805SPaolo Bonzini  * are the bit indices.
1924022c62cbSPaolo Bonzini  *
1925022c62cbSPaolo Bonzini  * @mr: the memory region being queried
1926022c62cbSPaolo Bonzini  */
19272d1a35beSPaolo Bonzini uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
1928022c62cbSPaolo Bonzini 
1929022c62cbSPaolo Bonzini /**
1930022c62cbSPaolo Bonzini  * memory_region_is_rom: check whether a memory region is ROM
1931022c62cbSPaolo Bonzini  *
1932847b31f0SLi Qiang  * Returns %true if a memory region is read-only memory.
1933022c62cbSPaolo Bonzini  *
1934022c62cbSPaolo Bonzini  * @mr: the memory region being queried
1935022c62cbSPaolo Bonzini  */
memory_region_is_rom(MemoryRegion * mr)19361619d1feSPaolo Bonzini static inline bool memory_region_is_rom(MemoryRegion *mr)
19371619d1feSPaolo Bonzini {
19381619d1feSPaolo Bonzini     return mr->ram && mr->readonly;
19391619d1feSPaolo Bonzini }
19401619d1feSPaolo Bonzini 
1941c26763f8SMarc-André Lureau /**
1942c26763f8SMarc-André Lureau  * memory_region_is_nonvolatile: check whether a memory region is non-volatile
1943c26763f8SMarc-André Lureau  *
1944c26763f8SMarc-André Lureau  * Returns %true if a memory region is non-volatile memory.
1945c26763f8SMarc-André Lureau  *
1946c26763f8SMarc-André Lureau  * @mr: the memory region being queried
1947c26763f8SMarc-André Lureau  */
memory_region_is_nonvolatile(MemoryRegion * mr)1948c26763f8SMarc-André Lureau static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
1949c26763f8SMarc-André Lureau {
1950c26763f8SMarc-André Lureau     return mr->nonvolatile;
1951c26763f8SMarc-André Lureau }
1952022c62cbSPaolo Bonzini 
1953022c62cbSPaolo Bonzini /**
1954a35ba7beSPaolo Bonzini  * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
1955a35ba7beSPaolo Bonzini  *
1956a35ba7beSPaolo Bonzini  * Returns a file descriptor backing a file-based RAM memory region,
1957a35ba7beSPaolo Bonzini  * or -1 if the region is not a file-based RAM memory region.
1958a35ba7beSPaolo Bonzini  *
1959a35ba7beSPaolo Bonzini  * @mr: the RAM or alias memory region being queried.
1960a35ba7beSPaolo Bonzini  */
1961a35ba7beSPaolo Bonzini int memory_region_get_fd(MemoryRegion *mr);
1962a35ba7beSPaolo Bonzini 
1963a35ba7beSPaolo Bonzini /**
196407bdaa41SPaolo Bonzini  * memory_region_from_host: Convert a pointer into a RAM memory region
196507bdaa41SPaolo Bonzini  * and an offset within it.
196607bdaa41SPaolo Bonzini  *
196707bdaa41SPaolo Bonzini  * Given a host pointer inside a RAM memory region (created with
196807bdaa41SPaolo Bonzini  * memory_region_init_ram() or memory_region_init_ram_ptr()), return
196907bdaa41SPaolo Bonzini  * the MemoryRegion and the offset within it.
197007bdaa41SPaolo Bonzini  *
197107bdaa41SPaolo Bonzini  * Use with care; by the time this function returns, the returned pointer is
197207bdaa41SPaolo Bonzini  * not protected by RCU anymore.  If the caller is not within an RCU critical
1973a4a411fbSStefan Hajnoczi  * section and does not hold the BQL, it must have other means of
197407bdaa41SPaolo Bonzini  * protecting the pointer, such as a reference to the region that includes
197507bdaa41SPaolo Bonzini  * the incoming ram_addr_t.
197607bdaa41SPaolo Bonzini  *
197757914ecbSJay Zhou  * @ptr: the host pointer to be converted
197857914ecbSJay Zhou  * @offset: the offset within memory region
197907bdaa41SPaolo Bonzini  */
198007bdaa41SPaolo Bonzini MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
198107bdaa41SPaolo Bonzini 
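/*
 * Example (illustrative sketch): resolving a host pointer back to its
 * RAM region inside an RCU critical section, as required by the note
 * above.  "ptr" is assumed to point into guest RAM.
 *
 *     ram_addr_t offset;
 *     MemoryRegion *mr;
 *
 *     RCU_READ_LOCK_GUARD();
 *     mr = memory_region_from_host(ptr, &offset);
 *     if (mr) {
 *         ... use mr and offset while still under the RCU lock ...
 *     }
 */
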
198207bdaa41SPaolo Bonzini /**
1983022c62cbSPaolo Bonzini  * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
1984022c62cbSPaolo Bonzini  *
1985022c62cbSPaolo Bonzini  * Returns a host pointer to a RAM memory region (created with
198649b24afcSPaolo Bonzini  * memory_region_init_ram() or memory_region_init_ram_ptr()).
198749b24afcSPaolo Bonzini  *
198849b24afcSPaolo Bonzini  * Use with care; by the time this function returns, the returned pointer is
198949b24afcSPaolo Bonzini  * not protected by RCU anymore.  If the caller is not within an RCU critical
1990a4a411fbSStefan Hajnoczi  * section and does not hold the BQL, it must have other means of
199149b24afcSPaolo Bonzini  * protecting the pointer, such as a reference to the region that includes
199249b24afcSPaolo Bonzini  * the incoming ram_addr_t.
1993022c62cbSPaolo Bonzini  *
1994022c62cbSPaolo Bonzini  * @mr: the memory region being queried.
1995022c62cbSPaolo Bonzini  */
1996022c62cbSPaolo Bonzini void *memory_region_get_ram_ptr(MemoryRegion *mr);
1997022c62cbSPaolo Bonzini 
199837d7c084SPaolo Bonzini /* memory_region_ram_resize: Resize a RAM region.
199937d7c084SPaolo Bonzini  *
2000c7c0e724SDavid Hildenbrand  * Resizing RAM while migrating can result in the migration being canceled.
2001c7c0e724SDavid Hildenbrand  * Care has to be taken if the guest might have already detected the memory.
200237d7c084SPaolo Bonzini  *
200337d7c084SPaolo Bonzini  * @mr: a memory region created with @memory_region_init_resizeable_ram.
200437d7c084SPaolo Bonzini  * @newsize: the new size the region
200537d7c084SPaolo Bonzini  * @errp: pointer to Error*, to store an error if it happens.
200637d7c084SPaolo Bonzini  */
200737d7c084SPaolo Bonzini void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
200837d7c084SPaolo Bonzini                               Error **errp);
20099ecc996aSPhilippe Mathieu-Daudé 
201061c490e2SBeata Michalska /**
20119ecc996aSPhilippe Mathieu-Daudé  * memory_region_msync: Synchronize selected address range of
20129ecc996aSPhilippe Mathieu-Daudé  * a memory mapped region
20139ecc996aSPhilippe Mathieu-Daudé  *
20149ecc996aSPhilippe Mathieu-Daudé  * @mr: the memory region to be synced
20159ecc996aSPhilippe Mathieu-Daudé  * @addr: the initial address of the range to be synced
20169ecc996aSPhilippe Mathieu-Daudé  * @size: the size of the range to be synced
20179ecc996aSPhilippe Mathieu-Daudé  */
20189ecc996aSPhilippe Mathieu-Daudé void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);
20199ecc996aSPhilippe Mathieu-Daudé 
20209ecc996aSPhilippe Mathieu-Daudé /**
20219ecc996aSPhilippe Mathieu-Daudé  * memory_region_writeback: Trigger cache writeback for
20225d248213SPaolo Bonzini  * selected address range
202361c490e2SBeata Michalska  *
20245d248213SPaolo Bonzini  * @mr: the memory region to be updated
20255d248213SPaolo Bonzini  * @addr: the initial address of the range to be written back
20265d248213SPaolo Bonzini  * @size: the size of the range to be written back
202761c490e2SBeata Michalska  */
20284dfe59d1SPhilippe Mathieu-Daudé void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);
202937d7c084SPaolo Bonzini 
2030022c62cbSPaolo Bonzini /**
2031022c62cbSPaolo Bonzini  * memory_region_set_log: Turn dirty logging on or off for a region.
2032022c62cbSPaolo Bonzini  *
2033022c62cbSPaolo Bonzini  * Turns dirty logging on or off for a specified client (display, migration).
2034022c62cbSPaolo Bonzini  * Only meaningful for RAM regions.
2035022c62cbSPaolo Bonzini  *
2036022c62cbSPaolo Bonzini  * @mr: the memory region being updated.
2037022c62cbSPaolo Bonzini  * @log: whether dirty logging is to be enabled or disabled.
2038dbddac6dSPaolo Bonzini  * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
2039022c62cbSPaolo Bonzini  */
2040022c62cbSPaolo Bonzini void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
2041022c62cbSPaolo Bonzini 
2042022c62cbSPaolo Bonzini /**
2043022c62cbSPaolo Bonzini  * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
2044022c62cbSPaolo Bonzini  *
2045022c62cbSPaolo Bonzini  * Marks a range of bytes as dirty, after it has been dirtied outside
2046022c62cbSPaolo Bonzini  * guest code.
2047022c62cbSPaolo Bonzini  *
2048022c62cbSPaolo Bonzini  * @mr: the memory region being dirtied.
2049022c62cbSPaolo Bonzini  * @addr: the address (relative to the start of the region) being dirtied.
2050022c62cbSPaolo Bonzini  * @size: size of the range being dirtied.
2051022c62cbSPaolo Bonzini  */
2052022c62cbSPaolo Bonzini void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2053022c62cbSPaolo Bonzini                              hwaddr size);
2054022c62cbSPaolo Bonzini 
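/*
 * Example (illustrative sketch): a device model patching guest RAM
 * directly and then marking the bytes dirty so migration and the
 * display pick up the change.  "ram_mr", "off", "buf" and "len" are
 * hypothetical; the BQL is assumed to be held.
 *
 *     uint8_t *host = memory_region_get_ram_ptr(ram_mr);
 *
 *     memcpy(host + off, buf, len);
 *     memory_region_set_dirty(ram_mr, off, len);
 */
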
2055022c62cbSPaolo Bonzini /**
2056077874e0SPeter Xu  * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
2057077874e0SPeter Xu  *
2058077874e0SPeter Xu  * This function is called when the caller wants to clear the remote
2059077874e0SPeter Xu  * dirty bitmap of a memory range within the memory region.  This can
2060077874e0SPeter Xu  * be used by e.g. KVM to manually clear the dirty log when
2061077874e0SPeter Xu  * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT support is declared by the host
2062077874e0SPeter Xu  * kernel.
2063077874e0SPeter Xu  *
2064077874e0SPeter Xu  * @mr:     the memory region to clear the dirty log upon
2065077874e0SPeter Xu  * @start:  start address offset within the memory region
2066077874e0SPeter Xu  * @len:    length of the memory region to clear dirty bitmap
2067077874e0SPeter Xu  */
2068077874e0SPeter Xu void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
2069077874e0SPeter Xu                                       hwaddr len);
2070077874e0SPeter Xu 
2071077874e0SPeter Xu /**
20728deaf12cSGerd Hoffmann  * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
20738deaf12cSGerd Hoffmann  *                                         bitmap and clear it.
20748deaf12cSGerd Hoffmann  *
20758deaf12cSGerd Hoffmann  * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
20768deaf12cSGerd Hoffmann  * returns the snapshot.  The snapshot can then be used to query dirty
207777302fb5SPaolo Bonzini  * status, using memory_region_snapshot_get_dirty.  Snapshotting allows
207877302fb5SPaolo Bonzini  * querying the same page multiple times, which is especially useful for
207977302fb5SPaolo Bonzini  * display updates where the scanlines often are not page aligned.
20808deaf12cSGerd Hoffmann  *
20811e458f11SStefan Weil  * The dirty bitmap region which gets copied into the snapshot (and
20828deaf12cSGerd Hoffmann  * cleared afterwards) can be larger than requested.  The boundaries
20838deaf12cSGerd Hoffmann  * are rounded up/down so complete bitmap longs (covering 64 pages on
20848deaf12cSGerd Hoffmann  * 64-bit hosts) can be copied over into the bitmap snapshot.  This
20858deaf12cSGerd Hoffmann  * isn't a problem for display updates as the extra pages are outside
20868deaf12cSGerd Hoffmann  * the visible area, and in case the visible area changes a full
20878deaf12cSGerd Hoffmann  * display redraw is due anyway.  Should other use cases for this
20888deaf12cSGerd Hoffmann  * function emerge we might have to revisit this implementation
20898deaf12cSGerd Hoffmann  * detail.
20908deaf12cSGerd Hoffmann  *
20918deaf12cSGerd Hoffmann  * Use g_free to release DirtyBitmapSnapshot.
20928deaf12cSGerd Hoffmann  *
20938deaf12cSGerd Hoffmann  * @mr: the memory region being queried.
20948deaf12cSGerd Hoffmann  * @addr: the address (relative to the start of the region) being queried.
20958deaf12cSGerd Hoffmann  * @size: the size of the range being queried.
20968deaf12cSGerd Hoffmann  * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
20978deaf12cSGerd Hoffmann  */
20988deaf12cSGerd Hoffmann DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
20998deaf12cSGerd Hoffmann                                                             hwaddr addr,
21008deaf12cSGerd Hoffmann                                                             hwaddr size,
21018deaf12cSGerd Hoffmann                                                             unsigned client);
21028deaf12cSGerd Hoffmann 
21038deaf12cSGerd Hoffmann /**
21048deaf12cSGerd Hoffmann  * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
21058deaf12cSGerd Hoffmann  *                                   in the specified dirty bitmap snapshot.
21068deaf12cSGerd Hoffmann  *
21078deaf12cSGerd Hoffmann  * @mr: the memory region being queried.
21088deaf12cSGerd Hoffmann  * @snap: the dirty bitmap snapshot
21098deaf12cSGerd Hoffmann  * @addr: the address (relative to the start of the region) being queried.
21108deaf12cSGerd Hoffmann  * @size: the size of the range being queried.
21118deaf12cSGerd Hoffmann  */
21128deaf12cSGerd Hoffmann bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
21138deaf12cSGerd Hoffmann                                       DirtyBitmapSnapshot *snap,
21148deaf12cSGerd Hoffmann                                       hwaddr addr, hwaddr size);
21158deaf12cSGerd Hoffmann 
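/*
 * Example (illustrative sketch): a display update driven by a dirty
 * bitmap snapshot.  "vram", "fb_size", "height", "stride" and
 * update_scanline() are hypothetical.
 *
 *     DirtyBitmapSnapshot *snap;
 *     int y;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(vram, 0, fb_size,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (y = 0; y < height; y++) {
 *         if (memory_region_snapshot_get_dirty(vram, snap,
 *                                              y * stride, stride)) {
 *             update_scanline(y);
 *         }
 *     }
 *     g_free(snap);
 */
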
21166c279db8SJuan Quintela /**
2117022c62cbSPaolo Bonzini  * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
2118022c62cbSPaolo Bonzini  *                            client.
2119022c62cbSPaolo Bonzini  *
2120022c62cbSPaolo Bonzini  * Marks a range of pages as no longer dirty.
2121022c62cbSPaolo Bonzini  *
2122022c62cbSPaolo Bonzini  * @mr: the region being updated.
2123022c62cbSPaolo Bonzini  * @addr: the start of the subrange being cleaned.
2124022c62cbSPaolo Bonzini  * @size: the size of the subrange being cleaned.
2125022c62cbSPaolo Bonzini  * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
2126022c62cbSPaolo Bonzini  *          %DIRTY_MEMORY_VGA.
2127022c62cbSPaolo Bonzini  */
2128022c62cbSPaolo Bonzini void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2129022c62cbSPaolo Bonzini                                hwaddr size, unsigned client);
2130022c62cbSPaolo Bonzini 
2131022c62cbSPaolo Bonzini /**
2132047be4edSStefan Hajnoczi  * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
2133047be4edSStefan Hajnoczi  *                                 TBs (for self-modifying code).
2134047be4edSStefan Hajnoczi  *
2135047be4edSStefan Hajnoczi  * The MemoryRegionOps->write() callback of a ROM device must use this function
2136047be4edSStefan Hajnoczi  * to mark byte ranges that have been modified internally, such as by directly
2137047be4edSStefan Hajnoczi  * accessing the memory returned by memory_region_get_ram_ptr().
2138047be4edSStefan Hajnoczi  *
2139047be4edSStefan Hajnoczi  * This function marks the range dirty and invalidates TBs so that TCG can
2140047be4edSStefan Hajnoczi  * detect self-modifying code.
2141047be4edSStefan Hajnoczi  *
2142047be4edSStefan Hajnoczi  * @mr: the region being flushed.
2143047be4edSStefan Hajnoczi  * @addr: the start, relative to the start of the region, of the range being
2144047be4edSStefan Hajnoczi  *        flushed.
2145047be4edSStefan Hajnoczi  * @size: the size, in bytes, of the range being flushed.
2146047be4edSStefan Hajnoczi  */
2147047be4edSStefan Hajnoczi void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
2148047be4edSStefan Hajnoczi 
2149047be4edSStefan Hajnoczi /**
2150022c62cbSPaolo Bonzini  * memory_region_set_readonly: Turn a memory region read-only (or read-write)
2151022c62cbSPaolo Bonzini  *
2152022c62cbSPaolo Bonzini  * Allows a memory region to be marked as read-only (turning it into a ROM).
2153022c62cbSPaolo Bonzini  * Only useful on RAM regions.
2154022c62cbSPaolo Bonzini  *
2155022c62cbSPaolo Bonzini  * @mr: the region being updated.
2156022c62cbSPaolo Bonzini  * @readonly: whether the region is to be ROM or RAM.
2157022c62cbSPaolo Bonzini  */
2158022c62cbSPaolo Bonzini void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
2159022c62cbSPaolo Bonzini 
2160022c62cbSPaolo Bonzini /**
2161c26763f8SMarc-André Lureau  * memory_region_set_nonvolatile: Turn a memory region non-volatile
2162c26763f8SMarc-André Lureau  *
2163c26763f8SMarc-André Lureau  * Allows a memory region to be marked as non-volatile.
2164c26763f8SMarc-André Lureau  * Only useful on RAM regions.
2165c26763f8SMarc-André Lureau  *
2166c26763f8SMarc-André Lureau  * @mr: the region being updated.
2167c26763f8SMarc-André Lureau  * @nonvolatile: whether the region is to be non-volatile.
2168c26763f8SMarc-André Lureau  */
2169c26763f8SMarc-André Lureau void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);
2170c26763f8SMarc-André Lureau 
2171c26763f8SMarc-André Lureau /**
21725f9a5ea1SJan Kiszka  * memory_region_rom_device_set_romd: enable/disable ROMD mode
2173022c62cbSPaolo Bonzini  *
2174022c62cbSPaolo Bonzini  * Allows a ROM device (initialized with memory_region_init_rom_device()) to be
21755f9a5ea1SJan Kiszka  * set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
21765f9a5ea1SJan Kiszka  * device is mapped to guest memory and satisfies read access directly.
21775f9a5ea1SJan Kiszka  * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
21785f9a5ea1SJan Kiszka  * Writes are always handled by the #MemoryRegion.write function.
2179022c62cbSPaolo Bonzini  *
2180022c62cbSPaolo Bonzini  * @mr: the memory region to be updated
21815f9a5ea1SJan Kiszka  * @romd_mode: %true to put the region into ROMD mode
2182022c62cbSPaolo Bonzini  */
21835f9a5ea1SJan Kiszka void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
2184022c62cbSPaolo Bonzini 
2185022c62cbSPaolo Bonzini /**
2186022c62cbSPaolo Bonzini  * memory_region_set_coalescing: Enable memory coalescing for the region.
2187022c62cbSPaolo Bonzini  *
2188022c62cbSPaolo Bonzini  * Enables writes to a region to be queued for later processing. MMIO ->write
2189022c62cbSPaolo Bonzini  * callbacks may be delayed until a non-coalesced MMIO is issued.
2190022c62cbSPaolo Bonzini  * Only useful for IO regions.  Roughly similar to write-combining hardware.
2191022c62cbSPaolo Bonzini  *
2192022c62cbSPaolo Bonzini  * @mr: the memory region to be write coalesced
2193022c62cbSPaolo Bonzini  */
2194022c62cbSPaolo Bonzini void memory_region_set_coalescing(MemoryRegion *mr);
2195022c62cbSPaolo Bonzini 
2196022c62cbSPaolo Bonzini /**
2197022c62cbSPaolo Bonzini  * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
2198022c62cbSPaolo Bonzini  *                               a region.
2199022c62cbSPaolo Bonzini  *
2200022c62cbSPaolo Bonzini  * Like memory_region_set_coalescing(), but works on a sub-range of a region.
2201022c62cbSPaolo Bonzini  * Multiple calls can be issued to coalesce disjoint ranges.
2202022c62cbSPaolo Bonzini  *
2203022c62cbSPaolo Bonzini  * @mr: the memory region to be updated.
2204022c62cbSPaolo Bonzini  * @offset: the start of the range within the region to be coalesced.
2205022c62cbSPaolo Bonzini  * @size: the size of the subrange to be coalesced.
2206022c62cbSPaolo Bonzini  */
2207022c62cbSPaolo Bonzini void memory_region_add_coalescing(MemoryRegion *mr,
2208022c62cbSPaolo Bonzini                                   hwaddr offset,
2209022c62cbSPaolo Bonzini                                   uint64_t size);
2210022c62cbSPaolo Bonzini 
2211022c62cbSPaolo Bonzini /**
2212022c62cbSPaolo Bonzini  * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
2213022c62cbSPaolo Bonzini  *
2214022c62cbSPaolo Bonzini  * Disables any coalescing caused by memory_region_set_coalescing() or
2215022c62cbSPaolo Bonzini  * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
2216022c62cbSPaolo Bonzini  * hardware.
2217022c62cbSPaolo Bonzini  *
2218022c62cbSPaolo Bonzini  * @mr: the memory region to be updated.
2219022c62cbSPaolo Bonzini  */
2220022c62cbSPaolo Bonzini void memory_region_clear_coalescing(MemoryRegion *mr);
2221022c62cbSPaolo Bonzini 
2222022c62cbSPaolo Bonzini /**
2223022c62cbSPaolo Bonzini  * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
2224022c62cbSPaolo Bonzini  *                                    accesses.
2225022c62cbSPaolo Bonzini  *
2226022c62cbSPaolo Bonzini  * Ensure that pending coalesced MMIO requests are flushed before the memory
2227022c62cbSPaolo Bonzini  * region is accessed. This property is automatically enabled for all regions
2228022c62cbSPaolo Bonzini  * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
2229022c62cbSPaolo Bonzini  *
2230022c62cbSPaolo Bonzini  * @mr: the memory region to be updated.
2231022c62cbSPaolo Bonzini  */
2232022c62cbSPaolo Bonzini void memory_region_set_flush_coalesced(MemoryRegion *mr);
2233022c62cbSPaolo Bonzini 
2234022c62cbSPaolo Bonzini /**
2235022c62cbSPaolo Bonzini  * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
2236022c62cbSPaolo Bonzini  *                                      accesses.
2237022c62cbSPaolo Bonzini  *
2238022c62cbSPaolo Bonzini  * Clear the automatic coalesced MMIO flushing enabled via
2239022c62cbSPaolo Bonzini  * memory_region_set_flush_coalesced. Note that this service has no effect on
2240022c62cbSPaolo Bonzini  * memory regions that have MMIO coalescing enabled for themselves. For them,
2241022c62cbSPaolo Bonzini  * automatic flushing will stop once coalescing is disabled.
2242022c62cbSPaolo Bonzini  *
2243022c62cbSPaolo Bonzini  * @mr: the memory region to be updated.
2244022c62cbSPaolo Bonzini  */
2245022c62cbSPaolo Bonzini void memory_region_clear_flush_coalesced(MemoryRegion *mr);
2246022c62cbSPaolo Bonzini 
2247022c62cbSPaolo Bonzini /**
2248022c62cbSPaolo Bonzini  * memory_region_add_eventfd: Request an eventfd to be triggered when a word
2249022c62cbSPaolo Bonzini  *                            is written to a location.
2250022c62cbSPaolo Bonzini  *
2251022c62cbSPaolo Bonzini  * Marks a word in an IO region (initialized with memory_region_init_io())
2252022c62cbSPaolo Bonzini  * as a trigger for an eventfd event.  The I/O callback will not be called.
2253022c62cbSPaolo Bonzini  * The caller must be prepared to handle failure (that is, take the required
2254022c62cbSPaolo Bonzini  * action if the callback _is_ called).
2255022c62cbSPaolo Bonzini  *
2256022c62cbSPaolo Bonzini  * @mr: the memory region being updated.
2257022c62cbSPaolo Bonzini  * @addr: the address within @mr that is to be monitored
2258022c62cbSPaolo Bonzini  * @size: the size of the access to trigger the eventfd
2259022c62cbSPaolo Bonzini  * @match_data: whether to match against @data, instead of just @addr
2260022c62cbSPaolo Bonzini  * @data: the data to match against the guest write
226157914ecbSJay Zhou  * @e: event notifier to be triggered when @addr, @size, and @data all match.
2262022c62cbSPaolo Bonzini  **/
2263022c62cbSPaolo Bonzini void memory_region_add_eventfd(MemoryRegion *mr,
2264022c62cbSPaolo Bonzini                                hwaddr addr,
2265022c62cbSPaolo Bonzini                                unsigned size,
2266022c62cbSPaolo Bonzini                                bool match_data,
2267022c62cbSPaolo Bonzini                                uint64_t data,
2268022c62cbSPaolo Bonzini                                EventNotifier *e);
2269022c62cbSPaolo Bonzini 
2270022c62cbSPaolo Bonzini /**
2271022c62cbSPaolo Bonzini  * memory_region_del_eventfd: Cancel an eventfd.
2272022c62cbSPaolo Bonzini  *
2273022c62cbSPaolo Bonzini  * Cancels an eventfd trigger requested by a previous
2274022c62cbSPaolo Bonzini  * memory_region_add_eventfd() call.
2275022c62cbSPaolo Bonzini  *
2276022c62cbSPaolo Bonzini  * @mr: the memory region being updated.
2277022c62cbSPaolo Bonzini  * @addr: the address within @mr that is to be monitored
2278022c62cbSPaolo Bonzini  * @size: the size of the access to trigger the eventfd
2279022c62cbSPaolo Bonzini  * @match_data: whether to match against @data, instead of just @addr
2280022c62cbSPaolo Bonzini  * @data: the data to match against the guest write
228157914ecbSJay Zhou  * @e: event notifier to be triggered when @addr, @size, and @data all match.
2282022c62cbSPaolo Bonzini  */
2283022c62cbSPaolo Bonzini void memory_region_del_eventfd(MemoryRegion *mr,
2284022c62cbSPaolo Bonzini                                hwaddr addr,
2285022c62cbSPaolo Bonzini                                unsigned size,
2286022c62cbSPaolo Bonzini                                bool match_data,
2287022c62cbSPaolo Bonzini                                uint64_t data,
2288022c62cbSPaolo Bonzini                                EventNotifier *e);
2289022c62cbSPaolo Bonzini 
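/*
 * Example (illustrative sketch): wiring a 4-byte doorbell register at
 * offset 0x40 of an MMIO region to an eventfd that fires only when
 * DOORBELL_MAGIC is written.  "s" and DOORBELL_MAGIC are hypothetical.
 *
 *     event_notifier_init(&s->doorbell, 0);
 *     memory_region_add_eventfd(&s->mmio, 0x40, 4,
 *                               true, DOORBELL_MAGIC, &s->doorbell);
 *
 *     ... and the matching teardown:
 *
 *     memory_region_del_eventfd(&s->mmio, 0x40, 4,
 *                               true, DOORBELL_MAGIC, &s->doorbell);
 *     event_notifier_cleanup(&s->doorbell);
 */
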
2290022c62cbSPaolo Bonzini /**
2291022c62cbSPaolo Bonzini  * memory_region_add_subregion: Add a subregion to a container.
2292022c62cbSPaolo Bonzini  *
2293022c62cbSPaolo Bonzini  * Adds a subregion at @offset.  The subregion may not overlap with other
2294022c62cbSPaolo Bonzini  * subregions (except for those explicitly marked as overlapping).  A region
2295022c62cbSPaolo Bonzini  * may only be added once as a subregion (unless removed with
2296022c62cbSPaolo Bonzini  * memory_region_del_subregion()); use memory_region_init_alias() if you
2297022c62cbSPaolo Bonzini  * want a region to be a subregion in multiple locations.
2298022c62cbSPaolo Bonzini  *
2299022c62cbSPaolo Bonzini  * @mr: the region to contain the new subregion; must be a container
2300022c62cbSPaolo Bonzini  *      initialized with memory_region_init().
2301022c62cbSPaolo Bonzini  * @offset: the offset relative to @mr where @subregion is added.
2302022c62cbSPaolo Bonzini  * @subregion: the subregion to be added.
2303022c62cbSPaolo Bonzini  */
2304022c62cbSPaolo Bonzini void memory_region_add_subregion(MemoryRegion *mr,
2305022c62cbSPaolo Bonzini                                  hwaddr offset,
2306022c62cbSPaolo Bonzini                                  MemoryRegion *subregion);
2307022c62cbSPaolo Bonzini /**
2308022c62cbSPaolo Bonzini  * memory_region_add_subregion_overlap: Add a subregion to a container
2309022c62cbSPaolo Bonzini  *                                      with overlap.
2310022c62cbSPaolo Bonzini  *
2311022c62cbSPaolo Bonzini  * Adds a subregion at @offset.  The subregion may overlap with other
2312022c62cbSPaolo Bonzini  * subregions.  Conflicts are resolved by having a higher @priority hide a
2313022c62cbSPaolo Bonzini  * lower @priority. Subregions without priority are taken as @priority 0.
2314022c62cbSPaolo Bonzini  * A region may only be added once as a subregion (unless removed with
2315022c62cbSPaolo Bonzini  * memory_region_del_subregion()); use memory_region_init_alias() if you
2316022c62cbSPaolo Bonzini  * want a region to be a subregion in multiple locations.
2317022c62cbSPaolo Bonzini  *
2318022c62cbSPaolo Bonzini  * @mr: the region to contain the new subregion; must be a container
2319022c62cbSPaolo Bonzini  *      initialized with memory_region_init().
2320022c62cbSPaolo Bonzini  * @offset: the offset relative to @mr where @subregion is added.
2321022c62cbSPaolo Bonzini  * @subregion: the subregion to be added.
2322022c62cbSPaolo Bonzini  * @priority: used for resolving overlaps; highest priority wins.
2323022c62cbSPaolo Bonzini  */
2324022c62cbSPaolo Bonzini void memory_region_add_subregion_overlap(MemoryRegion *mr,
2325022c62cbSPaolo Bonzini                                          hwaddr offset,
2326022c62cbSPaolo Bonzini                                          MemoryRegion *subregion,
2327a1ff8ae0SMarcel Apfelbaum                                          int priority);
2328022c62cbSPaolo Bonzini 
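/*
 * Example (illustrative sketch): composing a board address space.  RAM
 * is mapped at the bottom and a device window is layered over part of
 * it at a higher priority.  All names are hypothetical.
 *
 *     memory_region_add_subregion(sysmem, 0x00000000, machine_ram);
 *     memory_region_add_subregion_overlap(sysmem, 0x10000000,
 *                                         &dev->mmio, 1);
 */
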
2329022c62cbSPaolo Bonzini /**
2330022c62cbSPaolo Bonzini  * memory_region_get_ram_addr: Get the ram address associated with a memory
2331022c62cbSPaolo Bonzini  *                             region
23325d248213SPaolo Bonzini  *
23335d248213SPaolo Bonzini  * @mr: the region to be queried
2334022c62cbSPaolo Bonzini  */
23357ebb2745SFam Zheng ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
2336022c62cbSPaolo Bonzini 
2337a2b257d6SIgor Mammedov uint64_t memory_region_get_alignment(const MemoryRegion *mr);
2338022c62cbSPaolo Bonzini /**
2339022c62cbSPaolo Bonzini  * memory_region_del_subregion: Remove a subregion.
2340022c62cbSPaolo Bonzini  *
2341022c62cbSPaolo Bonzini  * Removes a subregion from its container.
2342022c62cbSPaolo Bonzini  *
2343022c62cbSPaolo Bonzini  * @mr: the container to be updated.
2344022c62cbSPaolo Bonzini  * @subregion: the region being removed; must be a current subregion of @mr.
2345022c62cbSPaolo Bonzini  */
2346022c62cbSPaolo Bonzini void memory_region_del_subregion(MemoryRegion *mr,
2347022c62cbSPaolo Bonzini                                  MemoryRegion *subregion);
2348022c62cbSPaolo Bonzini 
2349022c62cbSPaolo Bonzini /*
2350022c62cbSPaolo Bonzini  * memory_region_set_enabled: dynamically enable or disable a region
2351022c62cbSPaolo Bonzini  *
2352022c62cbSPaolo Bonzini  * Enables or disables a memory region.  A disabled memory region
2353022c62cbSPaolo Bonzini  * ignores all accesses to itself and its subregions.  It does not
2354022c62cbSPaolo Bonzini  * obscure sibling subregions with lower priority - it simply behaves as
2355022c62cbSPaolo Bonzini  * if it was removed from the hierarchy.
2356022c62cbSPaolo Bonzini  *
2357022c62cbSPaolo Bonzini  * Regions default to being enabled.
2358022c62cbSPaolo Bonzini  *
2359022c62cbSPaolo Bonzini  * @mr: the region to be updated
2360022c62cbSPaolo Bonzini  * @enabled: whether to enable or disable the region
2361022c62cbSPaolo Bonzini  */
2362022c62cbSPaolo Bonzini void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
2363022c62cbSPaolo Bonzini 
2364022c62cbSPaolo Bonzini /*
2365022c62cbSPaolo Bonzini  * memory_region_set_address: dynamically update the address of a region
2366022c62cbSPaolo Bonzini  *
2367feca4ac1SPaolo Bonzini  * Dynamically updates the address of a region, relative to its container.
2368022c62cbSPaolo Bonzini  * May be used on regions that are currently part of a memory hierarchy.
2369022c62cbSPaolo Bonzini  *
2370022c62cbSPaolo Bonzini  * @mr: the region to be updated
2371feca4ac1SPaolo Bonzini  * @addr: new address, relative to container region
2372022c62cbSPaolo Bonzini  */
2373022c62cbSPaolo Bonzini void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
2374022c62cbSPaolo Bonzini 
2375022c62cbSPaolo Bonzini /*
2376e7af4c67SMichael S. Tsirkin  * memory_region_set_size: dynamically update the size of a region.
2377e7af4c67SMichael S. Tsirkin  *
2378e7af4c67SMichael S. Tsirkin  * Dynamically updates the size of a region.
2379e7af4c67SMichael S. Tsirkin  *
2380e7af4c67SMichael S. Tsirkin  * @mr: the region to be updated
2381e7af4c67SMichael S. Tsirkin  * @size: used size of the region.
2382e7af4c67SMichael S. Tsirkin  */
2383e7af4c67SMichael S. Tsirkin void memory_region_set_size(MemoryRegion *mr, uint64_t size);
2384e7af4c67SMichael S. Tsirkin 
2385e7af4c67SMichael S. Tsirkin /*
2386022c62cbSPaolo Bonzini  * memory_region_set_alias_offset: dynamically update a memory alias's offset
2387022c62cbSPaolo Bonzini  *
2388022c62cbSPaolo Bonzini  * Dynamically updates the offset into the target region that an alias points
2389022c62cbSPaolo Bonzini  * to, as if the fourth argument to memory_region_init_alias() had changed.
2390022c62cbSPaolo Bonzini  *
2391022c62cbSPaolo Bonzini  * @mr: the #MemoryRegion to be updated; should be an alias.
2392022c62cbSPaolo Bonzini  * @offset: the new offset into the target memory region
2393022c62cbSPaolo Bonzini  */
2394022c62cbSPaolo Bonzini void memory_region_set_alias_offset(MemoryRegion *mr,
2395022c62cbSPaolo Bonzini                                     hwaddr offset);
2396022c62cbSPaolo Bonzini 
2397533f5d66SDavid Hildenbrand /*
2398533f5d66SDavid Hildenbrand  * memory_region_set_unmergeable: Set a memory region unmergeable
2399533f5d66SDavid Hildenbrand  *
2400533f5d66SDavid Hildenbrand  * Mark a memory region unmergeable, resulting in the memory region (or
2401533f5d66SDavid Hildenbrand  * everything contained in a memory region container) not getting merged when
2402533f5d66SDavid Hildenbrand  * simplifying the address space and notifying memory listeners. Consequently,
2403533f5d66SDavid Hildenbrand  * memory listeners will never get notified about ranges that are larger than
2404533f5d66SDavid Hildenbrand  * the original memory regions.
2405533f5d66SDavid Hildenbrand  *
2406533f5d66SDavid Hildenbrand  * This is primarily useful when multiple aliases to a RAM memory region are
2407533f5d66SDavid Hildenbrand  * mapped into a memory region container, and updates (e.g., enable/disable or
2408533f5d66SDavid Hildenbrand  * map/unmap) of individual memory region aliases are not supposed to affect
2409533f5d66SDavid Hildenbrand  * other memory regions in the same container.
2410533f5d66SDavid Hildenbrand  *
2411533f5d66SDavid Hildenbrand  * @mr: the #MemoryRegion to be updated
2412533f5d66SDavid Hildenbrand  * @unmergeable: whether to mark the #MemoryRegion unmergeable
2413533f5d66SDavid Hildenbrand  */
2414533f5d66SDavid Hildenbrand void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable);
2415533f5d66SDavid Hildenbrand 
2416022c62cbSPaolo Bonzini /**
2417feca4ac1SPaolo Bonzini  * memory_region_present: checks if an address relative to a @container
2418feca4ac1SPaolo Bonzini  * translates into a #MemoryRegion within @container
24193ce10901SPaolo Bonzini  *
2420feca4ac1SPaolo Bonzini  * Answer whether a #MemoryRegion within @container covers the address
24213ce10901SPaolo Bonzini  * @addr.
24223ce10901SPaolo Bonzini  *
2423feca4ac1SPaolo Bonzini  * @container: a #MemoryRegion within which @addr is a relative address
2424feca4ac1SPaolo Bonzini  * @addr: the area within @container to be searched
24253ce10901SPaolo Bonzini  */
2426feca4ac1SPaolo Bonzini bool memory_region_present(MemoryRegion *container, hwaddr addr);
24273ce10901SPaolo Bonzini 
24283ce10901SPaolo Bonzini /**
2429eed2bacfSIgor Mammedov  * memory_region_is_mapped: returns true if #MemoryRegion is mapped
2430455faf03SDavid Hildenbrand  * into another memory region, which does not necessarily imply that it is
2431455faf03SDavid Hildenbrand  * mapped into an address space.
2432eed2bacfSIgor Mammedov  *
2433eed2bacfSIgor Mammedov  * @mr: a #MemoryRegion which should be checked if it's mapped
2434eed2bacfSIgor Mammedov  */
2435eed2bacfSIgor Mammedov bool memory_region_is_mapped(MemoryRegion *mr);
2436eed2bacfSIgor Mammedov 
2437eed2bacfSIgor Mammedov /**
24388947d7fcSDavid Hildenbrand  * memory_region_get_ram_discard_manager: get the #RamDiscardManager for a
24398947d7fcSDavid Hildenbrand  * #MemoryRegion
24408947d7fcSDavid Hildenbrand  *
24418947d7fcSDavid Hildenbrand  * The #RamDiscardManager cannot change while a memory region is mapped.
24428947d7fcSDavid Hildenbrand  *
24438947d7fcSDavid Hildenbrand  * @mr: the #MemoryRegion
24448947d7fcSDavid Hildenbrand  */
24458947d7fcSDavid Hildenbrand RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr);
24468947d7fcSDavid Hildenbrand 
24478947d7fcSDavid Hildenbrand /**
24488947d7fcSDavid Hildenbrand  * memory_region_has_ram_discard_manager: check whether a #MemoryRegion has a
24498947d7fcSDavid Hildenbrand  * #RamDiscardManager assigned
24508947d7fcSDavid Hildenbrand  *
24518947d7fcSDavid Hildenbrand  * @mr: the #MemoryRegion
24528947d7fcSDavid Hildenbrand  */
memory_region_has_ram_discard_manager(MemoryRegion * mr)24538947d7fcSDavid Hildenbrand static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr)
24548947d7fcSDavid Hildenbrand {
24558947d7fcSDavid Hildenbrand     return !!memory_region_get_ram_discard_manager(mr);
24568947d7fcSDavid Hildenbrand }
24578947d7fcSDavid Hildenbrand 
24588947d7fcSDavid Hildenbrand /**
24598947d7fcSDavid Hildenbrand  * memory_region_set_ram_discard_manager: set the #RamDiscardManager for a
24608947d7fcSDavid Hildenbrand  * #MemoryRegion
24618947d7fcSDavid Hildenbrand  *
24628947d7fcSDavid Hildenbrand  * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion
24638947d7fcSDavid Hildenbrand  * that does not cover RAM, or a #MemoryRegion that already has a
24648947d7fcSDavid Hildenbrand  * #RamDiscardManager assigned.
24658947d7fcSDavid Hildenbrand  *
24668947d7fcSDavid Hildenbrand  * @mr: the #MemoryRegion
24678947d7fcSDavid Hildenbrand  * @rdm: #RamDiscardManager to set
24688947d7fcSDavid Hildenbrand  */
24698947d7fcSDavid Hildenbrand void memory_region_set_ram_discard_manager(MemoryRegion *mr,
24708947d7fcSDavid Hildenbrand                                            RamDiscardManager *rdm);
24718947d7fcSDavid Hildenbrand 
24728947d7fcSDavid Hildenbrand /**
247373034e9eSPaolo Bonzini  * memory_region_find: translate an address/size relative to a
247473034e9eSPaolo Bonzini  * MemoryRegion into a #MemoryRegionSection.
2475022c62cbSPaolo Bonzini  *
247673034e9eSPaolo Bonzini  * Locates the first #MemoryRegion within @mr that overlaps the range
247773034e9eSPaolo Bonzini  * given by @addr and @size.
2478022c62cbSPaolo Bonzini  *
2479022c62cbSPaolo Bonzini  * Returns a #MemoryRegionSection that describes a contiguous overlap.
2480022c62cbSPaolo Bonzini  * It will have the following characteristics:
248108226b44SPaolo Bonzini  * - @size = 0 iff no overlap was found
248208226b44SPaolo Bonzini  * - @mr is non-%NULL iff an overlap was found
2483022c62cbSPaolo Bonzini  *
248473034e9eSPaolo Bonzini  * Remember that in the return value the @offset_within_region is
248573034e9eSPaolo Bonzini  * relative to the returned region (in the .@mr field), not to the
248673034e9eSPaolo Bonzini  * @mr argument.
248773034e9eSPaolo Bonzini  *
248873034e9eSPaolo Bonzini  * Similarly, the .@offset_within_address_space is relative to the
248973034e9eSPaolo Bonzini  * address space that contains both regions, the passed and the
249073034e9eSPaolo Bonzini  * returned one.  However, in the special case where the @mr argument
2491feca4ac1SPaolo Bonzini  * has no container (and thus is the root of the address space), the
249273034e9eSPaolo Bonzini  * following will hold:
249308226b44SPaolo Bonzini  * - @offset_within_address_space >= @addr
249408226b44SPaolo Bonzini  * - @offset_within_address_space + .@size <= @addr + @size
249573034e9eSPaolo Bonzini  *
249673034e9eSPaolo Bonzini  * @mr: a MemoryRegion within which @addr is a relative address
249773034e9eSPaolo Bonzini  * @addr: start of the area within @mr to be searched
2498022c62cbSPaolo Bonzini  * @size: size of the area to be searched
2499022c62cbSPaolo Bonzini  */
250073034e9eSPaolo Bonzini MemoryRegionSection memory_region_find(MemoryRegion *mr,
2501022c62cbSPaolo Bonzini                                        hwaddr addr, uint64_t size);
2502022c62cbSPaolo Bonzini 
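/*
 * Example (illustrative sketch): checking what backs a given address.
 * memory_region_find() is assumed to take a reference on the returned
 * @mr, which the caller drops with memory_region_unref() when done.
 *
 *     MemoryRegionSection section;
 *
 *     section = memory_region_find(sysmem, addr, 4);
 *     if (section.mr) {
 *         if (memory_region_is_ram(section.mr)) {
 *             ... the range is RAM-backed ...
 *         }
 *         memory_region_unref(section.mr);
 *     }
 */
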
2503022c62cbSPaolo Bonzini /**
25049c1f8f44SPaolo Bonzini  * memory_global_dirty_log_sync: synchronize the dirty log for all memory
2505022c62cbSPaolo Bonzini  *
25069c1f8f44SPaolo Bonzini  * Synchronizes the dirty page log for all address spaces.
25071e493be5SGavin Shan  *
25081e493be5SGavin Shan  * @last_stage: whether this is the last stage of live migration
2509022c62cbSPaolo Bonzini  */
25101e493be5SGavin Shan void memory_global_dirty_log_sync(bool last_stage);
2511022c62cbSPaolo Bonzini 
2512022c62cbSPaolo Bonzini /**
25139458a9a1SPaolo Bonzini  * memory_global_after_dirty_log_sync: synchronize vCPUs after a dirty log sync
25149458a9a1SPaolo Bonzini  *
25159458a9a1SPaolo Bonzini  * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
25169458a9a1SPaolo Bonzini  * This function must be called after the dirty log bitmap is cleared, and
25179458a9a1SPaolo Bonzini  * before dirty guest memory pages are read.  If you are using
25189458a9a1SPaolo Bonzini  * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
25199458a9a1SPaolo Bonzini  * care of doing this.
25209458a9a1SPaolo Bonzini  */
25219458a9a1SPaolo Bonzini void memory_global_after_dirty_log_sync(void);
25229458a9a1SPaolo Bonzini 
25239458a9a1SPaolo Bonzini /**
2524022c62cbSPaolo Bonzini  * memory_region_transaction_begin: Start a transaction.
2525022c62cbSPaolo Bonzini  *
2526022c62cbSPaolo Bonzini  * During a transaction, changes will be accumulated and made visible
2527022c62cbSPaolo Bonzini  * only when the transaction ends (is committed).
2528022c62cbSPaolo Bonzini  */
2529022c62cbSPaolo Bonzini void memory_region_transaction_begin(void);
2530022c62cbSPaolo Bonzini 
2531022c62cbSPaolo Bonzini /**
2532022c62cbSPaolo Bonzini  * memory_region_transaction_commit: Commit a transaction and make changes
2533022c62cbSPaolo Bonzini  *                                   visible to the guest.
2534022c62cbSPaolo Bonzini  */
2535022c62cbSPaolo Bonzini void memory_region_transaction_commit(void);
2536022c62cbSPaolo Bonzini 
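/*
 * Example (illustrative sketch): batching topology updates so the
 * guest observes them atomically and the flat views are rebuilt only
 * once.  "s->bar_mr" and "new_base" are hypothetical.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(&s->bar_mr, false);
 *     memory_region_set_address(&s->bar_mr, new_base);
 *     memory_region_set_enabled(&s->bar_mr, true);
 *     memory_region_transaction_commit();
 */
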
2537022c62cbSPaolo Bonzini /**
2538022c62cbSPaolo Bonzini  * memory_listener_register: register callbacks to be called when memory
2539022c62cbSPaolo Bonzini  *                           sections are mapped or unmapped into an address
2540022c62cbSPaolo Bonzini  *                           space
2541022c62cbSPaolo Bonzini  *
2542022c62cbSPaolo Bonzini  * @listener: an object containing the callbacks to be called
2543022c62cbSPaolo Bonzini  * @filter: if non-%NULL, only regions in this address space will be observed
2544022c62cbSPaolo Bonzini  */
2545022c62cbSPaolo Bonzini void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
2546022c62cbSPaolo Bonzini 
2547022c62cbSPaolo Bonzini /**
2548022c62cbSPaolo Bonzini  * memory_listener_unregister: undo the effect of memory_listener_register()
2549022c62cbSPaolo Bonzini  *
2550022c62cbSPaolo Bonzini  * @listener: an object containing the callbacks to be removed
2551022c62cbSPaolo Bonzini  */
2552022c62cbSPaolo Bonzini void memory_listener_unregister(MemoryListener *listener);
2553022c62cbSPaolo Bonzini 
2554022c62cbSPaolo Bonzini /**
2555022c62cbSPaolo Bonzini  * memory_global_dirty_log_start: begin dirty logging for all regions
255663b41db4SHyman Huang(黄勇)  *
255763b41db4SHyman Huang(黄勇)  * @flags: purpose of starting dirty log, migration or dirty rate
2558639ec3fbSCédric Le Goater  * @errp: pointer to Error*, to store an error if it happens.
2559639ec3fbSCédric Le Goater  *
2560639ec3fbSCédric Le Goater  * Return: true on success, else false setting @errp with error.
2561022c62cbSPaolo Bonzini  */
2562639ec3fbSCédric Le Goater bool memory_global_dirty_log_start(unsigned int flags, Error **errp);
2563022c62cbSPaolo Bonzini 
2564022c62cbSPaolo Bonzini /**
2565022c62cbSPaolo Bonzini  * memory_global_dirty_log_stop: end dirty logging for all regions
256663b41db4SHyman Huang(黄勇)  *
256763b41db4SHyman Huang(黄勇)  * @flags: purpose of stopping dirty log, migration or dirty rate
2568022c62cbSPaolo Bonzini  */
256963b41db4SHyman Huang(黄勇) void memory_global_dirty_log_stop(unsigned int flags);
2570022c62cbSPaolo Bonzini 
25712261d393SPhilippe Mathieu-Daudé void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
2572022c62cbSPaolo Bonzini 
257394e273dbSPhilippe Mathieu-Daudé bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
257494e273dbSPhilippe Mathieu-Daudé                                 unsigned size, bool is_write,
257594e273dbSPhilippe Mathieu-Daudé                                 MemTxAttrs attrs);
257694e273dbSPhilippe Mathieu-Daudé 
2577022c62cbSPaolo Bonzini /**
25783b643495SPeter Maydell  * memory_region_dispatch_read: perform a read directly to the specified
25793b643495SPeter Maydell  * MemoryRegion.
25803b643495SPeter Maydell  *
25813b643495SPeter Maydell  * @mr: #MemoryRegion to access
25823b643495SPeter Maydell  * @addr: address within that region
25833b643495SPeter Maydell  * @pval: pointer to uint64_t which the data is written to
2584e67c9046STony Nguyen  * @op: size, sign, and endianness of the memory operation
25853b643495SPeter Maydell  * @attrs: memory transaction attributes to use for the access
25863b643495SPeter Maydell  */
25873b643495SPeter Maydell MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
25883b643495SPeter Maydell                                         hwaddr addr,
25893b643495SPeter Maydell                                         uint64_t *pval,
2590e67c9046STony Nguyen                                         MemOp op,
25913b643495SPeter Maydell                                         MemTxAttrs attrs);
25923b643495SPeter Maydell /**
25933b643495SPeter Maydell  * memory_region_dispatch_write: perform a write directly to the specified
25943b643495SPeter Maydell  * MemoryRegion.
25953b643495SPeter Maydell  *
25963b643495SPeter Maydell  * @mr: #MemoryRegion to access
25973b643495SPeter Maydell  * @addr: address within that region
25983b643495SPeter Maydell  * @data: data to write
2599e67c9046STony Nguyen  * @op: size, sign, and endianness of the memory operation
26003b643495SPeter Maydell  * @attrs: memory transaction attributes to use for the access
26013b643495SPeter Maydell  */
26023b643495SPeter Maydell MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
26033b643495SPeter Maydell                                          hwaddr addr,
26043b643495SPeter Maydell                                          uint64_t data,
2605e67c9046STony Nguyen                                          MemOp op,
26063b643495SPeter Maydell                                          MemTxAttrs attrs);
26073b643495SPeter Maydell 
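/*
 * Example (illustrative sketch): reading a 32-bit little-endian value
 * straight through a region's read callback, bypassing address space
 * dispatch.  "mr" and "addr" are assumed to be valid for the access.
 *
 *     uint64_t data;
 *     MemTxResult r;
 *
 *     r = memory_region_dispatch_read(mr, addr, &data,
 *                                     MO_32 | MO_LE,
 *                                     MEMTXATTRS_UNSPECIFIED);
 */
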
26083b643495SPeter Maydell /**
2609022c62cbSPaolo Bonzini  * address_space_init: initializes an address space
2610022c62cbSPaolo Bonzini  *
2611022c62cbSPaolo Bonzini  * @as: an uninitialized #AddressSpace
261267cc32ebSVeres Lajos  * @root: a #MemoryRegion that routes addresses for the address space
26137dca8043SAlexey Kardashevskiy  * @name: an address space name.  The name is only used for debugging
26147dca8043SAlexey Kardashevskiy  *        output.
2615022c62cbSPaolo Bonzini  */
26167dca8043SAlexey Kardashevskiy void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
2617022c62cbSPaolo Bonzini 
2618f0c02d15SPeter Crosthwaite /**
2619022c62cbSPaolo Bonzini  * address_space_destroy: destroy an address space
2620022c62cbSPaolo Bonzini  *
2621022c62cbSPaolo Bonzini  * Releases all resources associated with an address space.  After an address space
2622022c62cbSPaolo Bonzini  * is destroyed, its root memory region (given by address_space_init()) may be destroyed
2623022c62cbSPaolo Bonzini  * as well.
2624022c62cbSPaolo Bonzini  *
2625022c62cbSPaolo Bonzini  * @as: address space to be destroyed
2626022c62cbSPaolo Bonzini  */
2627022c62cbSPaolo Bonzini void address_space_destroy(AddressSpace *as);
2628022c62cbSPaolo Bonzini 
2629022c62cbSPaolo Bonzini /**
2630a2166410SGreg Kurz  * address_space_remove_listeners: unregister all listeners of an address space
2631a2166410SGreg Kurz  *
2632a2166410SGreg Kurz  * Removes all callbacks previously registered with memory_listener_register()
2633a2166410SGreg Kurz  * for @as.
2634a2166410SGreg Kurz  *
2635a2166410SGreg Kurz  * @as: an initialized #AddressSpace
2636a2166410SGreg Kurz  */
2637a2166410SGreg Kurz void address_space_remove_listeners(AddressSpace *as);
2638a2166410SGreg Kurz 
2639a2166410SGreg Kurz /**
2640022c62cbSPaolo Bonzini  * address_space_rw: read from or write to an address space.
2641022c62cbSPaolo Bonzini  *
26425c9eb028SPeter Maydell  * Return a MemTxResult indicating whether the operation succeeded
26435c9eb028SPeter Maydell  * or failed (e.g. unassigned memory, device rejected the transaction,
26445c9eb028SPeter Maydell  * IOMMU fault).
2645fd8aaa76SPaolo Bonzini  *
2646022c62cbSPaolo Bonzini  * @as: #AddressSpace to be accessed
2647022c62cbSPaolo Bonzini  * @addr: address within that address space
26485c9eb028SPeter Maydell  * @attrs: memory transaction attributes
2649022c62cbSPaolo Bonzini  * @buf: buffer with the data transferred
265057914ecbSJay Zhou  * @len: the number of bytes to read or write
2651022c62cbSPaolo Bonzini  * @is_write: indicates the transfer direction
2652022c62cbSPaolo Bonzini  */
26535c9eb028SPeter Maydell MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
2654daa3dda4SPhilippe Mathieu-Daudé                              MemTxAttrs attrs, void *buf,
26550c249ff7SLi Zhijian                              hwaddr len, bool is_write);
2656022c62cbSPaolo Bonzini 
2657022c62cbSPaolo Bonzini /**
2658022c62cbSPaolo Bonzini  * address_space_write: write to address space.
2659022c62cbSPaolo Bonzini  *
26605c9eb028SPeter Maydell  * Return a MemTxResult indicating whether the operation succeeded
26615c9eb028SPeter Maydell  * or failed (e.g. unassigned memory, device rejected the transaction,
26625c9eb028SPeter Maydell  * IOMMU fault).
2663022c62cbSPaolo Bonzini  *
2664022c62cbSPaolo Bonzini  * @as: #AddressSpace to be accessed
2665022c62cbSPaolo Bonzini  * @addr: address within that address space
26665c9eb028SPeter Maydell  * @attrs: memory transaction attributes
2667022c62cbSPaolo Bonzini  * @buf: buffer with the data transferred
266857914ecbSJay Zhou  * @len: the number of bytes to write
2669022c62cbSPaolo Bonzini  */
26705c9eb028SPeter Maydell MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
26715c9eb028SPeter Maydell                                 MemTxAttrs attrs,
2672daa3dda4SPhilippe Mathieu-Daudé                                 const void *buf, hwaddr len);
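/*
 * Sketch of a write followed by a read-back, checking both transaction
 * results (the guest-physical address and data are hypothetical):
 *
 *     uint8_t out[4] = { 0xde, 0xad, 0xbe, 0xef };
 *     uint8_t in[4];
 *
 *     if (address_space_write(as, 0x1000, MEMTXATTRS_UNSPECIFIED,
 *                             out, sizeof(out)) == MEMTX_OK &&
 *         address_space_read(as, 0x1000, MEMTXATTRS_UNSPECIFIED,
 *                            in, sizeof(in)) == MEMTX_OK) {
 *         ... both transactions succeeded ...
 *     }
 */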
2673fd8aaa76SPaolo Bonzini 
26743c8133f9SPeter Maydell /**
26753c8133f9SPeter Maydell  * address_space_write_rom: write to address space, including ROM.
26763c8133f9SPeter Maydell  *
26773c8133f9SPeter Maydell  * This function writes to the specified address space, but will
26783c8133f9SPeter Maydell  * write data to both ROM and RAM. This is used for non-guest
26793c8133f9SPeter Maydell  * writes like writes from the gdb debug stub or initial loading
26803c8133f9SPeter Maydell  * of ROM contents.
26813c8133f9SPeter Maydell  *
26823c8133f9SPeter Maydell  * Note that portions of the write which attempt to write data to
26833c8133f9SPeter Maydell  * a device will be silently ignored -- only real RAM and ROM will
26843c8133f9SPeter Maydell  * be written to.
26853c8133f9SPeter Maydell  *
26863c8133f9SPeter Maydell  * Return a MemTxResult indicating whether the operation succeeded
26873c8133f9SPeter Maydell  * or failed (e.g. unassigned memory, device rejected the transaction,
26883c8133f9SPeter Maydell  * IOMMU fault).
26893c8133f9SPeter Maydell  *
26903c8133f9SPeter Maydell  * @as: #AddressSpace to be accessed
26913c8133f9SPeter Maydell  * @addr: address within that address space
26923c8133f9SPeter Maydell  * @attrs: memory transaction attributes
26933c8133f9SPeter Maydell  * @buf: buffer with the data transferred
26943c8133f9SPeter Maydell  * @len: the number of bytes to write
26953c8133f9SPeter Maydell  */
26963c8133f9SPeter Maydell MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
26973c8133f9SPeter Maydell                                     MemTxAttrs attrs,
2698daa3dda4SPhilippe Mathieu-Daudé                                     const void *buf, hwaddr len);
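/*
 * Sketch of a loader-style ROM write, assuming the global system address
 * space address_space_memory from "exec/address-spaces.h"; the address and
 * blob are hypothetical:
 *
 *     address_space_write_rom(&address_space_memory, 0xfffc0000,
 *                             MEMTXATTRS_UNSPECIFIED, blob, blob_size);
 */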
26993c8133f9SPeter Maydell 
27003cc8f884SPaolo Bonzini /* address_space_ld*: load from an address space
270150013115SPeter Maydell  * address_space_st*: store to an address space
270250013115SPeter Maydell  *
270350013115SPeter Maydell  * These functions perform a load or store of the byte, word,
270450013115SPeter Maydell  * longword or quad to the specified address within the AddressSpace.
270550013115SPeter Maydell  * The _le suffixed functions treat the data as little endian;
270650013115SPeter Maydell  * _be indicates big endian; no suffix indicates "same endianness
270750013115SPeter Maydell  * as guest CPU".
270850013115SPeter Maydell  *
270950013115SPeter Maydell  * The "guest CPU endianness" accessors are deprecated for use outside
271050013115SPeter Maydell  * target-* code; devices should be CPU-agnostic and use either the LE
271150013115SPeter Maydell  * or the BE accessors.
271250013115SPeter Maydell  *
271350013115SPeter Maydell  * @as: #AddressSpace to be accessed
271450013115SPeter Maydell  * @addr: address within that address space
271550013115SPeter Maydell  * @val: data value, for stores
271650013115SPeter Maydell  * @attrs: memory transaction attributes
271750013115SPeter Maydell  * @result: location to write the success/failure of the transaction;
271850013115SPeter Maydell  *   if NULL, this information is discarded
271950013115SPeter Maydell  */
272050013115SPeter Maydell 
27214269c82bSPaolo Bonzini #define SUFFIX
27224269c82bSPaolo Bonzini #define ARG1         as
27234269c82bSPaolo Bonzini #define ARG1_DECL    AddressSpace *as
27240979ed01SPaolo Bonzini #include "exec/memory_ldst.h.inc"
27254269c82bSPaolo Bonzini 
27264269c82bSPaolo Bonzini #define SUFFIX
27274269c82bSPaolo Bonzini #define ARG1         as
27284269c82bSPaolo Bonzini #define ARG1_DECL    AddressSpace *as
27290979ed01SPaolo Bonzini #include "exec/memory_ldst_phys.h.inc"
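/*
 * The two includes above generate fixed-size accessors such as
 * address_space_ldl_le() and address_space_stl_le().  A read-modify-write
 * sketch (the address is hypothetical, and the generated signatures are
 * assumed to take the usual (as, addr, [val,] attrs, result) arguments):
 *
 *     MemTxResult res;
 *     uint32_t v = address_space_ldl_le(as, 0x2000,
 *                                       MEMTXATTRS_UNSPECIFIED, &res);
 *     if (res == MEMTX_OK) {
 *         address_space_stl_le(as, 0x2000, v | 1,
 *                              MEMTXATTRS_UNSPECIFIED, &res);
 *     }
 */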
27300ce265ffSPaolo Bonzini 
27311f4e496eSPaolo Bonzini struct MemoryRegionCache {
27327246c4ccSRoman Kiryanov     uint8_t *ptr;
27331f4e496eSPaolo Bonzini     hwaddr xlat;
27341f4e496eSPaolo Bonzini     hwaddr len;
273548564041SPaolo Bonzini     FlatView *fv;
273648564041SPaolo Bonzini     MemoryRegionSection mrs;
273748564041SPaolo Bonzini     bool is_write;
27381f4e496eSPaolo Bonzini };
27391f4e496eSPaolo Bonzini 
27404269c82bSPaolo Bonzini /* address_space_ld*_cached: load from a cached #MemoryRegion
27414269c82bSPaolo Bonzini  * address_space_st*_cached: store into a cached #MemoryRegion
27424269c82bSPaolo Bonzini  *
27434269c82bSPaolo Bonzini  * These functions perform a load or store of the byte, word,
27444269c82bSPaolo Bonzini  * longword or quad to the specified address.  The address is
27454269c82bSPaolo Bonzini  * a physical address in the AddressSpace, but it must lie within
27464269c82bSPaolo Bonzini  * a #MemoryRegion that was mapped with address_space_cache_init.
27474269c82bSPaolo Bonzini  *
27484269c82bSPaolo Bonzini  * The _le suffixed functions treat the data as little endian;
27494269c82bSPaolo Bonzini  * _be indicates big endian; no suffix indicates "same endianness
27504269c82bSPaolo Bonzini  * as guest CPU".
27514269c82bSPaolo Bonzini  *
27524269c82bSPaolo Bonzini  * The "guest CPU endianness" accessors are deprecated for use outside
27534269c82bSPaolo Bonzini  * target-* code; devices should be CPU-agnostic and use either the LE
27544269c82bSPaolo Bonzini  * or the BE accessors.
27554269c82bSPaolo Bonzini  *
27564269c82bSPaolo Bonzini  * @cache: previously initialized #MemoryRegionCache to be accessed
27574269c82bSPaolo Bonzini  * @addr: address within the address space
27584269c82bSPaolo Bonzini  * @val: data value, for stores
27594269c82bSPaolo Bonzini  * @attrs: memory transaction attributes
27604269c82bSPaolo Bonzini  * @result: location to write the success/failure of the transaction;
27614269c82bSPaolo Bonzini  *   if NULL, this information is discarded
27624269c82bSPaolo Bonzini  */
27634269c82bSPaolo Bonzini 
276448564041SPaolo Bonzini #define SUFFIX       _cached_slow
27654269c82bSPaolo Bonzini #define ARG1         cache
27664269c82bSPaolo Bonzini #define ARG1_DECL    MemoryRegionCache *cache
27670979ed01SPaolo Bonzini #include "exec/memory_ldst.h.inc"
27684269c82bSPaolo Bonzini 
276948564041SPaolo Bonzini /* Inline fast path for direct RAM access.  */
277048564041SPaolo Bonzini static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
277148564041SPaolo Bonzini     hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
277248564041SPaolo Bonzini {
277348564041SPaolo Bonzini     assert(addr < cache->len);
277448564041SPaolo Bonzini     if (likely(cache->ptr)) {
277548564041SPaolo Bonzini         return ldub_p(cache->ptr + addr);
277648564041SPaolo Bonzini     } else {
277748564041SPaolo Bonzini         return address_space_ldub_cached_slow(cache, addr, attrs, result);
277848564041SPaolo Bonzini     }
277948564041SPaolo Bonzini }
278048564041SPaolo Bonzini 
278148564041SPaolo Bonzini static inline void address_space_stb_cached(MemoryRegionCache *cache,
27824121f4b3SPhilippe Mathieu-Daudé     hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
278348564041SPaolo Bonzini {
278448564041SPaolo Bonzini     assert(addr < cache->len);
278548564041SPaolo Bonzini     if (likely(cache->ptr)) {
278648564041SPaolo Bonzini         stb_p(cache->ptr + addr, val);
278748564041SPaolo Bonzini     } else {
278848564041SPaolo Bonzini         address_space_stb_cached_slow(cache, addr, val, attrs, result);
278948564041SPaolo Bonzini     }
279048564041SPaolo Bonzini }
279148564041SPaolo Bonzini 
279248564041SPaolo Bonzini #define ENDIANNESS   _le
27930979ed01SPaolo Bonzini #include "exec/memory_ldst_cached.h.inc"
279448564041SPaolo Bonzini 
279548564041SPaolo Bonzini #define ENDIANNESS   _be
27960979ed01SPaolo Bonzini #include "exec/memory_ldst_cached.h.inc"
279748564041SPaolo Bonzini 
27984269c82bSPaolo Bonzini #define SUFFIX       _cached
27994269c82bSPaolo Bonzini #define ARG1         cache
28004269c82bSPaolo Bonzini #define ARG1_DECL    MemoryRegionCache *cache
28010979ed01SPaolo Bonzini #include "exec/memory_ldst_phys.h.inc"
28024269c82bSPaolo Bonzini 
28031f4e496eSPaolo Bonzini /* address_space_cache_init: prepare for repeated access to a physical
28041f4e496eSPaolo Bonzini  * memory region
28051f4e496eSPaolo Bonzini  *
28061f4e496eSPaolo Bonzini  * @cache: #MemoryRegionCache to be filled
28071f4e496eSPaolo Bonzini  * @as: #AddressSpace to be accessed
28081f4e496eSPaolo Bonzini  * @addr: address within that address space
28091f4e496eSPaolo Bonzini  * @len: length of buffer
28101f4e496eSPaolo Bonzini  * @is_write: indicates the transfer direction
28111f4e496eSPaolo Bonzini  *
28121f4e496eSPaolo Bonzini  * Will only work with RAM, and may map a subset of the requested range by
28131f4e496eSPaolo Bonzini  * returning a value that is less than @len.  On failure, returns a negative
28141f4e496eSPaolo Bonzini  * errno value.
28151f4e496eSPaolo Bonzini  *
28161f4e496eSPaolo Bonzini  * Because it only works with RAM, this function can be used for
28171f4e496eSPaolo Bonzini  * read-modify-write operations.  In this case, is_write should be %true.
28181f4e496eSPaolo Bonzini  *
28191f4e496eSPaolo Bonzini  * Note that addresses passed to the address_space_*_cached functions
28201f4e496eSPaolo Bonzini  * are relative to @addr.
28211f4e496eSPaolo Bonzini  */
28221f4e496eSPaolo Bonzini int64_t address_space_cache_init(MemoryRegionCache *cache,
28231f4e496eSPaolo Bonzini                                  AddressSpace *as,
28241f4e496eSPaolo Bonzini                                  hwaddr addr,
28251f4e496eSPaolo Bonzini                                  hwaddr len,
28261f4e496eSPaolo Bonzini                                  bool is_write);
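/*
 * A full cache lifecycle sketch for a read-modify-write of a 16-byte
 * descriptor (the guest-physical address is hypothetical):
 *
 *     MemoryRegionCache cache;
 *     int64_t mapped = address_space_cache_init(&cache, as, 0x3000, 16, true);
 *
 *     if (mapped == 16) {
 *         uint8_t desc[16];
 *
 *         address_space_read_cached(&cache, 0, desc, sizeof(desc));
 *         desc[0] |= 1;
 *         address_space_write_cached(&cache, 0, desc, sizeof(desc));
 *         address_space_cache_invalidate(&cache, 0, sizeof(desc));
 *         address_space_cache_destroy(&cache);
 *     }
 */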
28271f4e496eSPaolo Bonzini 
28281f4e496eSPaolo Bonzini /**
282943d63769SIlya Maximets  * address_space_cache_init_empty: Initialize empty #MemoryRegionCache
283043d63769SIlya Maximets  *
283143d63769SIlya Maximets  * @cache: The #MemoryRegionCache to operate on.
283243d63769SIlya Maximets  *
283343d63769SIlya Maximets  * Initializes a #MemoryRegionCache structure without a memory region attached.
283443d63769SIlya Maximets  * A cache initialized this way can only be safely destroyed, never used.
283543d63769SIlya Maximets  */
283643d63769SIlya Maximets static inline void address_space_cache_init_empty(MemoryRegionCache *cache)
283743d63769SIlya Maximets {
283843d63769SIlya Maximets     cache->mrs.mr = NULL;
2839b15c18c5SIlya Maximets     /* There is no real need to initialize fv, but it makes Coverity happy. */
2840b15c18c5SIlya Maximets     cache->fv = NULL;
284143d63769SIlya Maximets }
284243d63769SIlya Maximets 
284343d63769SIlya Maximets /**
28441f4e496eSPaolo Bonzini  * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
28451f4e496eSPaolo Bonzini  *
28461f4e496eSPaolo Bonzini  * @cache: The #MemoryRegionCache to operate on.
28471f4e496eSPaolo Bonzini  * @addr: The first physical address that was written, relative to the
28481f4e496eSPaolo Bonzini  * address that was passed to @address_space_cache_init.
28491f4e496eSPaolo Bonzini  * @access_len: The number of bytes that were written starting at @addr.
28501f4e496eSPaolo Bonzini  */
28511f4e496eSPaolo Bonzini void address_space_cache_invalidate(MemoryRegionCache *cache,
28521f4e496eSPaolo Bonzini                                     hwaddr addr,
28531f4e496eSPaolo Bonzini                                     hwaddr access_len);
28541f4e496eSPaolo Bonzini 
28551f4e496eSPaolo Bonzini /**
28561f4e496eSPaolo Bonzini  * address_space_cache_destroy: free a #MemoryRegionCache
28571f4e496eSPaolo Bonzini  *
28581f4e496eSPaolo Bonzini  * @cache: The #MemoryRegionCache whose memory should be released.
28591f4e496eSPaolo Bonzini  */
28601f4e496eSPaolo Bonzini void address_space_cache_destroy(MemoryRegionCache *cache);
28611f4e496eSPaolo Bonzini 
2862052c8fa9SJason Wang /* address_space_get_iotlb_entry: translate an address into an IOTLB
2863052c8fa9SJason Wang  * entry. Should be called from an RCU critical section.
2864052c8fa9SJason Wang  */
2865052c8fa9SJason Wang IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
28667446eb07SPeter Maydell                                             bool is_write, MemTxAttrs attrs);
28671f4e496eSPaolo Bonzini 
2868149f54b5SPaolo Bonzini /* flatview_translate: translate an address range in an address space into a
286941063e1eSPaolo Bonzini  * #MemoryRegion and an address range within that region.  Must be called
287041063e1eSPaolo Bonzini  * from an RCU critical section, so that the last reference to the returned
287141063e1eSPaolo Bonzini  * region cannot disappear before the caller is done with it.
2872149f54b5SPaolo Bonzini  *
287357914ecbSJay Zhou  * @fv: #FlatView to be accessed
2874149f54b5SPaolo Bonzini  * @addr: address within that address space
2875149f54b5SPaolo Bonzini  * @xlat: pointer to address within the returned memory region section's
2876149f54b5SPaolo Bonzini  * #MemoryRegion.
2877149f54b5SPaolo Bonzini  * @len: pointer to length
2878149f54b5SPaolo Bonzini  * @is_write: indicates the transfer direction
2879bc6b1cecSPeter Maydell  * @attrs: memory attributes
2880149f54b5SPaolo Bonzini  */
288116620684SAlexey Kardashevskiy MemoryRegion *flatview_translate(FlatView *fv,
288216620684SAlexey Kardashevskiy                                  hwaddr addr, hwaddr *xlat,
2883efa99a2fSPeter Maydell                                  hwaddr *len, bool is_write,
2884efa99a2fSPeter Maydell                                  MemTxAttrs attrs);
288516620684SAlexey Kardashevskiy 
288616620684SAlexey Kardashevskiy static inline MemoryRegion *address_space_translate(AddressSpace *as,
288716620684SAlexey Kardashevskiy                                                     hwaddr addr, hwaddr *xlat,
2888bc6b1cecSPeter Maydell                                                     hwaddr *len, bool is_write,
2889bc6b1cecSPeter Maydell                                                     MemTxAttrs attrs)
289016620684SAlexey Kardashevskiy {
289116620684SAlexey Kardashevskiy     return flatview_translate(address_space_to_flatview(as),
2892efa99a2fSPeter Maydell                               addr, xlat, len, is_write, attrs);
289316620684SAlexey Kardashevskiy }
2894149f54b5SPaolo Bonzini 
289551644ab7SPaolo Bonzini /* address_space_access_valid: check for validity of accessing an address
289651644ab7SPaolo Bonzini  * space range
289751644ab7SPaolo Bonzini  *
289830951157SAvi Kivity  * Check whether memory is assigned to the given address space range, and
289930951157SAvi Kivity  * access is permitted by any IOMMU regions that are active for the address
290030951157SAvi Kivity  * space.
290151644ab7SPaolo Bonzini  *
290251644ab7SPaolo Bonzini  * For now, addr and len should be aligned to a page size.  This limitation
290351644ab7SPaolo Bonzini  * will be lifted in the future.
290451644ab7SPaolo Bonzini  *
290551644ab7SPaolo Bonzini  * @as: #AddressSpace to be accessed
290651644ab7SPaolo Bonzini  * @addr: address within that address space
290751644ab7SPaolo Bonzini  * @len: length of the area to be checked
290851644ab7SPaolo Bonzini  * @is_write: indicates the transfer direction
2909fddffa42SPeter Maydell  * @attrs: memory attributes
291051644ab7SPaolo Bonzini  */
29110c249ff7SLi Zhijian bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
2912fddffa42SPeter Maydell                                 bool is_write, MemTxAttrs attrs);
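/*
 * Sketch of validating a page-sized range before issuing a DMA write (the
 * address is hypothetical):
 *
 *     if (address_space_access_valid(as, 0x4000, 4096, true,
 *                                    MEMTXATTRS_UNSPECIFIED)) {
 *         ... the range is backed and writable at this point ...
 *     }
 */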
291351644ab7SPaolo Bonzini 
2914022c62cbSPaolo Bonzini /* address_space_map: map a physical memory region into a host virtual address
2915022c62cbSPaolo Bonzini  *
2916022c62cbSPaolo Bonzini  * May map a subset of the requested range, given by and returned in @plen.
291777f55eacSPrasad J Pandit  * May return %NULL and set *@plen to zero if the resources needed to perform
291877f55eacSPrasad J Pandit  * the mapping are exhausted.
2919022c62cbSPaolo Bonzini  * Use only for reads OR writes - not for read-modify-write operations.
29205c627197SMattias Nissler  * Use address_space_register_map_client() to know when retrying the map
29215c627197SMattias Nissler  * operation is likely to succeed.
2922022c62cbSPaolo Bonzini  *
2923022c62cbSPaolo Bonzini  * @as: #AddressSpace to be accessed
2924022c62cbSPaolo Bonzini  * @addr: address within that address space
2925022c62cbSPaolo Bonzini  * @plen: pointer to length of buffer; updated on return
2926022c62cbSPaolo Bonzini  * @is_write: indicates the transfer direction
2927f26404fbSPeter Maydell  * @attrs: memory attributes
2928022c62cbSPaolo Bonzini  */
2929022c62cbSPaolo Bonzini void *address_space_map(AddressSpace *as, hwaddr addr,
2930f26404fbSPeter Maydell                         hwaddr *plen, bool is_write, MemTxAttrs attrs);
2931022c62cbSPaolo Bonzini 
2932022c62cbSPaolo Bonzini /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
2933022c62cbSPaolo Bonzini  *
2934022c62cbSPaolo Bonzini  * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
2935022c62cbSPaolo Bonzini  * the amount of memory that was actually read or written by the caller.
2936022c62cbSPaolo Bonzini  *
2937022c62cbSPaolo Bonzini  * @as: #AddressSpace used
293857914ecbSJay Zhou  * @buffer: host pointer as returned by address_space_map()
2939022c62cbSPaolo Bonzini  * @len: buffer length as returned by address_space_map()
2940022c62cbSPaolo Bonzini  * @access_len: amount of data actually transferred
2941022c62cbSPaolo Bonzini  * @is_write: indicates the transfer direction
2942022c62cbSPaolo Bonzini  */
2943022c62cbSPaolo Bonzini void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2944ae5883abSPhilippe Mathieu-Daudé                          bool is_write, hwaddr access_len);
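/*
 * The canonical map/use/unmap pattern for zero-copy access.  This sketch
 * ignores partial mappings; a real caller must retry or fall back when
 * *plen comes back smaller than requested:
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(as, addr, &plen, true,
 *                                 MEMTXATTRS_UNSPECIFIED);
 *
 *     if (p) {
 *         memset(p, 0, plen);        <- operate directly on guest RAM
 *         address_space_unmap(as, p, plen, true, plen);
 *     }
 */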
2945022c62cbSPaolo Bonzini 
29465c627197SMattias Nissler /*
29475c627197SMattias Nissler  * address_space_register_map_client: Register a callback to invoke when
29485c627197SMattias Nissler  * resources for address_space_map() are available again.
29495c627197SMattias Nissler  *
29505c627197SMattias Nissler  * address_space_map may fail when there are not enough resources available,
29515c627197SMattias Nissler  * such as when bounce buffer memory would exceed the limit. The callback can
29525c627197SMattias Nissler  * be used to retry the address_space_map operation. Note that the callback
29535c627197SMattias Nissler  * gets automatically removed after firing.
29545c627197SMattias Nissler  *
29555c627197SMattias Nissler  * @as: #AddressSpace to be accessed
29565c627197SMattias Nissler  * @bh: callback to invoke when address_space_map() retry is appropriate
29575c627197SMattias Nissler  */
29585c627197SMattias Nissler void address_space_register_map_client(AddressSpace *as, QEMUBH *bh);
29595c627197SMattias Nissler 
29605c627197SMattias Nissler /*
29615c627197SMattias Nissler  * address_space_unregister_map_client: Unregister a callback that has
29625c627197SMattias Nissler  * previously been registered and not fired yet.
29635c627197SMattias Nissler  *
29645c627197SMattias Nissler  * @as: #AddressSpace to be accessed
29655c627197SMattias Nissler  * @bh: callback to unregister
29665c627197SMattias Nissler  */
29675c627197SMattias Nissler void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh);
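/*
 * Retry sketch: if address_space_map() fails because bounce-buffer resources
 * are exhausted, queue a callback and try again when it fires ("retry_fn"
 * and "s" are hypothetical; qemu_bh_new() is the usual bottom-half helper):
 *
 *     s->bh = qemu_bh_new(retry_fn, s);
 *     address_space_register_map_client(as, s->bh);
 */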
2968022c62cbSPaolo Bonzini 
2969a203ac70SPaolo Bonzini /* Internal functions, part of the implementation of address_space_read.  */
2970b2a44fcaSPaolo Bonzini MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2971daa3dda4SPhilippe Mathieu-Daudé                                     MemTxAttrs attrs, void *buf, hwaddr len);
297216620684SAlexey Kardashevskiy MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
2973a152be43SPhilippe Mathieu-Daudé                                    MemTxAttrs attrs, void *buf,
29740c249ff7SLi Zhijian                                    hwaddr len, hwaddr addr1, hwaddr l,
2975a203ac70SPaolo Bonzini                                    MemoryRegion *mr);
29760878d0e1SPaolo Bonzini void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
29773cc8f884SPaolo Bonzini 
297848564041SPaolo Bonzini /* Internal functions, part of the implementation of address_space_read_cached
297948564041SPaolo Bonzini  * and address_space_write_cached.  */
298038df19faSPhilippe Mathieu-Daudé MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
29810c249ff7SLi Zhijian                                            hwaddr addr, void *buf, hwaddr len);
298238df19faSPhilippe Mathieu-Daudé MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
298338df19faSPhilippe Mathieu-Daudé                                             hwaddr addr, const void *buf,
298438df19faSPhilippe Mathieu-Daudé                                             hwaddr len);
298548564041SPaolo Bonzini 
29863123f93dSJagannathan Raman int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr);
29873123f93dSJagannathan Raman bool prepare_mmio_access(MemoryRegion *mr);
29883123f93dSJagannathan Raman 
29893cc8f884SPaolo Bonzini static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
29903cc8f884SPaolo Bonzini {
29913cc8f884SPaolo Bonzini     if (is_write) {
2992d489ae4aSAlexander Duyck         return memory_region_is_ram(mr) && !mr->readonly &&
2993d489ae4aSAlexander Duyck                !mr->rom_device && !memory_region_is_ram_device(mr);
29943cc8f884SPaolo Bonzini     } else {
29954a2e242bSAlex Williamson         return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
29964a2e242bSAlex Williamson                memory_region_is_romd(mr);
29973cc8f884SPaolo Bonzini     }
29983cc8f884SPaolo Bonzini }
29993cc8f884SPaolo Bonzini 
30003cc8f884SPaolo Bonzini /**
30013cc8f884SPaolo Bonzini  * address_space_read: read from an address space.
30023cc8f884SPaolo Bonzini  *
30033cc8f884SPaolo Bonzini  * Return a MemTxResult indicating whether the operation succeeded
30043cc8f884SPaolo Bonzini  * or failed (e.g. unassigned memory, device rejected the transaction,
3005b2a44fcaSPaolo Bonzini  * IOMMU fault).  Called within RCU critical section.
30063cc8f884SPaolo Bonzini  *
3007b2a44fcaSPaolo Bonzini  * @as: #AddressSpace to be accessed
30083cc8f884SPaolo Bonzini  * @addr: address within that address space
30093cc8f884SPaolo Bonzini  * @attrs: memory transaction attributes
30103cc8f884SPaolo Bonzini  * @buf: buffer with the data transferred
30115d248213SPaolo Bonzini  * @len: length of the data transferred
30123cc8f884SPaolo Bonzini  */
30133cc8f884SPaolo Bonzini static inline __attribute__((__always_inline__))
3014b2a44fcaSPaolo Bonzini MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
3015daa3dda4SPhilippe Mathieu-Daudé                                MemTxAttrs attrs, void *buf,
30160c249ff7SLi Zhijian                                hwaddr len)
30173cc8f884SPaolo Bonzini {
30183cc8f884SPaolo Bonzini     MemTxResult result = MEMTX_OK;
30193cc8f884SPaolo Bonzini     hwaddr l, addr1;
30203cc8f884SPaolo Bonzini     void *ptr;
30213cc8f884SPaolo Bonzini     MemoryRegion *mr;
3022b2a44fcaSPaolo Bonzini     FlatView *fv;
30233cc8f884SPaolo Bonzini 
30243cc8f884SPaolo Bonzini     if (__builtin_constant_p(len)) {
30253cc8f884SPaolo Bonzini         if (len) {
3026293a733dSPaolo Bonzini             RCU_READ_LOCK_GUARD();
3027b2a44fcaSPaolo Bonzini             fv = address_space_to_flatview(as);
30283cc8f884SPaolo Bonzini             l = len;
3029efa99a2fSPeter Maydell             mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
30303cc8f884SPaolo Bonzini             if (len == l && memory_access_is_direct(mr, false)) {
30310878d0e1SPaolo Bonzini                 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
30323cc8f884SPaolo Bonzini                 memcpy(buf, ptr, len);
30333cc8f884SPaolo Bonzini             } else {
303416620684SAlexey Kardashevskiy                 result = flatview_read_continue(fv, addr, attrs, buf, len,
30353cc8f884SPaolo Bonzini                                                 addr1, l, mr);
30363cc8f884SPaolo Bonzini             }
30373cc8f884SPaolo Bonzini         }
30383cc8f884SPaolo Bonzini     } else {
3039b2a44fcaSPaolo Bonzini         result = address_space_read_full(as, addr, attrs, buf, len);
30403cc8f884SPaolo Bonzini     }
30413cc8f884SPaolo Bonzini     return result;
30423cc8f884SPaolo Bonzini }
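/*
 * Note on the fast path above: the inlined path is only compiled in when
 * @len is a compile-time constant, so fixed-size reads benefit from spelling
 * the length literally (sketch; the address is hypothetical):
 *
 *     uint32_t hdr;
 *
 *     address_space_read(as, 0x5000, MEMTXATTRS_UNSPECIFIED, &hdr, 4);
 */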
3043a203ac70SPaolo Bonzini 
30441f4e496eSPaolo Bonzini /**
30451f4e496eSPaolo Bonzini  * address_space_read_cached: read from a cached RAM region
30461f4e496eSPaolo Bonzini  *
30471f4e496eSPaolo Bonzini  * @cache: Cached region to be addressed
30481f4e496eSPaolo Bonzini  * @addr: address relative to the base of the RAM region
30491f4e496eSPaolo Bonzini  * @buf: buffer with the data transferred
30501f4e496eSPaolo Bonzini  * @len: length of the data transferred
30511f4e496eSPaolo Bonzini  */
305238df19faSPhilippe Mathieu-Daudé static inline MemTxResult
30531f4e496eSPaolo Bonzini address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
30540c249ff7SLi Zhijian                           void *buf, hwaddr len)
30551f4e496eSPaolo Bonzini {
30561f4e496eSPaolo Bonzini     assert(addr < cache->len && len <= cache->len - addr);
3057fc1c8344SAlexander Bulekov     fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
305848564041SPaolo Bonzini     if (likely(cache->ptr)) {
305948564041SPaolo Bonzini         memcpy(buf, cache->ptr + addr, len);
306038df19faSPhilippe Mathieu-Daudé         return MEMTX_OK;
306148564041SPaolo Bonzini     } else {
306238df19faSPhilippe Mathieu-Daudé         return address_space_read_cached_slow(cache, addr, buf, len);
306348564041SPaolo Bonzini     }
30641f4e496eSPaolo Bonzini }
30651f4e496eSPaolo Bonzini 
30661f4e496eSPaolo Bonzini /**
30671f4e496eSPaolo Bonzini  * address_space_write_cached: write to a cached RAM region
30681f4e496eSPaolo Bonzini  *
30691f4e496eSPaolo Bonzini  * @cache: Cached region to be addressed
30701f4e496eSPaolo Bonzini  * @addr: address relative to the base of the RAM region
30711f4e496eSPaolo Bonzini  * @buf: buffer with the data transferred
30721f4e496eSPaolo Bonzini  * @len: length of the data transferred
30731f4e496eSPaolo Bonzini  */
307438df19faSPhilippe Mathieu-Daudé static inline MemTxResult
30751f4e496eSPaolo Bonzini address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
3076daa3dda4SPhilippe Mathieu-Daudé                            const void *buf, hwaddr len)
30771f4e496eSPaolo Bonzini {
30781f4e496eSPaolo Bonzini     assert(addr < cache->len && len <= cache->len - addr);
307948564041SPaolo Bonzini     if (likely(cache->ptr)) {
308048564041SPaolo Bonzini         memcpy(cache->ptr + addr, buf, len);
308138df19faSPhilippe Mathieu-Daudé         return MEMTX_OK;
308248564041SPaolo Bonzini     } else {
308338df19faSPhilippe Mathieu-Daudé         return address_space_write_cached_slow(cache, addr, buf, len);
308448564041SPaolo Bonzini     }
30851f4e496eSPaolo Bonzini }
30861f4e496eSPaolo Bonzini 
308775f01c68SPhilippe Mathieu-Daudé /**
308875f01c68SPhilippe Mathieu-Daudé  * address_space_set: Fill address space with a constant byte.
308975f01c68SPhilippe Mathieu-Daudé  *
309075f01c68SPhilippe Mathieu-Daudé  * Return a MemTxResult indicating whether the operation succeeded
309175f01c68SPhilippe Mathieu-Daudé  * or failed (e.g. unassigned memory, device rejected the transaction,
309275f01c68SPhilippe Mathieu-Daudé  * IOMMU fault).
309375f01c68SPhilippe Mathieu-Daudé  *
309475f01c68SPhilippe Mathieu-Daudé  * @as: #AddressSpace to be accessed
309575f01c68SPhilippe Mathieu-Daudé  * @addr: address within that address space
309675f01c68SPhilippe Mathieu-Daudé  * @c: constant byte to fill the memory
309775f01c68SPhilippe Mathieu-Daudé  * @len: the number of bytes to fill with the constant byte
309875f01c68SPhilippe Mathieu-Daudé  * @attrs: memory transaction attributes
309975f01c68SPhilippe Mathieu-Daudé  */
310075f01c68SPhilippe Mathieu-Daudé MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
310175f01c68SPhilippe Mathieu-Daudé                               uint8_t c, hwaddr len, MemTxAttrs attrs);
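/*
 * Zero-fill sketch (the range is hypothetical):
 *
 *     address_space_set(as, 0x6000, 0, 4096, MEMTXATTRS_UNSPECIFIED);
 */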
310275f01c68SPhilippe Mathieu-Daudé 
31037d7a21baSPhilippe Mathieu-Daudé #ifdef COMPILING_PER_TARGET
3104d5d680caSTony Nguyen /* enum device_endian to MemOp.  */
31057a3df11cSPaolo Bonzini static inline MemOp devend_memop(enum device_endian end)
31067a3df11cSPaolo Bonzini {
31077a3df11cSPaolo Bonzini     QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
31087a3df11cSPaolo Bonzini                       DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);
31097a3df11cSPaolo Bonzini 
3110ee3eb3a7SMarc-André Lureau #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
31117a3df11cSPaolo Bonzini     /* Swap if non-host endianness or native (target) endianness */
31127a3df11cSPaolo Bonzini     return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
31137a3df11cSPaolo Bonzini #else
31147a3df11cSPaolo Bonzini     const int non_host_endianness =
31157a3df11cSPaolo Bonzini         DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN;
31167a3df11cSPaolo Bonzini 
31177a3df11cSPaolo Bonzini     /* In this case, native (target) endianness needs no swap.  */
31187a3df11cSPaolo Bonzini     return (end == non_host_endianness) ? MO_BSWAP : 0;
31197a3df11cSPaolo Bonzini #endif
31207a3df11cSPaolo Bonzini }
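/*
 * Sketch of the usual pairing with size_memop() to build the MemOp for a
 * 4-byte access that honours a device's declared endianness ("mr" and the
 * offset are hypothetical):
 *
 *     uint64_t val;
 *
 *     memory_region_dispatch_read(mr, 0x10, &val,
 *                                 size_memop(4) | devend_memop(DEVICE_LITTLE_ENDIAN),
 *                                 MEMTXATTRS_UNSPECIFIED);
 */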
31217d7a21baSPhilippe Mathieu-Daudé #endif /* COMPILING_PER_TARGET */
3122d5d680caSTony Nguyen 
3123d24f31dbSDavid Hildenbrand /*
3124d24f31dbSDavid Hildenbrand  * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
3125d24f31dbSDavid Hildenbrand  * to manage the actual amount of memory consumed by the VM (then, the memory
3126d24f31dbSDavid Hildenbrand  * provided by RAM blocks might be bigger than the desired memory consumption).
3127d24f31dbSDavid Hildenbrand  * This *must* be set if:
3128d24f31dbSDavid Hildenbrand  * - Discarding parts of a RAM block does not result in the change being
3129d24f31dbSDavid Hildenbrand  *   reflected in the VM and the pages getting freed.
3130d24f31dbSDavid Hildenbrand  * - All memory in RAM blocks is pinned or duplicated, blindly invalidating
3131d24f31dbSDavid Hildenbrand  *   any previous discards.
3132d24f31dbSDavid Hildenbrand  * - Discarding parts of a RAM block will result in integrity issues (e.g.,
3133d24f31dbSDavid Hildenbrand  *   encrypted VMs).
3134d24f31dbSDavid Hildenbrand  * Technologies that only temporarily pin the current working set of a
3135d24f31dbSDavid Hildenbrand  * driver are fine, because we don't expect such pages to be discarded
3136d24f31dbSDavid Hildenbrand  * (esp. based on guest action like balloon inflation).
3137d24f31dbSDavid Hildenbrand  *
3138d24f31dbSDavid Hildenbrand  * This is *not* to be used to protect from concurrent discards (esp.,
3139d24f31dbSDavid Hildenbrand  * postcopy).
3140d24f31dbSDavid Hildenbrand  *
3141d24f31dbSDavid Hildenbrand  * Returns 0 if successful. Returns -EBUSY if a technology that relies on
3142d24f31dbSDavid Hildenbrand  * discards to work reliably is active.
3143d24f31dbSDavid Hildenbrand  */
3144d24f31dbSDavid Hildenbrand int ram_block_discard_disable(bool state);
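/*
 * Pairing sketch for a device that pins all guest memory, e.g. at realize
 * and unrealize time (the error path is illustrative):
 *
 *     if (ram_block_discard_disable(true)) {
 *         ... fail realization: a discard-based technology is active ...
 *     }
 *     ...
 *     ram_block_discard_disable(false);    <- on unrealize
 */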
3145d24f31dbSDavid Hildenbrand 
3146d24f31dbSDavid Hildenbrand /*
31477e6d32ebSDavid Hildenbrand  * See ram_block_discard_disable(): only disable uncoordinated discards,
31487e6d32ebSDavid Hildenbrand  * keeping coordinated discards (via the RamDiscardManager) enabled.
31497e6d32ebSDavid Hildenbrand  */
31507e6d32ebSDavid Hildenbrand int ram_block_uncoordinated_discard_disable(bool state);
31517e6d32ebSDavid Hildenbrand 
31527e6d32ebSDavid Hildenbrand /*
3153d24f31dbSDavid Hildenbrand  * Inhibit technologies that disable discarding of pages in RAM blocks.
3154d24f31dbSDavid Hildenbrand  *
3155d24f31dbSDavid Hildenbrand  * Returns 0 if successful. Returns -EBUSY if discards are already set to
3156d24f31dbSDavid Hildenbrand  * broken.
3157d24f31dbSDavid Hildenbrand  */
3158d24f31dbSDavid Hildenbrand int ram_block_discard_require(bool state);
3159d24f31dbSDavid Hildenbrand 
3160d24f31dbSDavid Hildenbrand /*
31617e6d32ebSDavid Hildenbrand  * See ram_block_discard_require(): only inhibit technologies that disable
31622432e063SManos Pitsidianakis  * uncoordinated discarding of pages in RAM blocks, allowing co-existence with
31637e6d32ebSDavid Hildenbrand  * technologies that only inhibit uncoordinated discards (via the
31647e6d32ebSDavid Hildenbrand  * RamDiscardManager).
31657e6d32ebSDavid Hildenbrand  */
31667e6d32ebSDavid Hildenbrand int ram_block_coordinated_discard_require(bool state);
31677e6d32ebSDavid Hildenbrand 
31687e6d32ebSDavid Hildenbrand /*
31697e6d32ebSDavid Hildenbrand  * Test if any discarding of memory in ram blocks is disabled.
3170d24f31dbSDavid Hildenbrand  */
3171d24f31dbSDavid Hildenbrand bool ram_block_discard_is_disabled(void);
3172d24f31dbSDavid Hildenbrand 
3173d24f31dbSDavid Hildenbrand /*
31747e6d32ebSDavid Hildenbrand  * Test if any discarding of memory in ram blocks is required to work reliably.
3175d24f31dbSDavid Hildenbrand  */
3176d24f31dbSDavid Hildenbrand bool ram_block_discard_is_required(void);
3177d24f31dbSDavid Hildenbrand 
3178022c62cbSPaolo Bonzini #endif
3179022c62cbSPaolo Bonzini 
3180022c62cbSPaolo Bonzini #endif
3181