xref: /openbmc/qemu/include/exec/memory.h (revision 1221a4746769f70231beab4db8da1c937e60340c)
1022c62cbSPaolo Bonzini /*
2022c62cbSPaolo Bonzini  * Physical memory management API
3022c62cbSPaolo Bonzini  *
4022c62cbSPaolo Bonzini  * Copyright 2011 Red Hat, Inc. and/or its affiliates
5022c62cbSPaolo Bonzini  *
6022c62cbSPaolo Bonzini  * Authors:
7022c62cbSPaolo Bonzini  *  Avi Kivity <avi@redhat.com>
8022c62cbSPaolo Bonzini  *
9022c62cbSPaolo Bonzini  * This work is licensed under the terms of the GNU GPL, version 2.  See
10022c62cbSPaolo Bonzini  * the COPYING file in the top-level directory.
11022c62cbSPaolo Bonzini  *
12022c62cbSPaolo Bonzini  */
13022c62cbSPaolo Bonzini 
14022c62cbSPaolo Bonzini #ifndef MEMORY_H
15022c62cbSPaolo Bonzini #define MEMORY_H
16022c62cbSPaolo Bonzini 
17022c62cbSPaolo Bonzini #ifndef CONFIG_USER_ONLY
18022c62cbSPaolo Bonzini 
19022c62cbSPaolo Bonzini #include "exec/cpu-common.h"
20022c62cbSPaolo Bonzini #include "exec/hwaddr.h"
21cc05c43aSPeter Maydell #include "exec/memattrs.h"
220987d735SPaolo Bonzini #include "exec/ramlist.h"
231de7afc9SPaolo Bonzini #include "qemu/queue.h"
241de7afc9SPaolo Bonzini #include "qemu/int128.h"
2506866575SDavid Gibson #include "qemu/notify.h"
26b4fefef9SPeter Crosthwaite #include "qom/object.h"
27374f2981SPaolo Bonzini #include "qemu/rcu.h"
28*1221a474SAlexey Kardashevskiy #include "hw/qdev-core.h"
29022c62cbSPaolo Bonzini 
3007bdaa41SPaolo Bonzini #define RAM_ADDR_INVALID (~(ram_addr_t)0)
3107bdaa41SPaolo Bonzini 
32052e87b0SPaolo Bonzini #define MAX_PHYS_ADDR_SPACE_BITS 62
33052e87b0SPaolo Bonzini #define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
34052e87b0SPaolo Bonzini 
35b4fefef9SPeter Crosthwaite #define TYPE_MEMORY_REGION "qemu:memory-region"
36b4fefef9SPeter Crosthwaite #define MEMORY_REGION(obj) \
37b4fefef9SPeter Crosthwaite         OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)
38b4fefef9SPeter Crosthwaite 
393df9d748SAlexey Kardashevskiy #define TYPE_IOMMU_MEMORY_REGION "qemu:iommu-memory-region"
403df9d748SAlexey Kardashevskiy #define IOMMU_MEMORY_REGION(obj) \
413df9d748SAlexey Kardashevskiy         OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_IOMMU_MEMORY_REGION)
42*1221a474SAlexey Kardashevskiy #define IOMMU_MEMORY_REGION_CLASS(klass) \
43*1221a474SAlexey Kardashevskiy         OBJECT_CLASS_CHECK(IOMMUMemoryRegionClass, (klass), \
44*1221a474SAlexey Kardashevskiy                          TYPE_IOMMU_MEMORY_REGION)
45*1221a474SAlexey Kardashevskiy #define IOMMU_MEMORY_REGION_GET_CLASS(obj) \
46*1221a474SAlexey Kardashevskiy         OBJECT_GET_CLASS(IOMMUMemoryRegionClass, (obj), \
47*1221a474SAlexey Kardashevskiy                          TYPE_IOMMU_MEMORY_REGION)
483df9d748SAlexey Kardashevskiy 
49022c62cbSPaolo Bonzini typedef struct MemoryRegionOps MemoryRegionOps;
50022c62cbSPaolo Bonzini typedef struct MemoryRegionMmio MemoryRegionMmio;
51022c62cbSPaolo Bonzini 
52022c62cbSPaolo Bonzini struct MemoryRegionMmio {
53022c62cbSPaolo Bonzini     CPUReadMemoryFunc *read[3];
54022c62cbSPaolo Bonzini     CPUWriteMemoryFunc *write[3];
55022c62cbSPaolo Bonzini };
56022c62cbSPaolo Bonzini 
5730951157SAvi Kivity typedef struct IOMMUTLBEntry IOMMUTLBEntry;
5830951157SAvi Kivity 
5930951157SAvi Kivity /* See address_space_translate: bit 0 is read, bit 1 is write.  */
6030951157SAvi Kivity typedef enum {
6130951157SAvi Kivity     IOMMU_NONE = 0,
6230951157SAvi Kivity     IOMMU_RO   = 1,
6330951157SAvi Kivity     IOMMU_WO   = 2,
6430951157SAvi Kivity     IOMMU_RW   = 3,
6530951157SAvi Kivity } IOMMUAccessFlags;
6630951157SAvi Kivity 
67f06a696dSPeter Xu #define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))
68f06a696dSPeter Xu 
6930951157SAvi Kivity struct IOMMUTLBEntry {
7030951157SAvi Kivity     AddressSpace    *target_as;
7130951157SAvi Kivity     hwaddr           iova;
7230951157SAvi Kivity     hwaddr           translated_addr;
7330951157SAvi Kivity     hwaddr           addr_mask;  /* 0xfff = 4k translation */
7430951157SAvi Kivity     IOMMUAccessFlags perm;
7530951157SAvi Kivity };
7630951157SAvi Kivity 
77cdb30812SPeter Xu /*
78cdb30812SPeter Xu  * Bitmap of IOMMUNotifier capabilities.  Each notifier can register
79cdb30812SPeter Xu  * for one or more of these capability bits.
80cdb30812SPeter Xu  */
81cdb30812SPeter Xu typedef enum {
82cdb30812SPeter Xu     IOMMU_NOTIFIER_NONE = 0,
83cdb30812SPeter Xu     /* Notify cache invalidations */
84cdb30812SPeter Xu     IOMMU_NOTIFIER_UNMAP = 0x1,
85cdb30812SPeter Xu     /* Notify entry changes (newly created entries) */
86cdb30812SPeter Xu     IOMMU_NOTIFIER_MAP = 0x2,
87cdb30812SPeter Xu } IOMMUNotifierFlag;
88cdb30812SPeter Xu 
89cdb30812SPeter Xu #define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
90cdb30812SPeter Xu 
91698feb5eSPeter Xu struct IOMMUNotifier;
92698feb5eSPeter Xu typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
93698feb5eSPeter Xu                             IOMMUTLBEntry *data);
94698feb5eSPeter Xu 
95cdb30812SPeter Xu struct IOMMUNotifier {
96698feb5eSPeter Xu     IOMMUNotify notify;
97cdb30812SPeter Xu     IOMMUNotifierFlag notifier_flags;
98698feb5eSPeter Xu     /* Notify for address space range start <= addr <= end */
99698feb5eSPeter Xu     hwaddr start;
100698feb5eSPeter Xu     hwaddr end;
101cdb30812SPeter Xu     QLIST_ENTRY(IOMMUNotifier) node;
102cdb30812SPeter Xu };
103cdb30812SPeter Xu typedef struct IOMMUNotifier IOMMUNotifier;
104cdb30812SPeter Xu 
105698feb5eSPeter Xu static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
106698feb5eSPeter Xu                                        IOMMUNotifierFlag flags,
107698feb5eSPeter Xu                                        hwaddr start, hwaddr end)
108698feb5eSPeter Xu {
109698feb5eSPeter Xu     n->notify = fn;
110698feb5eSPeter Xu     n->notifier_flags = flags;
111698feb5eSPeter Xu     n->start = start;
112698feb5eSPeter Xu     n->end = end;
113698feb5eSPeter Xu }
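
/*
 * Example (illustrative sketch): initialising a notifier that covers a
 * whole IOMMU region and registering it.  The callback my_iommu_notify,
 * the notifier variable and iommu_mr (an existing IOMMUMemoryRegion) are
 * hypothetical; HWADDR_MAX comes from "exec/hwaddr.h" and the
 * registration function is declared later in this file.
 *
 *     static void my_iommu_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 *     {
 *         if (iotlb->perm == IOMMU_NONE) {
 *             // UNMAP notification: drop any cached mapping for
 *             // [iotlb->iova, iotlb->iova + iotlb->addr_mask]
 *         } else {
 *             // MAP notification: iotlb->translated_addr is now valid
 *         }
 *     }
 *
 *     IOMMUNotifier n;
 *
 *     iommu_notifier_init(&n, my_iommu_notify, IOMMU_NOTIFIER_ALL,
 *                         0, HWADDR_MAX);
 *     memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), &n);
 */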
114698feb5eSPeter Xu 
115cc05c43aSPeter Maydell /* New-style MMIO accessors can indicate that the transaction failed.
116cc05c43aSPeter Maydell  * A zero (MEMTX_OK) response means success; anything else is a failure
117cc05c43aSPeter Maydell  * of some kind. The memory subsystem will bitwise-OR together results
118cc05c43aSPeter Maydell  * if it is synthesizing an operation from multiple smaller accesses.
119cc05c43aSPeter Maydell  */
120cc05c43aSPeter Maydell #define MEMTX_OK 0
121cc05c43aSPeter Maydell #define MEMTX_ERROR             (1U << 0) /* device returned an error */
122cc05c43aSPeter Maydell #define MEMTX_DECODE_ERROR      (1U << 1) /* nothing at that address */
123cc05c43aSPeter Maydell typedef uint32_t MemTxResult;
124cc05c43aSPeter Maydell 
125022c62cbSPaolo Bonzini /*
126022c62cbSPaolo Bonzini  * Memory region callbacks
127022c62cbSPaolo Bonzini  */
128022c62cbSPaolo Bonzini struct MemoryRegionOps {
129022c62cbSPaolo Bonzini     /* Read from the memory region. @addr is relative to @mr; @size is
130022c62cbSPaolo Bonzini      * in bytes. */
131022c62cbSPaolo Bonzini     uint64_t (*read)(void *opaque,
132022c62cbSPaolo Bonzini                      hwaddr addr,
133022c62cbSPaolo Bonzini                      unsigned size);
134022c62cbSPaolo Bonzini     /* Write to the memory region. @addr is relative to @mr; @size is
135022c62cbSPaolo Bonzini      * in bytes. */
136022c62cbSPaolo Bonzini     void (*write)(void *opaque,
137022c62cbSPaolo Bonzini                   hwaddr addr,
138022c62cbSPaolo Bonzini                   uint64_t data,
139022c62cbSPaolo Bonzini                   unsigned size);
140022c62cbSPaolo Bonzini 
141cc05c43aSPeter Maydell     MemTxResult (*read_with_attrs)(void *opaque,
142cc05c43aSPeter Maydell                                    hwaddr addr,
143cc05c43aSPeter Maydell                                    uint64_t *data,
144cc05c43aSPeter Maydell                                    unsigned size,
145cc05c43aSPeter Maydell                                    MemTxAttrs attrs);
146cc05c43aSPeter Maydell     MemTxResult (*write_with_attrs)(void *opaque,
147cc05c43aSPeter Maydell                                     hwaddr addr,
148cc05c43aSPeter Maydell                                     uint64_t data,
149cc05c43aSPeter Maydell                                     unsigned size,
150cc05c43aSPeter Maydell                                     MemTxAttrs attrs);
151c9356746SKONRAD Frederic     /* Instruction execution pre-callback:
152c9356746SKONRAD Frederic      * @addr is the address of the access, relative to @mr.
153c9356746SKONRAD Frederic      * @size is the size of the area returned by the callback.
154c9356746SKONRAD Frederic      * @offset is the location of the pointer inside @mr.
155c9356746SKONRAD Frederic      *
156c9356746SKONRAD Frederic      * Returns a pointer to a location which contains guest code.
157c9356746SKONRAD Frederic      */
158c9356746SKONRAD Frederic     void *(*request_ptr)(void *opaque, hwaddr addr, unsigned *size,
159c9356746SKONRAD Frederic                          unsigned *offset);
160cc05c43aSPeter Maydell 
161022c62cbSPaolo Bonzini     enum device_endian endianness;
162022c62cbSPaolo Bonzini     /* Guest-visible constraints: */
163022c62cbSPaolo Bonzini     struct {
164022c62cbSPaolo Bonzini         /* If nonzero, specifies bounds on access sizes outside of which
165022c62cbSPaolo Bonzini          * a machine check is thrown.
166022c62cbSPaolo Bonzini          */
167022c62cbSPaolo Bonzini         unsigned min_access_size;
168022c62cbSPaolo Bonzini         unsigned max_access_size;
169022c62cbSPaolo Bonzini         /* If true, unaligned accesses are supported.  Otherwise unaligned
170022c62cbSPaolo Bonzini          * accesses throw machine checks.
171022c62cbSPaolo Bonzini          */
172022c62cbSPaolo Bonzini         bool unaligned;
173022c62cbSPaolo Bonzini         /*
174022c62cbSPaolo Bonzini          * If present and it returns #false, the transaction is not accepted
175022c62cbSPaolo Bonzini          * by the device (and results in machine-dependent behaviour such
176022c62cbSPaolo Bonzini          * as a machine check exception).
177022c62cbSPaolo Bonzini          */
178022c62cbSPaolo Bonzini         bool (*accepts)(void *opaque, hwaddr addr,
179022c62cbSPaolo Bonzini                         unsigned size, bool is_write);
180022c62cbSPaolo Bonzini     } valid;
181022c62cbSPaolo Bonzini     /* Internal implementation constraints: */
182022c62cbSPaolo Bonzini     struct {
183022c62cbSPaolo Bonzini         /* If nonzero, specifies the minimum size implemented.  Smaller sizes
184022c62cbSPaolo Bonzini          * will be rounded upwards and a partial result will be returned.
185022c62cbSPaolo Bonzini          */
186022c62cbSPaolo Bonzini         unsigned min_access_size;
187022c62cbSPaolo Bonzini         /* If nonzero, specifies the maximum size implemented.  Larger sizes
188022c62cbSPaolo Bonzini          * will be done as a series of accesses with smaller sizes.
189022c62cbSPaolo Bonzini          */
190022c62cbSPaolo Bonzini         unsigned max_access_size;
191022c62cbSPaolo Bonzini         /* If true, unaligned accesses are supported.  Otherwise all accesses
192022c62cbSPaolo Bonzini          * are converted to (possibly multiple) naturally aligned accesses.
193022c62cbSPaolo Bonzini          */
194022c62cbSPaolo Bonzini         bool unaligned;
195022c62cbSPaolo Bonzini     } impl;
196022c62cbSPaolo Bonzini 
197022c62cbSPaolo Bonzini     /* If .read and .write are not present, old_mmio may be used for
198022c62cbSPaolo Bonzini      * backwards compatibility with old MMIO registration.
199022c62cbSPaolo Bonzini      */
200022c62cbSPaolo Bonzini     const MemoryRegionMmio old_mmio;
201022c62cbSPaolo Bonzini };
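
/*
 * Example (illustrative sketch): a minimal MMIO region backed by
 * MemoryRegionOps.  The device state MyDevState, the callbacks, the
 * register layout and the owner object obj are hypothetical.
 *
 *     typedef struct MyDevState {
 *         MemoryRegion iomem;
 *         uint32_t ctrl;
 *     } MyDevState;
 *
 *     static uint64_t my_dev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *
 *         return addr == 0 ? s->ctrl : 0;     // offset 0: CTRL register
 *     }
 *
 *     static void my_dev_write(void *opaque, hwaddr addr, uint64_t val,
 *                              unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *
 *         if (addr == 0) {
 *             s->ctrl = val;
 *         }
 *     }
 *
 *     static const MemoryRegionOps my_dev_ops = {
 *         .read = my_dev_read,
 *         .write = my_dev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid = {
 *             .min_access_size = 4,
 *             .max_access_size = 4,
 *         },
 *     };
 *
 *     // at realize time:
 *     memory_region_init_io(&s->iomem, OBJECT(obj), &my_dev_ops, s,
 *                           "my-dev-mmio", 0x100);
 */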
202022c62cbSPaolo Bonzini 
203*1221a474SAlexey Kardashevskiy typedef struct IOMMUMemoryRegionClass {
204*1221a474SAlexey Kardashevskiy     /* private */
205*1221a474SAlexey Kardashevskiy     struct DeviceClass parent_class;
20630951157SAvi Kivity 
207bf55b7afSPeter Xu     /*
208bf55b7afSPeter Xu      * Return a TLB entry that contains the given address.  @flag is the
209bf55b7afSPeter Xu      * requested access permission for this translation operation; pass
210bf55b7afSPeter Xu      * IOMMU_NONE to skip the read/write permission checks, for example
211bf55b7afSPeter Xu      * when replaying a region.
212bf55b7afSPeter Xu      */
2133df9d748SAlexey Kardashevskiy     IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
214bf55b7afSPeter Xu                                IOMMUAccessFlags flag);
215f682e9c2SAlexey Kardashevskiy     /* Returns minimum supported page size */
2163df9d748SAlexey Kardashevskiy     uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
2175bf3d319SPeter Xu     /* Called when the IOMMU notifier flags change */
2183df9d748SAlexey Kardashevskiy     void (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
2195bf3d319SPeter Xu                                 IOMMUNotifierFlag old_flags,
2205bf3d319SPeter Xu                                 IOMMUNotifierFlag new_flags);
221faa362e3SPeter Xu     /* Set this up to provide a customized IOMMU replay function */
2223df9d748SAlexey Kardashevskiy     void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);
223*1221a474SAlexey Kardashevskiy } IOMMUMemoryRegionClass;
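
/*
 * Example (illustrative sketch): a QOM subtype of TYPE_IOMMU_MEMORY_REGION
 * that supplies a translate callback.  The type name, the callback and the
 * identity mapping it returns are hypothetical; address_space_memory comes
 * from "exec/address-spaces.h", TypeInfo/type_register_static/type_init
 * from the QOM and module headers.
 *
 *     static IOMMUTLBEntry my_iommu_translate(IOMMUMemoryRegion *iommu,
 *                                             hwaddr addr,
 *                                             IOMMUAccessFlags flag)
 *     {
 *         IOMMUTLBEntry ret = {
 *             .target_as = &address_space_memory,
 *             .iova = addr & ~(hwaddr)0xfff,
 *             .translated_addr = addr & ~(hwaddr)0xfff,  // identity map
 *             .addr_mask = 0xfff,                        // 4K pages
 *             .perm = IOMMU_RW,
 *         };
 *
 *         return ret;
 *     }
 *
 *     static void my_iommu_class_init(ObjectClass *klass, void *data)
 *     {
 *         IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
 *
 *         imrc->translate = my_iommu_translate;
 *     }
 *
 *     static const TypeInfo my_iommu_info = {
 *         .name = "my-iommu-memory-region",
 *         .parent = TYPE_IOMMU_MEMORY_REGION,
 *         .class_init = my_iommu_class_init,
 *     };
 *
 *     static void my_iommu_register_types(void)
 *     {
 *         type_register_static(&my_iommu_info);
 *     }
 *     type_init(my_iommu_register_types)
 */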
22430951157SAvi Kivity 
225022c62cbSPaolo Bonzini typedef struct CoalescedMemoryRange CoalescedMemoryRange;
226022c62cbSPaolo Bonzini typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
227022c62cbSPaolo Bonzini 
228022c62cbSPaolo Bonzini struct MemoryRegion {
229b4fefef9SPeter Crosthwaite     Object parent_obj;
230a676854fSPaolo Bonzini 
231022c62cbSPaolo Bonzini     /* All fields are private - violators will be prosecuted */
232a676854fSPaolo Bonzini 
233a676854fSPaolo Bonzini     /* The following fields should fit in a cache line */
234a676854fSPaolo Bonzini     bool romd_mode;
235a676854fSPaolo Bonzini     bool ram;
236a676854fSPaolo Bonzini     bool subpage;
237a676854fSPaolo Bonzini     bool readonly; /* For RAM regions */
238a676854fSPaolo Bonzini     bool rom_device;
239a676854fSPaolo Bonzini     bool flush_coalesced_mmio;
240a676854fSPaolo Bonzini     bool global_locking;
241a676854fSPaolo Bonzini     uint8_t dirty_log_mask;
2423df9d748SAlexey Kardashevskiy     bool is_iommu;
24358eaa217SGonglei     RAMBlock *ram_block;
244612263cfSPaolo Bonzini     Object *owner;
245a676854fSPaolo Bonzini 
246a676854fSPaolo Bonzini     const MemoryRegionOps *ops;
247022c62cbSPaolo Bonzini     void *opaque;
248feca4ac1SPaolo Bonzini     MemoryRegion *container;
249022c62cbSPaolo Bonzini     Int128 size;
250022c62cbSPaolo Bonzini     hwaddr addr;
251022c62cbSPaolo Bonzini     void (*destructor)(MemoryRegion *mr);
252a2b257d6SIgor Mammedov     uint64_t align;
253022c62cbSPaolo Bonzini     bool terminates;
25421e00fa5SAlex Williamson     bool ram_device;
255022c62cbSPaolo Bonzini     bool enabled;
256022c62cbSPaolo Bonzini     bool warning_printed; /* For reservations */
257deb809edSPaolo Bonzini     uint8_t vga_logging_count;
258022c62cbSPaolo Bonzini     MemoryRegion *alias;
259022c62cbSPaolo Bonzini     hwaddr alias_offset;
260d33382daSPeter Crosthwaite     int32_t priority;
261022c62cbSPaolo Bonzini     QTAILQ_HEAD(subregions, MemoryRegion) subregions;
262022c62cbSPaolo Bonzini     QTAILQ_ENTRY(MemoryRegion) subregions_link;
263022c62cbSPaolo Bonzini     QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
264302fa283SPeter Maydell     const char *name;
265022c62cbSPaolo Bonzini     unsigned ioeventfd_nb;
266022c62cbSPaolo Bonzini     MemoryRegionIoeventfd *ioeventfds;
2673df9d748SAlexey Kardashevskiy };
2683df9d748SAlexey Kardashevskiy 
2693df9d748SAlexey Kardashevskiy struct IOMMUMemoryRegion {
2703df9d748SAlexey Kardashevskiy     MemoryRegion parent_obj;
2713df9d748SAlexey Kardashevskiy 
272cdb30812SPeter Xu     QLIST_HEAD(, IOMMUNotifier) iommu_notify;
2735bf3d319SPeter Xu     IOMMUNotifierFlag iommu_notify_flags;
274022c62cbSPaolo Bonzini };
275022c62cbSPaolo Bonzini 
276512fa408SPeter Xu #define IOMMU_NOTIFIER_FOREACH(n, mr) \
277512fa408SPeter Xu     QLIST_FOREACH((n), &(mr)->iommu_notify, node)
278512fa408SPeter Xu 
279c2fc83e8SPaolo Bonzini /**
280c2fc83e8SPaolo Bonzini  * MemoryListener: callbacks structure for updates to the physical memory map
281c2fc83e8SPaolo Bonzini  *
282c2fc83e8SPaolo Bonzini  * Allows a component to adjust to changes in the guest-visible memory map.
283c2fc83e8SPaolo Bonzini  * Use with memory_listener_register() and memory_listener_unregister().
284c2fc83e8SPaolo Bonzini  */
285c2fc83e8SPaolo Bonzini struct MemoryListener {
286c2fc83e8SPaolo Bonzini     void (*begin)(MemoryListener *listener);
287c2fc83e8SPaolo Bonzini     void (*commit)(MemoryListener *listener);
288c2fc83e8SPaolo Bonzini     void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
289c2fc83e8SPaolo Bonzini     void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
290c2fc83e8SPaolo Bonzini     void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
291b2dfd71cSPaolo Bonzini     void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
292b2dfd71cSPaolo Bonzini                       int old, int new);
293b2dfd71cSPaolo Bonzini     void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
294b2dfd71cSPaolo Bonzini                      int old, int new);
295c2fc83e8SPaolo Bonzini     void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
296c2fc83e8SPaolo Bonzini     void (*log_global_start)(MemoryListener *listener);
297c2fc83e8SPaolo Bonzini     void (*log_global_stop)(MemoryListener *listener);
298c2fc83e8SPaolo Bonzini     void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
299c2fc83e8SPaolo Bonzini                         bool match_data, uint64_t data, EventNotifier *e);
300c2fc83e8SPaolo Bonzini     void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
301c2fc83e8SPaolo Bonzini                         bool match_data, uint64_t data, EventNotifier *e);
302c2fc83e8SPaolo Bonzini     void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
303c2fc83e8SPaolo Bonzini                                hwaddr addr, hwaddr len);
304c2fc83e8SPaolo Bonzini     void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
305c2fc83e8SPaolo Bonzini                                hwaddr addr, hwaddr len);
306c2fc83e8SPaolo Bonzini     /* Lower = earlier (during add), later (during del) */
307c2fc83e8SPaolo Bonzini     unsigned priority;
308d45fa784SPaolo Bonzini     AddressSpace *address_space;
309c2fc83e8SPaolo Bonzini     QTAILQ_ENTRY(MemoryListener) link;
3109a54635dSPaolo Bonzini     QTAILQ_ENTRY(MemoryListener) link_as;
311c2fc83e8SPaolo Bonzini };
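
/*
 * Example (illustrative sketch): a listener that observes mapping changes
 * in the system address space.  The callbacks are hypothetical;
 * memory_listener_register() is declared further down in this file and
 * address_space_memory comes from "exec/address-spaces.h".
 *
 *     static void my_region_add(MemoryListener *l, MemoryRegionSection *sec)
 *     {
 *         // a section of sec->mr became visible at
 *         // sec->offset_within_address_space
 *     }
 *
 *     static void my_region_del(MemoryListener *l, MemoryRegionSection *sec)
 *     {
 *         // the section is no longer mapped
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .region_del = my_region_del,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */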
312c2fc83e8SPaolo Bonzini 
313022c62cbSPaolo Bonzini /**
314022c62cbSPaolo Bonzini  * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
315022c62cbSPaolo Bonzini  */
316022c62cbSPaolo Bonzini struct AddressSpace {
317022c62cbSPaolo Bonzini     /* All fields are private. */
318374f2981SPaolo Bonzini     struct rcu_head rcu;
3197dca8043SAlexey Kardashevskiy     char *name;
320022c62cbSPaolo Bonzini     MemoryRegion *root;
321f0c02d15SPeter Crosthwaite     int ref_count;
322f0c02d15SPeter Crosthwaite     bool malloced;
323374f2981SPaolo Bonzini 
324374f2981SPaolo Bonzini     /* Accessed via RCU.  */
325022c62cbSPaolo Bonzini     struct FlatView *current_map;
326374f2981SPaolo Bonzini 
327022c62cbSPaolo Bonzini     int ioeventfd_nb;
328022c62cbSPaolo Bonzini     struct MemoryRegionIoeventfd *ioeventfds;
329022c62cbSPaolo Bonzini     struct AddressSpaceDispatch *dispatch;
33000752703SPaolo Bonzini     struct AddressSpaceDispatch *next_dispatch;
33189ae337aSPaolo Bonzini     MemoryListener dispatch_listener;
3329a54635dSPaolo Bonzini     QTAILQ_HEAD(memory_listeners_as, MemoryListener) listeners;
333022c62cbSPaolo Bonzini     QTAILQ_ENTRY(AddressSpace) address_spaces_link;
334022c62cbSPaolo Bonzini };
335022c62cbSPaolo Bonzini 
336022c62cbSPaolo Bonzini /**
337022c62cbSPaolo Bonzini  * MemoryRegionSection: describes a fragment of a #MemoryRegion
338022c62cbSPaolo Bonzini  *
339022c62cbSPaolo Bonzini  * @mr: the region, or %NULL if empty
340022c62cbSPaolo Bonzini  * @address_space: the address space the region is mapped in
341022c62cbSPaolo Bonzini  * @offset_within_region: the beginning of the section, relative to @mr's start
342022c62cbSPaolo Bonzini  * @size: the size of the section; will not exceed @mr's boundaries
343022c62cbSPaolo Bonzini  * @offset_within_address_space: the address of the first byte of the section
344022c62cbSPaolo Bonzini  *     relative to the region's address space
345022c62cbSPaolo Bonzini  * @readonly: writes to this section are ignored
346022c62cbSPaolo Bonzini  */
347022c62cbSPaolo Bonzini struct MemoryRegionSection {
348022c62cbSPaolo Bonzini     MemoryRegion *mr;
349022c62cbSPaolo Bonzini     AddressSpace *address_space;
350022c62cbSPaolo Bonzini     hwaddr offset_within_region;
351052e87b0SPaolo Bonzini     Int128 size;
352022c62cbSPaolo Bonzini     hwaddr offset_within_address_space;
353022c62cbSPaolo Bonzini     bool readonly;
354022c62cbSPaolo Bonzini };
355022c62cbSPaolo Bonzini 
356022c62cbSPaolo Bonzini /**
357022c62cbSPaolo Bonzini  * memory_region_init: Initialize a memory region
358022c62cbSPaolo Bonzini  *
359022c62cbSPaolo Bonzini  * The region typically acts as a container for other memory regions.  Use
360022c62cbSPaolo Bonzini  * memory_region_add_subregion() to add subregions.
361022c62cbSPaolo Bonzini  *
362022c62cbSPaolo Bonzini  * @mr: the #MemoryRegion to be initialized
3632c9b15caSPaolo Bonzini  * @owner: the object that tracks the region's reference count
364022c62cbSPaolo Bonzini  * @name: used for debugging; not visible to the user or ABI
365022c62cbSPaolo Bonzini  * @size: size of the region; any subregions beyond this size will be clipped
366022c62cbSPaolo Bonzini  */
367022c62cbSPaolo Bonzini void memory_region_init(MemoryRegion *mr,
3682c9b15caSPaolo Bonzini                         struct Object *owner,
369022c62cbSPaolo Bonzini                         const char *name,
370022c62cbSPaolo Bonzini                         uint64_t size);
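
/*
 * Example (illustrative sketch): building a container region and mapping
 * an MMIO subregion into it.  The device state s, the owner dev and
 * my_dev_ops are hypothetical; memory_region_add_subregion() is declared
 * further down in this file.
 *
 *     memory_region_init(&s->container, OBJECT(dev), "my-dev", 0x1000);
 *     memory_region_init_io(&s->regs, OBJECT(dev), &my_dev_ops, s,
 *                           "my-dev.regs", 0x100);
 *     memory_region_add_subregion(&s->container, 0x0, &s->regs);
 */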
37146637be2SPaolo Bonzini 
37246637be2SPaolo Bonzini /**
37346637be2SPaolo Bonzini  * memory_region_ref: Add 1 to a memory region's reference count
37446637be2SPaolo Bonzini  *
37546637be2SPaolo Bonzini  * Whenever memory regions are accessed outside the BQL, they need to be
37646637be2SPaolo Bonzini  * preserved against hot-unplug.  MemoryRegions actually do not have their
37746637be2SPaolo Bonzini  * own reference count; they piggyback on a QOM object, their "owner".
37846637be2SPaolo Bonzini  * This function adds a reference to the owner.
37946637be2SPaolo Bonzini  *
38046637be2SPaolo Bonzini  * All MemoryRegions must have an owner if they can disappear, even if the
38146637be2SPaolo Bonzini  * device they belong to operates exclusively under the BQL.  This is because
38246637be2SPaolo Bonzini  * the region could be returned at any time by memory_region_find, and this
38346637be2SPaolo Bonzini  * is usually under guest control.
38446637be2SPaolo Bonzini  *
38546637be2SPaolo Bonzini  * @mr: the #MemoryRegion
38646637be2SPaolo Bonzini  */
38746637be2SPaolo Bonzini void memory_region_ref(MemoryRegion *mr);
38846637be2SPaolo Bonzini 
38946637be2SPaolo Bonzini /**
39046637be2SPaolo Bonzini  * memory_region_unref: Remove 1 from a memory region's reference count
39146637be2SPaolo Bonzini  *
39246637be2SPaolo Bonzini  * Whenever memory regions are accessed outside the BQL, they need to be
39346637be2SPaolo Bonzini  * preserved against hot-unplug.  MemoryRegions actually do not have their
39446637be2SPaolo Bonzini  * own reference count; they piggyback on a QOM object, their "owner".
39546637be2SPaolo Bonzini  * This function removes a reference to the owner and possibly destroys it.
39646637be2SPaolo Bonzini  *
39746637be2SPaolo Bonzini  * @mr: the #MemoryRegion
39846637be2SPaolo Bonzini  */
39946637be2SPaolo Bonzini void memory_region_unref(MemoryRegion *mr);
40046637be2SPaolo Bonzini 
401022c62cbSPaolo Bonzini /**
402022c62cbSPaolo Bonzini  * memory_region_init_io: Initialize an I/O memory region.
403022c62cbSPaolo Bonzini  *
404022c62cbSPaolo Bonzini  * Accesses into the region will cause the callbacks in @ops to be called.
405022c62cbSPaolo Bonzini  * If @size is nonzero, subregions will be clipped to @size.
406022c62cbSPaolo Bonzini  *
407022c62cbSPaolo Bonzini  * @mr: the #MemoryRegion to be initialized.
4082c9b15caSPaolo Bonzini  * @owner: the object that tracks the region's reference count
409022c62cbSPaolo Bonzini  * @ops: a structure containing read and write callbacks to be used when
410022c62cbSPaolo Bonzini  *       I/O is performed on the region.
411b6af0975SDaniel P. Berrange  * @opaque: passed to the read and write callbacks of the @ops structure.
412022c62cbSPaolo Bonzini  * @name: used for debugging; not visible to the user or ABI
413022c62cbSPaolo Bonzini  * @size: size of the region.
414022c62cbSPaolo Bonzini  */
415022c62cbSPaolo Bonzini void memory_region_init_io(MemoryRegion *mr,
4162c9b15caSPaolo Bonzini                            struct Object *owner,
417022c62cbSPaolo Bonzini                            const MemoryRegionOps *ops,
418022c62cbSPaolo Bonzini                            void *opaque,
419022c62cbSPaolo Bonzini                            const char *name,
420022c62cbSPaolo Bonzini                            uint64_t size);
421022c62cbSPaolo Bonzini 
422022c62cbSPaolo Bonzini /**
423022c62cbSPaolo Bonzini  * memory_region_init_ram:  Initialize RAM memory region.  Accesses into the
424022c62cbSPaolo Bonzini  *                          region will modify memory directly.
425022c62cbSPaolo Bonzini  *
426022c62cbSPaolo Bonzini  * @mr: the #MemoryRegion to be initialized.
4272c9b15caSPaolo Bonzini  * @owner: the object that tracks the region's reference count
428e8f5fe2dSDr. David Alan Gilbert  * @name: Region name, becomes part of RAMBlock name used in migration stream;
429e8f5fe2dSDr. David Alan Gilbert  *        must be unique within any device.
430022c62cbSPaolo Bonzini  * @size: size of the region.
43149946538SHu Tao  * @errp: pointer to Error*, to store an error if it happens.
432022c62cbSPaolo Bonzini  */
433022c62cbSPaolo Bonzini void memory_region_init_ram(MemoryRegion *mr,
4342c9b15caSPaolo Bonzini                             struct Object *owner,
435022c62cbSPaolo Bonzini                             const char *name,
43649946538SHu Tao                             uint64_t size,
43749946538SHu Tao                             Error **errp);
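
/*
 * Example (illustrative sketch): allocating a 64 KiB RAM region and
 * mapping it into the system address space.  s and dev are hypothetical;
 * get_system_memory() comes from "exec/address-spaces.h", &error_fatal
 * from "qapi/error.h", and memory_region_add_subregion() is declared
 * further down in this file.
 *
 *     memory_region_init_ram(&s->ram, OBJECT(dev), "my-dev.ram",
 *                            64 * 1024, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0x40000000, &s->ram);
 */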
438022c62cbSPaolo Bonzini 
43960786ef3SMichael S. Tsirkin /**
44060786ef3SMichael S. Tsirkin  * memory_region_init_resizeable_ram:  Initialize memory region with resizeable
44160786ef3SMichael S. Tsirkin  *                                     RAM.  Accesses into the region will
44260786ef3SMichael S. Tsirkin  *                                     modify memory directly.  Only an initial
44360786ef3SMichael S. Tsirkin  *                                     portion of this RAM is actually used.
44460786ef3SMichael S. Tsirkin  *                                     The used size can change across reboots.
44560786ef3SMichael S. Tsirkin  *
44660786ef3SMichael S. Tsirkin  * @mr: the #MemoryRegion to be initialized.
44760786ef3SMichael S. Tsirkin  * @owner: the object that tracks the region's reference count
448e8f5fe2dSDr. David Alan Gilbert  * @name: Region name, becomes part of RAMBlock name used in migration stream;
449e8f5fe2dSDr. David Alan Gilbert  *        must be unique within any device.
45060786ef3SMichael S. Tsirkin  * @size: used size of the region.
45160786ef3SMichael S. Tsirkin  * @max_size: max size of the region.
45260786ef3SMichael S. Tsirkin  * @resized: callback to notify owner about used size change.
45360786ef3SMichael S. Tsirkin  * @errp: pointer to Error*, to store an error if it happens.
45460786ef3SMichael S. Tsirkin  */
45560786ef3SMichael S. Tsirkin void memory_region_init_resizeable_ram(MemoryRegion *mr,
45660786ef3SMichael S. Tsirkin                                        struct Object *owner,
45760786ef3SMichael S. Tsirkin                                        const char *name,
45860786ef3SMichael S. Tsirkin                                        uint64_t size,
45960786ef3SMichael S. Tsirkin                                        uint64_t max_size,
46060786ef3SMichael S. Tsirkin                                        void (*resized)(const char*,
46160786ef3SMichael S. Tsirkin                                                        uint64_t length,
46260786ef3SMichael S. Tsirkin                                                        void *host),
46360786ef3SMichael S. Tsirkin                                        Error **errp);
4640b183fc8SPaolo Bonzini #ifdef __linux__
4650b183fc8SPaolo Bonzini /**
4660b183fc8SPaolo Bonzini  * memory_region_init_ram_from_file:  Initialize RAM memory region with a
4670b183fc8SPaolo Bonzini  *                                    mmap-ed backend.
4680b183fc8SPaolo Bonzini  *
4690b183fc8SPaolo Bonzini  * @mr: the #MemoryRegion to be initialized.
4700b183fc8SPaolo Bonzini  * @owner: the object that tracks the region's reference count
471e8f5fe2dSDr. David Alan Gilbert  * @name: Region name, becomes part of RAMBlock name used in migration stream;
472e8f5fe2dSDr. David Alan Gilbert  *        must be unique within any device.
4730b183fc8SPaolo Bonzini  * @size: size of the region.
474dbcb8981SPaolo Bonzini  * @share: %true if memory must be mmapped with the MAP_SHARED flag
4750b183fc8SPaolo Bonzini  * @path: the path in which to allocate the RAM.
4767f56e740SPaolo Bonzini  * @errp: pointer to Error*, to store an error if it happens.
4770b183fc8SPaolo Bonzini  */
4780b183fc8SPaolo Bonzini void memory_region_init_ram_from_file(MemoryRegion *mr,
4790b183fc8SPaolo Bonzini                                       struct Object *owner,
4800b183fc8SPaolo Bonzini                                       const char *name,
4810b183fc8SPaolo Bonzini                                       uint64_t size,
482dbcb8981SPaolo Bonzini                                       bool share,
4837f56e740SPaolo Bonzini                                       const char *path,
4847f56e740SPaolo Bonzini                                       Error **errp);
485fea617c5SMarc-André Lureau 
486fea617c5SMarc-André Lureau /**
487fea617c5SMarc-André Lureau  * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
488fea617c5SMarc-André Lureau  *                                  mmap-ed backend.
489fea617c5SMarc-André Lureau  *
490fea617c5SMarc-André Lureau  * @mr: the #MemoryRegion to be initialized.
491fea617c5SMarc-André Lureau  * @owner: the object that tracks the region's reference count
492fea617c5SMarc-André Lureau  * @name: the name of the region.
493fea617c5SMarc-André Lureau  * @size: size of the region.
494fea617c5SMarc-André Lureau  * @share: %true if memory must be mmapped with the MAP_SHARED flag
495fea617c5SMarc-André Lureau  * @fd: the fd to mmap.
496fea617c5SMarc-André Lureau  * @errp: pointer to Error*, to store an error if it happens.
497fea617c5SMarc-André Lureau  */
498fea617c5SMarc-André Lureau void memory_region_init_ram_from_fd(MemoryRegion *mr,
499fea617c5SMarc-André Lureau                                     struct Object *owner,
500fea617c5SMarc-André Lureau                                     const char *name,
501fea617c5SMarc-André Lureau                                     uint64_t size,
502fea617c5SMarc-André Lureau                                     bool share,
503fea617c5SMarc-André Lureau                                     int fd,
504fea617c5SMarc-André Lureau                                     Error **errp);
5050b183fc8SPaolo Bonzini #endif
5060b183fc8SPaolo Bonzini 
507022c62cbSPaolo Bonzini /**
508022c62cbSPaolo Bonzini  * memory_region_init_ram_ptr:  Initialize RAM memory region from a
509022c62cbSPaolo Bonzini  *                              user-provided pointer.  Accesses into the
510022c62cbSPaolo Bonzini  *                              region will modify memory directly.
511022c62cbSPaolo Bonzini  *
512022c62cbSPaolo Bonzini  * @mr: the #MemoryRegion to be initialized.
5132c9b15caSPaolo Bonzini  * @owner: the object that tracks the region's reference count
514e8f5fe2dSDr. David Alan Gilbert  * @name: Region name, becomes part of RAMBlock name used in migration stream;
515e8f5fe2dSDr. David Alan Gilbert  *        must be unique within any device.
516022c62cbSPaolo Bonzini  * @size: size of the region.
517022c62cbSPaolo Bonzini  * @ptr: memory to be mapped; must contain at least @size bytes.
518022c62cbSPaolo Bonzini  */
519022c62cbSPaolo Bonzini void memory_region_init_ram_ptr(MemoryRegion *mr,
5202c9b15caSPaolo Bonzini                                 struct Object *owner,
521022c62cbSPaolo Bonzini                                 const char *name,
522022c62cbSPaolo Bonzini                                 uint64_t size,
523022c62cbSPaolo Bonzini                                 void *ptr);
524022c62cbSPaolo Bonzini 
525022c62cbSPaolo Bonzini /**
52621e00fa5SAlex Williamson  * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
52721e00fa5SAlex Williamson  *                                     a user-provided pointer.
52821e00fa5SAlex Williamson  *
52921e00fa5SAlex Williamson  * A RAM device represents a mapping to a physical device, such as to a PCI
53021e00fa5SAlex Williamson  * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
53121e00fa5SAlex Williamson  * into the VM address space and access to the region will modify memory
53221e00fa5SAlex Williamson  * directly.  However, the memory region should not be included in a memory
53321e00fa5SAlex Williamson  * dump (device may not be enabled/mapped at the time of the dump), and
53421e00fa5SAlex Williamson  * operations incompatible with manipulating MMIO should be avoided.  Replaces
53521e00fa5SAlex Williamson  * operations incompatible with manipulating MMIO should be avoided.  This
53621e00fa5SAlex Williamson  * replaces the skip_dump flag.
53721e00fa5SAlex Williamson  * @mr: the #MemoryRegion to be initialized.
53821e00fa5SAlex Williamson  * @owner: the object that tracks the region's reference count
53921e00fa5SAlex Williamson  * @name: the name of the region.
54021e00fa5SAlex Williamson  * @size: size of the region.
54121e00fa5SAlex Williamson  * @ptr: memory to be mapped; must contain at least @size bytes.
54221e00fa5SAlex Williamson  */
54321e00fa5SAlex Williamson void memory_region_init_ram_device_ptr(MemoryRegion *mr,
54421e00fa5SAlex Williamson                                        struct Object *owner,
54521e00fa5SAlex Williamson                                        const char *name,
54621e00fa5SAlex Williamson                                        uint64_t size,
54721e00fa5SAlex Williamson                                        void *ptr);
54821e00fa5SAlex Williamson 
54921e00fa5SAlex Williamson /**
550022c62cbSPaolo Bonzini  * memory_region_init_alias: Initialize a memory region that aliases all or a
551022c62cbSPaolo Bonzini  *                           part of another memory region.
552022c62cbSPaolo Bonzini  *
553022c62cbSPaolo Bonzini  * @mr: the #MemoryRegion to be initialized.
5542c9b15caSPaolo Bonzini  * @owner: the object that tracks the region's reference count
555022c62cbSPaolo Bonzini  * @name: used for debugging; not visible to the user or ABI
556022c62cbSPaolo Bonzini  * @orig: the region to be referenced; @mr will be equivalent to
557022c62cbSPaolo Bonzini  *        @orig between @offset and @offset + @size - 1.
558022c62cbSPaolo Bonzini  * @offset: start of the section in @orig to be referenced.
559022c62cbSPaolo Bonzini  * @size: size of the region.
560022c62cbSPaolo Bonzini  */
561022c62cbSPaolo Bonzini void memory_region_init_alias(MemoryRegion *mr,
5622c9b15caSPaolo Bonzini                               struct Object *owner,
563022c62cbSPaolo Bonzini                               const char *name,
564022c62cbSPaolo Bonzini                               MemoryRegion *orig,
565022c62cbSPaolo Bonzini                               hwaddr offset,
566022c62cbSPaolo Bonzini                               uint64_t size);
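
/*
 * Example (illustrative sketch): exposing the second 4 KiB page of an
 * existing region orig at another guest-physical address.  alias, dev and
 * orig are hypothetical; memory_region_add_subregion() and
 * get_system_memory() are declared elsewhere.
 *
 *     MemoryRegion *alias = g_new0(MemoryRegion, 1);
 *
 *     memory_region_init_alias(alias, OBJECT(dev), "my-dev.ram-alias",
 *                              orig, 0x1000, 0x1000);
 *     memory_region_add_subregion(get_system_memory(), 0x80000000, alias);
 */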
567022c62cbSPaolo Bonzini 
568022c62cbSPaolo Bonzini /**
569a1777f7fSPeter Maydell  * memory_region_init_rom: Initialize a ROM memory region.
570a1777f7fSPeter Maydell  *
571a1777f7fSPeter Maydell  * This has the same effect as calling memory_region_init_ram()
572a1777f7fSPeter Maydell  * and then marking the resulting region read-only with
573a1777f7fSPeter Maydell  * memory_region_set_readonly().
574a1777f7fSPeter Maydell  *
575a1777f7fSPeter Maydell  * @mr: the #MemoryRegion to be initialized.
576a1777f7fSPeter Maydell  * @owner: the object that tracks the region's reference count
577e8f5fe2dSDr. David Alan Gilbert  * @name: Region name, becomes part of RAMBlock name used in migration stream;
578e8f5fe2dSDr. David Alan Gilbert  *        must be unique within any device.
579a1777f7fSPeter Maydell  * @size: size of the region.
580a1777f7fSPeter Maydell  * @errp: pointer to Error*, to store an error if it happens.
581a1777f7fSPeter Maydell  */
582a1777f7fSPeter Maydell void memory_region_init_rom(MemoryRegion *mr,
583a1777f7fSPeter Maydell                             struct Object *owner,
584a1777f7fSPeter Maydell                             const char *name,
585a1777f7fSPeter Maydell                             uint64_t size,
586a1777f7fSPeter Maydell                             Error **errp);
587a1777f7fSPeter Maydell 
588a1777f7fSPeter Maydell /**
589022c62cbSPaolo Bonzini  * memory_region_init_rom_device:  Initialize a ROM memory region.  Writes are
590022c62cbSPaolo Bonzini  *                                 handled via callbacks.
591022c62cbSPaolo Bonzini  *
592022c62cbSPaolo Bonzini  * @mr: the #MemoryRegion to be initialized.
5932c9b15caSPaolo Bonzini  * @owner: the object that tracks the region's reference count
59439e0b03dSPeter Maydell  * @ops: callbacks for write access handling (must not be NULL).
595e8f5fe2dSDr. David Alan Gilbert  * @name: Region name, becomes part of RAMBlock name used in migration stream;
596e8f5fe2dSDr. David Alan Gilbert  *        must be unique within any device.
597022c62cbSPaolo Bonzini  * @size: size of the region.
59833e0eb52SHu Tao  * @errp: pointer to Error*, to store an error if it happens.
599022c62cbSPaolo Bonzini  */
600022c62cbSPaolo Bonzini void memory_region_init_rom_device(MemoryRegion *mr,
6012c9b15caSPaolo Bonzini                                    struct Object *owner,
602022c62cbSPaolo Bonzini                                    const MemoryRegionOps *ops,
603022c62cbSPaolo Bonzini                                    void *opaque,
604022c62cbSPaolo Bonzini                                    const char *name,
60533e0eb52SHu Tao                                    uint64_t size,
60633e0eb52SHu Tao                                    Error **errp);
607022c62cbSPaolo Bonzini 
608022c62cbSPaolo Bonzini /**
609022c62cbSPaolo Bonzini  * memory_region_init_reservation: Initialize a memory region that reserves
610022c62cbSPaolo Bonzini  *                                 I/O space.
611022c62cbSPaolo Bonzini  *
612022c62cbSPaolo Bonzini  * A reservation region primarily serves debugging purposes.  It claims I/O
613022c62cbSPaolo Bonzini  * space that is not supposed to be handled by QEMU itself.  Any access via
614022c62cbSPaolo Bonzini  * the memory API will cause an abort().
6156d6d2abfSPavel Fedin  * This function is deprecated. Use memory_region_init_io() with NULL
6166d6d2abfSPavel Fedin  * callbacks instead.
617022c62cbSPaolo Bonzini  *
618022c62cbSPaolo Bonzini  * @mr: the #MemoryRegion to be initialized
6192c9b15caSPaolo Bonzini  * @owner: the object that tracks the region's reference count
620022c62cbSPaolo Bonzini  * @name: used for debugging; not visible to the user or ABI
621022c62cbSPaolo Bonzini  * @size: size of the region.
622022c62cbSPaolo Bonzini  */
6236d6d2abfSPavel Fedin static inline void memory_region_init_reservation(MemoryRegion *mr,
6246d6d2abfSPavel Fedin                                     Object *owner,
625022c62cbSPaolo Bonzini                                     const char *name,
6266d6d2abfSPavel Fedin                                     uint64_t size)
6276d6d2abfSPavel Fedin {
6286d6d2abfSPavel Fedin     memory_region_init_io(mr, owner, NULL, mr, name, size);
6296d6d2abfSPavel Fedin }
63030951157SAvi Kivity 
63130951157SAvi Kivity /**
632*1221a474SAlexey Kardashevskiy  * memory_region_init_iommu: Initialize a memory region of a custom type
633*1221a474SAlexey Kardashevskiy  * that translates addresses
63430951157SAvi Kivity  *
63530951157SAvi Kivity  * An IOMMU region translates addresses and forwards accesses to a target
63630951157SAvi Kivity  * memory region.
63730951157SAvi Kivity  *
638*1221a474SAlexey Kardashevskiy  * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
639*1221a474SAlexey Kardashevskiy  * @instance_size: the instance size of the #IOMMUMemoryRegion subclass
640*1221a474SAlexey Kardashevskiy  * @mrtypename: the QOM type name of the #IOMMUMemoryRegion subclass
6412c9b15caSPaolo Bonzini  * @owner: the object that tracks the region's reference count
64330951157SAvi Kivity  * @name: used for debugging; not visible to the user or ABI
64430951157SAvi Kivity  * @size: size of the region.
64530951157SAvi Kivity  */
646*1221a474SAlexey Kardashevskiy void memory_region_init_iommu(void *_iommu_mr,
647*1221a474SAlexey Kardashevskiy                               size_t instance_size,
648*1221a474SAlexey Kardashevskiy                               const char *mrtypename,
649*1221a474SAlexey Kardashevskiy                               Object *owner,
65030951157SAvi Kivity                               const char *name,
65130951157SAvi Kivity                               uint64_t size);
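
/*
 * Example (illustrative sketch): instantiating a hypothetical
 * "my-iommu-memory-region" QOM subtype (such as the one sketched after
 * IOMMUMemoryRegionClass above) to cover a 4 GiB window.  s, which embeds
 * the IOMMUMemoryRegion, and the owner dev are hypothetical.
 *
 *     typedef struct MyIOMMUState {
 *         IOMMUMemoryRegion iommu;
 *     } MyIOMMUState;
 *
 *     memory_region_init_iommu(&s->iommu, sizeof(s->iommu),
 *                              "my-iommu-memory-region", OBJECT(dev),
 *                              "my-iommu", 1ULL << 32);
 */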
65230951157SAvi Kivity 
653022c62cbSPaolo Bonzini /**
654803c0816SPaolo Bonzini  * memory_region_owner: get a memory region's owner.
655803c0816SPaolo Bonzini  *
656803c0816SPaolo Bonzini  * @mr: the memory region being queried.
657803c0816SPaolo Bonzini  */
658803c0816SPaolo Bonzini struct Object *memory_region_owner(MemoryRegion *mr);
659803c0816SPaolo Bonzini 
660803c0816SPaolo Bonzini /**
661022c62cbSPaolo Bonzini  * memory_region_size: get a memory region's size.
662022c62cbSPaolo Bonzini  *
663022c62cbSPaolo Bonzini  * @mr: the memory region being queried.
664022c62cbSPaolo Bonzini  */
665022c62cbSPaolo Bonzini uint64_t memory_region_size(MemoryRegion *mr);
666022c62cbSPaolo Bonzini 
667022c62cbSPaolo Bonzini /**
668022c62cbSPaolo Bonzini  * memory_region_is_ram: check whether a memory region is random access
669022c62cbSPaolo Bonzini  *
670022c62cbSPaolo Bonzini  * Returns %true if a memory region is random access.
671022c62cbSPaolo Bonzini  *
672022c62cbSPaolo Bonzini  * @mr: the memory region being queried
673022c62cbSPaolo Bonzini  */
6741619d1feSPaolo Bonzini static inline bool memory_region_is_ram(MemoryRegion *mr)
6751619d1feSPaolo Bonzini {
6761619d1feSPaolo Bonzini     return mr->ram;
6771619d1feSPaolo Bonzini }
678022c62cbSPaolo Bonzini 
679022c62cbSPaolo Bonzini /**
68021e00fa5SAlex Williamson  * memory_region_is_ram_device: check whether a memory region is a ram device
681e4dc3f59SNikunj A Dadhania  *
68221e00fa5SAlex Williamson  * Returns %true if a memory region is a device-backed RAM region
683e4dc3f59SNikunj A Dadhania  *
684e4dc3f59SNikunj A Dadhania  * @mr: the memory region being queried
685e4dc3f59SNikunj A Dadhania  */
68621e00fa5SAlex Williamson bool memory_region_is_ram_device(MemoryRegion *mr);
687e4dc3f59SNikunj A Dadhania 
688e4dc3f59SNikunj A Dadhania /**
6895f9a5ea1SJan Kiszka  * memory_region_is_romd: check whether a memory region is in ROMD mode
690022c62cbSPaolo Bonzini  *
6915f9a5ea1SJan Kiszka  * Returns %true if a memory region is a ROM device and currently set to allow
692022c62cbSPaolo Bonzini  * direct reads.
693022c62cbSPaolo Bonzini  *
694022c62cbSPaolo Bonzini  * @mr: the memory region being queried
695022c62cbSPaolo Bonzini  */
696022c62cbSPaolo Bonzini static inline bool memory_region_is_romd(MemoryRegion *mr)
697022c62cbSPaolo Bonzini {
6985f9a5ea1SJan Kiszka     return mr->rom_device && mr->romd_mode;
699022c62cbSPaolo Bonzini }
700022c62cbSPaolo Bonzini 
701022c62cbSPaolo Bonzini /**
7023df9d748SAlexey Kardashevskiy  * memory_region_get_iommu: check whether a memory region is an iommu
70330951157SAvi Kivity  *
7043df9d748SAlexey Kardashevskiy  * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
7053df9d748SAlexey Kardashevskiy  * otherwise NULL.
70630951157SAvi Kivity  *
70730951157SAvi Kivity  * @mr: the memory region being queried
70830951157SAvi Kivity  */
7093df9d748SAlexey Kardashevskiy static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
7101619d1feSPaolo Bonzini {
71112d37882SJason Wang     if (mr->alias) {
7123df9d748SAlexey Kardashevskiy         return memory_region_get_iommu(mr->alias);
71312d37882SJason Wang     }
7143df9d748SAlexey Kardashevskiy     if (mr->is_iommu) {
7153df9d748SAlexey Kardashevskiy         return (IOMMUMemoryRegion *) mr;
7163df9d748SAlexey Kardashevskiy     }
7173df9d748SAlexey Kardashevskiy     return NULL;
7181619d1feSPaolo Bonzini }
7191619d1feSPaolo Bonzini 
720*1221a474SAlexey Kardashevskiy /**
721*1221a474SAlexey Kardashevskiy  * memory_region_get_iommu_class_nocheck: returns iommu memory region class
722*1221a474SAlexey Kardashevskiy  *   if an iommu or NULL if not
723*1221a474SAlexey Kardashevskiy  *
724*1221a474SAlexey Kardashevskiy  * Returns a pointer to the IOMMUMemoryRegionClass if a memory region is
725*1221a474SAlexey Kardashevskiy  * an iommu, otherwise NULL. A fast path avoiding QOM checks; use with caution.
726*1221a474SAlexey Kardashevskiy  *
727*1221a474SAlexey Kardashevskiy  * @iommu_mr: the IOMMU memory region being queried
728*1221a474SAlexey Kardashevskiy  */
729*1221a474SAlexey Kardashevskiy static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
730*1221a474SAlexey Kardashevskiy         IOMMUMemoryRegion *iommu_mr)
731*1221a474SAlexey Kardashevskiy {
732*1221a474SAlexey Kardashevskiy     return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
733*1221a474SAlexey Kardashevskiy }
734*1221a474SAlexey Kardashevskiy 
7353df9d748SAlexey Kardashevskiy #define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
73630951157SAvi Kivity 
73730951157SAvi Kivity /**
738f682e9c2SAlexey Kardashevskiy  * memory_region_iommu_get_min_page_size: get minimum supported page size
739f682e9c2SAlexey Kardashevskiy  * for an iommu
740f682e9c2SAlexey Kardashevskiy  *
741f682e9c2SAlexey Kardashevskiy  * Returns minimum supported page size for an iommu.
742f682e9c2SAlexey Kardashevskiy  *
7433df9d748SAlexey Kardashevskiy  * @iommu_mr: the memory region being queried
744f682e9c2SAlexey Kardashevskiy  */
7453df9d748SAlexey Kardashevskiy uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
746f682e9c2SAlexey Kardashevskiy 
747f682e9c2SAlexey Kardashevskiy /**
74806866575SDavid Gibson  * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
74906866575SDavid Gibson  *
750cdb30812SPeter Xu  * The notification type will be decided by entry.perm bits:
751cdb30812SPeter Xu  *
752cdb30812SPeter Xu  * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
753cdb30812SPeter Xu  * - For MAP (newly added entry) notifies: set entry.perm to the
754cdb30812SPeter Xu  *   permission of the page (which is definitely !IOMMU_NONE).
755cdb30812SPeter Xu  *
756cdb30812SPeter Xu  * Note: for any IOMMU implementation, an in-place mapping change
757cdb30812SPeter Xu  * should be notified with an UNMAP followed by a MAP.
758cdb30812SPeter Xu  *
7593df9d748SAlexey Kardashevskiy  * @iommu_mr: the memory region that was changed
76006866575SDavid Gibson  * @entry: the new entry in the IOMMU translation table.  The entry
76106866575SDavid Gibson  *         replaces all old entries for the same virtual I/O address range.
76206866575SDavid Gibson  *         Deleted entries have .@perm == 0.
76306866575SDavid Gibson  */
7643df9d748SAlexey Kardashevskiy void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
76506866575SDavid Gibson                                 IOMMUTLBEntry entry);
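
/*
 * Example (illustrative sketch): an IOMMU emulation replacing a 4 KiB
 * mapping in place, which (as described above) must be signalled as an
 * UNMAP followed by a MAP.  iommu_mr, iova and new_pa are hypothetical,
 * and address_space_memory comes from "exec/address-spaces.h".
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as = &address_space_memory,
 *         .iova = iova & ~(hwaddr)0xfff,
 *         .translated_addr = 0,
 *         .addr_mask = 0xfff,
 *         .perm = IOMMU_NONE,                 // UNMAP notification
 *     };
 *
 *     memory_region_notify_iommu(iommu_mr, entry);
 *
 *     entry.translated_addr = new_pa & ~(hwaddr)0xfff;
 *     entry.perm = IOMMU_RW;                  // MAP notification
 *     memory_region_notify_iommu(iommu_mr, entry);
 */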
76606866575SDavid Gibson 
76706866575SDavid Gibson /**
768bd2bfa4cSPeter Xu  * memory_region_notify_one: notify a change in an IOMMU translation
769bd2bfa4cSPeter Xu  *                           entry to a single notifier
770bd2bfa4cSPeter Xu  *
771bd2bfa4cSPeter Xu  * This works just like memory_region_notify_iommu(), but it only
772bd2bfa4cSPeter Xu  * notifies a specific notifier, not all of them.
773bd2bfa4cSPeter Xu  *
774bd2bfa4cSPeter Xu  * @notifier: the notifier to be notified
775bd2bfa4cSPeter Xu  * @entry: the new entry in the IOMMU translation table.  The entry
776bd2bfa4cSPeter Xu  *         replaces all old entries for the same virtual I/O address range.
777bd2bfa4cSPeter Xu  *         Deleted entries have .@perm == 0.
778bd2bfa4cSPeter Xu  */
779bd2bfa4cSPeter Xu void memory_region_notify_one(IOMMUNotifier *notifier,
780bd2bfa4cSPeter Xu                               IOMMUTLBEntry *entry);
781bd2bfa4cSPeter Xu 
782bd2bfa4cSPeter Xu /**
78306866575SDavid Gibson  * memory_region_register_iommu_notifier: register a notifier for changes to
78406866575SDavid Gibson  * IOMMU translation entries.
78506866575SDavid Gibson  *
78606866575SDavid Gibson  * @mr: the memory region to observe
787cdb30812SPeter Xu  * @n: the IOMMUNotifier to be added; the notify callback receives a
788cdb30812SPeter Xu  *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
789cdb30812SPeter Xu  *     ceases to be valid on exit from the notifier.
79006866575SDavid Gibson  */
791cdb30812SPeter Xu void memory_region_register_iommu_notifier(MemoryRegion *mr,
792cdb30812SPeter Xu                                            IOMMUNotifier *n);
79306866575SDavid Gibson 
79406866575SDavid Gibson /**
795a788f227SDavid Gibson  * memory_region_iommu_replay: replay existing IOMMU translations to
796f682e9c2SAlexey Kardashevskiy  * a notifier with the minimum page granularity returned by the
797f682e9c2SAlexey Kardashevskiy  * IOMMUMemoryRegionClass get_min_page_size() callback.
798a788f227SDavid Gibson  *
7993df9d748SAlexey Kardashevskiy  * @iommu_mr: the memory region to observe
800a788f227SDavid Gibson  * @n: the notifier to which to replay iommu mappings
801a788f227SDavid Gibson  */
8023df9d748SAlexey Kardashevskiy void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
803a788f227SDavid Gibson 
804a788f227SDavid Gibson /**
805de472e4aSPeter Xu  * memory_region_iommu_replay_all: replay existing IOMMU translations
806de472e4aSPeter Xu  * to all the notifiers registered.
807de472e4aSPeter Xu  *
8083df9d748SAlexey Kardashevskiy  * @iommu_mr: the memory region to observe
809de472e4aSPeter Xu  */
8103df9d748SAlexey Kardashevskiy void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr);
811de472e4aSPeter Xu 
812de472e4aSPeter Xu /**
81306866575SDavid Gibson  * memory_region_unregister_iommu_notifier: unregister a notifier for
81406866575SDavid Gibson  * changes to IOMMU translation entries.
81506866575SDavid Gibson  *
816d22d8956SAlexey Kardashevskiy  * @mr: the memory region which was observed and for which notify_stopped()
817d22d8956SAlexey Kardashevskiy  *      needs to be called
81806866575SDavid Gibson  * @n: the notifier to be removed.
81906866575SDavid Gibson  */
820cdb30812SPeter Xu void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
821cdb30812SPeter Xu                                              IOMMUNotifier *n);
82206866575SDavid Gibson 
82306866575SDavid Gibson /**
824022c62cbSPaolo Bonzini  * memory_region_name: get a memory region's name
825022c62cbSPaolo Bonzini  *
826022c62cbSPaolo Bonzini  * Returns the string that was used to initialize the memory region.
827022c62cbSPaolo Bonzini  *
828022c62cbSPaolo Bonzini  * @mr: the memory region being queried
829022c62cbSPaolo Bonzini  */
8305d546d4bSPeter Crosthwaite const char *memory_region_name(const MemoryRegion *mr);
831022c62cbSPaolo Bonzini 
832022c62cbSPaolo Bonzini /**
833022c62cbSPaolo Bonzini  * memory_region_is_logging: return whether a memory region is logging writes
834022c62cbSPaolo Bonzini  *
8352d1a35beSPaolo Bonzini  * Returns %true if the memory region is logging writes for the given client
8362d1a35beSPaolo Bonzini  *
8372d1a35beSPaolo Bonzini  * @mr: the memory region being queried
8382d1a35beSPaolo Bonzini  * @client: the client being queried
8392d1a35beSPaolo Bonzini  */
8402d1a35beSPaolo Bonzini bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
8412d1a35beSPaolo Bonzini 
8422d1a35beSPaolo Bonzini /**
8432d1a35beSPaolo Bonzini  * memory_region_get_dirty_log_mask: return the clients for which a
8442d1a35beSPaolo Bonzini  * memory region is logging writes.
8452d1a35beSPaolo Bonzini  *
846677e7805SPaolo Bonzini  * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
847677e7805SPaolo Bonzini  * are the bit indices.
848022c62cbSPaolo Bonzini  *
849022c62cbSPaolo Bonzini  * @mr: the memory region being queried
850022c62cbSPaolo Bonzini  */
8512d1a35beSPaolo Bonzini uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
852022c62cbSPaolo Bonzini 
853022c62cbSPaolo Bonzini /**
854022c62cbSPaolo Bonzini  * memory_region_is_rom: check whether a memory region is ROM
855022c62cbSPaolo Bonzini  *
856022c62cbSPaolo Bonzini  * Returns %true if a memory region is read-only memory.
857022c62cbSPaolo Bonzini  *
858022c62cbSPaolo Bonzini  * @mr: the memory region being queried
859022c62cbSPaolo Bonzini  */
8601619d1feSPaolo Bonzini static inline bool memory_region_is_rom(MemoryRegion *mr)
8611619d1feSPaolo Bonzini {
8621619d1feSPaolo Bonzini     return mr->ram && mr->readonly;
8631619d1feSPaolo Bonzini }
8641619d1feSPaolo Bonzini 
865022c62cbSPaolo Bonzini 
866022c62cbSPaolo Bonzini /**
867a35ba7beSPaolo Bonzini  * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
868a35ba7beSPaolo Bonzini  *
869a35ba7beSPaolo Bonzini  * Returns a file descriptor backing a file-based RAM memory region,
870a35ba7beSPaolo Bonzini  * or -1 if the region is not a file-based RAM memory region.
871a35ba7beSPaolo Bonzini  *
872a35ba7beSPaolo Bonzini  * @mr: the RAM or alias memory region being queried.
873a35ba7beSPaolo Bonzini  */
874a35ba7beSPaolo Bonzini int memory_region_get_fd(MemoryRegion *mr);
875a35ba7beSPaolo Bonzini 
876a35ba7beSPaolo Bonzini /**
87707bdaa41SPaolo Bonzini  * memory_region_from_host: Convert a pointer into a RAM memory region
87807bdaa41SPaolo Bonzini  * and an offset within it.
87907bdaa41SPaolo Bonzini  *
88007bdaa41SPaolo Bonzini  * Given a host pointer inside a RAM memory region (created with
88107bdaa41SPaolo Bonzini  * memory_region_init_ram() or memory_region_init_ram_ptr()), return
88207bdaa41SPaolo Bonzini  * the MemoryRegion and the offset within it.
88307bdaa41SPaolo Bonzini  *
88407bdaa41SPaolo Bonzini  * Use with care; by the time this function returns, the returned pointer is
88507bdaa41SPaolo Bonzini  * not protected by RCU anymore.  If the caller is not within an RCU critical
88607bdaa41SPaolo Bonzini  * section and does not hold the iothread lock, it must have other means of
88707bdaa41SPaolo Bonzini  * protecting the pointer, such as a reference to the region that includes
88807bdaa41SPaolo Bonzini  * the incoming ram_addr_t.
88907bdaa41SPaolo Bonzini  *
89007bdaa41SPaolo Bonzini  * @mr: the memory region being queried.
89107bdaa41SPaolo Bonzini  * @ptr: the host pointer to be converted.
89107bdaa41SPaolo Bonzini  * @offset: set to the offset of @ptr within the returned memory region.
89207bdaa41SPaolo Bonzini MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
89307bdaa41SPaolo Bonzini 
89407bdaa41SPaolo Bonzini /**
895022c62cbSPaolo Bonzini  * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
896022c62cbSPaolo Bonzini  *
897022c62cbSPaolo Bonzini  * Returns a host pointer to a RAM memory region (created with
89849b24afcSPaolo Bonzini  * memory_region_init_ram() or memory_region_init_ram_ptr()).
89949b24afcSPaolo Bonzini  *
90049b24afcSPaolo Bonzini  * Use with care; by the time this function returns, the returned pointer is
90149b24afcSPaolo Bonzini  * not protected by RCU anymore.  If the caller is not within an RCU critical
90249b24afcSPaolo Bonzini  * section and does not hold the iothread lock, it must have other means of
90349b24afcSPaolo Bonzini  * protecting the pointer, such as a reference to the region that includes
90449b24afcSPaolo Bonzini  * the incoming ram_addr_t.
905022c62cbSPaolo Bonzini  *
906022c62cbSPaolo Bonzini  * @mr: the memory region being queried.
907022c62cbSPaolo Bonzini  */
908022c62cbSPaolo Bonzini void *memory_region_get_ram_ptr(MemoryRegion *mr);
909022c62cbSPaolo Bonzini 
91037d7c084SPaolo Bonzini /* memory_region_ram_resize: Resize a RAM region.
91137d7c084SPaolo Bonzini  *
91237d7c084SPaolo Bonzini  * Only legal before the guest might have detected the memory size: e.g. on
91337d7c084SPaolo Bonzini  * incoming migration, or right after reset.
91437d7c084SPaolo Bonzini  *
91537d7c084SPaolo Bonzini  * @mr: a memory region created with @memory_region_init_resizeable_ram.
91637d7c084SPaolo Bonzini  * @newsize: the new size of the region
91737d7c084SPaolo Bonzini  * @errp: pointer to Error*, to store an error if it happens.
91837d7c084SPaolo Bonzini  */
91937d7c084SPaolo Bonzini void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
92037d7c084SPaolo Bonzini                               Error **errp);
92137d7c084SPaolo Bonzini 
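/*
 * Editorial usage sketch (not part of the original header): resize a region
 * created with memory_region_init_resizeable_ram() during incoming
 * migration.  "mr" and "new_size" are hypothetical; error_report_err() is
 * from qapi/error.h.
 *
 *     Error *err = NULL;
 *
 *     memory_region_ram_resize(mr, new_size, &err);
 *     if (err) {
 *         error_report_err(err);
 *     }
 */
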
922022c62cbSPaolo Bonzini /**
923022c62cbSPaolo Bonzini  * memory_region_set_log: Turn dirty logging on or off for a region.
924022c62cbSPaolo Bonzini  *
925022c62cbSPaolo Bonzini  * Turns dirty logging on or off for a specified client (display, migration).
926022c62cbSPaolo Bonzini  * Only meaningful for RAM regions.
927022c62cbSPaolo Bonzini  *
928022c62cbSPaolo Bonzini  * @mr: the memory region being updated.
929022c62cbSPaolo Bonzini  * @log: whether dirty logging is to be enabled or disabled.
930dbddac6dSPaolo Bonzini  * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
931022c62cbSPaolo Bonzini  */
932022c62cbSPaolo Bonzini void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
933022c62cbSPaolo Bonzini 
934022c62cbSPaolo Bonzini /**
935022c62cbSPaolo Bonzini  * memory_region_get_dirty: Check whether a range of bytes is dirty
936022c62cbSPaolo Bonzini  *                          for a specified client.
937022c62cbSPaolo Bonzini  *
938022c62cbSPaolo Bonzini  * Checks whether a range of bytes has been written to since the last
939022c62cbSPaolo Bonzini  * call to memory_region_reset_dirty() with the same @client.  Dirty logging
940022c62cbSPaolo Bonzini  * must be enabled.
941022c62cbSPaolo Bonzini  *
942022c62cbSPaolo Bonzini  * @mr: the memory region being queried.
943022c62cbSPaolo Bonzini  * @addr: the address (relative to the start of the region) being queried.
944022c62cbSPaolo Bonzini  * @size: the size of the range being queried.
945022c62cbSPaolo Bonzini  * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
946022c62cbSPaolo Bonzini  *          %DIRTY_MEMORY_VGA.
947022c62cbSPaolo Bonzini  */
948022c62cbSPaolo Bonzini bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
949022c62cbSPaolo Bonzini                              hwaddr size, unsigned client);
950022c62cbSPaolo Bonzini 
951022c62cbSPaolo Bonzini /**
952022c62cbSPaolo Bonzini  * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
953022c62cbSPaolo Bonzini  *
954022c62cbSPaolo Bonzini  * Marks a range of bytes as dirty, after it has been dirtied outside
955022c62cbSPaolo Bonzini  * guest code.
956022c62cbSPaolo Bonzini  *
957022c62cbSPaolo Bonzini  * @mr: the memory region being dirtied.
958022c62cbSPaolo Bonzini  * @addr: the address (relative to the start of the region) being dirtied.
959022c62cbSPaolo Bonzini  * @size: size of the range being dirtied.
960022c62cbSPaolo Bonzini  */
961022c62cbSPaolo Bonzini void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
962022c62cbSPaolo Bonzini                              hwaddr size);
963022c62cbSPaolo Bonzini 
964022c62cbSPaolo Bonzini /**
9656c279db8SJuan Quintela  * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
9666c279db8SJuan Quintela  *                                     for a specified client. It clears them.
9676c279db8SJuan Quintela  *
9686c279db8SJuan Quintela  * Checks whether a range of bytes has been written to since the last
9696c279db8SJuan Quintela  * call to memory_region_reset_dirty() with the same @client.  Dirty logging
9706c279db8SJuan Quintela  * must be enabled.
9716c279db8SJuan Quintela  *
9726c279db8SJuan Quintela  * @mr: the memory region being queried.
9736c279db8SJuan Quintela  * @addr: the address (relative to the start of the region) being queried.
9746c279db8SJuan Quintela  * @size: the size of the range being queried.
9756c279db8SJuan Quintela  * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
9766c279db8SJuan Quintela  *          %DIRTY_MEMORY_VGA.
9776c279db8SJuan Quintela  */
9786c279db8SJuan Quintela bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
9796c279db8SJuan Quintela                                         hwaddr size, unsigned client);
9808deaf12cSGerd Hoffmann 
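/*
 * Editorial usage sketch (not part of the original header): a display-style
 * dirty-page scan combining memory_region_set_log(),
 * memory_region_test_and_clear_dirty() and memory_region_sync_dirty_bitmap()
 * (declared below).  "vram", "vram_size" and redraw_page() are hypothetical;
 * TARGET_PAGE_SIZE is the target page size.
 *
 *     memory_region_set_log(vram, true, DIRTY_MEMORY_VGA);
 *
 * and then, once per display refresh:
 *
 *     memory_region_sync_dirty_bitmap(vram);
 *     for (hwaddr off = 0; off < vram_size; off += TARGET_PAGE_SIZE) {
 *         if (memory_region_test_and_clear_dirty(vram, off, TARGET_PAGE_SIZE,
 *                                                DIRTY_MEMORY_VGA)) {
 *             redraw_page(off);
 *         }
 *     }
 */
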
9818deaf12cSGerd Hoffmann /**
9828deaf12cSGerd Hoffmann  * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
9838deaf12cSGerd Hoffmann  *                                         bitmap and clear it.
9848deaf12cSGerd Hoffmann  *
9858deaf12cSGerd Hoffmann  * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
9868deaf12cSGerd Hoffmann  * returns the snapshot.  The snapshot can then be used to query dirty
9878deaf12cSGerd Hoffmann  * status, using memory_region_snapshot_get_dirty.  Unlike
9888deaf12cSGerd Hoffmann  * memory_region_test_and_clear_dirty this allows querying the same
9898deaf12cSGerd Hoffmann  * page multiple times, which is especially useful for display updates
9908deaf12cSGerd Hoffmann  * where the scanlines often are not page aligned.
9918deaf12cSGerd Hoffmann  *
9928deaf12cSGerd Hoffmann  * The dirty bitmap region which gets copied into the snapshot (and
9938deaf12cSGerd Hoffmann  * cleared afterwards) can be larger than requested.  The boundaries
9948deaf12cSGerd Hoffmann  * are rounded up/down so complete bitmap longs (covering 64 pages on
9958deaf12cSGerd Hoffmann  * 64bit hosts) can be copied over into the bitmap snapshot.  This
9968deaf12cSGerd Hoffmann  * isn't a problem for display updates, as the extra pages are outside
9978deaf12cSGerd Hoffmann  * the visible area, and in case the visible area changes a full
9988deaf12cSGerd Hoffmann  * display redraw is due anyway.  Should other use cases for this
9998deaf12cSGerd Hoffmann  * function emerge we might have to revisit this implementation
10008deaf12cSGerd Hoffmann  * detail.
10018deaf12cSGerd Hoffmann  *
10028deaf12cSGerd Hoffmann  * Use g_free to release DirtyBitmapSnapshot.
10038deaf12cSGerd Hoffmann  *
10048deaf12cSGerd Hoffmann  * @mr: the memory region being queried.
10058deaf12cSGerd Hoffmann  * @addr: the address (relative to the start of the region) being queried.
10068deaf12cSGerd Hoffmann  * @size: the size of the range being queried.
10078deaf12cSGerd Hoffmann  * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
10088deaf12cSGerd Hoffmann  */
10098deaf12cSGerd Hoffmann DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
10108deaf12cSGerd Hoffmann                                                             hwaddr addr,
10118deaf12cSGerd Hoffmann                                                             hwaddr size,
10128deaf12cSGerd Hoffmann                                                             unsigned client);
10138deaf12cSGerd Hoffmann 
10148deaf12cSGerd Hoffmann /**
10158deaf12cSGerd Hoffmann  * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
10168deaf12cSGerd Hoffmann  *                                   in the specified dirty bitmap snapshot.
10178deaf12cSGerd Hoffmann  *
10188deaf12cSGerd Hoffmann  * @mr: the memory region being queried.
10198deaf12cSGerd Hoffmann  * @snap: the dirty bitmap snapshot
10208deaf12cSGerd Hoffmann  * @addr: the address (relative to the start of the region) being queried.
10218deaf12cSGerd Hoffmann  * @size: the size of the range being queried.
10228deaf12cSGerd Hoffmann  */
10238deaf12cSGerd Hoffmann bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
10248deaf12cSGerd Hoffmann                                       DirtyBitmapSnapshot *snap,
10258deaf12cSGerd Hoffmann                                       hwaddr addr, hwaddr size);
10268deaf12cSGerd Hoffmann 
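/*
 * Editorial usage sketch (not part of the original header): a scanline-based
 * display refresh using a dirty bitmap snapshot, so that overlapping pages
 * can be queried more than once.  "vram", "fb_size", "height", "stride" and
 * redraw_line() are hypothetical.
 *
 *     DirtyBitmapSnapshot *snap;
 *     int y;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(vram, 0, fb_size,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (y = 0; y < height; y++) {
 *         if (memory_region_snapshot_get_dirty(vram, snap,
 *                                              y * stride, stride)) {
 *             redraw_line(y);
 *         }
 *     }
 *     g_free(snap);
 */
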
10276c279db8SJuan Quintela /**
1028022c62cbSPaolo Bonzini  * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
1029022c62cbSPaolo Bonzini  *                                  any external TLBs (e.g. kvm)
1030022c62cbSPaolo Bonzini  *
1031022c62cbSPaolo Bonzini  * Flushes dirty information from accelerators such as kvm and vhost-net
1032022c62cbSPaolo Bonzini  * and makes it available to users of the memory API.
1033022c62cbSPaolo Bonzini  *
1034022c62cbSPaolo Bonzini  * @mr: the region being flushed.
1035022c62cbSPaolo Bonzini  */
1036022c62cbSPaolo Bonzini void memory_region_sync_dirty_bitmap(MemoryRegion *mr);
1037022c62cbSPaolo Bonzini 
1038022c62cbSPaolo Bonzini /**
1039022c62cbSPaolo Bonzini  * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
1040022c62cbSPaolo Bonzini  *                            client.
1041022c62cbSPaolo Bonzini  *
1042022c62cbSPaolo Bonzini  * Marks a range of pages as no longer dirty.
1043022c62cbSPaolo Bonzini  *
1044022c62cbSPaolo Bonzini  * @mr: the region being updated.
1045022c62cbSPaolo Bonzini  * @addr: the start of the subrange being cleaned.
1046022c62cbSPaolo Bonzini  * @size: the size of the subrange being cleaned.
1047022c62cbSPaolo Bonzini  * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
1048022c62cbSPaolo Bonzini  *          %DIRTY_MEMORY_VGA.
1049022c62cbSPaolo Bonzini  */
1050022c62cbSPaolo Bonzini void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
1051022c62cbSPaolo Bonzini                                hwaddr size, unsigned client);
1052022c62cbSPaolo Bonzini 
1053022c62cbSPaolo Bonzini /**
1054022c62cbSPaolo Bonzini  * memory_region_set_readonly: Turn a memory region read-only (or read-write)
1055022c62cbSPaolo Bonzini  *
1056022c62cbSPaolo Bonzini  * Allows a memory region to be marked as read-only (turning it into a ROM).
1057022c62cbSPaolo Bonzini  * Only useful on RAM regions.
1058022c62cbSPaolo Bonzini  *
1059022c62cbSPaolo Bonzini  * @mr: the region being updated.
1060022c62cbSPaolo Bonzini  * @readonly: whether the region is to be ROM or RAM.
1061022c62cbSPaolo Bonzini  */
1062022c62cbSPaolo Bonzini void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
1063022c62cbSPaolo Bonzini 
1064022c62cbSPaolo Bonzini /**
10655f9a5ea1SJan Kiszka  * memory_region_rom_device_set_romd: enable/disable ROMD mode
1066022c62cbSPaolo Bonzini  *
1067022c62cbSPaolo Bonzini  * Allows a ROM device (initialized with memory_region_init_rom_device()) to
10685f9a5ea1SJan Kiszka  * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
10695f9a5ea1SJan Kiszka  * device is mapped to guest memory and satisfies read access directly.
10705f9a5ea1SJan Kiszka  * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
10715f9a5ea1SJan Kiszka  * Writes are always handled by the #MemoryRegion.write function.
1072022c62cbSPaolo Bonzini  *
1073022c62cbSPaolo Bonzini  * @mr: the memory region to be updated
10745f9a5ea1SJan Kiszka  * @romd_mode: %true to put the region into ROMD mode
1075022c62cbSPaolo Bonzini  */
10765f9a5ea1SJan Kiszka void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
1077022c62cbSPaolo Bonzini 
1078022c62cbSPaolo Bonzini /**
1079022c62cbSPaolo Bonzini  * memory_region_set_coalescing: Enable memory coalescing for the region.
1080022c62cbSPaolo Bonzini  *
1081022c62cbSPaolo Bonzini  * Enables writes to a region to be queued for later processing. MMIO ->write
1082022c62cbSPaolo Bonzini  * callbacks may be delayed until a non-coalesced MMIO is issued.
1083022c62cbSPaolo Bonzini  * Only useful for IO regions.  Roughly similar to write-combining hardware.
1084022c62cbSPaolo Bonzini  *
1085022c62cbSPaolo Bonzini  * @mr: the memory region to be write coalesced
1086022c62cbSPaolo Bonzini  */
1087022c62cbSPaolo Bonzini void memory_region_set_coalescing(MemoryRegion *mr);
1088022c62cbSPaolo Bonzini 
1089022c62cbSPaolo Bonzini /**
1090022c62cbSPaolo Bonzini  * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
1091022c62cbSPaolo Bonzini  *                               a region.
1092022c62cbSPaolo Bonzini  *
1093022c62cbSPaolo Bonzini  * Like memory_region_set_coalescing(), but works on a sub-range of a region.
1094022c62cbSPaolo Bonzini  * Multiple calls can be issued to coalesce disjoint ranges.
1095022c62cbSPaolo Bonzini  *
1096022c62cbSPaolo Bonzini  * @mr: the memory region to be updated.
1097022c62cbSPaolo Bonzini  * @offset: the start of the range within the region to be coalesced.
1098022c62cbSPaolo Bonzini  * @size: the size of the subrange to be coalesced.
1099022c62cbSPaolo Bonzini  */
1100022c62cbSPaolo Bonzini void memory_region_add_coalescing(MemoryRegion *mr,
1101022c62cbSPaolo Bonzini                                   hwaddr offset,
1102022c62cbSPaolo Bonzini                                   uint64_t size);
1103022c62cbSPaolo Bonzini 
1104022c62cbSPaolo Bonzini /**
1105022c62cbSPaolo Bonzini  * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
1106022c62cbSPaolo Bonzini  *
1107022c62cbSPaolo Bonzini  * Disables any coalescing caused by memory_region_set_coalescing() or
1108022c62cbSPaolo Bonzini  * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
1109022c62cbSPaolo Bonzini  * hardware.
1110022c62cbSPaolo Bonzini  *
1111022c62cbSPaolo Bonzini  * @mr: the memory region to be updated.
1112022c62cbSPaolo Bonzini  */
1113022c62cbSPaolo Bonzini void memory_region_clear_coalescing(MemoryRegion *mr);
1114022c62cbSPaolo Bonzini 
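/*
 * Editorial usage sketch (not part of the original header): coalesce guest
 * writes to a command ring inside an MMIO region, then drop coalescing when
 * the ring is torn down.  "mmio", RING_OFFSET and RING_SIZE are hypothetical.
 *
 *     memory_region_add_coalescing(mmio, RING_OFFSET, RING_SIZE);
 *
 * and later, on teardown:
 *
 *     memory_region_clear_coalescing(mmio);
 */
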
1115022c62cbSPaolo Bonzini /**
1116022c62cbSPaolo Bonzini  * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
1117022c62cbSPaolo Bonzini  *                                    accesses.
1118022c62cbSPaolo Bonzini  *
1119022c62cbSPaolo Bonzini  * Ensure that pending coalesced MMIO requests are flushed before the memory
1120022c62cbSPaolo Bonzini  * region is accessed. This property is automatically enabled for all regions
1121022c62cbSPaolo Bonzini  * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
1122022c62cbSPaolo Bonzini  *
1123022c62cbSPaolo Bonzini  * @mr: the memory region to be updated.
1124022c62cbSPaolo Bonzini  */
1125022c62cbSPaolo Bonzini void memory_region_set_flush_coalesced(MemoryRegion *mr);
1126022c62cbSPaolo Bonzini 
1127022c62cbSPaolo Bonzini /**
1128022c62cbSPaolo Bonzini  * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
1129022c62cbSPaolo Bonzini  *                                      accesses.
1130022c62cbSPaolo Bonzini  *
1131022c62cbSPaolo Bonzini  * Clear the automatic coalesced MMIO flushing enabled via
1132022c62cbSPaolo Bonzini  * memory_region_set_flush_coalesced. Note that this service has no effect on
1133022c62cbSPaolo Bonzini  * memory regions that have MMIO coalescing enabled for themselves. For them,
1134022c62cbSPaolo Bonzini  * automatic flushing will stop once coalescing is disabled.
1135022c62cbSPaolo Bonzini  *
1136022c62cbSPaolo Bonzini  * @mr: the memory region to be updated.
1137022c62cbSPaolo Bonzini  */
1138022c62cbSPaolo Bonzini void memory_region_clear_flush_coalesced(MemoryRegion *mr);
1139022c62cbSPaolo Bonzini 
1140022c62cbSPaolo Bonzini /**
1141196ea131SJan Kiszka  * memory_region_set_global_locking: Declares the access processing requires
1142196ea131SJan Kiszka  *                                   QEMU's global lock.
1143196ea131SJan Kiszka  *
1144196ea131SJan Kiszka  * When this is invoked, accesses to the memory region will be processed while
1145196ea131SJan Kiszka  * holding the global lock of QEMU. This is the default behavior of memory
1146196ea131SJan Kiszka  * regions.
1147196ea131SJan Kiszka  *
1148196ea131SJan Kiszka  * @mr: the memory region to be updated.
1149196ea131SJan Kiszka  */
1150196ea131SJan Kiszka void memory_region_set_global_locking(MemoryRegion *mr);
1151196ea131SJan Kiszka 
1152196ea131SJan Kiszka /**
1153196ea131SJan Kiszka  * memory_region_clear_global_locking: Declares that access processing does
1154196ea131SJan Kiszka  *                                     not depend on the QEMU global lock.
1155196ea131SJan Kiszka  *
1156196ea131SJan Kiszka  * By clearing this property, accesses to the memory region will be processed
1157196ea131SJan Kiszka  * outside of QEMU's global lock (unless the lock is held when issuing the
1158196ea131SJan Kiszka  * access request). In this case, the device model implementing the access
1159196ea131SJan Kiszka  * handlers is responsible for synchronization of concurrency.
1160196ea131SJan Kiszka  *
1161196ea131SJan Kiszka  * @mr: the memory region to be updated.
1162196ea131SJan Kiszka  */
1163196ea131SJan Kiszka void memory_region_clear_global_locking(MemoryRegion *mr);
1164196ea131SJan Kiszka 
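/*
 * Editorial usage sketch (not part of the original header): mark an MMIO
 * region as safe to dispatch without the global lock, leaving the device
 * model responsible for its own locking.  "s->mmio" and "s->lock" are
 * hypothetical device state.
 *
 *     memory_region_clear_global_locking(&s->mmio);
 *
 * The region's ->read/->write callbacks must now synchronize themselves,
 * for example with qemu_mutex_lock(&s->lock) / qemu_mutex_unlock(&s->lock).
 */
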
1165196ea131SJan Kiszka /**
1166022c62cbSPaolo Bonzini  * memory_region_add_eventfd: Request an eventfd to be triggered when a word
1167022c62cbSPaolo Bonzini  *                            is written to a location.
1168022c62cbSPaolo Bonzini  *
1169022c62cbSPaolo Bonzini  * Marks a word in an IO region (initialized with memory_region_init_io())
1170022c62cbSPaolo Bonzini  * as a trigger for an eventfd event.  The I/O callback will not be called.
1171022c62cbSPaolo Bonzini  * The caller must be prepared to handle failure (that is, take the required
1172022c62cbSPaolo Bonzini  * action if the callback _is_ called).
1173022c62cbSPaolo Bonzini  *
1174022c62cbSPaolo Bonzini  * @mr: the memory region being updated.
1175022c62cbSPaolo Bonzini  * @addr: the address within @mr that is to be monitored
1176022c62cbSPaolo Bonzini  * @size: the size of the access to trigger the eventfd
1177022c62cbSPaolo Bonzini  * @match_data: whether to match against @data, instead of just @addr
1178022c62cbSPaolo Bonzini  * @data: the data to match against the guest write
1179022c62cbSPaolo Bonzini  * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
1180022c62cbSPaolo Bonzini  **/
1181022c62cbSPaolo Bonzini void memory_region_add_eventfd(MemoryRegion *mr,
1182022c62cbSPaolo Bonzini                                hwaddr addr,
1183022c62cbSPaolo Bonzini                                unsigned size,
1184022c62cbSPaolo Bonzini                                bool match_data,
1185022c62cbSPaolo Bonzini                                uint64_t data,
1186022c62cbSPaolo Bonzini                                EventNotifier *e);
1187022c62cbSPaolo Bonzini 
1188022c62cbSPaolo Bonzini /**
1189022c62cbSPaolo Bonzini  * memory_region_del_eventfd: Cancel an eventfd.
1190022c62cbSPaolo Bonzini  *
1191022c62cbSPaolo Bonzini  * Cancels an eventfd trigger requested by a previous
1192022c62cbSPaolo Bonzini  * memory_region_add_eventfd() call.
1193022c62cbSPaolo Bonzini  *
1194022c62cbSPaolo Bonzini  * @mr: the memory region being updated.
1195022c62cbSPaolo Bonzini  * @addr: the address within @mr that is to be monitored
1196022c62cbSPaolo Bonzini  * @size: the size of the access to trigger the eventfd
1197022c62cbSPaolo Bonzini  * @match_data: whether to match against @data, instead of just @addr
1198022c62cbSPaolo Bonzini  * @data: the data to match against the guest write
1199022c62cbSPaolo Bonzini  * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
1200022c62cbSPaolo Bonzini  */
1201022c62cbSPaolo Bonzini void memory_region_del_eventfd(MemoryRegion *mr,
1202022c62cbSPaolo Bonzini                                hwaddr addr,
1203022c62cbSPaolo Bonzini                                unsigned size,
1204022c62cbSPaolo Bonzini                                bool match_data,
1205022c62cbSPaolo Bonzini                                uint64_t data,
1206022c62cbSPaolo Bonzini                                EventNotifier *e);
1207022c62cbSPaolo Bonzini 
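/*
 * Editorial usage sketch (not part of the original header): wire a doorbell
 * register to an EventNotifier so that guest writes are signalled via the
 * notifier instead of invoking the MMIO write callback.  "s->mmio",
 * "s->doorbell" and DOORBELL_OFFSET are hypothetical; event_notifier_init()
 * is from qemu/event_notifier.h.
 *
 *     event_notifier_init(&s->doorbell, 0);
 *     memory_region_add_eventfd(&s->mmio, DOORBELL_OFFSET, 4,
 *                               false, 0, &s->doorbell);
 *
 * and on teardown:
 *
 *     memory_region_del_eventfd(&s->mmio, DOORBELL_OFFSET, 4,
 *                               false, 0, &s->doorbell);
 */
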
1208022c62cbSPaolo Bonzini /**
1209022c62cbSPaolo Bonzini  * memory_region_add_subregion: Add a subregion to a container.
1210022c62cbSPaolo Bonzini  *
1211022c62cbSPaolo Bonzini  * Adds a subregion at @offset.  The subregion may not overlap with other
1212022c62cbSPaolo Bonzini  * subregions (except for those explicitly marked as overlapping).  A region
1213022c62cbSPaolo Bonzini  * may only be added once as a subregion (unless removed with
1214022c62cbSPaolo Bonzini  * memory_region_del_subregion()); use memory_region_init_alias() if you
1215022c62cbSPaolo Bonzini  * want a region to be a subregion in multiple locations.
1216022c62cbSPaolo Bonzini  *
1217022c62cbSPaolo Bonzini  * @mr: the region to contain the new subregion; must be a container
1218022c62cbSPaolo Bonzini  *      initialized with memory_region_init().
1219022c62cbSPaolo Bonzini  * @offset: the offset relative to @mr where @subregion is added.
1220022c62cbSPaolo Bonzini  * @subregion: the subregion to be added.
1221022c62cbSPaolo Bonzini  */
1222022c62cbSPaolo Bonzini void memory_region_add_subregion(MemoryRegion *mr,
1223022c62cbSPaolo Bonzini                                  hwaddr offset,
1224022c62cbSPaolo Bonzini                                  MemoryRegion *subregion);
1225022c62cbSPaolo Bonzini /**
1226022c62cbSPaolo Bonzini  * memory_region_add_subregion_overlap: Add a subregion to a container
1227022c62cbSPaolo Bonzini  *                                      with overlap.
1228022c62cbSPaolo Bonzini  *
1229022c62cbSPaolo Bonzini  * Adds a subregion at @offset.  The subregion may overlap with other
1230022c62cbSPaolo Bonzini  * subregions.  Conflicts are resolved by having a higher @priority hide a
1231022c62cbSPaolo Bonzini  * lower @priority. Subregions without priority are taken as @priority 0.
1232022c62cbSPaolo Bonzini  * A region may only be added once as a subregion (unless removed with
1233022c62cbSPaolo Bonzini  * memory_region_del_subregion()); use memory_region_init_alias() if you
1234022c62cbSPaolo Bonzini  * want a region to be a subregion in multiple locations.
1235022c62cbSPaolo Bonzini  *
1236022c62cbSPaolo Bonzini  * @mr: the region to contain the new subregion; must be a container
1237022c62cbSPaolo Bonzini  *      initialized with memory_region_init().
1238022c62cbSPaolo Bonzini  * @offset: the offset relative to @mr where @subregion is added.
1239022c62cbSPaolo Bonzini  * @subregion: the subregion to be added.
1240022c62cbSPaolo Bonzini  * @priority: used for resolving overlaps; highest priority wins.
1241022c62cbSPaolo Bonzini  */
1242022c62cbSPaolo Bonzini void memory_region_add_subregion_overlap(MemoryRegion *mr,
1243022c62cbSPaolo Bonzini                                          hwaddr offset,
1244022c62cbSPaolo Bonzini                                          MemoryRegion *subregion,
1245a1ff8ae0SMarcel Apfelbaum                                          int priority);
1246022c62cbSPaolo Bonzini 
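/*
 * Editorial usage sketch (not part of the original header): build a small
 * board memory map from a pure container, a RAM region and a higher-priority
 * MMIO window.  "dev", "s->mmio" (assumed initialized with
 * memory_region_init_io()), the sizes and the offsets are hypothetical;
 * memory_region_init() and memory_region_init_ram() are declared earlier in
 * this header.
 *
 *     MemoryRegion *sysmem = g_new0(MemoryRegion, 1);
 *     MemoryRegion *ram = g_new0(MemoryRegion, 1);
 *
 *     memory_region_init(sysmem, OBJECT(dev), "sysmem", UINT64_MAX);
 *     memory_region_init_ram(ram, OBJECT(dev), "ram", 128 * 1024 * 1024,
 *                            &error_fatal);
 *     memory_region_add_subregion(sysmem, 0, ram);
 *     memory_region_add_subregion_overlap(sysmem, 0xfe000000, &s->mmio, 1);
 */
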
1247022c62cbSPaolo Bonzini /**
1248022c62cbSPaolo Bonzini  * memory_region_get_ram_addr: Get the ram address associated with a memory
1249022c62cbSPaolo Bonzini  *                             region
1250022c62cbSPaolo Bonzini  */
12517ebb2745SFam Zheng ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
1252022c62cbSPaolo Bonzini 
1253a2b257d6SIgor Mammedov uint64_t memory_region_get_alignment(const MemoryRegion *mr);
1254022c62cbSPaolo Bonzini /**
1255022c62cbSPaolo Bonzini  * memory_region_del_subregion: Remove a subregion.
1256022c62cbSPaolo Bonzini  *
1257022c62cbSPaolo Bonzini  * Removes a subregion from its container.
1258022c62cbSPaolo Bonzini  *
1259022c62cbSPaolo Bonzini  * @mr: the container to be updated.
1260022c62cbSPaolo Bonzini  * @subregion: the region being removed; must be a current subregion of @mr.
1261022c62cbSPaolo Bonzini  */
1262022c62cbSPaolo Bonzini void memory_region_del_subregion(MemoryRegion *mr,
1263022c62cbSPaolo Bonzini                                  MemoryRegion *subregion);
1264022c62cbSPaolo Bonzini 
1265022c62cbSPaolo Bonzini /*
1266022c62cbSPaolo Bonzini  * memory_region_set_enabled: dynamically enable or disable a region
1267022c62cbSPaolo Bonzini  *
1268022c62cbSPaolo Bonzini  * Enables or disables a memory region.  A disabled memory region
1269022c62cbSPaolo Bonzini  * ignores all accesses to itself and its subregions.  It does not
1270022c62cbSPaolo Bonzini  * obscure sibling subregions with lower priority - it simply behaves as
1271022c62cbSPaolo Bonzini  * if it was removed from the hierarchy.
1272022c62cbSPaolo Bonzini  *
1273022c62cbSPaolo Bonzini  * Regions default to being enabled.
1274022c62cbSPaolo Bonzini  *
1275022c62cbSPaolo Bonzini  * @mr: the region to be updated
1276022c62cbSPaolo Bonzini  * @enabled: whether to enable or disable the region
1277022c62cbSPaolo Bonzini  */
1278022c62cbSPaolo Bonzini void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
1279022c62cbSPaolo Bonzini 
1280022c62cbSPaolo Bonzini /*
1281022c62cbSPaolo Bonzini  * memory_region_set_address: dynamically update the address of a region
1282022c62cbSPaolo Bonzini  *
1283feca4ac1SPaolo Bonzini  * Dynamically updates the address of a region, relative to its container.
1284022c62cbSPaolo Bonzini  * May be used on regions that are currently part of a memory hierarchy.
1285022c62cbSPaolo Bonzini  *
1286022c62cbSPaolo Bonzini  * @mr: the region to be updated
1287feca4ac1SPaolo Bonzini  * @addr: new address, relative to container region
1288022c62cbSPaolo Bonzini  */
1289022c62cbSPaolo Bonzini void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
1290022c62cbSPaolo Bonzini 
1291022c62cbSPaolo Bonzini /*
1292e7af4c67SMichael S. Tsirkin  * memory_region_set_size: dynamically update the size of a region.
1293e7af4c67SMichael S. Tsirkin  *
1294e7af4c67SMichael S. Tsirkin  * Dynamically updates the size of a region.
1295e7af4c67SMichael S. Tsirkin  *
1296e7af4c67SMichael S. Tsirkin  * @mr: the region to be updated
1297e7af4c67SMichael S. Tsirkin  * @size: used size of the region.
1298e7af4c67SMichael S. Tsirkin  */
1299e7af4c67SMichael S. Tsirkin void memory_region_set_size(MemoryRegion *mr, uint64_t size);
1300e7af4c67SMichael S. Tsirkin 
1301e7af4c67SMichael S. Tsirkin /*
1302022c62cbSPaolo Bonzini  * memory_region_set_alias_offset: dynamically update a memory alias's offset
1303022c62cbSPaolo Bonzini  *
1304022c62cbSPaolo Bonzini  * Dynamically updates the offset into the target region that an alias points
1305022c62cbSPaolo Bonzini  * to, as if the fourth argument to memory_region_init_alias() has changed.
1306022c62cbSPaolo Bonzini  *
1307022c62cbSPaolo Bonzini  * @mr: the #MemoryRegion to be updated; should be an alias.
1308022c62cbSPaolo Bonzini  * @offset: the new offset into the target memory region
1309022c62cbSPaolo Bonzini  */
1310022c62cbSPaolo Bonzini void memory_region_set_alias_offset(MemoryRegion *mr,
1311022c62cbSPaolo Bonzini                                     hwaddr offset);
1312022c62cbSPaolo Bonzini 
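/*
 * Editorial usage sketch (not part of the original header): implement ROM
 * bank switching by moving an alias window, in the style of mapper-like
 * devices.  "MapperState", "s->bank_alias" and BANK_SIZE are hypothetical;
 * the alias is assumed to have been created with memory_region_init_alias().
 *
 *     static void mapper_select_bank(MapperState *s, unsigned bank)
 *     {
 *         memory_region_set_alias_offset(&s->bank_alias,
 *                                        (hwaddr)bank * BANK_SIZE);
 *     }
 */
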
1313022c62cbSPaolo Bonzini /**
1314feca4ac1SPaolo Bonzini  * memory_region_present: checks if an address relative to a @container
1315feca4ac1SPaolo Bonzini  * translates into #MemoryRegion within @container
13163ce10901SPaolo Bonzini  *
1317feca4ac1SPaolo Bonzini  * Answer whether a #MemoryRegion within @container covers the address
13183ce10901SPaolo Bonzini  * @addr.
13193ce10901SPaolo Bonzini  *
1320feca4ac1SPaolo Bonzini  * @container: a #MemoryRegion within which @addr is a relative address
1321feca4ac1SPaolo Bonzini  * @addr: the area within @container to be searched
13223ce10901SPaolo Bonzini  */
1323feca4ac1SPaolo Bonzini bool memory_region_present(MemoryRegion *container, hwaddr addr);
13243ce10901SPaolo Bonzini 
13253ce10901SPaolo Bonzini /**
1326eed2bacfSIgor Mammedov  * memory_region_is_mapped: returns true if #MemoryRegion is mapped
1327eed2bacfSIgor Mammedov  * into any address space.
1328eed2bacfSIgor Mammedov  *
1329eed2bacfSIgor Mammedov  * @mr: a #MemoryRegion which should be checked if it's mapped
1330eed2bacfSIgor Mammedov  */
1331eed2bacfSIgor Mammedov bool memory_region_is_mapped(MemoryRegion *mr);
1332eed2bacfSIgor Mammedov 
1333eed2bacfSIgor Mammedov /**
133473034e9eSPaolo Bonzini  * memory_region_find: translate an address/size relative to a
133573034e9eSPaolo Bonzini  * MemoryRegion into a #MemoryRegionSection.
1336022c62cbSPaolo Bonzini  *
133773034e9eSPaolo Bonzini  * Locates the first #MemoryRegion within @mr that overlaps the range
133873034e9eSPaolo Bonzini  * given by @addr and @size.
1339022c62cbSPaolo Bonzini  *
1340022c62cbSPaolo Bonzini  * Returns a #MemoryRegionSection that describes a contiguous overlap.
1341022c62cbSPaolo Bonzini  * It will have the following characteristics:
1342022c62cbSPaolo Bonzini  *    .@size = 0 iff no overlap was found
1343022c62cbSPaolo Bonzini  *    .@mr is non-%NULL iff an overlap was found
1344022c62cbSPaolo Bonzini  *
134573034e9eSPaolo Bonzini  * Remember that in the return value the @offset_within_region is
134673034e9eSPaolo Bonzini  * relative to the returned region (in the .@mr field), not to the
134773034e9eSPaolo Bonzini  * @mr argument.
134873034e9eSPaolo Bonzini  *
134973034e9eSPaolo Bonzini  * Similarly, the .@offset_within_address_space is relative to the
135073034e9eSPaolo Bonzini  * address space that contains both regions, the passed and the
135173034e9eSPaolo Bonzini  * returned one.  However, in the special case where the @mr argument
1352feca4ac1SPaolo Bonzini  * has no container (and thus is the root of the address space), the
135373034e9eSPaolo Bonzini  * following will hold:
135473034e9eSPaolo Bonzini  *    .@offset_within_address_space >= @addr
135573034e9eSPaolo Bonzini  *    .@offset_within_address_space + .@size <= @addr + @size
135673034e9eSPaolo Bonzini  *
135773034e9eSPaolo Bonzini  * @mr: a MemoryRegion within which @addr is a relative address
135873034e9eSPaolo Bonzini  * @addr: start of the area within @mr to be searched
1359022c62cbSPaolo Bonzini  * @size: size of the area to be searched
1360022c62cbSPaolo Bonzini  */
136173034e9eSPaolo Bonzini MemoryRegionSection memory_region_find(MemoryRegion *mr,
1362022c62cbSPaolo Bonzini                                        hwaddr addr, uint64_t size);
1363022c62cbSPaolo Bonzini 
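/*
 * Editorial usage sketch (not part of the original header): check what a
 * 4-byte range maps to underneath a root region.  memory_region_find() takes
 * a reference on the returned region, so it is dropped here with
 * memory_region_unref(), declared earlier in this header.  "root" and "addr"
 * are hypothetical.
 *
 *     MemoryRegionSection section = memory_region_find(root, addr, 4);
 *
 *     if (section.mr) {
 *         // overlap found: int128_get64(section.size) bytes starting at
 *         // section.offset_within_region inside section.mr
 *         memory_region_unref(section.mr);
 *     }
 */
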
1364022c62cbSPaolo Bonzini /**
13659c1f8f44SPaolo Bonzini  * memory_global_dirty_log_sync: synchronize the dirty log for all memory
1366022c62cbSPaolo Bonzini  *
13679c1f8f44SPaolo Bonzini  * Synchronizes the dirty page log for all address spaces.
1368022c62cbSPaolo Bonzini  */
13699c1f8f44SPaolo Bonzini void memory_global_dirty_log_sync(void);
1370022c62cbSPaolo Bonzini 
1371022c62cbSPaolo Bonzini /**
1372022c62cbSPaolo Bonzini  * memory_region_transaction_begin: Start a transaction.
1373022c62cbSPaolo Bonzini  *
1374022c62cbSPaolo Bonzini  * During a transaction, changes will be accumulated and made visible
1375022c62cbSPaolo Bonzini  * only when the transaction ends (is committed).
1376022c62cbSPaolo Bonzini  */
1377022c62cbSPaolo Bonzini void memory_region_transaction_begin(void);
1378022c62cbSPaolo Bonzini 
1379022c62cbSPaolo Bonzini /**
1380022c62cbSPaolo Bonzini  * memory_region_transaction_commit: Commit a transaction and make changes
1381022c62cbSPaolo Bonzini  *                                   visible to the guest.
1382022c62cbSPaolo Bonzini  */
1383022c62cbSPaolo Bonzini void memory_region_transaction_commit(void);
1384022c62cbSPaolo Bonzini 
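/*
 * Editorial usage sketch (not part of the original header): batch several
 * layout changes so the guest observes them atomically and the memory
 * topology is recomputed only once.  "s->bar_mr" and "new_base" are
 * hypothetical.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(&s->bar_mr, true);
 *     memory_region_set_address(&s->bar_mr, new_base);
 *     memory_region_transaction_commit();
 */
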
1385022c62cbSPaolo Bonzini /**
1386022c62cbSPaolo Bonzini  * memory_listener_register: register callbacks to be called when memory
1387022c62cbSPaolo Bonzini  *                           sections are mapped or unmapped into an address
1388022c62cbSPaolo Bonzini  *                           space
1389022c62cbSPaolo Bonzini  *
1390022c62cbSPaolo Bonzini  * @listener: an object containing the callbacks to be called
1391022c62cbSPaolo Bonzini  * @filter: if non-%NULL, only regions in this address space will be observed
1392022c62cbSPaolo Bonzini  */
1393022c62cbSPaolo Bonzini void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
1394022c62cbSPaolo Bonzini 
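/*
 * Editorial usage sketch (not part of the original header): a minimal
 * listener that logs sections as they are added to the system address space.
 * The callback and variable names are hypothetical; &address_space_memory is
 * the global system AddressSpace from exec/address-spaces.h.
 *
 *     static void my_region_add(MemoryListener *listener,
 *                               MemoryRegionSection *section)
 *     {
 *         printf("mapped %s\n", memory_region_name(section->mr));
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 * and then, from some initialization path:
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */
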
1395022c62cbSPaolo Bonzini /**
1396022c62cbSPaolo Bonzini  * memory_listener_unregister: undo the effect of memory_listener_register()
1397022c62cbSPaolo Bonzini  *
1398022c62cbSPaolo Bonzini  * @listener: an object containing the callbacks to be removed
1399022c62cbSPaolo Bonzini  */
1400022c62cbSPaolo Bonzini void memory_listener_unregister(MemoryListener *listener);
1401022c62cbSPaolo Bonzini 
1402022c62cbSPaolo Bonzini /**
1403022c62cbSPaolo Bonzini  * memory_global_dirty_log_start: begin dirty logging for all regions
1404022c62cbSPaolo Bonzini  */
1405022c62cbSPaolo Bonzini void memory_global_dirty_log_start(void);
1406022c62cbSPaolo Bonzini 
1407022c62cbSPaolo Bonzini /**
1408022c62cbSPaolo Bonzini  * memory_global_dirty_log_stop: end dirty logging for all regions
1409022c62cbSPaolo Bonzini  */
1410022c62cbSPaolo Bonzini void memory_global_dirty_log_stop(void);
1411022c62cbSPaolo Bonzini 
141257bb40c9SPeter Xu void mtree_info(fprintf_function mon_printf, void *f, bool flatview);
1413022c62cbSPaolo Bonzini 
1414022c62cbSPaolo Bonzini /**
1415c9356746SKONRAD Frederic  * memory_region_request_mmio_ptr: request a pointer to an mmio
1416c9356746SKONRAD Frederic  * MemoryRegion.  If it is possible, map a RAM MemoryRegion with this pointer.
1417c9356746SKONRAD Frederic  * When the device wants to invalidate the pointer, it will call
1418c9356746SKONRAD Frederic  * memory_region_invalidate_mmio_ptr.
1419c9356746SKONRAD Frederic  *
1420c9356746SKONRAD Frederic  * @mr: #MemoryRegion to check
1421c9356746SKONRAD Frederic  * @addr: address within that region
1422c9356746SKONRAD Frederic  *
1423c9356746SKONRAD Frederic  * Returns true on success, false otherwise.
1424c9356746SKONRAD Frederic  */
1425c9356746SKONRAD Frederic bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr);
1426c9356746SKONRAD Frederic 
1427c9356746SKONRAD Frederic /**
1428c9356746SKONRAD Frederic  * memory_region_invalidate_mmio_ptr: invalidate the pointer to an mmio
1429c9356746SKONRAD Frederic  * previously requested.
1430c9356746SKONRAD Frederic  * This means that if something wants to execute from this area, it
1431c9356746SKONRAD Frederic  * will need to request the pointer again.
1432c9356746SKONRAD Frederic  *
1433c9356746SKONRAD Frederic  * @mr: #MemoryRegion associated with the pointer.
1434c9356746SKONRAD Frederic  * @offset: offset within that region
1435c9356746SKONRAD Frederic  * @size: size of that area.
1436c9356746SKONRAD Frederic  */
1437c9356746SKONRAD Frederic void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
1438c9356746SKONRAD Frederic                                        unsigned size);
1439c9356746SKONRAD Frederic 
1440c9356746SKONRAD Frederic /**
14413b643495SPeter Maydell  * memory_region_dispatch_read: perform a read directly to the specified
14423b643495SPeter Maydell  * MemoryRegion.
14433b643495SPeter Maydell  *
14443b643495SPeter Maydell  * @mr: #MemoryRegion to access
14453b643495SPeter Maydell  * @addr: address within that region
14463b643495SPeter Maydell  * @pval: pointer to uint64_t which the data is written to
14473b643495SPeter Maydell  * @size: size of the access in bytes
14483b643495SPeter Maydell  * @attrs: memory transaction attributes to use for the access
14493b643495SPeter Maydell  */
14503b643495SPeter Maydell MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
14513b643495SPeter Maydell                                         hwaddr addr,
14523b643495SPeter Maydell                                         uint64_t *pval,
14533b643495SPeter Maydell                                         unsigned size,
14543b643495SPeter Maydell                                         MemTxAttrs attrs);
14553b643495SPeter Maydell /**
14563b643495SPeter Maydell  * memory_region_dispatch_write: perform a write directly to the specified
14573b643495SPeter Maydell  * MemoryRegion.
14583b643495SPeter Maydell  *
14593b643495SPeter Maydell  * @mr: #MemoryRegion to access
14603b643495SPeter Maydell  * @addr: address within that region
14613b643495SPeter Maydell  * @data: data to write
14623b643495SPeter Maydell  * @size: size of the access in bytes
14633b643495SPeter Maydell  * @attrs: memory transaction attributes to use for the access
14643b643495SPeter Maydell  */
14653b643495SPeter Maydell MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
14663b643495SPeter Maydell                                          hwaddr addr,
14673b643495SPeter Maydell                                          uint64_t data,
14683b643495SPeter Maydell                                          unsigned size,
14693b643495SPeter Maydell                                          MemTxAttrs attrs);
14703b643495SPeter Maydell 
14713b643495SPeter Maydell /**
1472022c62cbSPaolo Bonzini  * address_space_init: initializes an address space
1473022c62cbSPaolo Bonzini  *
1474022c62cbSPaolo Bonzini  * @as: an uninitialized #AddressSpace
147567cc32ebSVeres Lajos  * @root: a #MemoryRegion that routes addresses for the address space
14767dca8043SAlexey Kardashevskiy  * @name: an address space name.  The name is only used for debugging
14777dca8043SAlexey Kardashevskiy  *        output.
1478022c62cbSPaolo Bonzini  */
14797dca8043SAlexey Kardashevskiy void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
1480022c62cbSPaolo Bonzini 
1481f0c02d15SPeter Crosthwaite /**
1482f0c02d15SPeter Crosthwaite  * address_space_init_shareable: return an address space for a memory region,
1483f0c02d15SPeter Crosthwaite  *                               creating it if it does not already exist
1484f0c02d15SPeter Crosthwaite  *
1485f0c02d15SPeter Crosthwaite  * @root: a #MemoryRegion that routes addresses for the address space
1486f0c02d15SPeter Crosthwaite  * @name: an address space name.  The name is only used for debugging
1487f0c02d15SPeter Crosthwaite  *        output.
1488f0c02d15SPeter Crosthwaite  *
1489f0c02d15SPeter Crosthwaite  * This function will return a pointer to an existing AddressSpace
1490f0c02d15SPeter Crosthwaite  * which was initialized with the specified MemoryRegion, or it will
1491f0c02d15SPeter Crosthwaite  * create and initialize one if it does not already exist. The ASes
1492f0c02d15SPeter Crosthwaite  * are reference-counted, so the memory will be freed automatically
1493f0c02d15SPeter Crosthwaite  * when the AddressSpace is destroyed via address_space_destroy.
1494f0c02d15SPeter Crosthwaite  */
1495f0c02d15SPeter Crosthwaite AddressSpace *address_space_init_shareable(MemoryRegion *root,
1496f0c02d15SPeter Crosthwaite                                            const char *name);
1497022c62cbSPaolo Bonzini 
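/*
 * Editorial usage sketch (not part of the original header): give a
 * DMA-capable device its own address space rooted at a region it already
 * owns.  "s->dma_as" and "s->dma_mr" are hypothetical device state.
 *
 *     address_space_init(&s->dma_as, &s->dma_mr, "mydev-dma");
 *
 * and on unrealize:
 *
 *     address_space_destroy(&s->dma_as);
 */
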
1498022c62cbSPaolo Bonzini /**
1499022c62cbSPaolo Bonzini  * address_space_destroy: destroy an address space
1500022c62cbSPaolo Bonzini  *
1501022c62cbSPaolo Bonzini  * Releases all resources associated with an address space.  After an address space
1502022c62cbSPaolo Bonzini  * is destroyed, its root memory region (given by address_space_init()) may be destroyed
1503022c62cbSPaolo Bonzini  * as well.
1504022c62cbSPaolo Bonzini  *
1505022c62cbSPaolo Bonzini  * @as: address space to be destroyed
1506022c62cbSPaolo Bonzini  */
1507022c62cbSPaolo Bonzini void address_space_destroy(AddressSpace *as);
1508022c62cbSPaolo Bonzini 
1509022c62cbSPaolo Bonzini /**
1510022c62cbSPaolo Bonzini  * address_space_rw: read from or write to an address space.
1511022c62cbSPaolo Bonzini  *
15125c9eb028SPeter Maydell  * Return a MemTxResult indicating whether the operation succeeded
15135c9eb028SPeter Maydell  * or failed (eg unassigned memory, device rejected the transaction,
15145c9eb028SPeter Maydell  * IOMMU fault).
1515fd8aaa76SPaolo Bonzini  *
1516022c62cbSPaolo Bonzini  * @as: #AddressSpace to be accessed
1517022c62cbSPaolo Bonzini  * @addr: address within that address space
15185c9eb028SPeter Maydell  * @attrs: memory transaction attributes
1519022c62cbSPaolo Bonzini  * @buf: buffer with the data transferred
1520022c62cbSPaolo Bonzini  * @is_write: indicates the transfer direction
1521022c62cbSPaolo Bonzini  */
15225c9eb028SPeter Maydell MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
15235c9eb028SPeter Maydell                              MemTxAttrs attrs, uint8_t *buf,
1524022c62cbSPaolo Bonzini                              int len, bool is_write);
1525022c62cbSPaolo Bonzini 
1526022c62cbSPaolo Bonzini /**
1527022c62cbSPaolo Bonzini  * address_space_write: write to address space.
1528022c62cbSPaolo Bonzini  *
15295c9eb028SPeter Maydell  * Return a MemTxResult indicating whether the operation succeeded
15305c9eb028SPeter Maydell  * or failed (eg unassigned memory, device rejected the transaction,
15315c9eb028SPeter Maydell  * IOMMU fault).
1532022c62cbSPaolo Bonzini  *
1533022c62cbSPaolo Bonzini  * @as: #AddressSpace to be accessed
1534022c62cbSPaolo Bonzini  * @addr: address within that address space
15355c9eb028SPeter Maydell  * @attrs: memory transaction attributes
1536022c62cbSPaolo Bonzini  * @buf: buffer with the data transferred
1537022c62cbSPaolo Bonzini  */
15385c9eb028SPeter Maydell MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
15395c9eb028SPeter Maydell                                 MemTxAttrs attrs,
1540fd8aaa76SPaolo Bonzini                                 const uint8_t *buf, int len);
1541fd8aaa76SPaolo Bonzini 
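/*
 * Editorial usage sketch (not part of the original header): DMA a buffer
 * into guest memory and check the transaction result.  "as", "gpa", "buf"
 * and "len" are hypothetical; MEMTXATTRS_UNSPECIFIED and MEMTX_OK come from
 * exec/memattrs.h, which this header already includes.
 *
 *     MemTxResult res;
 *
 *     res = address_space_write(as, gpa, MEMTXATTRS_UNSPECIFIED, buf, len);
 *     if (res != MEMTX_OK) {
 *         // report a DMA error to the guest, e.g. via a status register
 *     }
 */
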
15423cc8f884SPaolo Bonzini /* address_space_ld*: load from an address space
154350013115SPeter Maydell  * address_space_st*: store to an address space
154450013115SPeter Maydell  *
154550013115SPeter Maydell  * These functions perform a load or store of the byte, word,
154650013115SPeter Maydell  * longword or quad to the specified address within the AddressSpace.
154750013115SPeter Maydell  * The _le suffixed functions treat the data as little endian;
154850013115SPeter Maydell  * _be indicates big endian; no suffix indicates "same endianness
154950013115SPeter Maydell  * as guest CPU".
155050013115SPeter Maydell  *
155150013115SPeter Maydell  * The "guest CPU endianness" accessors are deprecated for use outside
155250013115SPeter Maydell  * target-* code; devices should be CPU-agnostic and use either the LE
155350013115SPeter Maydell  * or the BE accessors.
155450013115SPeter Maydell  *
155550013115SPeter Maydell  * @as: #AddressSpace to be accessed
155650013115SPeter Maydell  * @addr: address within that address space
155750013115SPeter Maydell  * @val: data value, for stores
155850013115SPeter Maydell  * @attrs: memory transaction attributes
155950013115SPeter Maydell  * @result: location to write the success/failure of the transaction;
156050013115SPeter Maydell  *   if NULL, this information is discarded
156150013115SPeter Maydell  */
156250013115SPeter Maydell uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
156350013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result);
156450013115SPeter Maydell uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
156550013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result);
156650013115SPeter Maydell uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
156750013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result);
156850013115SPeter Maydell uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
156950013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result);
157050013115SPeter Maydell uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
157150013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result);
157250013115SPeter Maydell uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
157350013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result);
157450013115SPeter Maydell uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
157550013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result);
157650013115SPeter Maydell void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
157750013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result);
157850013115SPeter Maydell void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
157950013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result);
158050013115SPeter Maydell void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
158150013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result);
158250013115SPeter Maydell void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
158350013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result);
158450013115SPeter Maydell void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
158550013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result);
158650013115SPeter Maydell void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
158750013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result);
158850013115SPeter Maydell void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
158950013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result);
159050013115SPeter Maydell 
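/*
 * Editorial usage sketch (not part of the original header): read a
 * little-endian 32-bit descriptor field and write back a status word.
 * "as" and "desc_pa" are hypothetical.
 *
 *     MemTxResult res;
 *     uint32_t flags;
 *
 *     flags = address_space_ldl_le(as, desc_pa, MEMTXATTRS_UNSPECIFIED, &res);
 *     if (res != MEMTX_OK) {
 *         // handle the failed load
 *     }
 *     address_space_stl_le(as, desc_pa + 4, flags | 1,
 *                          MEMTXATTRS_UNSPECIFIED, NULL);
 */
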
15910ce265ffSPaolo Bonzini uint32_t ldub_phys(AddressSpace *as, hwaddr addr);
15920ce265ffSPaolo Bonzini uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr);
15930ce265ffSPaolo Bonzini uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr);
15940ce265ffSPaolo Bonzini uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
15950ce265ffSPaolo Bonzini uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
15960ce265ffSPaolo Bonzini uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
15970ce265ffSPaolo Bonzini uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
15980ce265ffSPaolo Bonzini void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val);
15990ce265ffSPaolo Bonzini void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
16000ce265ffSPaolo Bonzini void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
16010ce265ffSPaolo Bonzini void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
16020ce265ffSPaolo Bonzini void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
16030ce265ffSPaolo Bonzini void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
16040ce265ffSPaolo Bonzini void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);
16050ce265ffSPaolo Bonzini 
16061f4e496eSPaolo Bonzini struct MemoryRegionCache {
16071f4e496eSPaolo Bonzini     hwaddr xlat;
16081f4e496eSPaolo Bonzini     hwaddr len;
160990c4fe5fSPaolo Bonzini     AddressSpace *as;
16101f4e496eSPaolo Bonzini };
16111f4e496eSPaolo Bonzini 
161290c4fe5fSPaolo Bonzini #define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .as = NULL })
16135eba0404SPaolo Bonzini 
16141f4e496eSPaolo Bonzini /* address_space_cache_init: prepare for repeated access to a physical
16151f4e496eSPaolo Bonzini  * memory region
16161f4e496eSPaolo Bonzini  *
16171f4e496eSPaolo Bonzini  * @cache: #MemoryRegionCache to be filled
16181f4e496eSPaolo Bonzini  * @as: #AddressSpace to be accessed
16191f4e496eSPaolo Bonzini  * @addr: address within that address space
16201f4e496eSPaolo Bonzini  * @len: length of buffer
16211f4e496eSPaolo Bonzini  * @is_write: indicates the transfer direction
16221f4e496eSPaolo Bonzini  *
16231f4e496eSPaolo Bonzini  * Will only work with RAM, and may map a subset of the requested range by
16241f4e496eSPaolo Bonzini  * returning a value that is less than @len.  On failure, return a negative
16251f4e496eSPaolo Bonzini  * errno value.
16261f4e496eSPaolo Bonzini  *
16271f4e496eSPaolo Bonzini  * Because it only works with RAM, this function can be used for
16281f4e496eSPaolo Bonzini  * read-modify-write operations.  In this case, is_write should be %true.
16291f4e496eSPaolo Bonzini  *
16301f4e496eSPaolo Bonzini  * Note that addresses passed to the address_space_*_cached functions
16311f4e496eSPaolo Bonzini  * are relative to @addr.
16321f4e496eSPaolo Bonzini  */
16331f4e496eSPaolo Bonzini int64_t address_space_cache_init(MemoryRegionCache *cache,
16341f4e496eSPaolo Bonzini                                  AddressSpace *as,
16351f4e496eSPaolo Bonzini                                  hwaddr addr,
16361f4e496eSPaolo Bonzini                                  hwaddr len,
16371f4e496eSPaolo Bonzini                                  bool is_write);
16381f4e496eSPaolo Bonzini 
16391f4e496eSPaolo Bonzini /**
16401f4e496eSPaolo Bonzini  * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
16411f4e496eSPaolo Bonzini  *
16421f4e496eSPaolo Bonzini  * @cache: The #MemoryRegionCache to operate on.
16431f4e496eSPaolo Bonzini  * @addr: The first physical address that was written, relative to the
16441f4e496eSPaolo Bonzini  * address that was passed to @address_space_cache_init.
16451f4e496eSPaolo Bonzini  * @access_len: The number of bytes that were written starting at @addr.
16461f4e496eSPaolo Bonzini  */
16471f4e496eSPaolo Bonzini void address_space_cache_invalidate(MemoryRegionCache *cache,
16481f4e496eSPaolo Bonzini                                     hwaddr addr,
16491f4e496eSPaolo Bonzini                                     hwaddr access_len);
16501f4e496eSPaolo Bonzini 
16511f4e496eSPaolo Bonzini /**
16521f4e496eSPaolo Bonzini  * address_space_cache_destroy: free a #MemoryRegionCache
16531f4e496eSPaolo Bonzini  *
16541f4e496eSPaolo Bonzini  * @cache: The #MemoryRegionCache whose memory should be released.
16551f4e496eSPaolo Bonzini  */
16561f4e496eSPaolo Bonzini void address_space_cache_destroy(MemoryRegionCache *cache);
16571f4e496eSPaolo Bonzini 
16581f4e496eSPaolo Bonzini /* address_space_ld*_cached: load from a cached #MemoryRegion
16591f4e496eSPaolo Bonzini  * address_space_st*_cached: store into a cached #MemoryRegion
16601f4e496eSPaolo Bonzini  *
16611f4e496eSPaolo Bonzini  * These functions perform a load or store of the byte, word,
16621f4e496eSPaolo Bonzini  * longword or quad to the specified address.  The address is
16631f4e496eSPaolo Bonzini  * a physical address in the AddressSpace, but it must lie within
16641f4e496eSPaolo Bonzini  * a #MemoryRegion that was mapped with address_space_cache_init.
16651f4e496eSPaolo Bonzini  *
16661f4e496eSPaolo Bonzini  * The _le suffixed functions treat the data as little endian;
16671f4e496eSPaolo Bonzini  * _be indicates big endian; no suffix indicates "same endianness
16681f4e496eSPaolo Bonzini  * as guest CPU".
16691f4e496eSPaolo Bonzini  *
16701f4e496eSPaolo Bonzini  * The "guest CPU endianness" accessors are deprecated for use outside
16711f4e496eSPaolo Bonzini  * target-* code; devices should be CPU-agnostic and use either the LE
16721f4e496eSPaolo Bonzini  * or the BE accessors.
16731f4e496eSPaolo Bonzini  *
16741f4e496eSPaolo Bonzini  * @cache: previously initialized #MemoryRegionCache to be accessed
16751f4e496eSPaolo Bonzini  * @addr: address within the address space
16761f4e496eSPaolo Bonzini  * @val: data value, for stores
16771f4e496eSPaolo Bonzini  * @attrs: memory transaction attributes
16781f4e496eSPaolo Bonzini  * @result: location to write the success/failure of the transaction;
16791f4e496eSPaolo Bonzini  *   if NULL, this information is discarded
16801f4e496eSPaolo Bonzini  */
16811f4e496eSPaolo Bonzini uint32_t address_space_ldub_cached(MemoryRegionCache *cache, hwaddr addr,
16821f4e496eSPaolo Bonzini                             MemTxAttrs attrs, MemTxResult *result);
16831f4e496eSPaolo Bonzini uint32_t address_space_lduw_le_cached(MemoryRegionCache *cache, hwaddr addr,
16841f4e496eSPaolo Bonzini                             MemTxAttrs attrs, MemTxResult *result);
16851f4e496eSPaolo Bonzini uint32_t address_space_lduw_be_cached(MemoryRegionCache *cache, hwaddr addr,
16861f4e496eSPaolo Bonzini                             MemTxAttrs attrs, MemTxResult *result);
16871f4e496eSPaolo Bonzini uint32_t address_space_ldl_le_cached(MemoryRegionCache *cache, hwaddr addr,
16881f4e496eSPaolo Bonzini                             MemTxAttrs attrs, MemTxResult *result);
16891f4e496eSPaolo Bonzini uint32_t address_space_ldl_be_cached(MemoryRegionCache *cache, hwaddr addr,
16901f4e496eSPaolo Bonzini                             MemTxAttrs attrs, MemTxResult *result);
16911f4e496eSPaolo Bonzini uint64_t address_space_ldq_le_cached(MemoryRegionCache *cache, hwaddr addr,
16921f4e496eSPaolo Bonzini                             MemTxAttrs attrs, MemTxResult *result);
16931f4e496eSPaolo Bonzini uint64_t address_space_ldq_be_cached(MemoryRegionCache *cache, hwaddr addr,
16941f4e496eSPaolo Bonzini                             MemTxAttrs attrs, MemTxResult *result);
16951f4e496eSPaolo Bonzini void address_space_stb_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
16961f4e496eSPaolo Bonzini                             MemTxAttrs attrs, MemTxResult *result);
16971f4e496eSPaolo Bonzini void address_space_stw_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
16981f4e496eSPaolo Bonzini                             MemTxAttrs attrs, MemTxResult *result);
16991f4e496eSPaolo Bonzini void address_space_stw_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
17001f4e496eSPaolo Bonzini                             MemTxAttrs attrs, MemTxResult *result);
17011f4e496eSPaolo Bonzini void address_space_stl_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
17021f4e496eSPaolo Bonzini                             MemTxAttrs attrs, MemTxResult *result);
17031f4e496eSPaolo Bonzini void address_space_stl_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
17041f4e496eSPaolo Bonzini                             MemTxAttrs attrs, MemTxResult *result);
17051f4e496eSPaolo Bonzini void address_space_stq_le_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
17061f4e496eSPaolo Bonzini                             MemTxAttrs attrs, MemTxResult *result);
17071f4e496eSPaolo Bonzini void address_space_stq_be_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
17081f4e496eSPaolo Bonzini                             MemTxAttrs attrs, MemTxResult *result);
17091f4e496eSPaolo Bonzini 
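/*
 * Editorial usage sketch (not part of the original header): repeatedly
 * access a descriptor ring through a MemoryRegionCache, following the
 * contract documented above.  "as", "ring_pa" and RING_BYTES are
 * hypothetical.
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     int64_t mapped;
 *
 *     mapped = address_space_cache_init(&cache, as, ring_pa, RING_BYTES, true);
 *     if (mapped < RING_BYTES) {
 *         // failure or partial mapping: fall back to the uncached accessors
 *     } else {
 *         uint32_t head = address_space_ldl_le_cached(&cache, 0,
 *                                                     MEMTXATTRS_UNSPECIFIED,
 *                                                     NULL);
 *         address_space_stl_le_cached(&cache, 4, head,
 *                                     MEMTXATTRS_UNSPECIFIED, NULL);
 *         address_space_cache_invalidate(&cache, 4, 4);
 *     }
 *     address_space_cache_destroy(&cache);
 */
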
17101f4e496eSPaolo Bonzini uint32_t ldub_phys_cached(MemoryRegionCache *cache, hwaddr addr);
17111f4e496eSPaolo Bonzini uint32_t lduw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
17121f4e496eSPaolo Bonzini uint32_t lduw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
17131f4e496eSPaolo Bonzini uint32_t ldl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
17141f4e496eSPaolo Bonzini uint32_t ldl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
17151f4e496eSPaolo Bonzini uint64_t ldq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
17161f4e496eSPaolo Bonzini uint64_t ldq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
17171f4e496eSPaolo Bonzini void stb_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
17181f4e496eSPaolo Bonzini void stw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
17191f4e496eSPaolo Bonzini void stw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
17201f4e496eSPaolo Bonzini void stl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
17211f4e496eSPaolo Bonzini void stl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
17221f4e496eSPaolo Bonzini void stq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
17231f4e496eSPaolo Bonzini void stq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
1724052c8fa9SJason Wang /* address_space_get_iotlb_entry: translate an address into an IOTLB
1725052c8fa9SJason Wang  * entry. Should be called from an RCU critical section.
1726052c8fa9SJason Wang  */
1727052c8fa9SJason Wang IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
1728052c8fa9SJason Wang                                             bool is_write);
17291f4e496eSPaolo Bonzini 
1730149f54b5SPaolo Bonzini /* address_space_translate: translate an address range within an address space
173141063e1eSPaolo Bonzini  * into a MemoryRegion and an address range within that region.  Should be
173241063e1eSPaolo Bonzini  * called from an RCU critical section, to avoid that the last reference
173341063e1eSPaolo Bonzini  * to the returned region disappears after address_space_translate returns.
1734149f54b5SPaolo Bonzini  *
1735149f54b5SPaolo Bonzini  * @as: #AddressSpace to be accessed
1736149f54b5SPaolo Bonzini  * @addr: address within that address space
1737149f54b5SPaolo Bonzini  * @xlat: pointer to address within the returned memory region section's
1738149f54b5SPaolo Bonzini  * #MemoryRegion.
1739149f54b5SPaolo Bonzini  * @len: pointer to length
1740149f54b5SPaolo Bonzini  * @is_write: indicates the transfer direction
1741149f54b5SPaolo Bonzini  */
17425c8a00ceSPaolo Bonzini MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
1743149f54b5SPaolo Bonzini                                       hwaddr *xlat, hwaddr *len,
1744149f54b5SPaolo Bonzini                                       bool is_write);
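
/*
 * Illustrative sketch: the translate-then-access pattern, mirroring the fast
 * path of address_space_read() further down in this header.  The helper name
 * is hypothetical; real callers should normally use address_space_read() or
 * address_space_rw() instead of open-coding this.
 *
 *   static bool try_direct_read(AddressSpace *as, hwaddr addr,
 *                               void *buf, hwaddr len)
 *   {
 *       hwaddr xlat, l = len;
 *       MemoryRegion *mr;
 *       bool ok = false;
 *
 *       rcu_read_lock();               // keeps mr alive while it is used
 *       mr = address_space_translate(as, addr, &xlat, &l, false);
 *       if (l == len && memory_access_is_direct(mr, false)) {
 *           memcpy(buf, qemu_map_ram_ptr(mr->ram_block, xlat), len);
 *           ok = true;
 *       }
 *       rcu_read_unlock();
 *       return ok;
 *   }
 */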
1745149f54b5SPaolo Bonzini 
174651644ab7SPaolo Bonzini /* address_space_access_valid: check for validity of accessing an address
174751644ab7SPaolo Bonzini  * space range
174851644ab7SPaolo Bonzini  *
174930951157SAvi Kivity  * Check whether memory is assigned to the given address space range, and
175030951157SAvi Kivity  * access is permitted by any IOMMU regions that are active for the address
175130951157SAvi Kivity  * space.
175251644ab7SPaolo Bonzini  *
175351644ab7SPaolo Bonzini  * For now, addr and len should be aligned to a page size.  This limitation
175451644ab7SPaolo Bonzini  * will be lifted in the future.
175551644ab7SPaolo Bonzini  *
175651644ab7SPaolo Bonzini  * @as: #AddressSpace to be accessed
175751644ab7SPaolo Bonzini  * @addr: address within that address space
175851644ab7SPaolo Bonzini  * @len: length of the area to be checked
175951644ab7SPaolo Bonzini  * @is_write: indicates the transfer direction
176051644ab7SPaolo Bonzini  */
176151644ab7SPaolo Bonzini bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);
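
/*
 * Illustrative sketch: validating a DMA window before committing to the
 * transfer (note the page-alignment restriction documented above).  The
 * helper name and error policy are hypothetical; address_space_write() is
 * declared earlier in this header.
 *
 *   static MemTxResult checked_dma_write(AddressSpace *as, hwaddr addr,
 *                                        const uint8_t *buf, int len)
 *   {
 *       if (!address_space_access_valid(as, addr, len, true)) {
 *           return MEMTX_DECODE_ERROR;  // nothing mapped, or IOMMU denies it
 *       }
 *       return address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED,
 *                                  buf, len);
 *   }
 */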
176251644ab7SPaolo Bonzini 
1763022c62cbSPaolo Bonzini /* address_space_map: map a physical memory region into a host virtual address
1764022c62cbSPaolo Bonzini  *
1765022c62cbSPaolo Bonzini  * May map a subset of the requested range, given by and returned in @plen.
1766022c62cbSPaolo Bonzini  * May return %NULL if resources needed to perform the mapping are exhausted.
1767022c62cbSPaolo Bonzini  * Use only for reads OR writes - not for read-modify-write operations.
1768022c62cbSPaolo Bonzini  * Use cpu_register_map_client() to know when retrying the map operation is
1769022c62cbSPaolo Bonzini  * likely to succeed.
1770022c62cbSPaolo Bonzini  *
1771022c62cbSPaolo Bonzini  * @as: #AddressSpace to be accessed
1772022c62cbSPaolo Bonzini  * @addr: address within that address space
1773022c62cbSPaolo Bonzini  * @plen: pointer to length of buffer; updated on return
1774022c62cbSPaolo Bonzini  * @is_write: indicates the transfer direction
1775022c62cbSPaolo Bonzini  */
1776022c62cbSPaolo Bonzini void *address_space_map(AddressSpace *as, hwaddr addr,
1777022c62cbSPaolo Bonzini                         hwaddr *plen, bool is_write);
1778022c62cbSPaolo Bonzini 
1779022c62cbSPaolo Bonzini /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
1780022c62cbSPaolo Bonzini  *
1781022c62cbSPaolo Bonzini  * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
1782022c62cbSPaolo Bonzini  * the amount of memory that was actually read or written by the caller.
1783022c62cbSPaolo Bonzini  *
1784022c62cbSPaolo Bonzini  * @as: #AddressSpace used
1785022c62cbSPaolo Bonzini  * @buffer: host pointer as returned by address_space_map()
1786022c62cbSPaolo Bonzini  * @len: buffer length as returned by address_space_map()
1787022c62cbSPaolo Bonzini  * @access_len: amount of data actually transferred
1788022c62cbSPaolo Bonzini  * @is_write: indicates the transfer direction
1789022c62cbSPaolo Bonzini  */
1790022c62cbSPaolo Bonzini void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
1791022c62cbSPaolo Bonzini                          int is_write, hwaddr access_len);
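
/*
 * Illustrative sketch: the map/use/unmap pattern for zero-copy access.  The
 * mapping may come back shorter than requested (via *plen) or fail entirely
 * when bounce-buffer resources are exhausted, so a fallback to the copying
 * API is shown; the helper name and fallback policy are hypothetical.
 *
 *   static void produce_data(AddressSpace *as, hwaddr addr, hwaddr len,
 *                            const uint8_t *src)
 *   {
 *       hwaddr plen = len;
 *       void *host = address_space_map(as, addr, &plen, true);
 *
 *       if (host) {
 *           // Only plen bytes are mapped; a complete implementation would
 *           // loop until all of len has been transferred.
 *           memcpy(host, src, plen);
 *           address_space_unmap(as, host, plen, true, plen);
 *       } else {
 *           address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, src, len);
 *       }
 *   }
 */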
1792022c62cbSPaolo Bonzini 
1793022c62cbSPaolo Bonzini 
1794a203ac70SPaolo Bonzini /* Internal functions, part of the implementation of address_space_read.  */
1795a203ac70SPaolo Bonzini MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
1796a203ac70SPaolo Bonzini                                         MemTxAttrs attrs, uint8_t *buf,
1797a203ac70SPaolo Bonzini                                         int len, hwaddr addr1, hwaddr l,
1798a203ac70SPaolo Bonzini                                         MemoryRegion *mr);
17993cc8f884SPaolo Bonzini MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
18003cc8f884SPaolo Bonzini                                     MemTxAttrs attrs, uint8_t *buf, int len);
18010878d0e1SPaolo Bonzini void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
18023cc8f884SPaolo Bonzini 
18033cc8f884SPaolo Bonzini static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
18043cc8f884SPaolo Bonzini {
18053cc8f884SPaolo Bonzini     if (is_write) {
18064a2e242bSAlex Williamson         return memory_region_is_ram(mr) &&
18074a2e242bSAlex Williamson                !mr->readonly && !memory_region_is_ram_device(mr);
18083cc8f884SPaolo Bonzini     } else {
18094a2e242bSAlex Williamson         return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
18104a2e242bSAlex Williamson                memory_region_is_romd(mr);
18113cc8f884SPaolo Bonzini     }
18123cc8f884SPaolo Bonzini }
18133cc8f884SPaolo Bonzini 
18143cc8f884SPaolo Bonzini /**
18153cc8f884SPaolo Bonzini  * address_space_read: read from an address space.
18163cc8f884SPaolo Bonzini  *
18173cc8f884SPaolo Bonzini  * Return a MemTxResult indicating whether the operation succeeded
18183cc8f884SPaolo Bonzini  * or failed (e.g. unassigned memory, device rejected the transaction,
18193cc8f884SPaolo Bonzini  * IOMMU fault).
18203cc8f884SPaolo Bonzini  *
18213cc8f884SPaolo Bonzini  * @as: #AddressSpace to be accessed
18223cc8f884SPaolo Bonzini  * @addr: address within that address space
18233cc8f884SPaolo Bonzini  * @attrs: memory transaction attributes
18243cc8f884SPaolo Bonzini  * @buf: buffer with the data transferred
 * @len: length of the data transferred
18253cc8f884SPaolo Bonzini  */
18263cc8f884SPaolo Bonzini static inline __attribute__((__always_inline__))
18273cc8f884SPaolo Bonzini MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
18283cc8f884SPaolo Bonzini                                uint8_t *buf, int len)
18293cc8f884SPaolo Bonzini {
18303cc8f884SPaolo Bonzini     MemTxResult result = MEMTX_OK;
18313cc8f884SPaolo Bonzini     hwaddr l, addr1;
18323cc8f884SPaolo Bonzini     void *ptr;
18333cc8f884SPaolo Bonzini     MemoryRegion *mr;
18343cc8f884SPaolo Bonzini 
18353cc8f884SPaolo Bonzini     if (__builtin_constant_p(len)) {
18363cc8f884SPaolo Bonzini         if (len) {
18373cc8f884SPaolo Bonzini             rcu_read_lock();
18383cc8f884SPaolo Bonzini             l = len;
18393cc8f884SPaolo Bonzini             mr = address_space_translate(as, addr, &addr1, &l, false);
18403cc8f884SPaolo Bonzini             if (len == l && memory_access_is_direct(mr, false)) {
18410878d0e1SPaolo Bonzini                 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
18423cc8f884SPaolo Bonzini                 memcpy(buf, ptr, len);
18433cc8f884SPaolo Bonzini             } else {
18443cc8f884SPaolo Bonzini                 result = address_space_read_continue(as, addr, attrs, buf, len,
18453cc8f884SPaolo Bonzini                                                      addr1, l, mr);
18463cc8f884SPaolo Bonzini             }
18473cc8f884SPaolo Bonzini             rcu_read_unlock();
18483cc8f884SPaolo Bonzini         }
18493cc8f884SPaolo Bonzini     } else {
18503cc8f884SPaolo Bonzini         result = address_space_read_full(as, addr, attrs, buf, len);
18513cc8f884SPaolo Bonzini     }
18523cc8f884SPaolo Bonzini     return result;
18533cc8f884SPaolo Bonzini }
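
/*
 * Illustrative sketch: a checked read of a 32-bit little-endian value through
 * address_space_read().  The helper name is hypothetical; for single values
 * the address_space_ldl_le()/address_space_stl_le() family declared earlier
 * in this header is usually more convenient.  ldl_le_p() comes from
 * "qemu/bswap.h".
 *
 *   static bool read_guest_u32(AddressSpace *as, hwaddr addr, uint32_t *val)
 *   {
 *       uint8_t buf[4];
 *
 *       if (address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
 *                              buf, sizeof(buf)) != MEMTX_OK) {
 *           return false;            // unassigned memory, IOMMU fault, ...
 *       }
 *       *val = ldl_le_p(buf);        // interpret the bytes as little-endian
 *       return true;
 *   }
 */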
1854a203ac70SPaolo Bonzini 
18551f4e496eSPaolo Bonzini /**
18561f4e496eSPaolo Bonzini  * address_space_read_cached: read from a cached RAM region
18571f4e496eSPaolo Bonzini  *
18581f4e496eSPaolo Bonzini  * @cache: Cached region to be addressed
18591f4e496eSPaolo Bonzini  * @addr: address relative to the base of the RAM region
18601f4e496eSPaolo Bonzini  * @buf: buffer with the data transferred
18611f4e496eSPaolo Bonzini  * @len: length of the data transferred
18621f4e496eSPaolo Bonzini  */
18631f4e496eSPaolo Bonzini static inline void
18641f4e496eSPaolo Bonzini address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
18651f4e496eSPaolo Bonzini                           void *buf, int len)
18661f4e496eSPaolo Bonzini {
18671f4e496eSPaolo Bonzini     assert(addr < cache->len && len <= cache->len - addr);
186890c4fe5fSPaolo Bonzini     address_space_read(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len);
18691f4e496eSPaolo Bonzini }
18701f4e496eSPaolo Bonzini 
18711f4e496eSPaolo Bonzini /**
18721f4e496eSPaolo Bonzini  * address_space_write_cached: write to a cached RAM region
18731f4e496eSPaolo Bonzini  *
18741f4e496eSPaolo Bonzini  * @cache: Cached region to be addressed
18751f4e496eSPaolo Bonzini  * @addr: address relative to the base of the RAM region
18761f4e496eSPaolo Bonzini  * @buf: buffer with the data transferred
18771f4e496eSPaolo Bonzini  * @len: length of the data transferred
18781f4e496eSPaolo Bonzini  */
18791f4e496eSPaolo Bonzini static inline void
18801f4e496eSPaolo Bonzini address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
18811f4e496eSPaolo Bonzini                            void *buf, int len)
18821f4e496eSPaolo Bonzini {
18831f4e496eSPaolo Bonzini     assert(addr < cache->len && len <= cache->len - addr);
188490c4fe5fSPaolo Bonzini     address_space_write(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len);
18851f4e496eSPaolo Bonzini }
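
/*
 * Illustrative sketch: bulk transfers through an initialized
 * MemoryRegionCache.  The cache must cover the accessed range, as enforced
 * by the assertions above.  The guest_desc layout and the helper name are
 * hypothetical; note that no endianness conversion is shown.
 *
 *   struct guest_desc {
 *       uint64_t addr;
 *       uint32_t len;
 *       uint32_t flags;
 *   };
 *
 *   static void update_desc_flags(MemoryRegionCache *cache, hwaddr offset,
 *                                 uint32_t flags)
 *   {
 *       struct guest_desc d;
 *
 *       address_space_read_cached(cache, offset, &d, sizeof(d));
 *       d.flags = flags;
 *       address_space_write_cached(cache, offset, &d, sizeof(d));
 *   }
 */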
18861f4e496eSPaolo Bonzini 
1887022c62cbSPaolo Bonzini #endif /* !CONFIG_USER_ONLY */
1888022c62cbSPaolo Bonzini 
1889022c62cbSPaolo Bonzini #endif /* MEMORY_H */
1890