xref: /openbmc/qemu/include/exec/memory.h (revision 6c35ed68)
1 /*
2  * Physical memory management API
3  *
4  * Copyright 2011 Red Hat, Inc. and/or its affiliates
5  *
6  * Authors:
7  *  Avi Kivity <avi@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  */
13 
14 #ifndef MEMORY_H
15 #define MEMORY_H
16 
17 #ifndef CONFIG_USER_ONLY
18 
19 #include "exec/cpu-common.h"
20 #include "exec/hwaddr.h"
21 #include "exec/memattrs.h"
22 #include "exec/memop.h"
23 #include "exec/ramlist.h"
24 #include "qemu/bswap.h"
25 #include "qemu/queue.h"
26 #include "qemu/int128.h"
27 #include "qemu/notify.h"
28 #include "qom/object.h"
29 #include "qemu/rcu.h"
30 
31 #define RAM_ADDR_INVALID (~(ram_addr_t)0)
32 
33 #define MAX_PHYS_ADDR_SPACE_BITS 62
34 #define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
35 
36 #define TYPE_MEMORY_REGION "qemu:memory-region"
37 #define MEMORY_REGION(obj) \
38         OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)
39 
40 #define TYPE_IOMMU_MEMORY_REGION "qemu:iommu-memory-region"
41 #define IOMMU_MEMORY_REGION(obj) \
42         OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_IOMMU_MEMORY_REGION)
43 #define IOMMU_MEMORY_REGION_CLASS(klass) \
44         OBJECT_CLASS_CHECK(IOMMUMemoryRegionClass, (klass), \
45                          TYPE_IOMMU_MEMORY_REGION)
46 #define IOMMU_MEMORY_REGION_GET_CLASS(obj) \
47         OBJECT_GET_CLASS(IOMMUMemoryRegionClass, (obj), \
48                          TYPE_IOMMU_MEMORY_REGION)
49 
50 extern bool global_dirty_log;
51 
52 typedef struct MemoryRegionOps MemoryRegionOps;
53 typedef struct MemoryRegionMmio MemoryRegionMmio;
54 
55 struct MemoryRegionMmio {
56     CPUReadMemoryFunc *read[3];
57     CPUWriteMemoryFunc *write[3];
58 };
59 
60 typedef struct IOMMUTLBEntry IOMMUTLBEntry;
61 
62 /* See address_space_translate: bit 0 is read, bit 1 is write.  */
63 typedef enum {
64     IOMMU_NONE = 0,
65     IOMMU_RO   = 1,
66     IOMMU_WO   = 2,
67     IOMMU_RW   = 3,
68 } IOMMUAccessFlags;
69 
70 #define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))
71 
72 struct IOMMUTLBEntry {
73     AddressSpace    *target_as;
74     hwaddr           iova;
75     hwaddr           translated_addr;
76     hwaddr           addr_mask;  /* 0xfff = 4k translation */
77     IOMMUAccessFlags perm;
78 };
79 
80 /*
81  * Bitmap for different IOMMUNotifier capabilities. Each notifier can
82  * register with one or multiple IOMMU Notifier capability bit(s).
83  */
84 typedef enum {
85     IOMMU_NOTIFIER_NONE = 0,
86     /* Notify cache invalidations */
87     IOMMU_NOTIFIER_UNMAP = 0x1,
88     /* Notify entry changes (newly created entries) */
89     IOMMU_NOTIFIER_MAP = 0x2,
90 } IOMMUNotifierFlag;
91 
92 #define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
93 
94 struct IOMMUNotifier;
95 typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
96                             IOMMUTLBEntry *data);
97 
98 struct IOMMUNotifier {
99     IOMMUNotify notify;
100     IOMMUNotifierFlag notifier_flags;
101     /* Notify for address space range start <= addr <= end */
102     hwaddr start;
103     hwaddr end;
104     int iommu_idx;
105     QLIST_ENTRY(IOMMUNotifier) node;
106 };
107 typedef struct IOMMUNotifier IOMMUNotifier;
108 
109 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
110 #define RAM_PREALLOC   (1 << 0)
111 
112 /* RAM is mmap-ed with MAP_SHARED */
113 #define RAM_SHARED     (1 << 1)
114 
115 /* Only a portion of RAM (used_length) is actually used, and migrated.
116  * This used_length size can change across reboots.
117  */
118 #define RAM_RESIZEABLE (1 << 2)
119 
120 /* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
121  * zero the page and wake waiting processes.
122  * (Set during postcopy)
123  */
124 #define RAM_UF_ZEROPAGE (1 << 3)
125 
126 /* RAM can be migrated */
127 #define RAM_MIGRATABLE (1 << 4)
128 
129 /* RAM is a persistent kind of memory */
130 #define RAM_PMEM (1 << 5)
131 
132 static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
133                                        IOMMUNotifierFlag flags,
134                                        hwaddr start, hwaddr end,
135                                        int iommu_idx)
136 {
137     n->notify = fn;
138     n->notifier_flags = flags;
139     n->start = start;
140     n->end = end;
141     n->iommu_idx = iommu_idx;
142 }
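
/*
 * Illustrative sketch (not part of the upstream header): a typical caller
 * fills in an IOMMUNotifier with iommu_notifier_init() and then attaches it
 * with memory_region_register_iommu_notifier(), declared later in this file.
 * The callback and variable names below are hypothetical.
 *
 *   static void mydev_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *e)
 *   {
 *       // e->iova .. e->iova + e->addr_mask has just been invalidated
 *   }
 *
 *   static IOMMUNotifier mydev_notifier;
 *
 *   // Request only UNMAP events, over the whole IOVA range, for IOMMU
 *   // index 0.
 *   iommu_notifier_init(&mydev_notifier, mydev_iommu_unmap_notify,
 *                       IOMMU_NOTIFIER_UNMAP, 0, HWADDR_MAX, 0);
 *   memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr),
 *                                         &mydev_notifier);
 */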
143 
144 /*
145  * Memory region callbacks
146  */
147 struct MemoryRegionOps {
148     /* Read from the memory region. @addr is relative to @mr; @size is
149      * in bytes. */
150     uint64_t (*read)(void *opaque,
151                      hwaddr addr,
152                      unsigned size);
153     /* Write to the memory region. @addr is relative to @mr; @size is
154      * in bytes. */
155     void (*write)(void *opaque,
156                   hwaddr addr,
157                   uint64_t data,
158                   unsigned size);
159 
160     MemTxResult (*read_with_attrs)(void *opaque,
161                                    hwaddr addr,
162                                    uint64_t *data,
163                                    unsigned size,
164                                    MemTxAttrs attrs);
165     MemTxResult (*write_with_attrs)(void *opaque,
166                                     hwaddr addr,
167                                     uint64_t data,
168                                     unsigned size,
169                                     MemTxAttrs attrs);
170 
171     enum device_endian endianness;
172     /* Guest-visible constraints: */
173     struct {
174         /* If nonzero, specify bounds on access sizes beyond which a machine
175          * check is thrown.
176          */
177         unsigned min_access_size;
178         unsigned max_access_size;
179         /* If true, unaligned accesses are supported.  Otherwise unaligned
180          * accesses throw machine checks.
181          */
182         bool unaligned;
183         /*
184          * If present, and returns #false, the transaction is not accepted
185          * by the device (and results in machine dependent behaviour such
186          * as a machine check exception).
187          */
188         bool (*accepts)(void *opaque, hwaddr addr,
189                         unsigned size, bool is_write,
190                         MemTxAttrs attrs);
191     } valid;
192     /* Internal implementation constraints: */
193     struct {
194         /* If nonzero, specifies the minimum size implemented.  Smaller sizes
195          * will be rounded upwards and a partial result will be returned.
196          */
197         unsigned min_access_size;
198         /* If nonzero, specifies the maximum size implemented.  Larger sizes
199          * will be done as a series of accesses with smaller sizes.
200          */
201         unsigned max_access_size;
202         /* If true, unaligned accesses are supported.  Otherwise all accesses
203          * are converted to (possibly multiple) naturally aligned accesses.
204          */
205         bool unaligned;
206     } impl;
207 };
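
/*
 * Illustrative sketch (not part of the upstream header): a minimal MMIO
 * device might describe its register window with a MemoryRegionOps like the
 * one below.  MyDevState and the register layout are hypothetical.
 *
 *   static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *
 *       return s->regs[addr >> 2];     // addr is relative to the region
 *   }
 *
 *   static void mydev_write(void *opaque, hwaddr addr, uint64_t data,
 *                           unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *
 *       s->regs[addr >> 2] = data;
 *   }
 *
 *   static const MemoryRegionOps mydev_ops = {
 *       .read = mydev_read,
 *       .write = mydev_write,
 *       .endianness = DEVICE_NATIVE_ENDIAN,
 *       .valid.min_access_size = 4,    // guest accesses must be 32 bit
 *       .valid.max_access_size = 4,
 *       .impl.min_access_size = 4,     // callbacks only handle 32-bit ops
 *       .impl.max_access_size = 4,
 *   };
 */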
208 
209 typedef struct MemoryRegionClass {
210     /* private */
211     ObjectClass parent_class;
212 } MemoryRegionClass;
213 
214 
215 enum IOMMUMemoryRegionAttr {
216     IOMMU_ATTR_SPAPR_TCE_FD
217 };
218 
219 /**
220  * IOMMUMemoryRegionClass:
221  *
222  * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
223  * and provide an implementation of at least the @translate method here
224  * to handle requests to the memory region. Other methods are optional.
225  *
226  * The IOMMU implementation must use the IOMMU notifier infrastructure
227  * to report whenever mappings are changed, by calling
228  * memory_region_notify_iommu() (or, if necessary, by calling
229  * memory_region_notify_one() for each registered notifier).
230  *
231  * Conceptually an IOMMU provides a mapping from input address
232  * to an output TLB entry. If the IOMMU is aware of memory transaction
233  * attributes and the output TLB entry depends on the transaction
234  * attributes, we represent this using IOMMU indexes. Each index
235  * selects a particular translation table that the IOMMU has:
236  *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
237  *   @translate takes an input address and an IOMMU index
238  * and the mapping returned can only depend on the input address and the
239  * IOMMU index.
240  *
241  * Most IOMMUs don't care about the transaction attributes and support
242  * only a single IOMMU index. A more complex IOMMU might have one index
243  * for secure transactions and one for non-secure transactions.
244  */
245 typedef struct IOMMUMemoryRegionClass {
246     /* private */
247     MemoryRegionClass parent_class;
248 
249     /*
250      * Return a TLB entry that contains a given address.
251      *
252      * The IOMMUAccessFlags indicated via @flag are optional and may
253      * be specified as IOMMU_NONE to indicate that the caller needs
254      * the full translation information for both reads and writes. If
255      * the access flags are specified then the IOMMU implementation
256      * may use this as an optimization, to stop doing a page table
257      * walk as soon as it knows that the requested permissions are not
258      * allowed. If IOMMU_NONE is passed then the IOMMU must do the
259      * full page table walk and report the permissions in the returned
260      * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
261      * return different mappings for reads and writes.)
262      *
263      * The returned information remains valid while the caller is
264      * holding the big QEMU lock or is inside an RCU critical section;
265      * if the caller wishes to cache the mapping beyond that it must
266      * register an IOMMU notifier so it can invalidate its cached
267      * information when the IOMMU mapping changes.
268      *
269      * @iommu: the IOMMUMemoryRegion
270      * @hwaddr: address to be translated within the memory region
271      * @flag: requested access permissions
272      * @iommu_idx: IOMMU index for the translation
273      */
274     IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
275                                IOMMUAccessFlags flag, int iommu_idx);
276     /* Returns minimum supported page size in bytes.
277      * If this method is not provided then the minimum is assumed to
278      * be TARGET_PAGE_SIZE.
279      *
280      * @iommu: the IOMMUMemoryRegion
281      */
282     uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
283     /* Called when IOMMU Notifier flag changes (ie when the set of
284      * events which IOMMU users are requesting notification for changes).
285      * Optional method -- need not be provided if the IOMMU does not
286      * need to know exactly which events must be notified.
287      *
288      * @iommu: the IOMMUMemoryRegion
289      * @old_flags: events which previously needed to be notified
290      * @new_flags: events which now need to be notified
291      */
292     void (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
293                                 IOMMUNotifierFlag old_flags,
294                                 IOMMUNotifierFlag new_flags);
295     /* Called to handle memory_region_iommu_replay().
296      *
297      * The default implementation of memory_region_iommu_replay() is to
298      * call the IOMMU translate method for every page in the address space
299      * with flag == IOMMU_NONE and then call the notifier if translate
300      * returns a valid mapping. If this method is implemented then it
301      * overrides the default behaviour, and must provide the full semantics
302      * of memory_region_iommu_replay(), by calling @notifier for every
303      * translation present in the IOMMU.
304      *
305      * Optional method -- an IOMMU only needs to provide this method
306      * if the default is inefficient or produces undesirable side effects.
307      *
308      * Note: this is not related to record-and-replay functionality.
309      */
310     void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);
311 
312     /* Get IOMMU misc attributes. This is an optional method that
313      * can be used to allow users of the IOMMU to get implementation-specific
314      * information. The IOMMU implements this method to handle calls
315      * by IOMMU users to memory_region_iommu_get_attr() by filling in
316      * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
317      * the IOMMU supports. If the method is unimplemented then
318      * memory_region_iommu_get_attr() will always return -EINVAL.
319      *
320      * @iommu: the IOMMUMemoryRegion
321      * @attr: attribute being queried
322      * @data: memory to fill in with the attribute data
323      *
324      * Returns 0 on success, or a negative errno; in particular
325      * returns -EINVAL for unrecognized or unimplemented attribute types.
326      */
327     int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
328                     void *data);
329 
330     /* Return the IOMMU index to use for a given set of transaction attributes.
331      *
332      * Optional method: if an IOMMU only supports a single IOMMU index then
333      * the default implementation of memory_region_iommu_attrs_to_index()
334      * will return 0.
335      *
336      * The indexes supported by an IOMMU must be contiguous, starting at 0.
337      *
338      * @iommu: the IOMMUMemoryRegion
339      * @attrs: memory transaction attributes
340      */
341     int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);
342 
343     /* Return the number of IOMMU indexes this IOMMU supports.
344      *
345      * Optional method: if this method is not provided, then
346      * memory_region_iommu_num_indexes() will return 1, indicating that
347      * only a single IOMMU index is supported.
348      *
349      * @iommu: the IOMMUMemoryRegion
350      */
351     int (*num_indexes)(IOMMUMemoryRegion *iommu);
352 } IOMMUMemoryRegionClass;
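
/*
 * Illustrative sketch (not part of the upstream header): the simplest
 * possible @translate implementation, mapping every address 1:1 into
 * &address_space_memory with 4K granularity.  The function name is
 * hypothetical; a real IOMMU would walk its translation tables here and
 * honour @flag and @iommu_idx.
 *
 *   static IOMMUTLBEntry mydev_iommu_translate(IOMMUMemoryRegion *iommu,
 *                                              hwaddr addr,
 *                                              IOMMUAccessFlags flag,
 *                                              int iommu_idx)
 *   {
 *       IOMMUTLBEntry entry = {
 *           .target_as = &address_space_memory,
 *           .iova = addr & ~(hwaddr)0xfff,
 *           .translated_addr = addr & ~(hwaddr)0xfff,
 *           .addr_mask = 0xfff,              // 4K translation
 *           .perm = IOMMU_RW,
 *       };
 *
 *       return entry;
 *   }
 */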
353 
354 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
355 typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
356 
357 struct MemoryRegion {
358     Object parent_obj;
359 
360     /* All fields are private - violators will be prosecuted */
361 
362     /* The following fields should fit in a cache line */
363     bool romd_mode;
364     bool ram;
365     bool subpage;
366     bool readonly; /* For RAM regions */
367     bool nonvolatile;
368     bool rom_device;
369     bool flush_coalesced_mmio;
370     bool global_locking;
371     uint8_t dirty_log_mask;
372     bool is_iommu;
373     RAMBlock *ram_block;
374     Object *owner;
375 
376     const MemoryRegionOps *ops;
377     void *opaque;
378     MemoryRegion *container;
379     Int128 size;
380     hwaddr addr;
381     void (*destructor)(MemoryRegion *mr);
382     uint64_t align;
383     bool terminates;
384     bool ram_device;
385     bool enabled;
386     bool warning_printed; /* For reservations */
387     uint8_t vga_logging_count;
388     MemoryRegion *alias;
389     hwaddr alias_offset;
390     int32_t priority;
391     QTAILQ_HEAD(, MemoryRegion) subregions;
392     QTAILQ_ENTRY(MemoryRegion) subregions_link;
393     QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
394     const char *name;
395     unsigned ioeventfd_nb;
396     MemoryRegionIoeventfd *ioeventfds;
397 };
398 
399 struct IOMMUMemoryRegion {
400     MemoryRegion parent_obj;
401 
402     QLIST_HEAD(, IOMMUNotifier) iommu_notify;
403     IOMMUNotifierFlag iommu_notify_flags;
404 };
405 
406 #define IOMMU_NOTIFIER_FOREACH(n, mr) \
407     QLIST_FOREACH((n), &(mr)->iommu_notify, node)
408 
409 /**
410  * MemoryListener: callbacks structure for updates to the physical memory map
411  *
412  * Allows a component to adjust to changes in the guest-visible memory map.
413  * Use with memory_listener_register() and memory_listener_unregister().
414  */
415 struct MemoryListener {
416     void (*begin)(MemoryListener *listener);
417     void (*commit)(MemoryListener *listener);
418     void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
419     void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
420     void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
421     void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
422                       int old, int new);
423     void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
424                      int old, int new);
425     void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
426     void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);
427     void (*log_global_start)(MemoryListener *listener);
428     void (*log_global_stop)(MemoryListener *listener);
429     void (*log_global_after_sync)(MemoryListener *listener);
430     void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
431                         bool match_data, uint64_t data, EventNotifier *e);
432     void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
433                         bool match_data, uint64_t data, EventNotifier *e);
434     void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
435                                hwaddr addr, hwaddr len);
436     void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
437                                hwaddr addr, hwaddr len);
438     /* Lower = earlier (during add), later (during del) */
439     unsigned priority;
440     AddressSpace *address_space;
441     QTAILQ_ENTRY(MemoryListener) link;
442     QTAILQ_ENTRY(MemoryListener) link_as;
443 };
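
/*
 * Illustrative sketch (not part of the upstream header): a component that
 * only tracks sections appearing and disappearing from an address space
 * might register a listener as below, using memory_listener_register()
 * (declared later in this header).  The callback names are hypothetical.
 *
 *   static void mydev_region_add(MemoryListener *l, MemoryRegionSection *sec)
 *   {
 *       // sec->offset_within_address_space for int128_get64(sec->size)
 *       // bytes has become visible in the filtered address space
 *   }
 *
 *   static void mydev_region_del(MemoryListener *l, MemoryRegionSection *sec)
 *   {
 *       // the corresponding range has gone away
 *   }
 *
 *   static MemoryListener mydev_listener = {
 *       .region_add = mydev_region_add,
 *       .region_del = mydev_region_del,
 *       .priority = 10,                // lower values run earlier on add
 *   };
 *
 *   memory_listener_register(&mydev_listener, &address_space_memory);
 */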
444 
445 /**
446  * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
447  */
448 struct AddressSpace {
449     /* All fields are private. */
450     struct rcu_head rcu;
451     char *name;
452     MemoryRegion *root;
453 
454     /* Accessed via RCU.  */
455     struct FlatView *current_map;
456 
457     int ioeventfd_nb;
458     struct MemoryRegionIoeventfd *ioeventfds;
459     QTAILQ_HEAD(, MemoryListener) listeners;
460     QTAILQ_ENTRY(AddressSpace) address_spaces_link;
461 };
462 
463 typedef struct AddressSpaceDispatch AddressSpaceDispatch;
464 typedef struct FlatRange FlatRange;
465 
466 /* Flattened global view of current active memory hierarchy.  Kept in sorted
467  * order.
468  */
469 struct FlatView {
470     struct rcu_head rcu;
471     unsigned ref;
472     FlatRange *ranges;
473     unsigned nr;
474     unsigned nr_allocated;
475     struct AddressSpaceDispatch *dispatch;
476     MemoryRegion *root;
477 };
478 
479 static inline FlatView *address_space_to_flatview(AddressSpace *as)
480 {
481     return atomic_rcu_read(&as->current_map);
482 }
483 
484 
485 /**
486  * MemoryRegionSection: describes a fragment of a #MemoryRegion
487  *
488  * @mr: the region, or %NULL if empty
489  * @fv: the flat view of the address space the region is mapped in
490  * @offset_within_region: the beginning of the section, relative to @mr's start
491  * @size: the size of the section; will not exceed @mr's boundaries
492  * @offset_within_address_space: the address of the first byte of the section
493  *     relative to the region's address space
494  * @readonly: writes to this section are ignored
495  * @nonvolatile: this section is non-volatile
496  */
497 struct MemoryRegionSection {
498     Int128 size;
499     MemoryRegion *mr;
500     FlatView *fv;
501     hwaddr offset_within_region;
502     hwaddr offset_within_address_space;
503     bool readonly;
504     bool nonvolatile;
505 };
506 
507 static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
508                                           MemoryRegionSection *b)
509 {
510     return a->mr == b->mr &&
511            a->fv == b->fv &&
512            a->offset_within_region == b->offset_within_region &&
513            a->offset_within_address_space == b->offset_within_address_space &&
514            int128_eq(a->size, b->size) &&
515            a->readonly == b->readonly &&
516            a->nonvolatile == b->nonvolatile;
517 }
518 
519 /**
520  * memory_region_init: Initialize a memory region
521  *
522  * The region typically acts as a container for other memory regions.  Use
523  * memory_region_add_subregion() to add subregions.
524  *
525  * @mr: the #MemoryRegion to be initialized
526  * @owner: the object that tracks the region's reference count
527  * @name: used for debugging; not visible to the user or ABI
528  * @size: size of the region; any subregions beyond this size will be clipped
529  */
530 void memory_region_init(MemoryRegion *mr,
531                         struct Object *owner,
532                         const char *name,
533                         uint64_t size);
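
/*
 * Illustrative sketch (not part of the upstream header): creating a pure
 * container region and populating it with memory_region_add_subregion(),
 * which is declared later in this header.  The names and the 256 MiB offset
 * are hypothetical.
 *
 *   MemoryRegion *container = g_new(MemoryRegion, 1);
 *
 *   memory_region_init(container, owner, "mydev-container", UINT64_MAX);
 *   memory_region_add_subregion(container, 0x10000000, mydev_ram);
 */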
534 
535 /**
536  * memory_region_ref: Add 1 to a memory region's reference count
537  *
538  * Whenever memory regions are accessed outside the BQL, they need to be
539  * preserved against hot-unplug.  MemoryRegions actually do not have their
540  * own reference count; they piggyback on a QOM object, their "owner".
541  * This function adds a reference to the owner.
542  *
543  * All MemoryRegions must have an owner if they can disappear, even if the
544  * device they belong to operates exclusively under the BQL.  This is because
545  * the region could be returned at any time by memory_region_find, and this
546  * is usually under guest control.
547  *
548  * @mr: the #MemoryRegion
549  */
550 void memory_region_ref(MemoryRegion *mr);
551 
552 /**
553  * memory_region_unref: Remove 1 from a memory region's reference count
554  *
555  * Whenever memory regions are accessed outside the BQL, they need to be
556  * preserved against hot-unplug.  MemoryRegions actually do not have their
557  * own reference count; they piggyback on a QOM object, their "owner".
558  * This function removes a reference to the owner and possibly destroys it.
559  *
560  * @mr: the #MemoryRegion
561  */
562 void memory_region_unref(MemoryRegion *mr);
563 
564 /**
565  * memory_region_init_io: Initialize an I/O memory region.
566  *
567  * Accesses into the region will cause the callbacks in @ops to be called.
568  * If @size is nonzero, subregions will be clipped to @size.
569  *
570  * @mr: the #MemoryRegion to be initialized.
571  * @owner: the object that tracks the region's reference count
572  * @ops: a structure containing read and write callbacks to be used when
573  *       I/O is performed on the region.
574  * @opaque: passed to the read and write callbacks of the @ops structure.
575  * @name: used for debugging; not visible to the user or ABI
576  * @size: size of the region.
577  */
578 void memory_region_init_io(MemoryRegion *mr,
579                            struct Object *owner,
580                            const MemoryRegionOps *ops,
581                            void *opaque,
582                            const char *name,
583                            uint64_t size);
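
/*
 * Illustrative sketch (not part of the upstream header): wiring the
 * hypothetical mydev_ops callbacks from the MemoryRegionOps example above
 * into a 4 KiB MMIO region owned by the device instance.
 *
 *   memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                         "mydev-mmio", 0x1000);
 */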
584 
585 /**
586  * memory_region_init_ram_nomigrate:  Initialize RAM memory region.  Accesses
587  *                                    into the region will modify memory
588  *                                    directly.
589  *
590  * @mr: the #MemoryRegion to be initialized.
591  * @owner: the object that tracks the region's reference count
592  * @name: Region name, becomes part of RAMBlock name used in migration stream;
593  *        must be unique within any device
594  * @size: size of the region.
595  * @errp: pointer to Error*, to store an error if it happens.
596  *
597  * Note that this function does not do anything to cause the data in the
598  * RAM memory region to be migrated; that is the responsibility of the caller.
599  */
600 void memory_region_init_ram_nomigrate(MemoryRegion *mr,
601                                       struct Object *owner,
602                                       const char *name,
603                                       uint64_t size,
604                                       Error **errp);
605 
606 /**
607  * memory_region_init_ram_shared_nomigrate:  Initialize RAM memory region.
608  *                                           Accesses into the region will
609  *                                           modify memory directly.
610  *
611  * @mr: the #MemoryRegion to be initialized.
612  * @owner: the object that tracks the region's reference count
613  * @name: Region name, becomes part of RAMBlock name used in migration stream;
614  *        must be unique within any device
615  * @size: size of the region.
616  * @share: allow remapping RAM to different addresses
617  * @errp: pointer to Error*, to store an error if it happens.
618  *
619  * Note that this function is similar to memory_region_init_ram_nomigrate.
620  * The only difference is that part of the RAM region can be remapped.
621  */
622 void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
623                                              struct Object *owner,
624                                              const char *name,
625                                              uint64_t size,
626                                              bool share,
627                                              Error **errp);
628 
629 /**
630  * memory_region_init_resizeable_ram:  Initialize memory region with resizeable
631  *                                     RAM.  Accesses into the region will
632  *                                     modify memory directly.  Only an initial
633  *                                     portion of this RAM is actually used.
634  *                                     The used size can change across reboots.
635  *
636  * @mr: the #MemoryRegion to be initialized.
637  * @owner: the object that tracks the region's reference count
638  * @name: Region name, becomes part of RAMBlock name used in migration stream;
639  *        must be unique within any device
640  * @size: used size of the region.
641  * @max_size: max size of the region.
642  * @resized: callback to notify owner about used size change.
643  * @errp: pointer to Error*, to store an error if it happens.
644  *
645  * Note that this function does not do anything to cause the data in the
646  * RAM memory region to be migrated; that is the responsibility of the caller.
647  */
648 void memory_region_init_resizeable_ram(MemoryRegion *mr,
649                                        struct Object *owner,
650                                        const char *name,
651                                        uint64_t size,
652                                        uint64_t max_size,
653                                        void (*resized)(const char*,
654                                                        uint64_t length,
655                                                        void *host),
656                                        Error **errp);
657 #ifdef CONFIG_POSIX
658 
659 /**
660  * memory_region_init_ram_from_file:  Initialize RAM memory region with a
661  *                                    mmap-ed backend.
662  *
663  * @mr: the #MemoryRegion to be initialized.
664  * @owner: the object that tracks the region's reference count
665  * @name: Region name, becomes part of RAMBlock name used in migration stream;
666  *        must be unique within any device
667  * @size: size of the region.
668  * @align: alignment of the region base address; if 0, the default alignment
669  *         (getpagesize()) will be used.
670  * @ram_flags: Memory region features:
671  *             - RAM_SHARED: memory must be mmaped with the MAP_SHARED flag
672  *             - RAM_PMEM: the memory is persistent memory
673  *             Other bits are ignored now.
674  * @path: the path in which to allocate the RAM.
675  * @errp: pointer to Error*, to store an error if it happens.
676  *
677  * Note that this function does not do anything to cause the data in the
678  * RAM memory region to be migrated; that is the responsibility of the caller.
679  */
680 void memory_region_init_ram_from_file(MemoryRegion *mr,
681                                       struct Object *owner,
682                                       const char *name,
683                                       uint64_t size,
684                                       uint64_t align,
685                                       uint32_t ram_flags,
686                                       const char *path,
687                                       Error **errp);
688 
689 /**
690  * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
691  *                                  mmap-ed backend.
692  *
693  * @mr: the #MemoryRegion to be initialized.
694  * @owner: the object that tracks the region's reference count
695  * @name: the name of the region.
696  * @size: size of the region.
697  * @share: %true if memory must be mmaped with the MAP_SHARED flag
698  * @fd: the fd to mmap.
699  * @errp: pointer to Error*, to store an error if it happens.
700  *
701  * Note that this function does not do anything to cause the data in the
702  * RAM memory region to be migrated; that is the responsibility of the caller.
703  */
704 void memory_region_init_ram_from_fd(MemoryRegion *mr,
705                                     struct Object *owner,
706                                     const char *name,
707                                     uint64_t size,
708                                     bool share,
709                                     int fd,
710                                     Error **errp);
711 #endif
712 
713 /**
714  * memory_region_init_ram_ptr:  Initialize RAM memory region from a
715  *                              user-provided pointer.  Accesses into the
716  *                              region will modify memory directly.
717  *
718  * @mr: the #MemoryRegion to be initialized.
719  * @owner: the object that tracks the region's reference count
720  * @name: Region name, becomes part of RAMBlock name used in migration stream;
721  *        must be unique within any device
722  * @size: size of the region.
723  * @ptr: memory to be mapped; must contain at least @size bytes.
724  *
725  * Note that this function does not do anything to cause the data in the
726  * RAM memory region to be migrated; that is the responsibility of the caller.
727  */
728 void memory_region_init_ram_ptr(MemoryRegion *mr,
729                                 struct Object *owner,
730                                 const char *name,
731                                 uint64_t size,
732                                 void *ptr);
733 
734 /**
735  * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
736  *                                     a user-provided pointer.
737  *
738  * A RAM device represents a mapping to a physical device, such as to a PCI
739  * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
740  * into the VM address space and access to the region will modify memory
741  * directly.  However, the memory region should not be included in a memory
742  * dump (device may not be enabled/mapped at the time of the dump), and
743  * operations incompatible with manipulating MMIO should be avoided.  Replaces
744  * skip_dump flag.
745  *
746  * @mr: the #MemoryRegion to be initialized.
747  * @owner: the object that tracks the region's reference count
748  * @name: the name of the region.
749  * @size: size of the region.
750  * @ptr: memory to be mapped; must contain at least @size bytes.
751  *
752  * Note that this function does not do anything to cause the data in the
753  * RAM memory region to be migrated; that is the responsibility of the caller.
754  * (For RAM device memory regions, migrating the contents rarely makes sense.)
755  */
756 void memory_region_init_ram_device_ptr(MemoryRegion *mr,
757                                        struct Object *owner,
758                                        const char *name,
759                                        uint64_t size,
760                                        void *ptr);
761 
762 /**
763  * memory_region_init_alias: Initialize a memory region that aliases all or a
764  *                           part of another memory region.
765  *
766  * @mr: the #MemoryRegion to be initialized.
767  * @owner: the object that tracks the region's reference count
768  * @name: used for debugging; not visible to the user or ABI
769  * @orig: the region to be referenced; @mr will be equivalent to
770  *        @orig between @offset and @offset + @size - 1.
771  * @offset: start of the section in @orig to be referenced.
772  * @size: size of the region.
773  */
774 void memory_region_init_alias(MemoryRegion *mr,
775                               struct Object *owner,
776                               const char *name,
777                               MemoryRegion *orig,
778                               hwaddr offset,
779                               uint64_t size);
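
/*
 * Illustrative sketch (not part of the upstream header): exposing the first
 * 1 MiB of an existing RAM region at a second guest-physical address.  The
 * names and addresses are hypothetical; memory_region_add_subregion() is
 * declared later in this header.
 *
 *   memory_region_init_alias(alias_mr, owner, "mydev-ram-alias",
 *                            ram_mr, 0, 0x100000);
 *   memory_region_add_subregion(sysmem, 0x80000000, alias_mr);
 */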
780 
781 /**
782  * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
783  *
784  * This has the same effect as calling memory_region_init_ram_nomigrate()
785  * and then marking the resulting region read-only with
786  * memory_region_set_readonly().
787  *
788  * Note that this function does not do anything to cause the data in the
789  * RAM side of the memory region to be migrated; that is the responsibility
790  * of the caller.
791  *
792  * @mr: the #MemoryRegion to be initialized.
793  * @owner: the object that tracks the region's reference count
794  * @name: Region name, becomes part of RAMBlock name used in migration stream;
795  *        must be unique within any device
796  * @size: size of the region.
797  * @errp: pointer to Error*, to store an error if it happens.
798  */
799 void memory_region_init_rom_nomigrate(MemoryRegion *mr,
800                                       struct Object *owner,
801                                       const char *name,
802                                       uint64_t size,
803                                       Error **errp);
804 
805 /**
806  * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
807  *                                 Writes are handled via callbacks.
808  *
809  * Note that this function does not do anything to cause the data in the
810  * RAM side of the memory region to be migrated; that is the responsibility
811  * of the caller.
812  *
813  * @mr: the #MemoryRegion to be initialized.
814  * @owner: the object that tracks the region's reference count
815  * @ops: callbacks for write access handling (must not be NULL).
816  * @opaque: passed to the read and write callbacks of the @ops structure.
817  * @name: Region name, becomes part of RAMBlock name used in migration stream;
818  *        must be unique within any device
819  * @size: size of the region.
820  * @errp: pointer to Error*, to store an error if it happens.
821  */
822 void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
823                                              struct Object *owner,
824                                              const MemoryRegionOps *ops,
825                                              void *opaque,
826                                              const char *name,
827                                              uint64_t size,
828                                              Error **errp);
829 
830 /**
831  * memory_region_init_iommu: Initialize a memory region of a custom type
832  * that translates addresses
833  *
834  * An IOMMU region translates addresses and forwards accesses to a target
835  * memory region.
836  *
837  * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
838  * @_iommu_mr should be a pointer to enough memory for an instance of
839  * that subclass, @instance_size is the size of that subclass, and
840  * @mrtypename is its name. This function will initialize @_iommu_mr as an
841  * instance of the subclass, and its methods will then be called to handle
842  * accesses to the memory region. See the documentation of
843  * #IOMMUMemoryRegionClass for further details.
844  *
845  * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
846  * @instance_size: the IOMMUMemoryRegion subclass instance size
847  * @mrtypename: the type name of the #IOMMUMemoryRegion
848  * @owner: the object that tracks the region's reference count
849  * @name: used for debugging; not visible to the user or ABI
850  * @size: size of the region.
851  */
852 void memory_region_init_iommu(void *_iommu_mr,
853                               size_t instance_size,
854                               const char *mrtypename,
855                               Object *owner,
856                               const char *name,
857                               uint64_t size);
858 
859 /**
860  * memory_region_init_ram - Initialize RAM memory region.  Accesses into the
861  *                          region will modify memory directly.
862  *
863  * @mr: the #MemoryRegion to be initialized
864  * @owner: the object that tracks the region's reference count (must be
865  *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
866  * @name: name of the memory region
867  * @size: size of the region in bytes
868  * @errp: pointer to Error*, to store an error if it happens.
869  *
870  * This function allocates RAM for a board model or device, and
871  * arranges for it to be migrated (by calling vmstate_register_ram()
872  * if @owner is a DeviceState, or vmstate_register_ram_global() if
873  * @owner is NULL).
874  *
875  * TODO: Currently we restrict @owner to being either NULL (for
876  * global RAM regions with no owner) or devices, so that we can
877  * give the RAM block a unique name for migration purposes.
878  * We should lift this restriction and allow arbitrary Objects.
879  * If you pass a non-NULL non-device @owner then we will assert.
880  */
881 void memory_region_init_ram(MemoryRegion *mr,
882                             struct Object *owner,
883                             const char *name,
884                             uint64_t size,
885                             Error **errp);
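
/*
 * Illustrative sketch (not part of the upstream header): a board model
 * allocating guest RAM and mapping it at address 0.  get_system_memory()
 * comes from "exec/address-spaces.h" and &error_fatal from "qapi/error.h";
 * the other names are hypothetical.
 *
 *   memory_region_init_ram(ram, NULL, "myboard.ram", ram_size, &error_fatal);
 *   memory_region_add_subregion(get_system_memory(), 0, ram);
 */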
886 
887 /**
888  * memory_region_init_rom: Initialize a ROM memory region.
889  *
890  * This has the same effect as calling memory_region_init_ram()
891  * and then marking the resulting region read-only with
892  * memory_region_set_readonly(). This includes arranging for the
893  * contents to be migrated.
894  *
895  * TODO: Currently we restrict @owner to being either NULL (for
896  * global RAM regions with no owner) or devices, so that we can
897  * give the RAM block a unique name for migration purposes.
898  * We should lift this restriction and allow arbitrary Objects.
899  * If you pass a non-NULL non-device @owner then we will assert.
900  *
901  * @mr: the #MemoryRegion to be initialized.
902  * @owner: the object that tracks the region's reference count
903  * @name: Region name, becomes part of RAMBlock name used in migration stream;
904  *        must be unique within any device
905  * @size: size of the region.
906  * @errp: pointer to Error*, to store an error if it happens.
907  */
908 void memory_region_init_rom(MemoryRegion *mr,
909                             struct Object *owner,
910                             const char *name,
911                             uint64_t size,
912                             Error **errp);
913 
914 /**
915  * memory_region_init_rom_device:  Initialize a ROM memory region.
916  *                                 Writes are handled via callbacks.
917  *
918  * This function initializes a memory region backed by RAM for reads
919  * and callbacks for writes, and arranges for the RAM backing to
920  * be migrated (by calling vmstate_register_ram()
921  * if @owner is a DeviceState, or vmstate_register_ram_global() if
922  * @owner is NULL).
923  *
924  * TODO: Currently we restrict @owner to being either NULL (for
925  * global RAM regions with no owner) or devices, so that we can
926  * give the RAM block a unique name for migration purposes.
927  * We should lift this restriction and allow arbitrary Objects.
928  * If you pass a non-NULL non-device @owner then we will assert.
929  *
930  * @mr: the #MemoryRegion to be initialized.
931  * @owner: the object that tracks the region's reference count
932  * @ops: callbacks for write access handling (must not be NULL).
933  * @name: Region name, becomes part of RAMBlock name used in migration stream;
934  *        must be unique within any device
935  * @size: size of the region.
936  * @errp: pointer to Error*, to store an error if it happens.
937  */
938 void memory_region_init_rom_device(MemoryRegion *mr,
939                                    struct Object *owner,
940                                    const MemoryRegionOps *ops,
941                                    void *opaque,
942                                    const char *name,
943                                    uint64_t size,
944                                    Error **errp);
945 
946 
947 /**
948  * memory_region_owner: get a memory region's owner.
949  *
950  * @mr: the memory region being queried.
951  */
952 struct Object *memory_region_owner(MemoryRegion *mr);
953 
954 /**
955  * memory_region_size: get a memory region's size.
956  *
957  * @mr: the memory region being queried.
958  */
959 uint64_t memory_region_size(MemoryRegion *mr);
960 
961 /**
962  * memory_region_is_ram: check whether a memory region is random access
963  *
964  * Returns %true if a memory region is random access.
965  *
966  * @mr: the memory region being queried
967  */
968 static inline bool memory_region_is_ram(MemoryRegion *mr)
969 {
970     return mr->ram;
971 }
972 
973 /**
974  * memory_region_is_ram_device: check whether a memory region is a ram device
975  *
976  * Returns %true if a memory region is a device-backed RAM region
977  *
978  * @mr: the memory region being queried
979  */
980 bool memory_region_is_ram_device(MemoryRegion *mr);
981 
982 /**
983  * memory_region_is_romd: check whether a memory region is in ROMD mode
984  *
985  * Returns %true if a memory region is a ROM device and currently set to allow
986  * direct reads.
987  *
988  * @mr: the memory region being queried
989  */
990 static inline bool memory_region_is_romd(MemoryRegion *mr)
991 {
992     return mr->rom_device && mr->romd_mode;
993 }
994 
995 /**
996  * memory_region_get_iommu: check whether a memory region is an iommu
997  *
998  * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
999  * otherwise NULL.
1000  *
1001  * @mr: the memory region being queried
1002  */
1003 static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
1004 {
1005     if (mr->alias) {
1006         return memory_region_get_iommu(mr->alias);
1007     }
1008     if (mr->is_iommu) {
1009         return (IOMMUMemoryRegion *) mr;
1010     }
1011     return NULL;
1012 }
1013 
1014 /**
1015  * memory_region_get_iommu_class_nocheck: returns iommu memory region class
1016  *   if an iommu or NULL if not
1017  *
1018  * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
1019  * otherwise NULL. This is a fast path avoiding QOM checking; use with caution.
1020  *
1021  * @iommu_mr: the IOMMU memory region being queried
1022  */
1023 static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
1024         IOMMUMemoryRegion *iommu_mr)
1025 {
1026     return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
1027 }
1028 
1029 #define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
1030 
1031 /**
1032  * memory_region_iommu_get_min_page_size: get minimum supported page size
1033  * for an iommu
1034  *
1035  * Returns minimum supported page size for an iommu.
1036  *
1037  * @iommu_mr: the memory region being queried
1038  */
1039 uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
1040 
1041 /**
1042  * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
1043  *
1044  * The notification type will be decided by entry.perm bits:
1045  *
1046  * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
1047  * - For MAP (newly added entry) notifies: set entry.perm to the
1048  *   permission of the page (which is definitely !IOMMU_NONE).
1049  *
1050  * Note: for any IOMMU implementation, an in-place mapping change
1051  * should be notified with an UNMAP followed by a MAP.
1052  *
1053  * @iommu_mr: the memory region that was changed
1054  * @iommu_idx: the IOMMU index for the translation table which has changed
1055  * @entry: the new entry in the IOMMU translation table.  The entry
1056  *         replaces all old entries for the same virtual I/O address range.
1057  *         Deleted entries have .@perm == 0.
1058  */
1059 void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
1060                                 int iommu_idx,
1061                                 IOMMUTLBEntry entry);
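
/*
 * Illustrative sketch (not part of the upstream header): an IOMMU model
 * reporting that the 4K mapping covering @iova has been torn down.  The
 * variable names are hypothetical.
 *
 *   IOMMUTLBEntry entry = {
 *       .target_as = &address_space_memory,
 *       .iova = iova & ~(hwaddr)0xfff,
 *       .translated_addr = 0,
 *       .addr_mask = 0xfff,
 *       .perm = IOMMU_NONE,       // IOMMU_NONE selects an UNMAP notification
 *   };
 *
 *   memory_region_notify_iommu(iommu_mr, 0, entry);
 */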
1062 
1063 /**
1064  * memory_region_notify_one: notify a change in an IOMMU translation
1065  *                           entry to a single notifier
1066  *
1067  * This works just like memory_region_notify_iommu(), but it only
1068  * notifies a specific notifier, not all of them.
1069  *
1070  * @notifier: the notifier to be notified
1071  * @entry: the new entry in the IOMMU translation table.  The entry
1072  *         replaces all old entries for the same virtual I/O address range.
1073  *         Deleted entries have .@perm == 0.
1074  */
1075 void memory_region_notify_one(IOMMUNotifier *notifier,
1076                               IOMMUTLBEntry *entry);
1077 
1078 /**
1079  * memory_region_register_iommu_notifier: register a notifier for changes to
1080  * IOMMU translation entries.
1081  *
1082  * @mr: the memory region to observe
1083  * @n: the IOMMUNotifier to be added; the notify callback receives a
1084  *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
1085  *     ceases to be valid on exit from the notifier.
1086  */
1087 void memory_region_register_iommu_notifier(MemoryRegion *mr,
1088                                            IOMMUNotifier *n);
1089 
1090 /**
1091  * memory_region_iommu_replay: replay existing IOMMU translations to
1092  * a notifier with the minimum page granularity returned by
1093  * memory_region_iommu_get_min_page_size().
1094  *
1095  * Note: this is not related to record-and-replay functionality.
1096  *
1097  * @iommu_mr: the memory region to observe
1098  * @n: the notifier to which to replay iommu mappings
1099  */
1100 void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
1101 
1102 /**
1103  * memory_region_unregister_iommu_notifier: unregister a notifier for
1104  * changes to IOMMU translation entries.
1105  *
1106  * @mr: the memory region which was observed and for which notify_stopped()
1107  *      needs to be called
1108  * @n: the notifier to be removed.
1109  */
1110 void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1111                                              IOMMUNotifier *n);
1112 
1113 /**
1114  * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
1115  * defined on the IOMMU.
1116  *
1117  * Returns 0 on success, or a negative errno otherwise. In particular,
1118  * -EINVAL indicates that the IOMMU does not support the requested
1119  * attribute.
1120  *
1121  * @iommu_mr: the memory region
1122  * @attr: the requested attribute
1123  * @data: a pointer to the requested attribute data
1124  */
1125 int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1126                                  enum IOMMUMemoryRegionAttr attr,
1127                                  void *data);
1128 
1129 /**
1130  * memory_region_iommu_attrs_to_index: return the IOMMU index to
1131  * use for translations with the given memory transaction attributes.
1132  *
1133  * @iommu_mr: the memory region
1134  * @attrs: the memory transaction attributes
1135  */
1136 int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
1137                                        MemTxAttrs attrs);
1138 
1139 /**
1140  * memory_region_iommu_num_indexes: return the total number of IOMMU
1141  * indexes that this IOMMU supports.
1142  *
1143  * @iommu_mr: the memory region
1144  */
1145 int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
1146 
1147 /**
1148  * memory_region_name: get a memory region's name
1149  *
1150  * Returns the string that was used to initialize the memory region.
1151  *
1152  * @mr: the memory region being queried
1153  */
1154 const char *memory_region_name(const MemoryRegion *mr);
1155 
1156 /**
1157  * memory_region_is_logging: return whether a memory region is logging writes
1158  *
1159  * Returns %true if the memory region is logging writes for the given client
1160  *
1161  * @mr: the memory region being queried
1162  * @client: the client being queried
1163  */
1164 bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
1165 
1166 /**
1167  * memory_region_get_dirty_log_mask: return the clients for which a
1168  * memory region is logging writes.
1169  *
1170  * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
1171  * are the bit indices.
1172  *
1173  * @mr: the memory region being queried
1174  */
1175 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
1176 
1177 /**
1178  * memory_region_is_rom: check whether a memory region is ROM
1179  *
1180  * Returns %true if a memory region is read-only memory.
1181  *
1182  * @mr: the memory region being queried
1183  */
1184 static inline bool memory_region_is_rom(MemoryRegion *mr)
1185 {
1186     return mr->ram && mr->readonly;
1187 }
1188 
1189 /**
1190  * memory_region_is_nonvolatile: check whether a memory region is non-volatile
1191  *
1192  * Returns %true if a memory region is non-volatile memory.
1193  *
1194  * @mr: the memory region being queried
1195  */
1196 static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
1197 {
1198     return mr->nonvolatile;
1199 }
1200 
1201 /**
1202  * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
1203  *
1204  * Returns a file descriptor backing a file-based RAM memory region,
1205  * or -1 if the region is not a file-based RAM memory region.
1206  *
1207  * @mr: the RAM or alias memory region being queried.
1208  */
1209 int memory_region_get_fd(MemoryRegion *mr);
1210 
1211 /**
1212  * memory_region_from_host: Convert a pointer into a RAM memory region
1213  * and an offset within it.
1214  *
1215  * Given a host pointer inside a RAM memory region (created with
1216  * memory_region_init_ram() or memory_region_init_ram_ptr()), return
1217  * the MemoryRegion and the offset within it.
1218  *
1219  * Use with care; by the time this function returns, the returned pointer is
1220  * not protected by RCU anymore.  If the caller is not within an RCU critical
1221  * section and does not hold the iothread lock, it must have other means of
1222  * protecting the pointer, such as a reference to the region that includes
1223  * the incoming ram_addr_t.
1224  *
1225  * @ptr: the host pointer to be converted
1226  * @offset: the offset within memory region
1227  */
1228 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
1229 
1230 /**
1231  * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
1232  *
1233  * Returns a host pointer to a RAM memory region (created with
1234  * memory_region_init_ram() or memory_region_init_ram_ptr()).
1235  *
1236  * Use with care; by the time this function returns, the returned pointer is
1237  * not protected by RCU anymore.  If the caller is not within an RCU critical
1238  * section and does not hold the iothread lock, it must have other means of
1239  * protecting the pointer, such as a reference to the region that includes
1240  * the incoming ram_addr_t.
1241  *
1242  * @mr: the memory region being queried.
1243  */
1244 void *memory_region_get_ram_ptr(MemoryRegion *mr);
1245 
1246 /* memory_region_ram_resize: Resize a RAM region.
1247  *
1248  * Only legal before the guest might have detected the memory size: e.g. on
1249  * incoming migration, or right after reset.
1250  *
1251  * @mr: a memory region created with @memory_region_init_resizeable_ram.
1252  * @newsize: the new size of the region
1253  * @errp: pointer to Error*, to store an error if it happens.
1254  */
1255 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
1256                               Error **errp);
1257 
1258 /**
1259  * memory_region_set_log: Turn dirty logging on or off for a region.
1260  *
1261  * Turns dirty logging on or off for a specified client (display, migration).
1262  * Only meaningful for RAM regions.
1263  *
1264  * @mr: the memory region being updated.
1265  * @log: whether dirty logging is to be enabled or disabled.
1266  * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
1267  */
1268 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
1269 
1270 /**
1271  * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
1272  *
1273  * Marks a range of bytes as dirty, after it has been dirtied outside
1274  * guest code.
1275  *
1276  * @mr: the memory region being dirtied.
1277  * @addr: the address (relative to the start of the region) being dirtied.
1278  * @size: size of the range being dirtied.
1279  */
1280 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
1281                              hwaddr size);
1282 
1283 /**
1284  * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
1285  *
1286  * This function is called when the caller wants to clear the remote
1287  * dirty bitmap of a memory range within the memory region.  This can
1288  * be used by e.g. KVM to manually clear dirty log when
1289  * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is supported by the host
1290  * kernel.
1291  *
1292  * @mr:     the memory region to clear the dirty log upon
1293  * @start:  start address offset within the memory region
1294  * @len:    length of the memory region to clear dirty bitmap
1295  */
1296 void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
1297                                       hwaddr len);
1298 
1299 /**
1300  * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
1301  *                                         bitmap and clear it.
1302  *
1303  * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
1304  * returns the snapshot.  The snapshot can then be used to query dirty
1305  * status, using memory_region_snapshot_get_dirty.  Snapshotting allows
1306  * querying the same page multiple times, which is especially useful for
1307  * display updates where the scanlines often are not page aligned.
1308  *
1309  * The dirty bitmap region which gets copied into the snapshot (and
1310  * cleared afterwards) can be larger than requested.  The boundaries
1311  * are rounded up/down so complete bitmap longs (covering 64 pages on
1312  * 64-bit hosts) can be copied over into the bitmap snapshot.  This
1313  * isn't a problem for display updates as the extra pages are outside
1314  * the visible area, and in case the visible area changes a full
1315  * display redraw is due anyway.  Should other use cases for this
1316  * function emerge we might have to revisit this implementation
1317  * detail.
1318  *
1319  * Use g_free to release DirtyBitmapSnapshot.
1320  *
1321  * @mr: the memory region being queried.
1322  * @addr: the address (relative to the start of the region) being queried.
1323  * @size: the size of the range being queried.
1324  * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
1325  */
1326 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
1327                                                             hwaddr addr,
1328                                                             hwaddr size,
1329                                                             unsigned client);
1330 
1331 /**
1332  * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
1333  *                                   in the specified dirty bitmap snapshot.
1334  *
1335  * @mr: the memory region being queried.
1336  * @snap: the dirty bitmap snapshot
1337  * @addr: the address (relative to the start of the region) being queried.
1338  * @size: the size of the range being queried.
1339  */
1340 bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
1341                                       DirtyBitmapSnapshot *snap,
1342                                       hwaddr addr, hwaddr size);
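
/*
 * Illustrative usage sketch (not part of the original header): a display
 * device could take one snapshot per refresh and query it per scanline.
 * The "vram" region, "stride", "height" and redraw_scanline() below are
 * hypothetical.
 *
 *     DirtyBitmapSnapshot *snap;
 *     int y;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(vram, 0,
 *                                                   memory_region_size(vram),
 *                                                   DIRTY_MEMORY_VGA);
 *     for (y = 0; y < height; y++) {
 *         if (memory_region_snapshot_get_dirty(vram, snap,
 *                                              y * stride, stride)) {
 *             redraw_scanline(y);      // repaint only dirty scanlines
 *         }
 *     }
 *     g_free(snap);
 */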
1343 
1344 /**
1345  * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
1346  *                            client.
1347  *
1348  * Marks a range of pages as no longer dirty.
1349  *
1350  * @mr: the region being updated.
1351  * @addr: the start of the subrange being cleaned.
1352  * @size: the size of the subrange being cleaned.
1353  * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
1354  *          %DIRTY_MEMORY_VGA.
1355  */
1356 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
1357                                hwaddr size, unsigned client);
1358 
1359 /**
1360  * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
1361  *                                 TBs (for self-modifying code).
1362  *
1363  * The MemoryRegionOps->write() callback of a ROM device must use this function
1364  * to mark byte ranges that have been modified internally, such as by directly
1365  * accessing the memory returned by memory_region_get_ram_ptr().
1366  *
1367  * This function marks the range dirty and invalidates TBs so that TCG can
1368  * detect self-modifying code.
1369  *
1370  * @mr: the region being flushed.
1371  * @addr: the start, relative to the start of the region, of the range being
1372  *        flushed.
1373  * @size: the size, in bytes, of the range being flushed.
1374  */
1375 void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
1376 
1377 /**
1378  * memory_region_set_readonly: Turn a memory region read-only (or read-write)
1379  *
1380  * Allows a memory region to be marked as read-only (turning it into a ROM).
1381  * Only useful on RAM regions.
1382  *
1383  * @mr: the region being updated.
1384  * @readonly: whether the region is to be ROM or RAM.
1385  */
1386 void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
1387 
1388 /**
1389  * memory_region_set_nonvolatile: Turn a memory region non-volatile
1390  *
1391  * Allows a memory region to be marked as non-volatile.
1392  * Only useful on RAM regions.
1393  *
1394  * @mr: the region being updated.
1395  * @nonvolatile: whether the region is to be non-volatile.
1396  */
1397 void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);
1398 
1399 /**
1400  * memory_region_rom_device_set_romd: enable/disable ROMD mode
1401  *
1402  * Allows a ROM device (initialized with memory_region_init_rom_device()) to be
1403  * set to ROMD mode (the default) or MMIO mode.  When it is in ROMD mode, the
1404  * device is mapped to guest memory and satisfies read access directly.
1405  * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
1406  * Writes are always handled by the #MemoryRegion.write function.
1407  *
1408  * @mr: the memory region to be updated
1409  * @romd_mode: %true to put the region into ROMD mode
1410  */
1411 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
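
/*
 * Illustrative sketch (not from the original header): a flash device model
 * might leave ROMD mode while the chip is in programming mode, so that guest
 * reads go through its read() callback, and re-enter ROMD mode once the chip
 * returns to array-read mode.  "FlashState" and its fields are hypothetical.
 *
 *     static void flash_enter_program_mode(FlashState *s)
 *     {
 *         memory_region_rom_device_set_romd(&s->mem, false);  // reads -> MMIO
 *     }
 *
 *     static void flash_return_to_read_array(FlashState *s)
 *     {
 *         memory_region_rom_device_set_romd(&s->mem, true);   // direct reads
 *     }
 */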
1412 
1413 /**
1414  * memory_region_set_coalescing: Enable memory coalescing for the region.
1415  *
1416  * Enables writes to a region to be queued for later processing. MMIO ->write
1417  * callbacks may be delayed until a non-coalesced MMIO is issued.
1418  * Only useful for IO regions.  Roughly similar to write-combining hardware.
1419  *
1420  * @mr: the memory region to be write coalesced
1421  */
1422 void memory_region_set_coalescing(MemoryRegion *mr);
1423 
1424 /**
1425  * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
1426  *                               a region.
1427  *
1428  * Like memory_region_set_coalescing(), but works on a sub-range of a region.
1429  * Multiple calls can be issued to coalesce disjoint ranges.
1430  *
1431  * @mr: the memory region to be updated.
1432  * @offset: the start of the range within the region to be coalesced.
1433  * @size: the size of the subrange to be coalesced.
1434  */
1435 void memory_region_add_coalescing(MemoryRegion *mr,
1436                                   hwaddr offset,
1437                                   uint64_t size);
1438 
1439 /**
1440  * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
1441  *
1442  * Disables any coalescing caused by memory_region_set_coalescing() or
1443  * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
1444  * hardware.
1445  *
1446  * @mr: the memory region to be updated.
1447  */
1448 void memory_region_clear_coalescing(MemoryRegion *mr);
1449 
1450 /**
1451  * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
1452  *                                    accesses.
1453  *
1454  * Ensure that pending coalesced MMIO requests are flushed before the memory
1455  * region is accessed. This property is automatically enabled for all regions
1456  * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
1457  *
1458  * @mr: the memory region to be updated.
1459  */
1460 void memory_region_set_flush_coalesced(MemoryRegion *mr);
1461 
1462 /**
1463  * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
1464  *                                      accesses.
1465  *
1466  * Clear the automatic coalesced MMIO flushing enabled via
1467  * memory_region_set_flush_coalesced. Note that this service has no effect on
1468  * memory regions that have MMIO coalescing enabled for themselves. For them,
1469  * automatic flushing will stop once coalescing is disabled.
1470  *
1471  * @mr: the memory region to be updated.
1472  */
1473 void memory_region_clear_flush_coalesced(MemoryRegion *mr);
1474 
1475 /**
1476  * memory_region_clear_global_locking: Declares that access processing does
1477  *                                     not depend on the QEMU global lock.
1478  *
1479  * By clearing this property, accesses to the memory region will be processed
1480  * outside of QEMU's global lock (unless the lock is already held when the
1481  * access request is issued). In this case, the device model implementing the
1482  * access handlers is responsible for synchronizing concurrent accesses.
1483  *
1484  * @mr: the memory region to be updated.
1485  */
1486 void memory_region_clear_global_locking(MemoryRegion *mr);
1487 
1488 /**
1489  * memory_region_add_eventfd: Request an eventfd to be triggered when a word
1490  *                            is written to a location.
1491  *
1492  * Marks a word in an IO region (initialized with memory_region_init_io())
1493  * as a trigger for an eventfd event.  The I/O callback will not be called.
1494  * The caller must be prepared to handle failure (that is, take the required
1495  * action if the callback _is_ called).
1496  *
1497  * @mr: the memory region being updated.
1498  * @addr: the address within @mr that is to be monitored
1499  * @size: the size of the access to trigger the eventfd
1500  * @match_data: whether to match against @data, instead of just @addr
1501  * @data: the data to match against the guest write
1502  * @e: event notifier to be triggered when @addr, @size, and @data all match.
1503  **/
1504 void memory_region_add_eventfd(MemoryRegion *mr,
1505                                hwaddr addr,
1506                                unsigned size,
1507                                bool match_data,
1508                                uint64_t data,
1509                                EventNotifier *e);
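
/*
 * Illustrative sketch (not from the original header): a virtio-style device
 * could route doorbell writes to an EventNotifier, so that a 16-bit write of
 * the queue index wakes an I/O thread without going through the MMIO write
 * callback.  "s->mmio", "NOTIFY_OFFSET", "queue_index" and "s->ioeventfd"
 * are hypothetical.
 *
 *     event_notifier_init(&s->ioeventfd, 0);
 *     memory_region_add_eventfd(&s->mmio, NOTIFY_OFFSET, 2,
 *                               true, queue_index, &s->ioeventfd);
 *     ...
 *     memory_region_del_eventfd(&s->mmio, NOTIFY_OFFSET, 2,
 *                               true, queue_index, &s->ioeventfd);
 *     event_notifier_cleanup(&s->ioeventfd);
 */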
1510 
1511 /**
1512  * memory_region_del_eventfd: Cancel an eventfd.
1513  *
1514  * Cancels an eventfd trigger requested by a previous
1515  * memory_region_add_eventfd() call.
1516  *
1517  * @mr: the memory region being updated.
1518  * @addr: the address within @mr that is to be monitored
1519  * @size: the size of the access to trigger the eventfd
1520  * @match_data: whether to match against @data, instead of just @addr
1521  * @data: the data to match against the guest write
1522  * @e: event notifier to be triggered when @addr, @size, and @data all match.
1523  */
1524 void memory_region_del_eventfd(MemoryRegion *mr,
1525                                hwaddr addr,
1526                                unsigned size,
1527                                bool match_data,
1528                                uint64_t data,
1529                                EventNotifier *e);
1530 
1531 /**
1532  * memory_region_add_subregion: Add a subregion to a container.
1533  *
1534  * Adds a subregion at @offset.  The subregion may not overlap with other
1535  * subregions (except for those explicitly marked as overlapping).  A region
1536  * may only be added once as a subregion (unless removed with
1537  * memory_region_del_subregion()); use memory_region_init_alias() if you
1538  * want a region to be a subregion in multiple locations.
1539  *
1540  * @mr: the region to contain the new subregion; must be a container
1541  *      initialized with memory_region_init().
1542  * @offset: the offset relative to @mr where @subregion is added.
1543  * @subregion: the subregion to be added.
1544  */
1545 void memory_region_add_subregion(MemoryRegion *mr,
1546                                  hwaddr offset,
1547                                  MemoryRegion *subregion);
1548 /**
1549  * memory_region_add_subregion_overlap: Add a subregion to a container
1550  *                                      with overlap.
1551  *
1552  * Adds a subregion at @offset.  The subregion may overlap with other
1553  * subregions.  Conflicts are resolved by having a higher @priority hide a
1554  * lower @priority. Subregions without priority are taken as @priority 0.
1555  * A region may only be added once as a subregion (unless removed with
1556  * memory_region_del_subregion()); use memory_region_init_alias() if you
1557  * want a region to be a subregion in multiple locations.
1558  *
1559  * @mr: the region to contain the new subregion; must be a container
1560  *      initialized with memory_region_init().
1561  * @offset: the offset relative to @mr where @subregion is added.
1562  * @subregion: the subregion to be added.
1563  * @priority: used for resolving overlaps; highest priority wins.
1564  */
1565 void memory_region_add_subregion_overlap(MemoryRegion *mr,
1566                                          hwaddr offset,
1567                                          MemoryRegion *subregion,
1568                                          int priority);
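
/*
 * Illustrative sketch (not from the original header): a board could build a
 * simple layout by placing RAM and a higher-priority MMIO window into one
 * container.  The "sysmem", "ram" and "mmio" regions, "owner", "dev_ops",
 * "dev" and the sizes/addresses below are hypothetical.
 *
 *     memory_region_init(sysmem, owner, "sysmem", UINT64_MAX);
 *     memory_region_init_ram(ram, owner, "ram", 256 * MiB, &error_fatal);
 *     memory_region_init_io(mmio, owner, &dev_ops, dev, "dev-mmio", 0x1000);
 *
 *     memory_region_add_subregion(sysmem, 0, ram);
 *     // priority 1 hides the RAM underneath the MMIO window
 *     memory_region_add_subregion_overlap(sysmem, 0x0f000000, mmio, 1);
 */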
1569 
1570 /**
1571  * memory_region_get_ram_addr: Get the ram address associated with a memory
1572  *                             region
1573  */
1574 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
1575 
1576 uint64_t memory_region_get_alignment(const MemoryRegion *mr);
1577 /**
1578  * memory_region_del_subregion: Remove a subregion.
1579  *
1580  * Removes a subregion from its container.
1581  *
1582  * @mr: the container to be updated.
1583  * @subregion: the region being removed; must be a current subregion of @mr.
1584  */
1585 void memory_region_del_subregion(MemoryRegion *mr,
1586                                  MemoryRegion *subregion);
1587 
1588 /*
1589  * memory_region_set_enabled: dynamically enable or disable a region
1590  *
1591  * Enables or disables a memory region.  A disabled memory region
1592  * ignores all accesses to itself and its subregions.  It does not
1593  * obscure sibling subregions with lower priority - it simply behaves as
1594  * if it was removed from the hierarchy.
1595  *
1596  * Regions default to being enabled.
1597  *
1598  * @mr: the region to be updated
1599  * @enabled: whether to enable or disable the region
1600  */
1601 void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
1602 
1603 /*
1604  * memory_region_set_address: dynamically update the address of a region
1605  *
1606  * Dynamically updates the address of a region, relative to its container.
1607  * May be used on regions that are currently part of a memory hierarchy.
1608  *
1609  * @mr: the region to be updated
1610  * @addr: new address, relative to container region
1611  */
1612 void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
1613 
1614 /*
1615  * memory_region_set_size: dynamically update the size of a region.
1616  *
1617  * Dynamically updates the size of a region.
1618  *
1619  * @mr: the region to be updated
1620  * @size: used size of the region.
1621  */
1622 void memory_region_set_size(MemoryRegion *mr, uint64_t size);
1623 
1624 /*
1625  * memory_region_set_alias_offset: dynamically update a memory alias's offset
1626  *
1627  * Dynamically updates the offset into the target region that an alias points
1628  * to, as if the fourth argument to memory_region_init_alias() had changed.
1629  *
1630  * @mr: the #MemoryRegion to be updated; should be an alias.
1631  * @offset: the new offset into the target memory region
1632  */
1633 void memory_region_set_alias_offset(MemoryRegion *mr,
1634                                     hwaddr offset);
1635 
1636 /**
1637  * memory_region_present: checks if an address relative to a @container
1638  * translates into a #MemoryRegion within @container
1639  *
1640  * Answer whether a #MemoryRegion within @container covers the address
1641  * @addr.
1642  *
1643  * @container: a #MemoryRegion within which @addr is a relative address
1644  * @addr: the area within @container to be searched
1645  */
1646 bool memory_region_present(MemoryRegion *container, hwaddr addr);
1647 
1648 /**
1649  * memory_region_is_mapped: returns true if #MemoryRegion is mapped
1650  * into any address space.
1651  *
1652  * @mr: a #MemoryRegion which should be checked if it's mapped
1653  */
1654 bool memory_region_is_mapped(MemoryRegion *mr);
1655 
1656 /**
1657  * memory_region_find: translate an address/size relative to a
1658  * MemoryRegion into a #MemoryRegionSection.
1659  *
1660  * Locates the first #MemoryRegion within @mr that overlaps the range
1661  * given by @addr and @size.
1662  *
1663  * Returns a #MemoryRegionSection that describes a contiguous overlap.
1664  * It will have the following characteristics:
1665  *    .@size = 0 iff no overlap was found
1666  *    .@mr is non-%NULL iff an overlap was found
1667  *
1668  * Remember that in the return value the @offset_within_region is
1669  * relative to the returned region (in the .@mr field), not to the
1670  * @mr argument.
1671  *
1672  * Similarly, the .@offset_within_address_space is relative to the
1673  * address space that contains both regions, the passed and the
1674  * returned one.  However, in the special case where the @mr argument
1675  * has no container (and thus is the root of the address space), the
1676  * following will hold:
1677  *    .@offset_within_address_space >= @addr
1678  *    .@offset_within_address_space + .@size <= @addr + @size
1679  *
1680  * @mr: a MemoryRegion within which @addr is a relative address
1681  * @addr: start of the area within @mr to be searched
1682  * @size: size of the area to be searched
1683  */
1684 MemoryRegionSection memory_region_find(MemoryRegion *mr,
1685                                        hwaddr addr, uint64_t size);
1686 
1687 /**
1688  * memory_global_dirty_log_sync: synchronize the dirty log for all memory
1689  *
1690  * Synchronizes the dirty page log for all address spaces.
1691  */
1692 void memory_global_dirty_log_sync(void);
1693 
1694 /**
1695  * memory_global_after_dirty_log_sync: synchronize vCPUs with the dirty log
1696  *
1697  * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
1698  * This function must be called after the dirty log bitmap is cleared, and
1699  * before dirty guest memory pages are read.  If you are using
1700  * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
1701  * care of doing this.
1702  */
1703 void memory_global_after_dirty_log_sync(void);
1704 
1705 /**
1706  * memory_region_transaction_begin: Start a transaction.
1707  *
1708  * During a transaction, changes will be accumulated and made visible
1709  * only when the transaction ends (is committed).
1710  */
1711 void memory_region_transaction_begin(void);
1712 
1713 /**
1714  * memory_region_transaction_commit: Commit a transaction and make changes
1715  *                                   visible to the guest.
1716  */
1717 void memory_region_transaction_commit(void);
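
/*
 * Illustrative sketch (not from the original header): batching several
 * topology updates so that the guest observes them atomically and the
 * flat views are rebuilt only once.  "s", "bar_mem" and "new_base" are
 * hypothetical.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(&s->bar_mem, false);
 *     memory_region_set_address(&s->bar_mem, new_base);
 *     memory_region_set_enabled(&s->bar_mem, true);
 *     memory_region_transaction_commit();
 */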
1718 
1719 /**
1720  * memory_listener_register: register callbacks to be called when memory
1721  *                           sections are mapped or unmapped into an address
1722  *                           space
1723  *
1724  * @listener: an object containing the callbacks to be called
1725  * @filter: if non-%NULL, only regions in this address space will be observed
1726  */
1727 void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
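
/*
 * Illustrative sketch (not from the original header): a minimal listener that
 * logs sections as they are mapped into the system memory address space.  The
 * callback name and priority are arbitrary.
 *
 *     static void my_region_add(MemoryListener *l, MemoryRegionSection *sec)
 *     {
 *         qemu_log("mapped %s @ 0x%" HWADDR_PRIx "\n",
 *                  memory_region_name(sec->mr),
 *                  sec->offset_within_address_space);
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */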
1728 
1729 /**
1730  * memory_listener_unregister: undo the effect of memory_listener_register()
1731  *
1732  * @listener: an object containing the callbacks to be removed
1733  */
1734 void memory_listener_unregister(MemoryListener *listener);
1735 
1736 /**
1737  * memory_global_dirty_log_start: begin dirty logging for all regions
1738  */
1739 void memory_global_dirty_log_start(void);
1740 
1741 /**
1742  * memory_global_dirty_log_stop: end dirty logging for all regions
1743  */
1744 void memory_global_dirty_log_stop(void);
1745 
1746 void mtree_info(bool flatview, bool dispatch_tree, bool owner);
1747 
1748 /**
1749  * memory_region_dispatch_read: perform a read directly to the specified
1750  * MemoryRegion.
1751  *
1752  * @mr: #MemoryRegion to access
1753  * @addr: address within that region
1754  * @pval: pointer to uint64_t which the data is written to
1755  * @op: size, sign, and endianness of the memory operation
1756  * @attrs: memory transaction attributes to use for the access
1757  */
1758 MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1759                                         hwaddr addr,
1760                                         uint64_t *pval,
1761                                         MemOp op,
1762                                         MemTxAttrs attrs);
1763 /**
1764  * memory_region_dispatch_write: perform a write directly to the specified
1765  * MemoryRegion.
1766  *
1767  * @mr: #MemoryRegion to access
1768  * @addr: address within that region
1769  * @data: data to write
1770  * @op: size, sign, and endianness of the memory operation
1771  * @attrs: memory transaction attributes to use for the access
1772  */
1773 MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1774                                          hwaddr addr,
1775                                          uint64_t data,
1776                                          MemOp op,
1777                                          MemTxAttrs attrs);
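
/*
 * Illustrative sketch (not from the original header): issuing a 32-bit
 * little-endian write directly to a region, bypassing address space
 * translation.  "mr" and "val" are hypothetical; the transaction attributes
 * are left unspecified.
 *
 *     MemTxResult r;
 *
 *     r = memory_region_dispatch_write(mr, 0x10, val, MO_32 | MO_LE,
 *                                      MEMTXATTRS_UNSPECIFIED);
 *     if (r != MEMTX_OK) {
 *         // handle the failed transaction
 *     }
 */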
1778 
1779 /**
1780  * address_space_init: initializes an address space
1781  *
1782  * @as: an uninitialized #AddressSpace
1783  * @root: a #MemoryRegion that routes addresses for the address space
1784  * @name: an address space name.  The name is only used for debugging
1785  *        output.
1786  */
1787 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
1788 
1789 /**
1790  * address_space_destroy: destroy an address space
1791  *
1792  * Releases all resources associated with an address space.  After an address space
1793  * is destroyed, its root memory region (given by address_space_init()) may be destroyed
1794  * as well.
1795  *
1796  * @as: address space to be destroyed
1797  */
1798 void address_space_destroy(AddressSpace *as);
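
/*
 * Illustrative sketch (not from the original header): a device that needs its
 * own DMA view can wrap a root region in a private address space.  "s" and
 * its fields are hypothetical; "dma_window" is assumed to be an already
 * initialized #MemoryRegion.
 *
 *     memory_region_init(&s->dma_root, OBJECT(s), "dev-dma-root", UINT64_MAX);
 *     memory_region_add_subregion(&s->dma_root, 0, &s->dma_window);
 *     address_space_init(&s->dma_as, &s->dma_root, "dev-dma");
 *     ...
 *     address_space_destroy(&s->dma_as);
 */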
1799 
1800 /**
1801  * address_space_remove_listeners: unregister all listeners of an address space
1802  *
1803  * Removes all callbacks previously registered with memory_listener_register()
1804  * for @as.
1805  *
1806  * @as: an initialized #AddressSpace
1807  */
1808 void address_space_remove_listeners(AddressSpace *as);
1809 
1810 /**
1811  * address_space_rw: read from or write to an address space.
1812  *
1813  * Return a MemTxResult indicating whether the operation succeeded
1814  * or failed (eg unassigned memory, device rejected the transaction,
1815  * IOMMU fault).
1816  *
1817  * @as: #AddressSpace to be accessed
1818  * @addr: address within that address space
1819  * @attrs: memory transaction attributes
1820  * @buf: buffer with the data transferred
1821  * @len: the number of bytes to read or write
1822  * @is_write: indicates the transfer direction
1823  */
1824 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
1825                              MemTxAttrs attrs, uint8_t *buf,
1826                              hwaddr len, bool is_write);
1827 
1828 /**
1829  * address_space_write: write to address space.
1830  *
1831  * Return a MemTxResult indicating whether the operation succeeded
1832  * or failed (eg unassigned memory, device rejected the transaction,
1833  * IOMMU fault).
1834  *
1835  * @as: #AddressSpace to be accessed
1836  * @addr: address within that address space
1837  * @attrs: memory transaction attributes
1838  * @buf: buffer with the data transferred
1839  * @len: the number of bytes to write
1840  */
1841 MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
1842                                 MemTxAttrs attrs,
1843                                 const uint8_t *buf, hwaddr len);
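
/*
 * Illustrative sketch (not from the original header): DMA-ing a small buffer
 * into guest memory through the system memory address space and checking the
 * result.  "dma_addr" is a hypothetical guest physical address.
 *
 *     uint8_t buf[16] = { 0 };
 *
 *     if (address_space_write(&address_space_memory, dma_addr,
 *                             MEMTXATTRS_UNSPECIFIED, buf,
 *                             sizeof(buf)) != MEMTX_OK) {
 *         // report a DMA error to the guest
 *     }
 */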
1844 
1845 /**
1846  * address_space_write_rom: write to address space, including ROM.
1847  *
1848  * This function writes to the specified address space, but will
1849  * write data to both ROM and RAM. This is used for non-guest
1850  * writes like writes from the gdb debug stub or initial loading
1851  * of ROM contents.
1852  *
1853  * Note that portions of the write which attempt to write data to
1854  * a device will be silently ignored -- only real RAM and ROM will
1855  * be written to.
1856  *
1857  * Return a MemTxResult indicating whether the operation succeeded
1858  * or failed (eg unassigned memory, device rejected the transaction,
1859  * IOMMU fault).
1860  *
1861  * @as: #AddressSpace to be accessed
1862  * @addr: address within that address space
1863  * @attrs: memory transaction attributes
1864  * @buf: buffer with the data transferred
1865  * @len: the number of bytes to write
1866  */
1867 MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
1868                                     MemTxAttrs attrs,
1869                                     const uint8_t *buf, hwaddr len);
1870 
1871 /* address_space_ld*: load from an address space
1872  * address_space_st*: store to an address space
1873  *
1874  * These functions perform a load or store of the byte, word,
1875  * longword or quad to the specified address within the AddressSpace.
1876  * The _le suffixed functions treat the data as little endian;
1877  * _be indicates big endian; no suffix indicates "same endianness
1878  * as guest CPU".
1879  *
1880  * The "guest CPU endianness" accessors are deprecated for use outside
1881  * target-* code; devices should be CPU-agnostic and use either the LE
1882  * or the BE accessors.
1883  *
1884  * @as: #AddressSpace to be accessed
1885  * @addr: address within that address space
1886  * @val: data value, for stores
1887  * @attrs: memory transaction attributes
1888  * @result: location to write the success/failure of the transaction;
1889  *   if NULL, this information is discarded
1890  */
1891 
1892 #define SUFFIX
1893 #define ARG1         as
1894 #define ARG1_DECL    AddressSpace *as
1895 #include "exec/memory_ldst.inc.h"
1896 
1897 #define SUFFIX
1898 #define ARG1         as
1899 #define ARG1_DECL    AddressSpace *as
1900 #include "exec/memory_ldst_phys.inc.h"
1901 
1902 struct MemoryRegionCache {
1903     void *ptr;
1904     hwaddr xlat;
1905     hwaddr len;
1906     FlatView *fv;
1907     MemoryRegionSection mrs;
1908     bool is_write;
1909 };
1910 
1911 #define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })
1912 
1913 
1914 /* address_space_ld*_cached: load from a cached #MemoryRegion
1915  * address_space_st*_cached: store into a cached #MemoryRegion
1916  *
1917  * These functions perform a load or store of the byte, word,
1918  * longword or quad to the specified address.  The address is
1919  * a physical address in the AddressSpace, but it must lie within
1920  * a #MemoryRegion that was mapped with address_space_cache_init.
1921  *
1922  * The _le suffixed functions treat the data as little endian;
1923  * _be indicates big endian; no suffix indicates "same endianness
1924  * as guest CPU".
1925  *
1926  * The "guest CPU endianness" accessors are deprecated for use outside
1927  * target-* code; devices should be CPU-agnostic and use either the LE
1928  * or the BE accessors.
1929  *
1930  * @cache: previously initialized #MemoryRegionCache to be accessed
1931  * @addr: address within the address space
1932  * @val: data value, for stores
1933  * @attrs: memory transaction attributes
1934  * @result: location to write the success/failure of the transaction;
1935  *   if NULL, this information is discarded
1936  */
1937 
1938 #define SUFFIX       _cached_slow
1939 #define ARG1         cache
1940 #define ARG1_DECL    MemoryRegionCache *cache
1941 #include "exec/memory_ldst.inc.h"
1942 
1943 /* Inline fast path for direct RAM access.  */
1944 static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
1945     hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
1946 {
1947     assert(addr < cache->len);
1948     if (likely(cache->ptr)) {
1949         return ldub_p(cache->ptr + addr);
1950     } else {
1951         return address_space_ldub_cached_slow(cache, addr, attrs, result);
1952     }
1953 }
1954 
1955 static inline void address_space_stb_cached(MemoryRegionCache *cache,
1956     hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
1957 {
1958     assert(addr < cache->len);
1959     if (likely(cache->ptr)) {
1960         stb_p(cache->ptr + addr, val);
1961     } else {
1962         address_space_stb_cached_slow(cache, addr, val, attrs, result);
1963     }
1964 }
1965 
1966 #define ENDIANNESS   _le
1967 #include "exec/memory_ldst_cached.inc.h"
1968 
1969 #define ENDIANNESS   _be
1970 #include "exec/memory_ldst_cached.inc.h"
1971 
1972 #define SUFFIX       _cached
1973 #define ARG1         cache
1974 #define ARG1_DECL    MemoryRegionCache *cache
1975 #include "exec/memory_ldst_phys.inc.h"
1976 
1977 /* address_space_cache_init: prepare for repeated access to a physical
1978  * memory region
1979  *
1980  * @cache: #MemoryRegionCache to be filled
1981  * @as: #AddressSpace to be accessed
1982  * @addr: address within that address space
1983  * @len: length of buffer
1984  * @is_write: indicates the transfer direction
1985  *
1986  * Will only work with RAM, and may map a subset of the requested range by
1987  * returning a value that is less than @len.  On failure, return a negative
1988  * errno value.
1989  *
1990  * Because it only works with RAM, this function can be used for
1991  * read-modify-write operations.  In this case, is_write should be %true.
1992  *
1993  * Note that addresses passed to the address_space_*_cached functions
1994  * are relative to @addr.
1995  */
1996 int64_t address_space_cache_init(MemoryRegionCache *cache,
1997                                  AddressSpace *as,
1998                                  hwaddr addr,
1999                                  hwaddr len,
2000                                  bool is_write);
2001 
2002 /**
2003  * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
2004  *
2005  * @cache: The #MemoryRegionCache to operate on.
2006  * @addr: The first physical address that was written, relative to the
2007  * address that was passed to @address_space_cache_init.
2008  * @access_len: The number of bytes that were written starting at @addr.
2009  */
2010 void address_space_cache_invalidate(MemoryRegionCache *cache,
2011                                     hwaddr addr,
2012                                     hwaddr access_len);
2013 
2014 /**
2015  * address_space_cache_destroy: free a #MemoryRegionCache
2016  *
2017  * @cache: The #MemoryRegionCache whose memory should be released.
2018  */
2019 void address_space_cache_destroy(MemoryRegionCache *cache);
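
/*
 * Illustrative sketch (not from the original header): caching a small,
 * frequently accessed structure in guest RAM, e.g. a ring descriptor, for a
 * read-modify-write.  "desc_gpa" and "DESC_SIZE" are hypothetical.
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     uint16_t flags;
 *
 *     if (address_space_cache_init(&cache, &address_space_memory,
 *                                  desc_gpa, DESC_SIZE, true) < 0) {
 *         return;  // the range could not be mapped as RAM
 *     }
 *     flags = address_space_lduw_le_cached(&cache, 0,
 *                                          MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_stw_le_cached(&cache, 0, flags | 1,
 *                                 MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_cache_invalidate(&cache, 0, sizeof(flags));
 *     address_space_cache_destroy(&cache);
 */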
2020 
2021 /* address_space_get_iotlb_entry: translate an address into an IOTLB
2022  * entry. Should be called from an RCU critical section.
2023  */
2024 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
2025                                             bool is_write, MemTxAttrs attrs);
2026 
2027 /* flatview_translate: translate an address range within a FlatView
2028  * into a MemoryRegion and an address range within that region.  Should be
2029  * called from an RCU critical section, so that the last reference to the
2030  * returned region cannot disappear after flatview_translate returns.
2031  *
2032  * @fv: #FlatView to be accessed
2033  * @addr: address within that address space
2034  * @xlat: pointer to address within the returned memory region section's
2035  * #MemoryRegion.
2036  * @len: pointer to length
2037  * @is_write: indicates the transfer direction
2038  * @attrs: memory attributes
2039  */
2040 MemoryRegion *flatview_translate(FlatView *fv,
2041                                  hwaddr addr, hwaddr *xlat,
2042                                  hwaddr *len, bool is_write,
2043                                  MemTxAttrs attrs);
2044 
2045 static inline MemoryRegion *address_space_translate(AddressSpace *as,
2046                                                     hwaddr addr, hwaddr *xlat,
2047                                                     hwaddr *len, bool is_write,
2048                                                     MemTxAttrs attrs)
2049 {
2050     return flatview_translate(address_space_to_flatview(as),
2051                               addr, xlat, len, is_write, attrs);
2052 }
2053 
2054 /* address_space_access_valid: check for validity of accessing an address
2055  * space range
2056  *
2057  * Check whether memory is assigned to the given address space range, and
2058  * access is permitted by any IOMMU regions that are active for the address
2059  * space.
2060  *
2061  * For now, addr and len should be aligned to a page size.  This limitation
2062  * will be lifted in the future.
2063  *
2064  * @as: #AddressSpace to be accessed
2065  * @addr: address within that address space
2066  * @len: length of the area to be checked
2067  * @is_write: indicates the transfer direction
2068  * @attrs: memory attributes
2069  */
2070 bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
2071                                 bool is_write, MemTxAttrs attrs);
2072 
2073 /* address_space_map: map a physical memory region into a host virtual address
2074  *
2075  * May map a subset of the requested range, given by and returned in @plen.
2076  * May return %NULL if resources needed to perform the mapping are exhausted.
2077  * Use only for reads OR writes - not for read-modify-write operations.
2078  * Use cpu_register_map_client() to know when retrying the map operation is
2079  * likely to succeed.
2080  *
2081  * @as: #AddressSpace to be accessed
2082  * @addr: address within that address space
2083  * @plen: pointer to length of buffer; updated on return
2084  * @is_write: indicates the transfer direction
2085  * @attrs: memory attributes
2086  */
2087 void *address_space_map(AddressSpace *as, hwaddr addr,
2088                         hwaddr *plen, bool is_write, MemTxAttrs attrs);
2089 
2090 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
2091  *
2092  * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
2093  * the amount of memory that was actually read or written by the caller.
2094  *
2095  * @as: #AddressSpace used
2096  * @buffer: host pointer as returned by address_space_map()
2097  * @len: buffer length as returned by address_space_map()
2098  * @access_len: amount of data actually transferred
2099  * @is_write: indicates the transfer direction
2100  */
2101 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2102                          int is_write, hwaddr access_len);
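
/*
 * Illustrative sketch (not from the original header): the usual pattern of
 * mapping a guest range, operating on it in place, and unmapping it with the
 * amount actually transferred.  "gpa" and "want" are hypothetical.
 *
 *     hwaddr plen = want;
 *     void *host = address_space_map(&address_space_memory, gpa, &plen,
 *                                    true, MEMTXATTRS_UNSPECIFIED);
 *
 *     if (!host) {
 *         return;  // resources exhausted; see cpu_register_map_client()
 *     }
 *     if (plen < want) {
 *         // only part of the range could be mapped contiguously
 *     }
 *     memset(host, 0, plen);
 *     address_space_unmap(&address_space_memory, host, plen, true, plen);
 */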
2103 
2104 
2105 /* Internal functions, part of the implementation of address_space_read.  */
2106 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2107                                     MemTxAttrs attrs, uint8_t *buf, hwaddr len);
2108 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
2109                                    MemTxAttrs attrs, uint8_t *buf,
2110                                    hwaddr len, hwaddr addr1, hwaddr l,
2111                                    MemoryRegion *mr);
2112 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
2113 
2114 /* Internal functions, part of the implementation of address_space_read_cached
2115  * and address_space_write_cached.  */
2116 void address_space_read_cached_slow(MemoryRegionCache *cache,
2117                                     hwaddr addr, void *buf, hwaddr len);
2118 void address_space_write_cached_slow(MemoryRegionCache *cache,
2119                                      hwaddr addr, const void *buf, hwaddr len);
2120 
2121 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
2122 {
2123     if (is_write) {
2124         return memory_region_is_ram(mr) &&
2125                !mr->readonly && !memory_region_is_ram_device(mr);
2126     } else {
2127         return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
2128                memory_region_is_romd(mr);
2129     }
2130 }
2131 
2132 /**
2133  * address_space_read: read from an address space.
2134  *
2135  * Return a MemTxResult indicating whether the operation succeeded
2136  * or failed (eg unassigned memory, device rejected the transaction,
2137  * IOMMU fault).  Called within RCU critical section.
2138  *
2139  * @as: #AddressSpace to be accessed
2140  * @addr: address within that address space
2141  * @attrs: memory transaction attributes
2142  * @buf: buffer with the data transferred
2143  */
2144 static inline __attribute__((__always_inline__))
2145 MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
2146                                MemTxAttrs attrs, uint8_t *buf,
2147                                hwaddr len)
2148 {
2149     MemTxResult result = MEMTX_OK;
2150     hwaddr l, addr1;
2151     void *ptr;
2152     MemoryRegion *mr;
2153     FlatView *fv;
2154 
2155     if (__builtin_constant_p(len)) {
2156         if (len) {
2157             rcu_read_lock();
2158             fv = address_space_to_flatview(as);
2159             l = len;
2160             mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
2161             if (len == l && memory_access_is_direct(mr, false)) {
2162                 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2163                 memcpy(buf, ptr, len);
2164             } else {
2165                 result = flatview_read_continue(fv, addr, attrs, buf, len,
2166                                                 addr1, l, mr);
2167             }
2168             rcu_read_unlock();
2169         }
2170     } else {
2171         result = address_space_read_full(as, addr, attrs, buf, len);
2172     }
2173     return result;
2174 }
2175 
2176 /**
2177  * address_space_read_cached: read from a cached RAM region
2178  *
2179  * @cache: Cached region to be addressed
2180  * @addr: address relative to the base of the RAM region
2181  * @buf: buffer with the data transferred
2182  * @len: length of the data transferred
2183  */
2184 static inline void
2185 address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
2186                           void *buf, hwaddr len)
2187 {
2188     assert(addr < cache->len && len <= cache->len - addr);
2189     if (likely(cache->ptr)) {
2190         memcpy(buf, cache->ptr + addr, len);
2191     } else {
2192         address_space_read_cached_slow(cache, addr, buf, len);
2193     }
2194 }
2195 
2196 /**
2197  * address_space_write_cached: write to a cached RAM region
2198  *
2199  * @cache: Cached region to be addressed
2200  * @addr: address relative to the base of the RAM region
2201  * @buf: buffer with the data transferred
2202  * @len: length of the data transferred
2203  */
2204 static inline void
2205 address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
2206                            void *buf, hwaddr len)
2207 {
2208     assert(addr < cache->len && len <= cache->len - addr);
2209     if (likely(cache->ptr)) {
2210         memcpy(cache->ptr + addr, buf, len);
2211     } else {
2212         address_space_write_cached_slow(cache, addr, buf, len);
2213     }
2214 }
2215 
2216 #ifdef NEED_CPU_H
2217 /* enum device_endian to MemOp.  */
2218 static inline MemOp devend_memop(enum device_endian end)
2219 {
2220     QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
2221                       DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);
2222 
2223 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
2224     /* Swap if non-host endianness or native (target) endianness */
2225     return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
2226 #else
2227     const int non_host_endianness =
2228         DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN;
2229 
2230     /* In this case, native (target) endianness needs no swap.  */
2231     return (end == non_host_endianness) ? MO_BSWAP : 0;
2232 #endif
2233 }
2234 #endif
2235 
2236 #endif
2237 
2238 #endif
2239