/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef SYSTEM_MEMORY_H
#define SYSTEM_MEMORY_H

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/memop.h"
#include "exec/ramlist.h"
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/range.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "memory-region"
DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
                         TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
                     IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)

#define TYPE_RAM_DISCARD_MANAGER "ram-discard-manager"
typedef struct RamDiscardManagerClass RamDiscardManagerClass;
typedef struct RamDiscardManager RamDiscardManager;
DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
                     RAM_DISCARD_MANAGER, TYPE_RAM_DISCARD_MANAGER);

#ifdef CONFIG_FUZZ
void fuzz_dma_read_cb(size_t addr,
                      size_t len,
                      MemoryRegion *mr);
#else
static inline void fuzz_dma_read_cb(size_t addr,
                                    size_t len,
                                    MemoryRegion *mr)
{
    /* Do Nothing */
}
#endif

/* Possible bits for global_dirty_log_{start|stop} */

/* Dirty tracking enabled because migration is running */
#define GLOBAL_DIRTY_MIGRATION (1U << 0)

/* Dirty tracking enabled because measuring dirty rate */
#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)

/* Dirty tracking enabled because dirty limit */
#define GLOBAL_DIRTY_LIMIT (1U << 2)

#define GLOBAL_DIRTY_MASK (0x7)

extern unsigned int global_dirty_tracking;

typedef struct MemoryRegionOps MemoryRegionOps;

struct ReservedRegion {
    Range range;
    unsigned type;
};

/**
 * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 * @nonvolatile: this section is non-volatile
 * @unmergeable: this section should not get merged with adjacent sections
 */
struct MemoryRegionSection {
    Int128 size;
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    hwaddr offset_within_address_space;
    bool readonly;
    bool nonvolatile;
    bool unmergeable;
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/*
 * See address_space_translate:
 *   - bit 0 : read
 *   - bit 1 : write
 *   - bit 2 : exec
 *   - bit 3 : priv
 *   - bit 4 : global
 *   - bit 5 : untranslated only
 */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
    IOMMU_EXEC = 4,
    IOMMU_PRIV = 8,
    IOMMU_GLOBAL = 16,
    IOMMU_UNTRANSLATED_ONLY = 32,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | \
                                 ((w) ? IOMMU_WO : 0))
#define IOMMU_ACCESS_FLAG_FULL(r, w, x, p, g, uo) \
    (IOMMU_ACCESS_FLAG(r, w) | \
     ((x) ? IOMMU_EXEC : 0) | \
     ((p) ? IOMMU_PRIV : 0) | \
     ((g) ? IOMMU_GLOBAL : 0) | \
     ((uo) ? IOMMU_UNTRANSLATED_ONLY : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
    uint32_t         pasid;
};
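
/*
 * Example (an illustrative sketch, not part of this header's API contract):
 * an IOMMU producing a 4 KiB read-write mapping could fill in an entry as
 * below. IOMMU_ACCESS_FLAG(true, true) evaluates to IOMMU_RW; the variables
 * iova and pa are hypothetical.
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as       = &address_space_memory,
 *         .iova            = iova & ~(hwaddr)0xfff,
 *         .translated_addr = pa & ~(hwaddr)0xfff,
 *         .addr_mask       = 0xfff,
 *         .perm            = IOMMU_ACCESS_FLAG(true, true),
 *     };
 */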

/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
 *
 * Normally there are two use cases for the notifiers:
 *
 *   (1) When the device needs accurate synchronization of the vIOMMU page
 *       tables, it needs to register with both MAP|UNMAP notifications
 *       (which is defined as IOMMU_NOTIFIER_IOTLB_EVENTS below).
 *
 *       Accurate synchronization is needed when the notified device
 *       maintains a shadow page table and must be notified on each
 *       guest MAP (page table entry creation) and UNMAP (invalidation)
 *       event (e.g. VFIO). Both notifications must be accurate so that
 *       the shadow page table is fully in sync with the guest view.
 *
 *   (2) When the device doesn't need accurate synchronization of the
 *       vIOMMU page tables, it needs to register only with UNMAP or
 *       DEVIOTLB_UNMAP notifications.
 *
 *       This applies when the device maintains a cache of IOMMU
 *       translations (IOTLB) and is able to fill that cache by requesting
 *       translations from the vIOMMU through a protocol similar to ATS
 *       (Address Translation Service).
 *
 *       Note that in this mode the vIOMMU will not maintain a shadowed
 *       page table for the address space, and the UNMAP messages can cover
 *       more than the pages that used to get mapped. The notified device
 *       should be able to take care of over-sized invalidations.
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
    /* Notify changes on device IOTLB entries */
    IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
#define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
                            IOMMU_NOTIFIER_DEVIOTLB_EVENTS)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;
    void *opaque;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

typedef struct IOMMUTLBEvent {
    IOMMUNotifierFlag type;
    IOMMUTLBEntry entry;
} IOMMUTLBEvent;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * Resizing RAM while migrating can result in the migration being canceled.
 */
#define RAM_RESIZEABLE (1 << 2)

/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
 * zero the page and wake waiting processes.
 * (Set during postcopy)
 */
#define RAM_UF_ZEROPAGE (1 << 3)

/* RAM can be migrated */
#define RAM_MIGRATABLE (1 << 4)

/* RAM is a persistent kind of memory */
#define RAM_PMEM (1 << 5)


/*
 * UFFDIO_WRITEPROTECT is used on this RAMBlock to
 * support 'write-tracking' migration type.
 * Implies ram_state->ram_wt_enabled.
 */
#define RAM_UF_WRITEPROTECT (1 << 6)

/*
 * RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or huge
 * pages if applicable) is skipped: will bail out if not supported. When not
 * set, the OS will do the reservation, if supported for the memory type.
 */
#define RAM_NORESERVE (1 << 7)

/* RAM that isn't accessible through normal means. */
#define RAM_PROTECTED (1 << 8)

/* RAM is an mmap-ed named file */
#define RAM_NAMED_FILE (1 << 9)

/* RAM is mmap-ed read-only */
#define RAM_READONLY (1 << 10)

/* RAM FD is opened read-only */
#define RAM_READONLY_FD (1 << 11)

/* RAM can be private, with a KVM guest memfd backend */
#define RAM_GUEST_MEMFD (1 << 12)

/*
 * In RAMBlock creation functions, if MAP_SHARED is 0 in the flags parameter,
 * the implementation may still create a shared mapping if other conditions
 * require it. Callers who specifically want a private mapping, e.g. objects
 * specified by the user, must pass RAM_PRIVATE.
 * After RAMBlock creation, MAP_SHARED in the block's flags indicates whether
 * the block is shared or private, and MAP_PRIVATE is omitted.
 */
#define RAM_PRIVATE (1 << 13)

static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end,
                                       int iommu_idx)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
    n->iommu_idx = iommu_idx;
}
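
/*
 * A minimal usage sketch, assuming a device that only caches translations
 * (case (2) in the comment above); the callback name, device state, and
 * iommu_mr variable are hypothetical:
 *
 *     static void my_dev_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
 *     {
 *         // Drop any cached translations overlapping
 *         // [entry->iova, entry->iova + entry->addr_mask].
 *     }
 *
 *     iommu_notifier_init(&dev->n, my_dev_unmap_notify, IOMMU_NOTIFIER_UNMAP,
 *                         0, HWADDR_MAX, 0);
 *     memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), &dev->n,
 *                                           &error_fatal);
 */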

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported. Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented. Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented. Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported. Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};
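
/*
 * A typical device defines one MemoryRegionOps instance and wires it up
 * with memory_region_init_io() (declared below). A minimal sketch, with a
 * hypothetical device state and register layout:
 *
 *     static uint64_t my_dev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         return addr == 0 ? s->status : 0;   // reg 0: status
 *     }
 *
 *     static void my_dev_write(void *opaque, hwaddr addr, uint64_t data,
 *                              unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         if (addr == 4) {                    // reg 4: control
 *             s->control = data;
 *         }
 *     }
 *
 *     static const MemoryRegionOps my_dev_ops = {
 *         .read = my_dev_read,
 *         .write = my_dev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid = {
 *             .min_access_size = 4,
 *             .max_access_size = 4,
 *         },
 *     };
 */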

typedef struct MemoryRegionClass {
    /* private */
    ObjectClass parent_class;
} MemoryRegionClass;


enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};

/*
 * IOMMUMemoryRegionClass:
 *
 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 * and provide an implementation of at least the @translate method here
 * to handle requests to the memory region. Other methods are optional.
 *
 * The IOMMU implementation must use the IOMMU notifier infrastructure
 * to report whenever mappings are changed, by calling
 * memory_region_notify_iommu() (or, if necessary, by calling
 * memory_region_notify_iommu_one() for each registered notifier).
 *
 * Conceptually an IOMMU provides a mapping from input address
 * to an output TLB entry. If the IOMMU is aware of memory transaction
 * attributes and the output TLB entry depends on the transaction
 * attributes, we represent this using IOMMU indexes. Each index
 * selects a particular translation table that the IOMMU has:
 *
 *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
 *
 *   @translate takes an input address and an IOMMU index
 *
 * and the mapping returned can only depend on the input address and the
 * IOMMU index.
 *
 * Most IOMMUs don't care about the transaction attributes and support
 * only a single IOMMU index. A more complex IOMMU might have one index
 * for secure transactions and one for non-secure transactions.
 */
struct IOMMUMemoryRegionClass {
    /* private: */
    MemoryRegionClass parent_class;

    /* public: */
    /**
     * @translate:
     *
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @addr: address to be translated within the memory region
     *
     * @flag: requested access permission
     *
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);
    /**
     * @get_min_page_size:
     *
     * Returns minimum supported page size in bytes.
     *
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /**
     * @notify_flag_changed:
     *
     * Called when the IOMMU Notifier flags change (i.e. when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @old_flags: events which previously needed to be notified
     *
     * @new_flags: events which now need to be notified
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL if the new flag bitmap is not supported by the
     * IOMMU memory region. In case of failure, an error must be set
     * in @errp.
     */
    int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                               IOMMUNotifierFlag old_flags,
                               IOMMUNotifierFlag new_flags,
                               Error **errp);
    /**
     * @replay:
     *
     * Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /**
     * @get_attr:
     *
     * Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @attr: attribute being queried
     *
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /**
     * @attrs_to_index:
     *
     * Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /**
     * @num_indexes:
     *
     * Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);
};
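
/*
 * A minimal @translate implementation might look like the sketch below,
 * under the simplifying assumption of a flat, identity-mapped IOMMU with
 * 4 KiB pages (the function name is hypothetical):
 *
 *     static IOMMUTLBEntry my_iommu_translate(IOMMUMemoryRegion *iommu,
 *                                             hwaddr addr,
 *                                             IOMMUAccessFlags flag,
 *                                             int iommu_idx)
 *     {
 *         return (IOMMUTLBEntry) {
 *             .target_as = &address_space_memory,
 *             .iova = addr & ~(hwaddr)0xfff,
 *             .translated_addr = addr & ~(hwaddr)0xfff,
 *             .addr_mask = 0xfff,
 *             .perm = IOMMU_RW,
 *         };
 *     }
 */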

typedef struct RamDiscardListener RamDiscardListener;
typedef int (*NotifyRamPopulate)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);
typedef void (*NotifyRamDiscard)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);

struct RamDiscardListener {
    /*
     * @notify_populate:
     *
     * Notification that previously discarded memory is about to get populated.
     * Listeners are able to object. If any listener objects, already
     * successfully notified listeners are notified about a discard again.
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get populated. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     *
     * Returns 0 on success. If the notification is rejected by the listener,
     * an error is returned.
     */
    NotifyRamPopulate notify_populate;

    /*
     * @notify_discard:
     *
     * Notification that previously populated memory was discarded successfully
     * and listeners should drop all references to such memory and prevent
     * new population (e.g., unmap).
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get discarded. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     */
    NotifyRamDiscard notify_discard;

    /*
     * @double_discard_supported:
     *
     * The listener supports getting @notify_discard notifications that span
     * already discarded parts.
     */
    bool double_discard_supported;

    MemoryRegionSection *section;
    QLIST_ENTRY(RamDiscardListener) next;
};

static inline void ram_discard_listener_init(RamDiscardListener *rdl,
                                             NotifyRamPopulate populate_fn,
                                             NotifyRamDiscard discard_fn,
                                             bool double_discard_supported)
{
    rdl->notify_populate = populate_fn;
    rdl->notify_discard = discard_fn;
    rdl->double_discard_supported = double_discard_supported;
}
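
/*
 * For instance (a hedged sketch; the callback names are made up), a
 * VFIO-like listener that must map populated parts and unmap discarded
 * parts could be initialized and registered as follows:
 *
 *     static int my_map_populated(RamDiscardListener *rdl,
 *                                 MemoryRegionSection *section)
 *     {
 *         return 0;   // e.g. pin/map the pages; nonzero rejects population
 *     }
 *
 *     static void my_unmap_discarded(RamDiscardListener *rdl,
 *                                    MemoryRegionSection *section)
 *     {
 *         // e.g. unpin/unmap the pages
 *     }
 *
 *     ram_discard_listener_init(&rdl, my_map_populated, my_unmap_discarded,
 *                               false);
 *     ram_discard_manager_register_listener(rdm, &rdl, section);
 */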

/**
 * typedef ReplayRamDiscardState:
 *
 * The callback handler for #RamDiscardManagerClass.replay_populated/
 * #RamDiscardManagerClass.replay_discarded to invoke on populated/discarded
 * parts.
 *
 * @section: the #MemoryRegionSection of the populated/discarded part
 * @opaque: pointer to forward to the callback
 *
 * Returns 0 on success, or a negative error if it failed.
 */
typedef int (*ReplayRamDiscardState)(MemoryRegionSection *section,
                                     void *opaque);

/*
 * RamDiscardManagerClass:
 *
 * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
 * regions are currently populated to be used/accessed by the VM, notifying
 * after parts were discarded (freeing up memory) and before parts will be
 * populated (consuming memory).
 *
 * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
 * #MemoryRegion isn't mapped into an address space yet (either directly
 * or via an alias); it cannot change while the #MemoryRegion is
 * mapped into an address space.
 *
 * The #RamDiscardManager is intended to be used by technologies that are
 * incompatible with discarding of RAM (e.g., VFIO, which may pin all
 * memory inside a #MemoryRegion), and require proper coordination to only
 * map the currently populated parts, to hinder parts that are expected to
 * remain discarded from silently getting populated and consuming memory.
 * Technologies that support discarding of RAM don't have to bother and can
 * simply map the whole #MemoryRegion.
 *
 * An example #RamDiscardManager is virtio-mem, which logically (un)plugs
 * memory within an assigned RAM #MemoryRegion, coordinated with the VM.
 * Logically unplugging memory consists of discarding RAM. The VM agreed to not
 * access unplugged (discarded) memory - especially via DMA. virtio-mem will
 * properly coordinate with listeners before memory is plugged (populated),
 * and after memory is unplugged (discarded).
 *
 * Listeners are called in multiples of the minimum granularity (unless it
 * would exceed the registered range) and changes are aligned to the minimum
 * granularity within the #MemoryRegion. Listeners have to prepare for memory
 * becoming discarded in a different granularity than it was populated and the
 * other way around.
 */
struct RamDiscardManagerClass {
    /* private */
    InterfaceClass parent_class;

    /* public */

    /**
     * @get_min_granularity:
     *
     * Get the minimum granularity in which listeners will get notified
     * about changes within the #MemoryRegion via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @mr: the #MemoryRegion
     *
     * Returns the minimum granularity.
     */
    uint64_t (*get_min_granularity)(const RamDiscardManager *rdm,
                                    const MemoryRegion *mr);

    /**
     * @is_populated:
     *
     * Check whether the given #MemoryRegionSection is completely populated
     * (i.e., no parts are currently discarded) via the #RamDiscardManager.
     * There are no alignment requirements.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     *
     * Returns whether the given range is completely populated.
     */
    bool (*is_populated)(const RamDiscardManager *rdm,
                         const MemoryRegionSection *section);

    /**
     * @replay_populated:
     *
     * Call the #ReplayRamDiscardState callback for all populated parts within
     * the #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any call fails, no further calls are made.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamDiscardState callback
     * @opaque: pointer to forward to the callback
     *
     * Returns 0 on success, or a negative error if any notification failed.
     */
    int (*replay_populated)(const RamDiscardManager *rdm,
                            MemoryRegionSection *section,
                            ReplayRamDiscardState replay_fn, void *opaque);

    /**
     * @replay_discarded:
     *
     * Call the #ReplayRamDiscardState callback for all discarded parts within
     * the #MemoryRegionSection via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamDiscardState callback
     * @opaque: pointer to forward to the callback
     *
     * Returns 0 on success, or a negative error if any notification failed.
     */
    int (*replay_discarded)(const RamDiscardManager *rdm,
                            MemoryRegionSection *section,
                            ReplayRamDiscardState replay_fn, void *opaque);

    /**
     * @register_listener:
     *
     * Register a #RamDiscardListener for the given #MemoryRegionSection and
     * immediately notify the #RamDiscardListener about all populated parts
     * within the #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any notification fails, no further notifications are triggered
     * and an error is logged.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     * @section: the #MemoryRegionSection
     */
    void (*register_listener)(RamDiscardManager *rdm,
                              RamDiscardListener *rdl,
                              MemoryRegionSection *section);

    /**
     * @unregister_listener:
     *
     * Unregister a previously registered #RamDiscardListener via the
     * #RamDiscardManager after notifying the #RamDiscardListener about all
     * populated parts becoming unpopulated within the registered
     * #MemoryRegionSection.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     */
    void (*unregister_listener)(RamDiscardManager *rdm,
                                RamDiscardListener *rdl);
};

uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
                                                 const MemoryRegion *mr);

bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
                                      const MemoryRegionSection *section);

/**
 * ram_discard_manager_replay_populated:
 *
 * A wrapper to call the #RamDiscardManagerClass.replay_populated callback
 * of the #RamDiscardManager.
 *
 * @rdm: the #RamDiscardManager
 * @section: the #MemoryRegionSection
 * @replay_fn: the #ReplayRamDiscardState callback
 * @opaque: pointer to forward to the callback
 *
 * Returns 0 on success, or a negative error if any notification failed.
 */
int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamDiscardState replay_fn,
                                         void *opaque);

/**
 * ram_discard_manager_replay_discarded:
 *
 * A wrapper to call the #RamDiscardManagerClass.replay_discarded callback
 * of the #RamDiscardManager.
 *
 * @rdm: the #RamDiscardManager
 * @section: the #MemoryRegionSection
 * @replay_fn: the #ReplayRamDiscardState callback
 * @opaque: pointer to forward to the callback
 *
 * Returns 0 on success, or a negative error if any notification failed.
 */
int ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamDiscardState replay_fn,
                                         void *opaque);
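
/*
 * As an illustration (the callback and variable names are hypothetical),
 * counting the populated bytes of a section could be done with the replay
 * wrapper:
 *
 *     static int count_populated(MemoryRegionSection *s, void *opaque)
 *     {
 *         *(uint64_t *)opaque += int128_get64(s->size);
 *         return 0;
 *     }
 *
 *     uint64_t bytes = 0;
 *     ram_discard_manager_replay_populated(rdm, section, count_populated,
 *                                          &bytes);
 */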

void ram_discard_manager_register_listener(RamDiscardManager *rdm,
                                           RamDiscardListener *rdl,
                                           MemoryRegionSection *section);

void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                             RamDiscardListener *rdl);

/**
 * memory_translate_iotlb: Extract addresses from a TLB entry.
 *                         Called with rcu_read_lock held.
 *
 * @iotlb: pointer to an #IOMMUTLBEntry
 * @xlat_p: return the offset of the entry from the start of the returned
 *          MemoryRegion.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: On success, return the MemoryRegion containing the @iotlb translated
 *         addr. The MemoryRegion must not be accessed after rcu_read_unlock.
 *         On failure, return NULL, setting @errp with error.
 */
MemoryRegion *memory_translate_iotlb(IOMMUTLBEntry *iotlb, hwaddr *xlat_p,
                                     Error **errp);

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

/** MemoryRegion:
 *
 * A struct representing a memory region.
 */
struct MemoryRegion {
    Object parent_obj;

    /* private: */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool nonvolatile;
    bool rom_device;
    bool flush_coalesced_mmio;
    bool unmergeable;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;
    /* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */
    DeviceState *dev;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    int mapped_via_alias; /* Mapped via an alias, container might be NULL */
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    RamDiscardManager *rdm; /* Only for RAM */

    /* For devices designed to perform re-entrant IO into their own IO MRs */
    bool disable_reentrancy_guard;
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

#define MEMORY_LISTENER_PRIORITY_MIN         0
#define MEMORY_LISTENER_PRIORITY_ACCEL       10
#define MEMORY_LISTENER_PRIORITY_DEV_BACKEND 10

/**
 * struct MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    /**
     * @begin:
     *
     * Called at the beginning of an address space update transaction.
     * Followed by calls to #MemoryListener.region_add(),
     * #MemoryListener.region_del(), #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop() in
     * increasing address order.
     *
     * @listener: The #MemoryListener.
     */
    void (*begin)(MemoryListener *listener);

    /**
     * @commit:
     *
     * Called at the end of an address space update transaction,
     * after the last call to #MemoryListener.region_add(),
     * #MemoryListener.region_del() or #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop().
     *
     * @listener: The #MemoryListener.
     */
    void (*commit)(MemoryListener *listener);

    /**
     * @region_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is new in this address space
     * since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     */
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has disappeared in the address
     * space since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The old #MemoryRegionSection.
     */
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_nop:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is in the same place in the
     * address space as in the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_start:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop(), if dirty memory logging clients have
     * become active since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old_val: A bitmap of dirty memory logging clients that were active in
     *           the previous transaction.
     * @new_val: A bitmap of dirty memory logging clients that are active in
     *           the current transaction.
     */
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old_val, int new_val);

    /**
     * @log_stop:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop() and possibly after
     * #MemoryListener.log_start(), if dirty memory logging clients have
     * become inactive since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old_val: A bitmap of dirty memory logging clients that were active in
     *           the previous transaction.
     * @new_val: A bitmap of dirty memory logging clients that are active in
     *           the current transaction.
     */
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old_val, int new_val);

    /**
     * @log_sync:
     *
     * Called by memory_region_snapshot_and_clear_dirty() and
     * memory_global_dirty_log_sync(), before accessing QEMU's "official"
     * copy of the dirty memory bitmap for a #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_sync_global:
     *
     * This is the global version of @log_sync when the listener does
     * not have a way to synchronize the log with finer granularity.
     * When the listener registers with @log_sync_global defined, then
     * its @log_sync must be NULL. Vice versa.
     *
     * @listener: The #MemoryListener.
     * @last_stage: The last stage to synchronize the log during migration.
     *              The caller should guarantee that the synchronization with
     *              @last_stage set to true is triggered only once, after all
     *              VCPUs have been stopped.
     */
    void (*log_sync_global)(MemoryListener *listener, bool last_stage);

    /**
     * @log_clear:
     *
     * Called before reading the dirty memory bitmap for a
     * #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_global_start:
     *
     * Called by memory_global_dirty_log_start(), which
     * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space. #MemoryListener.log_global_start() is also
     * called when a #MemoryListener is added, if global dirty logging is
     * active at that time.
     *
     * @listener: The #MemoryListener.
     * @errp: pointer to Error*, to store an error if it happens.
     *
     * Return: true on success, else false setting @errp with error.
     */
    bool (*log_global_start)(MemoryListener *listener, Error **errp);

    /**
     * @log_global_stop:
     *
     * Called by memory_global_dirty_log_stop(), which
     * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_stop)(MemoryListener *listener);

    /**
     * @log_global_after_sync:
     *
     * Called after reading the dirty memory bitmap
     * for any #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_after_sync)(MemoryListener *listener);

    /**
     * @eventfd_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @match_data: The @match_data parameter for the new ioeventfd.
     * @data: The @data parameter for the new ioeventfd.
     * @e: The #EventNotifier parameter for the new ioeventfd.
     */
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @eventfd_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped an ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @match_data: The @match_data parameter for the dropped ioeventfd.
     * @data: The @data parameter for the dropped ioeventfd.
     * @e: The #EventNotifier parameter for the dropped ioeventfd.
     */
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @coalesced_io_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new coalesced
     * MMIO range registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);

    /**
     * @coalesced_io_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped a coalesced
     * MMIO range since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);
    /**
     * @priority:
     *
     * Governs the order in which memory listeners are invoked. Lower priorities
     * are invoked earlier for "add" or "start" callbacks, and later for "delete"
     * or "stop" callbacks.
     */
    unsigned priority;

    /**
     * @name:
     *
     * Name of the listener. It can be used in contexts where we'd like to
     * distinguish one memory listener from the rest.
     */
    const char *name;

    /* private: */
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};
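
/*
 * A minimal sketch of hooking into address space changes (the listener and
 * callback names are hypothetical; memory_listener_register() is declared
 * elsewhere):
 *
 *     static void my_region_add(MemoryListener *listener,
 *                               MemoryRegionSection *section)
 *     {
 *         // react to a new section of guest-visible memory
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .name = "my-listener",
 *         .region_add = my_region_add,
 *         .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */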

typedef struct AddressSpaceMapClient {
    QEMUBH *bh;
    QLIST_ENTRY(AddressSpaceMapClient) link;
} AddressSpaceMapClient;

#define DEFAULT_MAX_BOUNCE_BUFFER_SIZE (4096)

/**
 * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* private: */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;

    /* Accessed via RCU. */
    struct FlatView *current_map;

    int ioeventfd_nb;
    int ioeventfd_notifiers;
    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;

    /*
     * Maximum DMA bounce buffer size used for indirect memory map requests.
     * This limits the total size of bounce buffer allocations made for
     * DMA requests to indirect memory regions within this AddressSpace. DMA
     * requests that exceed the limit (e.g. due to overly large requested size
     * or concurrent DMA requests having claimed too much buffer space) will be
     * rejected and left to the caller to handle.
     */
    size_t max_bounce_buffer_size;
    /* Total size of bounce buffers currently allocated, atomically accessed */
    size_t bounce_buffer_size;
    /* List of callbacks to invoke when buffers free up */
    QemuMutex map_client_list_lock;
    QLIST_HEAD(, AddressSpaceMapClient) map_client_list;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;
typedef struct FlatRange FlatRange;

/* Flattened global view of current active memory hierarchy. Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
    return qatomic_rcu_read(&as->current_map);
}

/**
 * typedef flatview_cb: callback for flatview_for_each_range()
 *
 * @start: start address of the range within the FlatView
 * @len: length of the range in bytes
 * @mr: MemoryRegion covering this range
 * @offset_in_region: offset of the first byte of the range within @mr
 * @opaque: data pointer passed to flatview_for_each_range()
 *
 * Returns: true to stop the iteration, false to keep going.
 */
typedef bool (*flatview_cb)(Int128 start,
                            Int128 len,
                            const MemoryRegion *mr,
                            hwaddr offset_in_region,
                            void *opaque);

/**
 * flatview_for_each_range: Iterate through a FlatView
 * @fv: the FlatView to iterate through
 * @cb: function to call for each range
 * @opaque: opaque data pointer to pass to @cb
 *
 * A FlatView is made up of a list of non-overlapping ranges, each of
 * which is a slice of a MemoryRegion. This function iterates through
 * each range in @fv, calling @cb. The callback function can terminate
 * iteration early by returning 'true'.
 */
void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);
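
/*
 * For example (a hedged sketch; the callback name is made up), finding the
 * first RAM range in the current flat view of an address space:
 *
 *     static bool find_first_ram(Int128 start, Int128 len,
 *                                const MemoryRegion *mr,
 *                                hwaddr offset_in_region, void *opaque)
 *     {
 *         if (memory_region_is_ram((MemoryRegion *)mr)) {
 *             *(Int128 *)opaque = start;
 *             return true;    // stop iterating
 *         }
 *         return false;       // keep going
 *     }
 *
 *     Int128 first = int128_zero();
 *     RCU_READ_LOCK_GUARD();
 *     flatview_for_each_range(address_space_to_flatview(as), find_first_ram,
 *                             &first);
 */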

static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
                                          MemoryRegionSection *b)
{
    return a->mr == b->mr &&
           a->fv == b->fv &&
           a->offset_within_region == b->offset_within_region &&
           a->offset_within_address_space == b->offset_within_address_space &&
           int128_eq(a->size, b->size) &&
           a->readonly == b->readonly &&
           a->nonvolatile == b->nonvolatile;
}

/**
 * memory_region_section_new_copy: Copy a memory region section
 *
 * Allocate memory for a new copy, copy the memory region section, and
 * properly take a reference on all relevant members.
 *
 * @s: the #MemoryRegionSection to copy
 */
MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s);

/**
 * memory_region_section_free_copy: Free a copied memory region section
 *
 * Free a copy of a memory section created via memory_region_section_new_copy(),
 * properly dropping references on all relevant members.
 *
 * @s: the #MemoryRegionSection to free
 */
void memory_region_section_free_copy(MemoryRegionSection *s);

/**
 * memory_region_section_intersect_range: Adjust the memory section to cover
 * the intersection with the given range.
 *
 * @s: the #MemoryRegionSection to be adjusted
 * @offset: the offset of the given range in the memory region
 * @size: the size of the given range
 *
 * Returns false if the intersection is empty, otherwise returns true.
 */
static inline bool memory_region_section_intersect_range(MemoryRegionSection *s,
                                                         uint64_t offset,
                                                         uint64_t size)
{
    uint64_t start = MAX(s->offset_within_region, offset);
    Int128 end = int128_min(int128_add(int128_make64(s->offset_within_region),
                                       s->size),
                            int128_add(int128_make64(offset),
                                       int128_make64(size)));

    if (int128_le(end, int128_make64(start))) {
        return false;
    }

    s->offset_within_address_space += start - s->offset_within_region;
    s->offset_within_region = start;
    s->size = int128_sub(end, int128_make64(start));
    return true;
}
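
/*
 * Usage sketch (values are illustrative): given a section covering
 * [0x0, 0x10000) of a region, intersecting with the range at offset 0x8000
 * of size 0x10000 leaves the section covering [0x8000, 0x10000) and returns
 * true; an entirely disjoint range returns false and leaves the section
 * unmodified. Callers typically work on a local copy:
 *
 *     MemoryRegionSection tmp = *section;
 *     if (memory_region_section_intersect_range(&tmp, 0x8000, 0x10000)) {
 *         // operate on the clipped section 'tmp'
 *     }
 */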

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions. Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size);

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug. MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL. This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Subtract 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug. MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);

/**
 * memory_region_init_ram_nomigrate: Initialize RAM memory region. Accesses
 *                                   into the region will modify memory
 *                                   directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_ram_flags_nomigrate: Initialize RAM memory region.
 *                                         Accesses into the region will
 *                                         modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE,
 *             RAM_GUEST_MEMFD.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
                                            Object *owner,
                                            const char *name,
                                            uint64_t size,
                                            uint32_t ram_flags,
                                            Error **errp);
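
/*
 * Typical device realize code (a sketch; the device state, ops table, sizes,
 * and error handling convention are illustrative) combines these
 * initializers with memory_region_add_subregion() or sysbus MMIO mapping:
 *
 *     memory_region_init_io(&s->mmio, OBJECT(s), &my_dev_ops, s,
 *                           "my-dev-mmio", 0x1000);
 *     if (!memory_region_init_ram_nomigrate(&s->buf, OBJECT(s),
 *                                           "my-dev-buf", 64 * KiB, errp)) {
 *         return;
 *     }
 */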

/**
 * memory_region_init_resizeable_ram: Initialize memory region with resizable
 *                                    RAM. Accesses into the region will
 *                                    modify memory directly. Only an initial
 *                                    portion of this RAM is actually used.
 *                                    Changing the size while migrating
 *                                    can result in the migration being
 *                                    canceled.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef CONFIG_POSIX

/**
 * memory_region_init_ram_from_file: Initialize RAM memory region with a
 *                                   mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *             RAM_READONLY_FD, RAM_GUEST_MEMFD
 * @path: the path in which to allocate the RAM.
 * @offset: offset within the file referenced by path
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_from_file(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      ram_addr_t offset,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd: Initialize RAM memory region with a
 *                                 mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *             RAM_READONLY_FD, RAM_GUEST_MEMFD
 * @fd: the fd to mmap.
 * @offset: offset within the file referenced by fd
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    uint32_t ram_flags,
                                    int fd,
                                    ram_addr_t offset,
                                    Error **errp);
#endif

/**
 * memory_region_init_ram_ptr: Initialize RAM memory region from a
 *                             user-provided pointer. Accesses into the
 *                             region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram_device_ptr: Initialize RAM device memory region from
 *                                    a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device. The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly. However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided. Replaces
 * skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);
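
/*
 * Aliases are how the same RAM can appear at multiple guest-physical
 * addresses. A sketch (names, sizes, and addresses are illustrative)
 * mirroring the low 1 MiB of a RAM region at a second address:
 *
 *     memory_region_init_alias(&s->lowmem_alias, OBJECT(s), "lowmem-alias",
 *                              &s->ram, 0, 1 * MiB);
 *     memory_region_add_subregion(system_memory, 0x100000000ULL,
 *                                 &s->lowmem_alias);
 */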
1582
1583 /**
1584 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
1585 *
1586 * This has the same effect as calling memory_region_init_ram_nomigrate()
1587 * and then marking the resulting region read-only with
1588 * memory_region_set_readonly().
1589 *
1590 * Note that this function does not do anything to cause the data in the
1591 * RAM side of the memory region to be migrated; that is the responsibility
1592 * of the caller.
1593 *
1594 * @mr: the #MemoryRegion to be initialized.
1595 * @owner: the object that tracks the region's reference count
1596 * @name: Region name, becomes part of RAMBlock name used in migration stream;
1597 * must be unique within any device
1598 * @size: size of the region.
1599 * @errp: pointer to Error*, to store an error if it happens.
1600 *
1601 * Return: true on success, else false setting @errp with error.
1602 */
1603 bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
1604 Object *owner,
1605 const char *name,
1606 uint64_t size,
1607 Error **errp);
1608
1609 /**
1610 * memory_region_init_rom_device_nomigrate: Initialize a ROM memory region.
1611 * Writes are handled via callbacks.
1612 *
1613 * Note that this function does not do anything to cause the data in the
1614 * RAM side of the memory region to be migrated; that is the responsibility
1615 * of the caller.
1616 *
1617 * @mr: the #MemoryRegion to be initialized.
1618 * @owner: the object that tracks the region's reference count
1619 * @ops: callbacks for write access handling (must not be NULL).
1620 * @opaque: passed to the read and write callbacks of the @ops structure.
1621 * @name: Region name, becomes part of RAMBlock name used in migration stream;
1622 * must be unique within any device
1623 * @size: size of the region.
1624 * @errp: pointer to Error*, to store an error if it happens.
1625 *
1626 * Return: true on success, else false setting @errp with error.
1627 */
1628 bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1629 Object *owner,
1630 const MemoryRegionOps *ops,
1631 void *opaque,
1632 const char *name,
1633 uint64_t size,
1634 Error **errp);
1635
1636 /**
1637 * memory_region_init_iommu: Initialize a memory region of a custom type
1638 * that translates addresses
1639 *
1640 * An IOMMU region translates addresses and forwards accesses to a target
1641 * memory region.
1642 *
1643 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
1644 * @_iommu_mr should be a pointer to enough memory for an instance of
1645 * that subclass, @instance_size is the size of that subclass, and
1646 * @mrtypename is its name. This function will initialize @_iommu_mr as an
1647 * instance of the subclass, and its methods will then be called to handle
1648 * accesses to the memory region. See the documentation of
1649 * #IOMMUMemoryRegionClass for further details.
1650 *
1651 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
1652 * @instance_size: the IOMMUMemoryRegion subclass instance size
1653 * @mrtypename: the type name of the #IOMMUMemoryRegion
1654 * @owner: the object that tracks the region's reference count
1655 * @name: used for debugging; not visible to the user or ABI
1656 * @size: size of the region.
1657 */
1658 void memory_region_init_iommu(void *_iommu_mr,
1659 size_t instance_size,
1660 const char *mrtypename,
1661 Object *owner,
1662 const char *name,
1663 uint64_t size);
1664
1665 /**
1666 * memory_region_init_ram: Initialize RAM memory region. Accesses into the
1667 * region will modify memory directly.
1668 *
1669 * @mr: the #MemoryRegion to be initialized
1670 * @owner: the object that tracks the region's reference count (must be
1671 * TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
1672 * @name: name of the memory region
1673 * @size: size of the region in bytes
1674 * @errp: pointer to Error*, to store an error if it happens.
1675 *
1676 * This function allocates RAM for a board model or device, and
1677 * arranges for it to be migrated (by calling vmstate_register_ram()
1678 * if @owner is a DeviceState, or vmstate_register_ram_global() if
1679 * @owner is NULL).
1680 *
1681 * TODO: Currently we restrict @owner to being either NULL (for
1682 * global RAM regions with no owner) or devices, so that we can
1683 * give the RAM block a unique name for migration purposes.
1684 * We should lift this restriction and allow arbitrary Objects.
1685 * If you pass a non-NULL non-device @owner then we will assert.
1686 *
1687 * Return: true on success, else false setting @errp with error.
1688 */
1689 bool memory_region_init_ram(MemoryRegion *mr,
1690 Object *owner,
1691 const char *name,
1692 uint64_t size,
1693 Error **errp);
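
/*
 * A device would typically allocate its RAM from its realize method and
 * propagate failure through @errp (an illustrative sketch; the MyDevState
 * type and its field names are made up):
 *
 *     static void mydev_realize(DeviceState *dev, Error **errp)
 *     {
 *         MyDevState *s = MYDEV(dev);
 *
 *         if (!memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                                     s->ram_size, errp)) {
 *             return;
 *         }
 *         ...
 *     }
 */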
1694
1695 bool memory_region_init_ram_guest_memfd(MemoryRegion *mr,
1696 Object *owner,
1697 const char *name,
1698 uint64_t size,
1699 Error **errp);
1700
1701 /**
1702 * memory_region_init_rom: Initialize a ROM memory region.
1703 *
1704 * This has the same effect as calling memory_region_init_ram()
1705 * and then marking the resulting region read-only with
1706 * memory_region_set_readonly(). This includes arranging for the
1707 * contents to be migrated.
1708 *
1709 * TODO: Currently we restrict @owner to being either NULL (for
1710 * global RAM regions with no owner) or devices, so that we can
1711 * give the RAM block a unique name for migration purposes.
1712 * We should lift this restriction and allow arbitrary Objects.
1713 * If you pass a non-NULL non-device @owner then we will assert.
1714 *
1715 * @mr: the #MemoryRegion to be initialized.
1716 * @owner: the object that tracks the region's reference count
1717 * @name: Region name, becomes part of RAMBlock name used in migration stream;
1718 * must be unique within any device
1719 * @size: size of the region.
1720 * @errp: pointer to Error*, to store an error if it happens.
1721 *
1722 * Return: true on success, else false setting @errp with error.
1723 */
1724 bool memory_region_init_rom(MemoryRegion *mr,
1725 Object *owner,
1726 const char *name,
1727 uint64_t size,
1728 Error **errp);
1729
1730 /**
1731 * memory_region_init_rom_device: Initialize a ROM memory region.
1732 * Writes are handled via callbacks.
1733 *
1734 * This function initializes a memory region backed by RAM for reads
1735 * and callbacks for writes, and arranges for the RAM backing to
1736 * be migrated (by calling vmstate_register_ram()
1737 * if @owner is a DeviceState, or vmstate_register_ram_global() if
1738 * @owner is NULL).
1739 *
1740 * TODO: Currently we restrict @owner to being either NULL (for
1741 * global RAM regions with no owner) or devices, so that we can
1742 * give the RAM block a unique name for migration purposes.
1743 * We should lift this restriction and allow arbitrary Objects.
1744 * If you pass a non-NULL non-device @owner then we will assert.
1745 *
1746 * @mr: the #MemoryRegion to be initialized.
1747 * @owner: the object that tracks the region's reference count
1748 * @ops: callbacks for write access handling (must not be NULL).
1749 * @opaque: passed to the read and write callbacks of the @ops structure.
1750 * @name: Region name, becomes part of RAMBlock name used in migration stream;
1751 * must be unique within any device
1752 * @size: size of the region.
1753 * @errp: pointer to Error*, to store an error if it happens.
1754 *
1755 * Return: true on success, else false setting @errp with error.
1756 */
1757 bool memory_region_init_rom_device(MemoryRegion *mr,
1758 Object *owner,
1759 const MemoryRegionOps *ops,
1760 void *opaque,
1761 const char *name,
1762 uint64_t size,
1763 Error **errp);
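
/*
 * A flash-like device might back reads with RAM while trapping writes (an
 * illustrative sketch; the myflash_* names and state layout are made up):
 *
 *     static void myflash_write(void *opaque, hwaddr addr,
 *                               uint64_t data, unsigned size)
 *     {
 *         // decode the program/erase command; if the backing RAM is
 *         // modified directly (via memory_region_get_ram_ptr()), call
 *         // memory_region_flush_rom_device() on the changed range
 *     }
 *
 *     static const MemoryRegionOps myflash_ops = {
 *         .write = myflash_write,
 *         .endianness = DEVICE_LITTLE_ENDIAN,
 *     };
 *
 *     memory_region_init_rom_device(&s->flash, OBJECT(dev), &myflash_ops,
 *                                   s, "myflash", s->size, errp);
 */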
1764
1765
1766 /**
1767 * memory_region_owner: get a memory region's owner.
1768 *
1769 * @mr: the memory region being queried.
1770 */
1771 Object *memory_region_owner(MemoryRegion *mr);
1772
1773 /**
1774 * memory_region_size: get a memory region's size.
1775 *
1776 * @mr: the memory region being queried.
1777 */
1778 uint64_t memory_region_size(MemoryRegion *mr);
1779
1780 /**
1781 * memory_region_is_ram: check whether a memory region is random access
1782 *
1783 * Returns %true if a memory region is random access.
1784 *
1785 * @mr: the memory region being queried
1786 */
1787 static inline bool memory_region_is_ram(MemoryRegion *mr)
1788 {
1789 return mr->ram;
1790 }
1791
1792 /**
1793 * memory_region_is_ram_device: check whether a memory region is a ram device
1794 *
1795 * Returns %true if a memory region is a device-backed RAM region.
1796 *
1797 * @mr: the memory region being queried
1798 */
1799 bool memory_region_is_ram_device(MemoryRegion *mr);
1800
1801 /**
1802 * memory_region_is_romd: check whether a memory region is in ROMD mode
1803 *
1804 * Returns %true if a memory region is a ROM device and currently set to allow
1805 * direct reads.
1806 *
1807 * @mr: the memory region being queried
1808 */
1809 static inline bool memory_region_is_romd(MemoryRegion *mr)
1810 {
1811 return mr->rom_device && mr->romd_mode;
1812 }
1813
1814 /**
1815 * memory_region_is_protected: check whether a memory region is protected
1816 *
1817 * Returns %true if a memory region is protected RAM and cannot be accessed
1818 * via standard mechanisms, e.g. DMA.
1819 *
1820 * @mr: the memory region being queried
1821 */
1822 bool memory_region_is_protected(MemoryRegion *mr);
1823
1824 /**
1825 * memory_region_has_guest_memfd: check whether a memory region has guest_memfd
1826 * associated
1827 *
1828 * Returns %true if a memory region's ram_block has valid guest_memfd assigned.
1829 *
1830 * @mr: the memory region being queried
1831 */
1832 bool memory_region_has_guest_memfd(MemoryRegion *mr);
1833
1834 /**
1835 * memory_region_get_iommu: check whether a memory region is an iommu
1836 *
1837 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
1838 * otherwise NULL.
1839 *
1840 * @mr: the memory region being queried
1841 */
1842 static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
1843 {
1844 if (mr->alias) {
1845 return memory_region_get_iommu(mr->alias);
1846 }
1847 if (mr->is_iommu) {
1848 return (IOMMUMemoryRegion *) mr;
1849 }
1850 return NULL;
1851 }
1852
1853 /**
1854 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
1855 * if an iommu or NULL if not
1856 *
1857 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
1858 * otherwise NULL. This is fast path avoiding QOM checking, use with caution.
1859 *
1860 * @iommu_mr: the memory region being queried
1861 */
1862 static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
1863 IOMMUMemoryRegion *iommu_mr)
1864 {
1865 return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
1866 }
1867
1868 #define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
1869
1870 /**
1871 * memory_region_iommu_get_min_page_size: get minimum supported page size
1872 * for an iommu
1873 *
1874 * Returns minimum supported page size for an iommu.
1875 *
1876 * @iommu_mr: the memory region being queried
1877 */
1878 uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
1879
1880 /**
1881 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
1882 *
1883 * Note: for any IOMMU implementation, an in-place mapping change
1884 * should be notified with an UNMAP followed by a MAP.
1885 *
1886 * @iommu_mr: the memory region that was changed
1887 * @iommu_idx: the IOMMU index for the translation table which has changed
1888 * @event: TLB event with the new entry in the IOMMU translation table.
1889 * The entry replaces all old entries for the same virtual I/O address
1890 * range.
1891 */
1892 void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
1893 int iommu_idx,
1894 const IOMMUTLBEvent event);
1895
1896 /**
1897 * memory_region_notify_iommu_one: notify a change in an IOMMU translation
1898 * entry to a single notifier
1899 *
1900 * This works just like memory_region_notify_iommu(), but it only
1901 * notifies a specific notifier, not all of them.
1902 *
1903 * @notifier: the notifier to be notified
1904 * @event: TLB event with the new entry in the IOMMU translation table.
1905 * The entry replaces all old entries for the same virtual I/O address
1906 * range.
1907 */
1908 void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
1909 const IOMMUTLBEvent *event);
1910
1911 /**
1912 * memory_region_unmap_iommu_notifier_range: notify an unmap for an IOMMU
1913 * translation that covers the
1914 * range of a notifier
1915 *
1916 * @notifier: the notifier to be notified
1917 */
1918 void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier);
1919
1920
1921 /**
1922 * memory_region_register_iommu_notifier: register a notifier for changes to
1923 * IOMMU translation entries.
1924 *
1925 * Returns 0 on success, or a negative errno otherwise. In particular,
1926 * -EINVAL indicates that at least one of the attributes of the notifier
1927 * is not supported (flag/range) by the IOMMU memory region. In case of
1928 * error, an error object is set in @errp.
1929 *
1930 * @mr: the memory region to observe
1931 * @n: the IOMMUNotifier to be added; the notify callback receives a
1932 * pointer to an #IOMMUTLBEntry as the opaque value; the pointer
1933 * ceases to be valid on exit from the notifier.
1934 * @errp: pointer to Error*, to store an error if it happens.
1935 */
1936 int memory_region_register_iommu_notifier(MemoryRegion *mr,
1937 IOMMUNotifier *n, Error **errp);
1938
1939 /**
1940 * memory_region_iommu_replay: replay existing IOMMU translations to
1941 * a notifier with the minimum page granularity returned by
1942 * mr->iommu_ops->get_page_size().
1943 *
1944 * Note: this is not related to record-and-replay functionality.
1945 *
1946 * @iommu_mr: the memory region to observe
1947 * @n: the notifier to which to replay iommu mappings
1948 */
1949 void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
1950
1951 /**
1952 * memory_region_unregister_iommu_notifier: unregister a notifier for
1953 * changes to IOMMU translation entries.
1954 *
1955 * @mr: the memory region which was observed and for which notify_stopped()
1956 * needs to be called
1957 * @n: the notifier to be removed.
1958 */
1959 void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1960 IOMMUNotifier *n);
1961
1962 /**
1963 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
1964 * defined on the IOMMU.
1965 *
1966 * Returns 0 on success, or a negative errno otherwise. In particular,
1967 * -EINVAL indicates that the IOMMU does not support the requested
1968 * attribute.
1969 *
1970 * @iommu_mr: the memory region
1971 * @attr: the requested attribute
1972 * @data: a pointer to the requested attribute data
1973 */
1974 int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1975 enum IOMMUMemoryRegionAttr attr,
1976 void *data);
1977
1978 /**
1979 * memory_region_iommu_attrs_to_index: return the IOMMU index to
1980 * use for translations with the given memory transaction attributes.
1981 *
1982 * @iommu_mr: the memory region
1983 * @attrs: the memory transaction attributes
1984 */
1985 int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
1986 MemTxAttrs attrs);
1987
1988 /**
1989 * memory_region_iommu_num_indexes: return the total number of IOMMU
1990 * indexes that this IOMMU supports.
1991 *
1992 * @iommu_mr: the memory region
1993 */
1994 int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
1995
1996 /**
1997 * memory_region_name: get a memory region's name
1998 *
1999 * Returns the string that was used to initialize the memory region.
2000 *
2001 * @mr: the memory region being queried
2002 */
2003 const char *memory_region_name(const MemoryRegion *mr);
2004
2005 /**
2006 * memory_region_is_logging: return whether a memory region is logging writes
2007 *
2008 * Returns %true if the memory region is logging writes for the given client
2009 *
2010 * @mr: the memory region being queried
2011 * @client: the client being queried
2012 */
2013 bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
2014
2015 /**
2016 * memory_region_get_dirty_log_mask: return the clients for which a
2017 * memory region is logging writes.
2018 *
2019 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
2020 * are the bit indices.
2021 *
2022 * @mr: the memory region being queried
2023 */
2024 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
2025
2026 /**
2027 * memory_region_is_rom: check whether a memory region is ROM
2028 *
2029 * Returns %true if a memory region is read-only memory.
2030 *
2031 * @mr: the memory region being queried
2032 */
2033 static inline bool memory_region_is_rom(MemoryRegion *mr)
2034 {
2035 return mr->ram && mr->readonly;
2036 }
2037
2038 /**
2039 * memory_region_is_nonvolatile: check whether a memory region is non-volatile
2040 *
2041 * Returns %true if a memory region is non-volatile memory.
2042 *
2043 * @mr: the memory region being queried
2044 */
2045 static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
2046 {
2047 return mr->nonvolatile;
2048 }
2049
2050 /**
2051 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
2052 *
2053 * Returns a file descriptor backing a file-based RAM memory region,
2054 * or -1 if the region is not a file-based RAM memory region.
2055 *
2056 * @mr: the RAM or alias memory region being queried.
2057 */
2058 int memory_region_get_fd(MemoryRegion *mr);
2059
2060 /**
2061 * memory_region_from_host: Convert a pointer into a RAM memory region
2062 * and an offset within it.
2063 *
2064 * Given a host pointer inside a RAM memory region (created with
2065 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
2066 * the MemoryRegion and the offset within it.
2067 *
2068 * Use with care; by the time this function returns, the returned pointer is
2069 * not protected by RCU anymore. If the caller is not within an RCU critical
2070 * section and does not hold the BQL, it must have other means of
2071 * protecting the pointer, such as a reference to the region that includes
2072 * the incoming ram_addr_t.
2073 *
2074 * @ptr: the host pointer to be converted
2075 * @offset: the offset within memory region
2076 */
2077 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
2078
2079 /**
2080 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
2081 *
2082 * Returns a host pointer to a RAM memory region (created with
2083 * memory_region_init_ram() or memory_region_init_ram_ptr()).
2084 *
2085 * Use with care; by the time this function returns, the returned pointer is
2086 * not protected by RCU anymore. If the caller is not within an RCU critical
2087 * section and does not hold the BQL, it must have other means of
2088 * protecting the pointer, such as a reference to the region that includes
2089 * the incoming ram_addr_t.
2090 *
2091 * @mr: the memory region being queried.
2092 */
2093 void *memory_region_get_ram_ptr(MemoryRegion *mr);
2094
2095 /* memory_region_ram_resize: Resize a RAM region.
2096 *
2097 * Resizing RAM while migrating can result in the migration being canceled.
2098 * Care has to be taken if the guest might have already detected the memory.
2099 *
2100 * @mr: a memory region created with @memory_region_init_resizeable_ram.
2101 * @newsize: the new size of the region
2102 * @errp: pointer to Error*, to store an error if it happens.
2103 */
2104 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
2105 Error **errp);
2106
2107 /**
2108 * memory_region_msync: Synchronize selected address range of
2109 * a memory mapped region
2110 *
2111 * @mr: the memory region to be synced
2112 * @addr: the initial address of the range to be synced
2113 * @size: the size of the range to be synced
2114 */
2115 void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);
2116
2117 /**
2118 * memory_region_writeback: Trigger cache writeback for
2119 * selected address range
2120 *
2121 * @mr: the memory region to be updated
2122 * @addr: the initial address of the range to be written back
2123 * @size: the size of the range to be written back
2124 */
2125 void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);
2126
2127 /**
2128 * memory_region_set_log: Turn dirty logging on or off for a region.
2129 *
2130 * Turns dirty logging on or off for a specified client (display, migration).
2131 * Only meaningful for RAM regions.
2132 *
2133 * @mr: the memory region being updated.
2134 * @log: whether dirty logging is to be enabled or disabled.
2135 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
2136 */
2137 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
2138
2139 /**
2140 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
2141 *
2142 * Marks a range of bytes as dirty, after it has been dirtied outside
2143 * guest code.
2144 *
2145 * @mr: the memory region being dirtied.
2146 * @addr: the address (relative to the start of the region) being dirtied.
2147 * @size: size of the range being dirtied.
2148 */
2149 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2150 hwaddr size);
2151
2152 /**
2153 * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
2154 *
2155 * This function is called when the caller wants to clear the remote
2156 * dirty bitmap of a memory range within the memory region. This can
2157 * be used by e.g. KVM to manually clear the dirty log when
2158 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is declared supported by the host
2159 * kernel.
2160 *
2161 * @mr: the memory region to clear the dirty log upon
2162 * @start: start address offset within the memory region
2163 * @len: length of the memory region to clear dirty bitmap
2164 */
2165 void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
2166 hwaddr len);
2167
2168 /**
2169 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
2170 * bitmap and clear it.
2171 *
2172 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
2173 * returns the snapshot. The snapshot can then be used to query dirty
2174 * status, using memory_region_snapshot_get_dirty. Snapshotting allows
2175 * querying the same page multiple times, which is especially useful for
2176 * display updates where the scanlines often are not page aligned.
2177 *
2178 * The dirty bitmap region which gets copied into the snapshot (and
2179 * cleared afterwards) can be larger than requested. The boundaries
2180 * are rounded up/down so complete bitmap longs (covering 64 pages on
2181 * 64-bit hosts) can be copied over into the bitmap snapshot. This
2182 * isn't a problem for display updates, as the extra pages are outside
2183 * the visible area, and in case the visible area changes a full
2184 * display redraw is due anyway. Should other use cases for this
2185 * function emerge we might have to revisit this implementation
2186 * detail.
2187 *
2188 * Use g_free() to release the DirtyBitmapSnapshot.
2189 *
2190 * @mr: the memory region being queried.
2191 * @addr: the address (relative to the start of the region) being queried.
2192 * @size: the size of the range being queried.
2193 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
2194 */
2195 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2196 hwaddr addr,
2197 hwaddr size,
2198 unsigned client);
2199
2200 /**
2201 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
2202 * in the specified dirty bitmap snapshot.
2203 *
2204 * @mr: the memory region being queried.
2205 * @snap: the dirty bitmap snapshot
2206 * @addr: the address (relative to the start of the region) being queried.
2207 * @size: the size of the range being queried.
2208 */
2209 bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
2210 DirtyBitmapSnapshot *snap,
2211 hwaddr addr, hwaddr size);
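
/*
 * Together, these two functions support the usual display-update loop (an
 * illustrative sketch; vram, stride, height and the scanline handling are
 * made up):
 *
 *     DirtyBitmapSnapshot *snap;
 *     int y;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(vram, 0, vram_size,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (y = 0; y < height; y++) {
 *         if (memory_region_snapshot_get_dirty(vram, snap,
 *                                              y * stride, stride)) {
 *             // redraw scanline y
 *         }
 *     }
 *     g_free(snap);
 */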
2212
2213 /**
2214 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
2215 * client.
2216 *
2217 * Marks a range of pages as no longer dirty.
2218 *
2219 * @mr: the region being updated.
2220 * @addr: the start of the subrange being cleaned.
2221 * @size: the size of the subrange being cleaned.
2222 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
2223 * %DIRTY_MEMORY_VGA.
2224 */
2225 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2226 hwaddr size, unsigned client);
2227
2228 /**
2229 * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
2230 * TBs (for self-modifying code).
2231 *
2232 * The MemoryRegionOps->write() callback of a ROM device must use this function
2233 * to mark byte ranges that have been modified internally, such as by directly
2234 * accessing the memory returned by memory_region_get_ram_ptr().
2235 *
2236 * This function marks the range dirty and invalidates TBs so that TCG can
2237 * detect self-modifying code.
2238 *
2239 * @mr: the region being flushed.
2240 * @addr: the start, relative to the start of the region, of the range being
2241 * flushed.
2242 * @size: the size, in bytes, of the range being flushed.
2243 */
2244 void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
2245
2246 /**
2247 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
2248 *
2249 * Allows a memory region to be marked as read-only (turning it into a ROM).
2250 * Only useful on RAM regions.
2251 *
2252 * @mr: the region being updated.
2253 * @readonly: whether the region is to be ROM or RAM.
2254 */
2255 void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
2256
2257 /**
2258 * memory_region_set_nonvolatile: Turn a memory region non-volatile
2259 *
2260 * Allows a memory region to be marked as non-volatile.
2261 * Only useful on RAM regions.
2262 *
2263 * @mr: the region being updated.
2264 * @nonvolatile: whether the region is to be non-volatile.
2265 */
2266 void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);
2267
2268 /**
2269 * memory_region_rom_device_set_romd: enable/disable ROMD mode
2270 *
2271 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
2272 * be set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the
2273 * device is mapped to guest memory and satisfies read access directly.
2274 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
2275 * Writes are always handled by the #MemoryRegion.write function.
2276 *
2277 * @mr: the memory region to be updated
2278 * @romd_mode: %true to put the region into ROMD mode
2279 */
2280 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
2281
2282 /**
2283 * memory_region_set_coalescing: Enable memory coalescing for the region.
2284 *
2285 * Enables writes to a region to be queued for later processing. MMIO ->write
2286 * callbacks may be delayed until a non-coalesced MMIO is issued.
2287 * Only useful for IO regions. Roughly similar to write-combining hardware.
2288 *
2289 * @mr: the memory region to be write coalesced
2290 */
2291 void memory_region_set_coalescing(MemoryRegion *mr);
2292
2293 /**
2294 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
2295 * a region.
2296 *
2297 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
2298 * Multiple calls can be issued to coalesce disjoint ranges.
2299 *
2300 * @mr: the memory region to be updated.
2301 * @offset: the start of the range within the region to be coalesced.
2302 * @size: the size of the subrange to be coalesced.
2303 */
2304 void memory_region_add_coalescing(MemoryRegion *mr,
2305 hwaddr offset,
2306 uint64_t size);
2307
2308 /**
2309 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
2310 *
2311 * Disables any coalescing caused by memory_region_set_coalescing() or
2312 * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
2313 * hardware.
2314 *
2315 * @mr: the memory region to be updated.
2316 */
2317 void memory_region_clear_coalescing(MemoryRegion *mr);
2318
2319 /**
2320 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
2321 * accesses.
2322 *
2323 * Ensures that pending coalesced MMIO requests are flushed before the memory
2324 * region is accessed. This property is automatically enabled for all regions
2325 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
2326 *
2327 * @mr: the memory region to be updated.
2328 */
2329 void memory_region_set_flush_coalesced(MemoryRegion *mr);
2330
2331 /**
2332 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
2333 * accesses.
2334 *
2335 * Clear the automatic coalesced MMIO flushing enabled via
2336 * memory_region_set_flush_coalesced(). Note that this has no effect on
2337 * memory regions that have MMIO coalescing enabled for themselves. For them,
2338 * automatic flushing will stop once coalescing is disabled.
2339 *
2340 * @mr: the memory region to be updated.
2341 */
2342 void memory_region_clear_flush_coalesced(MemoryRegion *mr);
2343
2344 /**
2345 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
2346 * is written to a location.
2347 *
2348 * Marks a word in an IO region (initialized with memory_region_init_io())
2349 * as a trigger for an eventfd event. The I/O callback will not be called.
2350 * The caller must be prepared to handle failure (that is, take the required
2351 * action if the callback _is_ called).
2352 *
2353 * @mr: the memory region being updated.
2354 * @addr: the address within @mr that is to be monitored
2355 * @size: the size of the access to trigger the eventfd
2356 * @match_data: whether to match against @data, instead of just @addr
2357 * @data: the data to match against the guest write
2358 * @e: event notifier to be triggered when @addr, @size, and @data all match.
2359 **/
2360 void memory_region_add_eventfd(MemoryRegion *mr,
2361 hwaddr addr,
2362 unsigned size,
2363 bool match_data,
2364 uint64_t data,
2365 EventNotifier *e);
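
/*
 * For example, a doorbell register can be wired to an eventfd so that guest
 * writes signal a worker instead of invoking the MMIO write callback (an
 * illustrative sketch; DOORBELL_OFFSET and queue_idx are made up, and error
 * handling is omitted):
 *
 *     EventNotifier notifier;
 *
 *     event_notifier_init(&notifier, 0);
 *     memory_region_add_eventfd(&s->mmio, DOORBELL_OFFSET, 2,
 *                               true, queue_idx, &notifier);
 */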
2366
2367 /**
2368 * memory_region_del_eventfd: Cancel an eventfd.
2369 *
2370 * Cancels an eventfd trigger requested by a previous
2371 * memory_region_add_eventfd() call.
2372 *
2373 * @mr: the memory region being updated.
2374 * @addr: the address within @mr that is to be monitored
2375 * @size: the size of the access to trigger the eventfd
2376 * @match_data: whether to match against @data, instead of just @addr
2377 * @data: the data to match against the guest write
2378 * @e: event notifier to be triggered when @addr, @size, and @data all match.
2379 */
2380 void memory_region_del_eventfd(MemoryRegion *mr,
2381 hwaddr addr,
2382 unsigned size,
2383 bool match_data,
2384 uint64_t data,
2385 EventNotifier *e);
2386
2387 /**
2388 * memory_region_add_subregion: Add a subregion to a container.
2389 *
2390 * Adds a subregion at @offset. The subregion may not overlap with other
2391 * subregions (except for those explicitly marked as overlapping). A region
2392 * may only be added once as a subregion (unless removed with
2393 * memory_region_del_subregion()); use memory_region_init_alias() if you
2394 * want a region to be a subregion in multiple locations.
2395 *
2396 * @mr: the region to contain the new subregion; must be a container
2397 * initialized with memory_region_init().
2398 * @offset: the offset relative to @mr where @subregion is added.
2399 * @subregion: the subregion to be added.
2400 */
2401 void memory_region_add_subregion(MemoryRegion *mr,
2402 hwaddr offset,
2403 MemoryRegion *subregion);
2404 /**
2405 * memory_region_add_subregion_overlap: Add a subregion to a container
2406 * with overlap.
2407 *
2408 * Adds a subregion at @offset. The subregion may overlap with other
2409 * subregions. Conflicts are resolved by having a higher @priority hide a
2410 * lower @priority. Subregions without priority are taken as @priority 0.
2411 * A region may only be added once as a subregion (unless removed with
2412 * memory_region_del_subregion()); use memory_region_init_alias() if you
2413 * want a region to be a subregion in multiple locations.
2414 *
2415 * @mr: the region to contain the new subregion; must be a container
2416 * initialized with memory_region_init().
2417 * @offset: the offset relative to @mr where @subregion is added.
2418 * @subregion: the subregion to be added.
2419 * @priority: used for resolving overlaps; highest priority wins.
2420 */
2421 void memory_region_add_subregion_overlap(MemoryRegion *mr,
2422 hwaddr offset,
2423 MemoryRegion *subregion,
2424 int priority);
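
/*
 * For example, a PCI window that shadows part of system RAM can be expressed
 * with priorities (an illustrative sketch; the names and address are made
 * up):
 *
 *     memory_region_add_subregion(sysmem, 0, ram);
 *     memory_region_add_subregion_overlap(sysmem, 0xe0000000,
 *                                         pci_window, 1);
 *
 * Accesses starting at 0xe0000000 then hit pci_window, which hides the RAM
 * underneath because of its higher priority.
 */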
2425
2426 /**
2427 * memory_region_get_ram_addr: Get the ram address associated with a memory
2428 * region
2429 *
2430 * @mr: the region to be queried
2431 */
2432 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
2433
2434 uint64_t memory_region_get_alignment(const MemoryRegion *mr);
2435 /**
2436 * memory_region_del_subregion: Remove a subregion.
2437 *
2438 * Removes a subregion from its container.
2439 *
2440 * @mr: the container to be updated.
2441 * @subregion: the region being removed; must be a current subregion of @mr.
2442 */
2443 void memory_region_del_subregion(MemoryRegion *mr,
2444 MemoryRegion *subregion);
2445
2446 /*
2447 * memory_region_set_enabled: dynamically enable or disable a region
2448 *
2449 * Enables or disables a memory region. A disabled memory region
2450 * ignores all accesses to itself and its subregions. It does not
2451 * obscure sibling subregions with lower priority - it simply behaves as
2452 * if it was removed from the hierarchy.
2453 *
2454 * Regions default to being enabled.
2455 *
2456 * @mr: the region to be updated
2457 * @enabled: whether to enable or disable the region
2458 */
2459 void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
2460
2461 /*
2462 * memory_region_set_address: dynamically update the address of a region
2463 *
2464 * Dynamically updates the address of a region, relative to its container.
2465 * May be used on regions that are currently part of a memory hierarchy.
2466 *
2467 * @mr: the region to be updated
2468 * @addr: new address, relative to container region
2469 */
2470 void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
2471
2472 /*
2473 * memory_region_set_size: dynamically update the size of a region.
2474 *
2475 * Dynamically updates the size of a region.
2476 *
2477 * @mr: the region to be updated
2478 * @size: the new size of the region.
2479 */
2480 void memory_region_set_size(MemoryRegion *mr, uint64_t size);
2481
2482 /*
2483 * memory_region_set_alias_offset: dynamically update a memory alias's offset
2484 *
2485 * Dynamically updates the offset into the target region that an alias points
2486 * to, as if the fourth argument to memory_region_init_alias() had changed.
2487 *
2488 * @mr: the #MemoryRegion to be updated; should be an alias.
2489 * @offset: the new offset into the target memory region
2490 */
2491 void memory_region_set_alias_offset(MemoryRegion *mr,
2492 hwaddr offset);
2493
2494 /*
2495 * memory_region_set_unmergeable: Set a memory region unmergeable
2496 *
2497 * Mark a memory region unmergeable, resulting in the memory region (or
2498 * everything contained in a memory region container) not getting merged when
2499 * simplifying the address space and notifying memory listeners. Consequently,
2500 * memory listeners will never get notified about ranges that are larger than
2501 * the original memory regions.
2502 *
2503 * This is primarily useful when multiple aliases to a RAM memory region are
2504 * mapped into a memory region container, and updates (e.g., enable/disable or
2505 * map/unmap) of individual memory region aliases are not supposed to affect
2506 * other memory regions in the same container.
2507 *
2508 * @mr: the #MemoryRegion to be updated
2509 * @unmergeable: whether to mark the #MemoryRegion unmergeable
2510 */
2511 void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable);
2512
2513 /**
2514 * memory_region_present: checks if an address relative to a @container
2515 * translates into #MemoryRegion within @container
2516 *
2517 * Answer whether a #MemoryRegion within @container covers the address
2518 * @addr.
2519 *
2520 * @container: a #MemoryRegion within which @addr is a relative address
2521 * @addr: the area within @container to be searched
2522 */
2523 bool memory_region_present(MemoryRegion *container, hwaddr addr);
2524
2525 /**
2526 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
2527 * into another memory region, which does not necessarily imply that it is
2528 * mapped into an address space.
2529 *
2530 * @mr: a #MemoryRegion which should be checked if it's mapped
2531 */
2532 bool memory_region_is_mapped(MemoryRegion *mr);
2533
2534 /**
2535 * memory_region_get_ram_discard_manager: get the #RamDiscardManager for a
2536 * #MemoryRegion
2537 *
2538 * The #RamDiscardManager cannot change while a memory region is mapped.
2539 *
2540 * @mr: the #MemoryRegion
2541 */
2542 RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr);
2543
2544 /**
2545 * memory_region_has_ram_discard_manager: check whether a #MemoryRegion has a
2546 * #RamDiscardManager assigned
2547 *
2548 * @mr: the #MemoryRegion
2549 */
2550 static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr)
2551 {
2552 return !!memory_region_get_ram_discard_manager(mr);
2553 }
2554
2555 /**
2556 * memory_region_set_ram_discard_manager: set the #RamDiscardManager for a
2557 * #MemoryRegion
2558 *
2559 * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion
2560 * that does not cover RAM, or a #MemoryRegion that already has a
2561 * #RamDiscardManager assigned. Returns 0 if @rdm is set successfully.
2562 *
2563 * @mr: the #MemoryRegion
2564 * @rdm: #RamDiscardManager to set
2565 */
2566 int memory_region_set_ram_discard_manager(MemoryRegion *mr,
2567 RamDiscardManager *rdm);
2568
2569 /**
2570 * memory_region_find: translate an address/size relative to a
2571 * MemoryRegion into a #MemoryRegionSection.
2572 *
2573 * Locates the first #MemoryRegion within @mr that overlaps the range
2574 * given by @addr and @size.
2575 *
2576 * Returns a #MemoryRegionSection that describes a contiguous overlap.
2577 * It will have the following characteristics:
2578 * - @size = 0 iff no overlap was found
2579 * - @mr is non-%NULL iff an overlap was found
2580 *
2581 * Remember that in the return value the @offset_within_region is
2582 * relative to the returned region (in the .@mr field), not to the
2583 * @mr argument.
2584 *
2585 * Similarly, the .@offset_within_address_space is relative to the
2586 * address space that contains both regions, the passed and the
2587 * returned one. However, in the special case where the @mr argument
2588 * has no container (and thus is the root of the address space), the
2589 * following will hold:
2590 * - @offset_within_address_space >= @addr
2591 * - @offset_within_address_space + .@size <= @addr + @size
2592 *
2593 * @mr: a MemoryRegion within which @addr is a relative address
2594 * @addr: start of the area within @as to be searched
2595 * @size: size of the area to be searched
2596 */
2597 MemoryRegionSection memory_region_find(MemoryRegion *mr,
2598 hwaddr addr, uint64_t size);
2599
2600 /**
2601 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
2602 *
2603 * Synchronizes the dirty page log for all address spaces.
2604 *
2605 * @last_stage: whether this is the last stage of live migration
2606 */
2607 void memory_global_dirty_log_sync(bool last_stage);
2608
2609 /**
2610 * memory_global_after_dirty_log_sync: synchronize the dirty log for all memory
2611 *
2612 * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
2613 * This function must be called after the dirty log bitmap is cleared, and
2614 * before dirty guest memory pages are read. If you are using
2615 * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
2616 * care of doing this.
2617 */
2618 void memory_global_after_dirty_log_sync(void);
2619
2620 /**
2621 * memory_region_transaction_begin: Start a transaction.
2622 *
2623 * During a transaction, changes will be accumulated and made visible
2624 * only when the transaction ends (is committed).
2625 */
2626 void memory_region_transaction_begin(void);
2627
2628 /**
2629 * memory_region_transaction_commit: Commit a transaction and make changes
2630 * visible to the guest.
2631 */
2632 void memory_region_transaction_commit(void);
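
/*
 * Batching related topology updates in one transaction avoids regenerating
 * the flat views (and notifying listeners) once per change, e.g. when moving
 * a BAR (an illustrative sketch; bar_mr and new_base are made up):
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bar_mr, false);
 *     memory_region_set_address(bar_mr, new_base);
 *     memory_region_set_enabled(bar_mr, true);
 *     memory_region_transaction_commit();
 */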
2633
2634 /**
2635 * memory_listener_register: register callbacks to be called when memory
2636 * sections are mapped or unmapped into an address
2637 * space
2638 *
2639 * @listener: an object containing the callbacks to be called
2640 * @filter: if non-%NULL, only regions in this address space will be observed
2641 */
2642 void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
2643
2644 /**
2645 * memory_listener_unregister: undo the effect of memory_listener_register()
2646 *
2647 * @listener: an object containing the callbacks to be removed
2648 */
2649 void memory_listener_unregister(MemoryListener *listener);
2650
2651 /**
2652 * memory_global_dirty_log_start: begin dirty logging for all regions
2653 *
2654 * @flags: purpose of starting dirty log, migration or dirty rate
2655 * @errp: pointer to Error*, to store an error if it happens.
2656 *
2657 * Return: true on success, else false setting @errp with error.
2658 */
2659 bool memory_global_dirty_log_start(unsigned int flags, Error **errp);
2660
2661 /**
2662 * memory_global_dirty_log_stop: end dirty logging for all regions
2663 *
2664 * @flags: purpose of stopping dirty log, migration or dirty rate
2665 */
2666 void memory_global_dirty_log_stop(unsigned int flags);
2667
2668 void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
2669
2670 bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
2671 unsigned size, bool is_write,
2672 MemTxAttrs attrs);
2673
2674 /**
2675 * memory_region_dispatch_read: perform a read directly to the specified
2676 * MemoryRegion.
2677 *
2678 * @mr: #MemoryRegion to access
2679 * @addr: address within that region
2680 * @pval: pointer to uint64_t which the data is written to
2681 * @op: size, sign, and endianness of the memory operation
2682 * @attrs: memory transaction attributes to use for the access
2683 */
2684 MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
2685 hwaddr addr,
2686 uint64_t *pval,
2687 MemOp op,
2688 MemTxAttrs attrs);
2689 /**
2690 * memory_region_dispatch_write: perform a write directly to the specified
2691 * MemoryRegion.
2692 *
2693 * @mr: #MemoryRegion to access
2694 * @addr: address within that region
2695 * @data: data to write
2696 * @op: size, sign, and endianness of the memory operation
2697 * @attrs: memory transaction attributes to use for the access
2698 */
2699 MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
2700 hwaddr addr,
2701 uint64_t data,
2702 MemOp op,
2703 MemTxAttrs attrs);
2704
2705 /**
2706 * address_space_init: initializes an address space
2707 *
2708 * @as: an uninitialized #AddressSpace
2709 * @root: a #MemoryRegion that routes addresses for the address space
2710 * @name: an address space name. The name is only used for debugging
2711 * output.
2712 */
2713 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
2714
2715 /**
2716 * address_space_destroy: destroy an address space
2717 *
2718 * Releases all resources associated with an address space. After an address space
2719 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
2720 * as well.
2721 *
2722 * @as: address space to be destroyed
2723 */
2724 void address_space_destroy(AddressSpace *as);
2725
2726 /**
2727 * address_space_remove_listeners: unregister all listeners of an address space
2728 *
2729 * Removes all callbacks previously registered with memory_listener_register()
2730 * for @as.
2731 *
2732 * @as: an initialized #AddressSpace
2733 */
2734 void address_space_remove_listeners(AddressSpace *as);
2735
2736 /**
2737 * address_space_rw: read from or write to an address space.
2738 *
2739 * Return a MemTxResult indicating whether the operation succeeded
2740 * or failed (eg unassigned memory, device rejected the transaction,
2741 * IOMMU fault).
2742 *
2743 * @as: #AddressSpace to be accessed
2744 * @addr: address within that address space
2745 * @attrs: memory transaction attributes
2746 * @buf: buffer with the data transferred
2747 * @len: the number of bytes to read or write
2748 * @is_write: indicates the transfer direction
2749 */
2750 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
2751 MemTxAttrs attrs, void *buf,
2752 hwaddr len, bool is_write);
2753
2754 /**
2755 * address_space_write: write to address space.
2756 *
2757 * Return a MemTxResult indicating whether the operation succeeded
2758 * or failed (eg unassigned memory, device rejected the transaction,
2759 * IOMMU fault).
2760 *
2761 * @as: #AddressSpace to be accessed
2762 * @addr: address within that address space
2763 * @attrs: memory transaction attributes
2764 * @buf: buffer with the data transferred
2765 * @len: the number of bytes to write
2766 */
2767 MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
2768 MemTxAttrs attrs,
2769 const void *buf, hwaddr len);
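
/*
 * A device performing DMA would typically use these like this (an
 * illustrative sketch; the 16-byte descriptor layout and desc_addr are made
 * up, and checking the MemTxResult is the caller's responsibility):
 *
 *     uint8_t desc[16];
 *
 *     if (address_space_rw(&address_space_memory, desc_addr,
 *                          MEMTXATTRS_UNSPECIFIED, desc, sizeof(desc),
 *                          false) != MEMTX_OK) {
 *         // report a device-specific DMA error
 *     }
 */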
2770
2771 /**
2772 * address_space_write_rom: write to address space, including ROM.
2773 *
2774 * This function writes to the specified address space, but will
2775 * write data to both ROM and RAM. This is used for non-guest
2776 * writes like writes from the gdb debug stub or initial loading
2777 * of ROM contents.
2778 *
2779 * Note that portions of the write which attempt to write data to
2780 * a device will be silently ignored -- only real RAM and ROM will
2781 * be written to.
2782 *
2783 * Return a MemTxResult indicating whether the operation succeeded
2784 * or failed (eg unassigned memory, device rejected the transaction,
2785 * IOMMU fault).
2786 *
2787 * @as: #AddressSpace to be accessed
2788 * @addr: address within that address space
2789 * @attrs: memory transaction attributes
2790 * @buf: buffer with the data transferred
2791 * @len: the number of bytes to write
2792 */
2793 MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
2794 MemTxAttrs attrs,
2795 const void *buf, hwaddr len);
2796
2797 /* address_space_ld*: load from an address space
2798 * address_space_st*: store to an address space
2799 *
2800 * These functions perform a load or store of the byte, word,
2801 * longword or quad to the specified address within the AddressSpace.
2802 * The _le suffixed functions treat the data as little endian;
2803 * _be indicates big endian; no suffix indicates "same endianness
2804 * as guest CPU".
2805 *
2806 * The "guest CPU endianness" accessors are deprecated for use outside
2807 * target-* code; devices should be CPU-agnostic and use either the LE
2808 * or the BE accessors.
2809 *
2810 * @as #AddressSpace to be accessed
2811 * @addr: address within that address space
2812 * @val: data value, for stores
2813 * @attrs: memory transaction attributes
2814 * @result: location to write the success/failure of the transaction;
2815 * if NULL, this information is discarded
2816 */
2817
2818 #define SUFFIX
2819 #define ARG1 as
2820 #define ARG1_DECL AddressSpace *as
2821 #include "exec/memory_ldst.h.inc"
2822
2823 static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
2824 {
2825 address_space_stl_notdirty(as, addr, val,
2826 MEMTXATTRS_UNSPECIFIED, NULL);
2827 }
2828
2829 #define SUFFIX
2830 #define ARG1 as
2831 #define ARG1_DECL AddressSpace *as
2832 #include "exec/memory_ldst_phys.h.inc"
2833
2834 struct MemoryRegionCache {
2835 uint8_t *ptr;
2836 hwaddr xlat;
2837 hwaddr len;
2838 FlatView *fv;
2839 MemoryRegionSection mrs;
2840 bool is_write;
2841 };
2842
2843 /* address_space_ld*_cached: load from a cached #MemoryRegion
2844 * address_space_st*_cached: store into a cached #MemoryRegion
2845 *
2846 * These functions perform a load or store of the byte, word,
2847 * longword or quad to the specified address. The address is
2848 * a physical address in the AddressSpace, but it must lie within
2849 * a #MemoryRegion that was mapped with address_space_cache_init.
2850 *
2851 * The _le suffixed functions treat the data as little endian;
2852 * _be indicates big endian; no suffix indicates "same endianness
2853 * as guest CPU".
2854 *
2855 * The "guest CPU endianness" accessors are deprecated for use outside
2856 * target-* code; devices should be CPU-agnostic and use either the LE
2857 * or the BE accessors.
2858 *
2859 * @cache: previously initialized #MemoryRegionCache to be accessed
2860 * @addr: address within the address space
2861 * @val: data value, for stores
2862 * @attrs: memory transaction attributes
2863 * @result: location to write the success/failure of the transaction;
2864 * if NULL, this information is discarded
2865 */
2866
2867 #define SUFFIX _cached_slow
2868 #define ARG1 cache
2869 #define ARG1_DECL MemoryRegionCache *cache
2870 #include "exec/memory_ldst.h.inc"
2871
2872 /* Inline fast path for direct RAM access. */
2873 static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
2874 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
2875 {
2876 assert(addr < cache->len);
2877 if (likely(cache->ptr)) {
2878 return ldub_p(cache->ptr + addr);
2879 } else {
2880 return address_space_ldub_cached_slow(cache, addr, attrs, result);
2881 }
2882 }
2883
2884 static inline void address_space_stb_cached(MemoryRegionCache *cache,
2885 hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
2886 {
2887 assert(addr < cache->len);
2888 if (likely(cache->ptr)) {
2889 stb_p(cache->ptr + addr, val);
2890 } else {
2891 address_space_stb_cached_slow(cache, addr, val, attrs, result);
2892 }
2893 }
2894
2895 #define ENDIANNESS
2896 #include "exec/memory_ldst_cached.h.inc"
2897
2898 #define ENDIANNESS _le
2899 #include "exec/memory_ldst_cached.h.inc"
2900
2901 #define ENDIANNESS _be
2902 #include "exec/memory_ldst_cached.h.inc"
2903
2904 #define SUFFIX _cached
2905 #define ARG1 cache
2906 #define ARG1_DECL MemoryRegionCache *cache
2907 #include "exec/memory_ldst_phys.h.inc"
2908
2909 /* address_space_cache_init: prepare for repeated access to a physical
2910 * memory region
2911 *
2912 * @cache: #MemoryRegionCache to be filled
2913 * @as: #AddressSpace to be accessed
2914 * @addr: address within that address space
2915 * @len: length of buffer
2916 * @is_write: indicates the transfer direction
2917 *
2918 * Will only work with RAM, and may map a subset of the requested range by
2919 * returning a value that is less than @len. On failure, returns a negative
2920 * errno value.
2921 *
2922 * Because it only works with RAM, this function can be used for
2923 * read-modify-write operations. In this case, is_write should be %true.
2924 *
2925 * Note that addresses passed to the address_space_*_cached functions
2926 * are relative to @addr.
2927 */
2928 int64_t address_space_cache_init(MemoryRegionCache *cache,
2929 AddressSpace *as,
2930 hwaddr addr,
2931 hwaddr len,
2932 bool is_write);
2933
2934 /**
2935 * address_space_cache_init_empty: Initialize empty #MemoryRegionCache
2936 *
2937 * @cache: The #MemoryRegionCache to operate on.
2938 *
2939 * Initializes #MemoryRegionCache structure without memory region attached.
2940 * A cache initialized this way can only be safely destroyed; it must not be used.
2941 */
2942 static inline void address_space_cache_init_empty(MemoryRegionCache *cache)
2943 {
2944 cache->mrs.mr = NULL;
2945 /* There is no real need to initialize fv, but it makes Coverity happy. */
2946 cache->fv = NULL;
2947 }
2948
2949 /**
2950 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
2951 *
2952 * @cache: The #MemoryRegionCache to operate on.
2953 * @addr: The first physical address that was written, relative to the
2954 * address that was passed to @address_space_cache_init.
2955 * @access_len: The number of bytes that were written starting at @addr.
2956 */
2957 void address_space_cache_invalidate(MemoryRegionCache *cache,
2958 hwaddr addr,
2959 hwaddr access_len);
2960
2961 /**
2962 * address_space_cache_destroy: free a #MemoryRegionCache
2963 *
2964 * @cache: The #MemoryRegionCache whose memory should be released.
2965 */
2966 void address_space_cache_destroy(MemoryRegionCache *cache);
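
/*
 * The usual cache lifecycle, e.g. for repeatedly touching a ring entry,
 * looks like this (an illustrative sketch; FLAGS_OFFSET, ring_gpa and the
 * ring layout are made up):
 *
 *     MemoryRegionCache cache;
 *     int64_t len;
 *     uint8_t flags;
 *
 *     len = address_space_cache_init(&cache, as, ring_gpa, ring_len, true);
 *     if (len < 0) {
 *         // handle failure (a short mapping, len < ring_len, is also
 *         // possible and must be handled)
 *     }
 *     flags = address_space_ldub_cached(&cache, FLAGS_OFFSET,
 *                                       MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_stb_cached(&cache, FLAGS_OFFSET, flags | 1,
 *                              MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_cache_invalidate(&cache, FLAGS_OFFSET, 1);
 *     address_space_cache_destroy(&cache);
 */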
2967
2968 /* address_space_get_iotlb_entry: translate an address into an IOTLB
2969 * entry. Should be called from an RCU critical section.
2970 */
2971 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
2972 bool is_write, MemTxAttrs attrs);
2973
2974 /* flatview_translate: translate an address range within an address space
2975 * into a MemoryRegion and an address range within that region. Should be
2976 * called from an RCU critical section, to avoid the last reference to
2977 * the returned region disappearing after flatview_translate returns.
2978 *
2979 * @fv: #FlatView to be accessed
2980 * @addr: address within that address space
2981 * @xlat: pointer to address within the returned memory region section's
2982 * #MemoryRegion.
2983 * @len: pointer to length
2984 * @is_write: indicates the transfer direction
2985 * @attrs: memory attributes
2986 */
2987 MemoryRegion *flatview_translate(FlatView *fv,
2988 hwaddr addr, hwaddr *xlat,
2989 hwaddr *len, bool is_write,
2990 MemTxAttrs attrs);
2991
2992 static inline MemoryRegion *address_space_translate(AddressSpace *as,
2993 hwaddr addr, hwaddr *xlat,
2994 hwaddr *len, bool is_write,
2995 MemTxAttrs attrs)
2996 {
2997 return flatview_translate(address_space_to_flatview(as),
2998 addr, xlat, len, is_write, attrs);
2999 }
3000
3001 /* address_space_access_valid: check for validity of accessing an address
3002 * space range
3003 *
3004 * Check whether memory is assigned to the given address space range, and
3005 * access is permitted by any IOMMU regions that are active for the address
3006 * space.
3007 *
3008 * For now, addr and len should be aligned to a page size. This limitation
3009 * will be lifted in the future.
3010 *
3011 * @as: #AddressSpace to be accessed
3012 * @addr: address within that address space
3013 * @len: length of the area to be checked
3014 * @is_write: indicates the transfer direction
3015 * @attrs: memory attributes
3016 */
3017 bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
3018 bool is_write, MemTxAttrs attrs);
3019
3020 /* address_space_map: map a physical memory region into a host virtual address
3021 *
3022 * May map a subset of the requested range, given by and returned in @plen.
3023 * May return %NULL and set *@plen to zero, if resources needed to perform
3024 * the mapping are exhausted.
3025 * Use only for reads OR writes - not for read-modify-write operations.
3026 * Use address_space_register_map_client() to know when retrying the map
3027 * operation is likely to succeed.
3028 *
3029 * @as: #AddressSpace to be accessed
3030 * @addr: address within that address space
3031 * @plen: pointer to length of buffer; updated on return
3032 * @is_write: indicates the transfer direction
3033 * @attrs: memory attributes
3034 */
3035 void *address_space_map(AddressSpace *as, hwaddr addr,
3036 hwaddr *plen, bool is_write, MemTxAttrs attrs);
3037
3038 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
3039 *
3040 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
3041 * the amount of memory that was actually read or written by the caller.
3042 *
3043 * @as: #AddressSpace used
3044 * @buffer: host pointer as returned by address_space_map()
3045 * @len: buffer length as returned by address_space_map()
3046 * @access_len: amount of data actually transferred
3047 * @is_write: indicates the transfer direction
3048 */
3049 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3050 bool is_write, hwaddr access_len);
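
/*
 * The map/unmap pair gives zero-copy access to guest memory, e.g. (an
 * illustrative sketch; gpa and size are made up):
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(as, gpa, &plen, true,
 *                                 MEMTXATTRS_UNSPECIFIED);
 *
 *     if (p) {
 *         // produce at most plen bytes into p
 *         address_space_unmap(as, p, plen, true, plen);
 *     }
 */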

/*
 * address_space_register_map_client: Register a callback to invoke when
 * resources for address_space_map() are available again.
 *
 * address_space_map may fail when there are not enough resources available,
 * such as when bounce buffer memory would exceed the limit. The callback can
 * be used to retry the address_space_map operation. Note that the callback
 * gets automatically removed after firing.
 *
 * @as: #AddressSpace to be accessed
 * @bh: callback to invoke when address_space_map() retry is appropriate
 */
void address_space_register_map_client(AddressSpace *as, QEMUBH *bh);

/*
 * address_space_unregister_map_client: Unregister a callback that has
 * previously been registered and not fired yet.
 *
 * @as: #AddressSpace to be accessed
 * @bh: callback to unregister
 */
void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh);
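
/*
 * Example (illustrative sketch): arrange for a retry when mapping fails.
 * retry_dma_bh() and opaque are hypothetical.
 *
 *     void *p = address_space_map(as, addr, &plen, true,
 *                                 MEMTXATTRS_UNSPECIFIED);
 *     if (!p) {
 *         QEMUBH *bh = qemu_bh_new(retry_dma_bh, opaque);
 *         address_space_register_map_client(as, bh);
 *         return;  // retry_dma_bh() fires once resources are available
 *     }
 */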

/* Internal functions, part of the implementation of address_space_read. */
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, void *buf, hwaddr len);
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
                                   MemTxAttrs attrs, void *buf,
                                   hwaddr len, hwaddr addr1, hwaddr l,
                                   MemoryRegion *mr);
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);

/* Internal functions, part of the implementation of address_space_read_cached
 * and address_space_write_cached. */
MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
                                           hwaddr addr, void *buf, hwaddr len);
MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
                                            hwaddr addr, const void *buf,
                                            hwaddr len);

int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr);
bool prepare_mmio_access(MemoryRegion *mr);
static inline bool memory_region_supports_direct_access(MemoryRegion *mr)
{
    /* ROM DEVICE regions only allow direct access if in ROMD mode. */
    if (memory_region_is_romd(mr)) {
        return true;
    }
    if (!memory_region_is_ram(mr)) {
        return false;
    }
    /*
     * RAM DEVICE regions can be accessed directly using memcpy, but they
     * might actually be backed by MMIO, in which case a memcpy-based access
     * can be wrong (e.g., it can use instructions not intended for MMIO
     * access).  So we treat such regions as IO.
     */
    return !memory_region_is_ram_device(mr);
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write,
                                           MemTxAttrs attrs)
{
    if (!memory_region_supports_direct_access(mr)) {
        return false;
    }
    /* Debug access can write to ROM. */
    if (is_write && !attrs.debug) {
        return !mr->readonly && !mr->rom_device;
    }
    return true;
}

/**
 * address_space_read: read from an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).  Called within RCU critical section.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline __attribute__((__always_inline__))
MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, void *buf,
                               hwaddr len)
{
    MemTxResult result = MEMTX_OK;
    hwaddr l, addr1;
    void *ptr;
    MemoryRegion *mr;
    FlatView *fv;

    if (__builtin_constant_p(len)) {
        if (len) {
            RCU_READ_LOCK_GUARD();
            fv = address_space_to_flatview(as);
            l = len;
            mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
            if (len == l && memory_access_is_direct(mr, false, attrs)) {
                ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
                memcpy(buf, ptr, len);
            } else {
                result = flatview_read_continue(fv, addr, attrs, buf, len,
                                                addr1, l, mr);
            }
        }
    } else {
        result = address_space_read_full(as, addr, attrs, buf, len);
    }
    return result;
}
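
/*
 * Example (illustrative sketch): fetch a 4-byte descriptor from guest
 * memory.  desc_addr is hypothetical; the compile-time-constant length lets
 * the fast path above be inlined.
 *
 *     uint32_t desc;
 *
 *     if (address_space_read(&address_space_memory, desc_addr,
 *                            MEMTXATTRS_UNSPECIFIED, &desc,
 *                            sizeof(desc)) != MEMTX_OK) {
 *         return;  // handle the failed transaction
 *     }
 *     desc = le32_to_cpu(desc);
 */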

/**
 * address_space_read_cached: read from a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline MemTxResult
address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
                          void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
    if (likely(cache->ptr)) {
        memcpy(buf, cache->ptr + addr, len);
        return MEMTX_OK;
    } else {
        return address_space_read_cached_slow(cache, addr, buf, len);
    }
}

/**
 * address_space_write_cached: write to a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline MemTxResult
address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
                           const void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    if (likely(cache->ptr)) {
        memcpy(cache->ptr + addr, buf, len);
        return MEMTX_OK;
    } else {
        return address_space_write_cached_slow(cache, addr, buf, len);
    }
}
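
/*
 * Example (illustrative sketch): repeated accesses to the same guest
 * structure go faster through a MemoryRegionCache.  ring_addr and ring_size
 * are hypothetical.
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     uint16_t idx;
 *
 *     if (address_space_cache_init(&cache, as, ring_addr, ring_size,
 *                                  false) >= 0) {
 *         address_space_read_cached(&cache, 2, &idx, sizeof(idx));
 *         idx = le16_to_cpu(idx);
 *         address_space_cache_destroy(&cache);
 *     }
 */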

/**
 * address_space_set: Fill address space with a constant byte.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @c: constant byte to fill the memory
 * @len: the number of bytes to fill with the constant byte
 * @attrs: memory transaction attributes
 */
MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
                              uint8_t c, hwaddr len, MemTxAttrs attrs);
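
/*
 * Example (illustrative sketch): clear a 4 KiB guest page, e.g. from a
 * device reset path.  page_addr is hypothetical.
 *
 *     address_space_set(&address_space_memory, page_addr, 0, 4096,
 *                       MEMTXATTRS_UNSPECIFIED);
 */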

/*
 * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
 * to manage the actual amount of memory consumed by the VM (then, the memory
 * provided by RAM blocks might be bigger than the desired memory consumption).
 * This *must* be set if:
 * - Discarding parts of a RAM block does not result in the change being
 *   reflected in the VM and the pages getting freed.
 * - All memory in RAM blocks is pinned or duplicated, blindly invalidating
 *   any previous discards.
 * - Discarding parts of a RAM block will result in integrity issues (e.g.,
 *   encrypted VMs).
 * Technologies that only temporarily pin the current working set of a
 * driver are fine, because we don't expect such pages to be discarded
 * (esp. based on guest action like balloon inflation).
 *
 * This is *not* to be used to protect from concurrent discards (esp.,
 * postcopy).
 *
 * Returns 0 if successful. Returns -EBUSY if a technology that relies on
 * discards to work reliably is active.
 */
int ram_block_discard_disable(bool state);
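
/*
 * Example (illustrative sketch): a device that pins all guest memory (and
 * hence would break discards) takes the inhibitor while realized.  errp is
 * the usual Error pointer of a realize function; the unrealize placement is
 * hypothetical.
 *
 *     if (ram_block_discard_disable(true)) {
 *         error_setg(errp, "RAM discarding is in use, cannot pin memory");
 *         return;
 *     }
 *     // ... pin guest memory ...
 *
 *     // and later, on unrealize:
 *     ram_block_discard_disable(false);
 */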

/*
 * See ram_block_discard_disable(): only disable uncoordinated discards,
 * keeping coordinated discards (via the RamDiscardManager) enabled.
 */
int ram_block_uncoordinated_discard_disable(bool state);

/*
 * Inhibit technologies that disable discarding of pages in RAM blocks.
 *
 * Returns 0 if successful. Returns -EBUSY if discarding of pages in RAM
 * blocks has already been disabled.
 */
int ram_block_discard_require(bool state);

/*
 * See ram_block_discard_require(): only inhibit technologies that disable
 * uncoordinated discarding of pages in RAM blocks, allowing co-existence with
 * technologies that only inhibit uncoordinated discards (via the
 * RamDiscardManager).
 */
int ram_block_coordinated_discard_require(bool state);

/*
 * Test if any discarding of memory in ram blocks is disabled.
 */
bool ram_block_discard_is_disabled(void);

/*
 * Test if any discarding of memory in ram blocks is required to work reliably.
 */
bool ram_block_discard_is_required(void);

void ram_block_add_cpr_blocker(RAMBlock *rb, Error **errp);
void ram_block_del_cpr_blocker(RAMBlock *rb);

#endif