/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/memop.h"
#include "exec/ramlist.h"
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/range.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "memory-region"
DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
                         TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
                     IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)

#define TYPE_RAM_DISCARD_MANAGER "ram-discard-manager"
typedef struct RamDiscardManagerClass RamDiscardManagerClass;
typedef struct RamDiscardManager RamDiscardManager;
DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
                     RAM_DISCARD_MANAGER, TYPE_RAM_DISCARD_MANAGER)

#ifdef CONFIG_FUZZ
void fuzz_dma_read_cb(size_t addr,
                      size_t len,
                      MemoryRegion *mr);
#else
static inline void fuzz_dma_read_cb(size_t addr,
                                    size_t len,
                                    MemoryRegion *mr)
{
    /* Do Nothing */
}
#endif

/* Possible bits for global_dirty_log_{start|stop} */

/* Dirty tracking enabled because migration is running */
#define GLOBAL_DIRTY_MIGRATION  (1U << 0)

/* Dirty tracking enabled because measuring dirty rate */
#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)

/* Dirty tracking enabled because dirty limit */
#define GLOBAL_DIRTY_LIMIT      (1U << 2)

#define GLOBAL_DIRTY_MASK  (0x7)

extern unsigned int global_dirty_tracking;

typedef struct MemoryRegionOps MemoryRegionOps;

struct ReservedRegion {
    Range range;
    unsigned type;
};

/**
 * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 * @nonvolatile: this section is non-volatile
 * @unmergeable: this section should not get merged with adjacent sections
 */
struct MemoryRegionSection {
    Int128 size;
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    hwaddr offset_within_address_space;
    bool readonly;
    bool nonvolatile;
    bool unmergeable;
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write. */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};

/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
 *
 * Normally there are two use cases for the notifiers:
 *
 *   (1) When the device needs accurate synchronization of the vIOMMU page
 *       tables, it needs to register with both MAP|UNMAP notifies (which
 *       is defined as IOMMU_NOTIFIER_IOTLB_EVENTS below).
 *
 *       By "accurate synchronization" we mean that the notified device
 *       maintains a shadow page table and must be notified on each
 *       guest MAP (page table entry creation) and UNMAP (invalidation)
 *       event (e.g. VFIO). Both notifications must be accurate so that
 *       the shadow page table is fully in sync with the guest view.
 *
 *   (2) When the device doesn't need accurate synchronization of the
 *       vIOMMU page tables, it needs to register only with UNMAP or
 *       DEVIOTLB_UNMAP notifies.
 *
 *       This is the case when the device maintains a cache of IOMMU
 *       translations (IOTLB) and is able to fill that cache by requesting
 *       translations from the vIOMMU through a protocol similar to ATS
 *       (Address Translation Service).
 *
 *       Note that in this mode the vIOMMU will not maintain a shadowed
 *       page table for the address space, and the UNMAP messages can cover
 *       more than the pages that used to get mapped. The IOMMU notifiee
 *       should be able to take care of over-sized invalidations.
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
    /* Notify changes on device IOTLB entries */
    IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
#define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
                            IOMMU_NOTIFIER_DEVIOTLB_EVENTS)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

typedef struct IOMMUTLBEvent {
    IOMMUNotifierFlag type;
    IOMMUTLBEntry entry;
} IOMMUTLBEvent;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * Resizing RAM while migrating can result in the migration being canceled.
 */
#define RAM_RESIZEABLE (1 << 2)

/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
 * zero the page and wake waiting processes.
 * (Set during postcopy)
 */
#define RAM_UF_ZEROPAGE (1 << 3)

/* RAM can be migrated */
#define RAM_MIGRATABLE (1 << 4)

/* RAM is a persistent kind of memory */
#define RAM_PMEM (1 << 5)

/*
 * UFFDIO_WRITEPROTECT is used on this RAMBlock to
 * support 'write-tracking' migration type.
 * Implies ram_state->ram_wt_enabled.
 */
#define RAM_UF_WRITEPROTECT (1 << 6)

/*
 * RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or huge
 * pages if applicable) is skipped: the allocation will bail out if this is
 * not supported. When not set, the OS will do the reservation, if supported
 * for the memory type.
 */
#define RAM_NORESERVE (1 << 7)

/* RAM that isn't accessible through normal means. */
#define RAM_PROTECTED (1 << 8)

/* RAM is an mmap-ed named file */
#define RAM_NAMED_FILE (1 << 9)

/* RAM is mmap-ed read-only */
#define RAM_READONLY (1 << 10)

/* RAM FD is opened read-only */
#define RAM_READONLY_FD (1 << 11)

/* RAM can be private, backed by a KVM guest memfd */
#define RAM_GUEST_MEMFD   (1 << 12)

static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end,
                                       int iommu_idx)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
    n->iommu_idx = iommu_idx;
}
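
/*
 * As an illustration of the notifier API above (a sketch, not taken from a
 * real device; the handler name is hypothetical): a device that only caches
 * IOTLB entries could register an UNMAP-only notifier covering the whole
 * address range and invalidate its cache from the handler:
 *
 *     static void my_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 *     {
 *         // Drop cached translations overlapping
 *         // [iotlb->iova, iotlb->iova + iotlb->addr_mask].
 *     }
 *
 *     IOMMUNotifier n;
 *
 *     iommu_notifier_init(&n, my_unmap_notify, IOMMU_NOTIFIER_UNMAP,
 *                         0, HWADDR_MAX, 0);
 *     memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), &n,
 *                                           &error_fatal);
 */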

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specifies bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported. Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented. Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented. Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported. Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};
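
/*
 * A minimal sketch of how these callbacks are typically used for a single
 * 32-bit MMIO register (the device and field names here are illustrative,
 * not a real QEMU device):
 *
 *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *
 *         return s->reg;      // same value for every offset in this sketch
 *     }
 *
 *     static void mydev_write(void *opaque, hwaddr addr, uint64_t data,
 *                             unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *
 *         s->reg = data;
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid.min_access_size = 4,
 *         .valid.max_access_size = 4,
 *     };
 *
 * The ops structure is then passed to memory_region_init_io() (declared
 * later in this header) together with the opaque device state.
 */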

typedef struct MemoryRegionClass {
    /* private */
    ObjectClass parent_class;
} MemoryRegionClass;


enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};

/*
 * IOMMUMemoryRegionClass:
 *
 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 * and provide an implementation of at least the @translate method here
 * to handle requests to the memory region. Other methods are optional.
 *
 * The IOMMU implementation must use the IOMMU notifier infrastructure
 * to report whenever mappings are changed, by calling
 * memory_region_notify_iommu() (or, if necessary, by calling
 * memory_region_notify_iommu_one() for each registered notifier).
 *
 * Conceptually an IOMMU provides a mapping from input address
 * to an output TLB entry. If the IOMMU is aware of memory transaction
 * attributes and the output TLB entry depends on the transaction
 * attributes, we represent this using IOMMU indexes. Each index
 * selects a particular translation table that the IOMMU has:
 *
 *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
 *
 *   @translate takes an input address and an IOMMU index
 *
 * and the mapping returned can only depend on the input address and the
 * IOMMU index.
 *
 * Most IOMMUs don't care about the transaction attributes and support
 * only a single IOMMU index. A more complex IOMMU might have one index
 * for secure transactions and one for non-secure transactions.
 */
struct IOMMUMemoryRegionClass {
    /* private: */
    MemoryRegionClass parent_class;

    /* public: */
    /**
     * @translate:
     *
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @addr: address to be translated within the memory region
     *
     * @flag: requested access permission
     *
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);
    /**
     * @get_min_page_size:
     *
     * Returns minimum supported page size in bytes.
     *
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /**
     * @notify_flag_changed:
     *
     * Called when IOMMU Notifier flag changes (i.e. when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @old_flags: events which previously needed to be notified
     *
     * @new_flags: events which now need to be notified
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL if the new flag bitmap is not supported by the
     * IOMMU memory region. In case of failure, the error object
     * must be created.
     */
    int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                               IOMMUNotifierFlag old_flags,
                               IOMMUNotifierFlag new_flags,
                               Error **errp);
    /**
     * @replay:
     *
     * Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /**
     * @get_attr:
     *
     * Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @attr: attribute being queried
     *
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /**
     * @attrs_to_index:
     *
     * Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /**
     * @num_indexes:
     *
     * Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);
};
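
/*
 * To illustrate the @translate contract only (this is not a real IOMMU
 * model): an "identity" IOMMU that maps every 4K page to itself with full
 * permissions could implement the hook as:
 *
 *     static IOMMUTLBEntry my_identity_translate(IOMMUMemoryRegion *iommu,
 *                                                hwaddr addr,
 *                                                IOMMUAccessFlags flag,
 *                                                int iommu_idx)
 *     {
 *         return (IOMMUTLBEntry) {
 *             .target_as = &address_space_memory,
 *             .iova = addr & ~(hwaddr)0xfff,
 *             .translated_addr = addr & ~(hwaddr)0xfff,
 *             .addr_mask = 0xfff,                  // 4K translation
 *             .perm = IOMMU_RW,
 *         };
 *     }
 *
 * A real implementation would instead walk its guest page tables and
 * honour @flag to stop the walk early on a permission mismatch.
 */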

typedef struct RamDiscardListener RamDiscardListener;
typedef int (*NotifyRamPopulate)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);
typedef void (*NotifyRamDiscard)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);

struct RamDiscardListener {
    /*
     * @notify_populate:
     *
     * Notification that previously discarded memory is about to get populated.
     * Listeners are able to object. If any listener objects, already
     * successfully notified listeners are notified about a discard again.
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get populated. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     *
     * Returns 0 on success. If the notification is rejected by the listener,
     * an error is returned.
     */
    NotifyRamPopulate notify_populate;

    /*
     * @notify_discard:
     *
     * Notification that previously populated memory was discarded successfully
     * and listeners should drop all references to such memory and prevent
     * new population (e.g., unmap).
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get discarded. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     */
    NotifyRamDiscard notify_discard;

    /*
     * @double_discard_supported:
     *
     * The listener supports getting @notify_discard notifications that span
     * already discarded parts.
     */
    bool double_discard_supported;

    MemoryRegionSection *section;
    QLIST_ENTRY(RamDiscardListener) next;
};

static inline void ram_discard_listener_init(RamDiscardListener *rdl,
                                             NotifyRamPopulate populate_fn,
                                             NotifyRamDiscard discard_fn,
                                             bool double_discard_supported)
{
    rdl->notify_populate = populate_fn;
    rdl->notify_discard = discard_fn;
    rdl->double_discard_supported = double_discard_supported;
}
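
/*
 * A consumer that must only map populated parts (e.g., something VFIO-like)
 * would wire the listener up roughly as follows; the callback names are
 * hypothetical and @section is the range the consumer cares about:
 *
 *     RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
 *     RamDiscardListener rdl;
 *
 *     ram_discard_listener_init(&rdl, my_populate_cb, my_discard_cb, false);
 *     ram_discard_manager_register_listener(rdm, &rdl, &section);
 *
 * ram_discard_manager_register_listener() is declared further below.
 */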

typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque);
typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque);

/*
 * RamDiscardManagerClass:
 *
 * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
 * regions are currently populated to be used/accessed by the VM, notifying
 * after parts were discarded (freeing up memory) and before parts will be
 * populated (consuming memory), to be used/accessed by the VM.
 *
 * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
 * #MemoryRegion isn't mapped into an address space yet (either directly
 * or via an alias); it cannot change while the #MemoryRegion is
 * mapped into an address space.
 *
 * The #RamDiscardManager is intended to be used by technologies that are
 * incompatible with discarding of RAM (e.g., VFIO, which may pin all
 * memory inside a #MemoryRegion), and require proper coordination to only
 * map the currently populated parts, to hinder parts that are expected to
 * remain discarded from silently getting populated and consuming memory.
 * Technologies that support discarding of RAM don't have to bother and can
 * simply map the whole #MemoryRegion.
 *
 * An example #RamDiscardManager is virtio-mem, which logically (un)plugs
 * memory within an assigned RAM #MemoryRegion, coordinated with the VM.
 * Logically unplugging memory consists of discarding RAM. The VM agreed to not
 * access unplugged (discarded) memory - especially via DMA. virtio-mem will
 * properly coordinate with listeners before memory is plugged (populated),
 * and after memory is unplugged (discarded).
 *
 * Listeners are called in multiples of the minimum granularity (unless it
 * would exceed the registered range) and changes are aligned to the minimum
 * granularity within the #MemoryRegion. Listeners have to prepare for memory
 * becoming discarded in a different granularity than it was populated and the
 * other way around.
 */
struct RamDiscardManagerClass {
    /* private */
    InterfaceClass parent_class;

    /* public */

    /**
     * @get_min_granularity:
     *
     * Get the minimum granularity in which listeners will get notified
     * about changes within the #MemoryRegion via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @mr: the #MemoryRegion
     *
     * Returns the minimum granularity.
     */
    uint64_t (*get_min_granularity)(const RamDiscardManager *rdm,
                                    const MemoryRegion *mr);

    /**
     * @is_populated:
     *
     * Check whether the given #MemoryRegionSection is completely populated
     * (i.e., no parts are currently discarded) via the #RamDiscardManager.
     * There are no alignment requirements.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     *
     * Returns whether the given range is completely populated.
     */
    bool (*is_populated)(const RamDiscardManager *rdm,
                         const MemoryRegionSection *section);

    /**
     * @replay_populated:
     *
     * Call the #ReplayRamPopulate callback for all populated parts within the
     * #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any call fails, no further calls are made.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamPopulate callback
     * @opaque: pointer to forward to the callback
     *
     * Returns 0 on success, or a negative error if any notification failed.
     */
    int (*replay_populated)(const RamDiscardManager *rdm,
                            MemoryRegionSection *section,
                            ReplayRamPopulate replay_fn, void *opaque);

    /**
     * @replay_discarded:
     *
     * Call the #ReplayRamDiscard callback for all discarded parts within the
     * #MemoryRegionSection via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamDiscard callback
     * @opaque: pointer to forward to the callback
     */
    void (*replay_discarded)(const RamDiscardManager *rdm,
                             MemoryRegionSection *section,
                             ReplayRamDiscard replay_fn, void *opaque);

    /**
     * @register_listener:
     *
     * Register a #RamDiscardListener for the given #MemoryRegionSection and
     * immediately notify the #RamDiscardListener about all populated parts
     * within the #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any notification fails, no further notifications are triggered
     * and an error is logged.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     * @section: the #MemoryRegionSection
     */
    void (*register_listener)(RamDiscardManager *rdm,
                              RamDiscardListener *rdl,
                              MemoryRegionSection *section);

    /**
     * @unregister_listener:
     *
     * Unregister a previously registered #RamDiscardListener via the
     * #RamDiscardManager after notifying the #RamDiscardListener about all
     * populated parts becoming unpopulated within the registered
     * #MemoryRegionSection.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     */
    void (*unregister_listener)(RamDiscardManager *rdm,
                                RamDiscardListener *rdl);
};

uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
                                                 const MemoryRegion *mr);

bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
                                      const MemoryRegionSection *section);

int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamPopulate replay_fn,
                                         void *opaque);

void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
                                          MemoryRegionSection *section,
                                          ReplayRamDiscard replay_fn,
                                          void *opaque);

void ram_discard_manager_register_listener(RamDiscardManager *rdm,
                                           RamDiscardListener *rdl,
                                           MemoryRegionSection *section);

void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                             RamDiscardListener *rdl);
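
/*
 * For example, walking all currently populated parts of a section with a
 * callback looks roughly like this (a sketch; my_map_range is hypothetical):
 *
 *     static int my_map_range(MemoryRegionSection *section, void *opaque)
 *     {
 *         // map/pin the populated range described by section
 *         return 0;
 *     }
 *
 *     int ret = ram_discard_manager_replay_populated(rdm, &section,
 *                                                    my_map_range, NULL);
 */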

/**
 * memory_get_xlat_addr: Extract addresses from a TLB entry
 *
 * @iotlb: pointer to an #IOMMUTLBEntry
 * @vaddr: virtual address
 * @ram_addr: RAM address
 * @read_only: indicates if writes are allowed
 * @mr_has_discard_manager: indicates memory is controlled by a
 *                          RamDiscardManager
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                          ram_addr_t *ram_addr, bool *read_only,
                          bool *mr_has_discard_manager, Error **errp);

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

/** MemoryRegion:
 *
 * A struct representing a memory region.
 */
struct MemoryRegion {
    Object parent_obj;

    /* private: */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool nonvolatile;
    bool rom_device;
    bool flush_coalesced_mmio;
    bool unmergeable;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;
    /* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */
    DeviceState *dev;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    int mapped_via_alias; /* Mapped via an alias, container might be NULL */
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    RamDiscardManager *rdm; /* Only for RAM */

    /* For devices designed to perform re-entrant IO into their own IO MRs */
    bool disable_reentrancy_guard;
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

#define MEMORY_LISTENER_PRIORITY_MIN          0
#define MEMORY_LISTENER_PRIORITY_ACCEL        10
#define MEMORY_LISTENER_PRIORITY_DEV_BACKEND  10

/**
 * struct MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    /**
     * @begin:
     *
     * Called at the beginning of an address space update transaction.
     * Followed by calls to #MemoryListener.region_add(),
     * #MemoryListener.region_del(), #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop() in
     * increasing address order.
     *
     * @listener: The #MemoryListener.
     */
    void (*begin)(MemoryListener *listener);

    /**
     * @commit:
     *
     * Called at the end of an address space update transaction,
     * after the last call to #MemoryListener.region_add(),
     * #MemoryListener.region_del() or #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop().
     *
     * @listener: The #MemoryListener.
     */
    void (*commit)(MemoryListener *listener);

    /**
     * @region_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is new in this address space
     * since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     */
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has disappeared in the address
     * space since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The old #MemoryRegionSection.
     */
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_nop:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is in the same place in the
     * address space as in the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_start:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop(), if dirty memory logging clients have
     * become active since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old_val: A bitmap of dirty memory logging clients that were active in
     *           the previous transaction.
     * @new_val: A bitmap of dirty memory logging clients that are active in
     *           the current transaction.
     */
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old_val, int new_val);

    /**
     * @log_stop:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop() and possibly after
     * #MemoryListener.log_start(), if dirty memory logging clients have
     * become inactive since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old_val: A bitmap of dirty memory logging clients that were active in
     *           the previous transaction.
     * @new_val: A bitmap of dirty memory logging clients that are active in
     *           the current transaction.
     */
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old_val, int new_val);

    /**
     * @log_sync:
     *
     * Called by memory_region_snapshot_and_clear_dirty() and
     * memory_global_dirty_log_sync(), before accessing QEMU's "official"
     * copy of the dirty memory bitmap for a #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_sync_global:
     *
     * This is the global version of @log_sync when the listener does
     * not have a way to synchronize the log with finer granularity.
     * When the listener registers with @log_sync_global defined, then
     * its @log_sync must be NULL. Vice versa.
     *
     * @listener: The #MemoryListener.
     * @last_stage: The last stage to synchronize the log during migration.
     *              The caller should guarantee that the synchronization with
     *              @last_stage set to true is triggered only once, after all
     *              VCPUs have been stopped.
     */
    void (*log_sync_global)(MemoryListener *listener, bool last_stage);

    /**
     * @log_clear:
     *
     * Called before reading the dirty memory bitmap for a
     * #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_global_start:
     *
     * Called by memory_global_dirty_log_start(), which
     * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space. #MemoryListener.log_global_start() is also
     * called when a #MemoryListener is added, if global dirty logging is
     * active at that time.
     *
     * @listener: The #MemoryListener.
     * @errp: pointer to Error*, to store an error if it happens.
     *
     * Return: true on success, else false setting @errp with error.
     */
    bool (*log_global_start)(MemoryListener *listener, Error **errp);

    /**
     * @log_global_stop:
     *
     * Called by memory_global_dirty_log_stop(), which
     * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_stop)(MemoryListener *listener);

    /**
     * @log_global_after_sync:
     *
     * Called after reading the dirty memory bitmap
     * for any #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_after_sync)(MemoryListener *listener);

    /**
     * @eventfd_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @match_data: The @match_data parameter for the new ioeventfd.
     * @data: The @data parameter for the new ioeventfd.
     * @e: The #EventNotifier parameter for the new ioeventfd.
     */
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @eventfd_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped an ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @match_data: The @match_data parameter for the dropped ioeventfd.
     * @data: The @data parameter for the dropped ioeventfd.
     * @e: The #EventNotifier parameter for the dropped ioeventfd.
     */
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @coalesced_io_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new coalesced
     * MMIO range registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);

    /**
     * @coalesced_io_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped a coalesced
     * MMIO range since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);
    /**
     * @priority:
     *
     * Governs the order in which memory listeners are invoked. Lower priorities
     * are invoked earlier for "add" or "start" callbacks, and later for "delete"
     * or "stop" callbacks.
     */
    unsigned priority;

    /**
     * @name:
     *
     * Name of the listener. It can be used in contexts where we'd like to
     * distinguish one memory listener from the rest.
     */
    const char *name;

    /* private: */
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};
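
/*
 * A minimal listener that only wants to observe new sections could look
 * like this (a sketch; the handler and listener names are illustrative):
 *
 *     static void my_region_add(MemoryListener *listener,
 *                               MemoryRegionSection *section)
 *     {
 *         // inspect section->offset_within_address_space and section->size
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .name = "my-listener",
 *         .region_add = my_region_add,
 *         .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */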

typedef struct AddressSpaceMapClient {
    QEMUBH *bh;
    QLIST_ENTRY(AddressSpaceMapClient) link;
} AddressSpaceMapClient;

#define DEFAULT_MAX_BOUNCE_BUFFER_SIZE (4096)

/**
 * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* private: */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;

    /* Accessed via RCU. */
    struct FlatView *current_map;

    int ioeventfd_nb;
    int ioeventfd_notifiers;
    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;

    /* Maximum DMA bounce buffer size used for indirect memory map requests */
    size_t max_bounce_buffer_size;
    /* Total size of bounce buffers currently allocated, atomically accessed */
    size_t bounce_buffer_size;
    /* List of callbacks to invoke when buffers free up */
    QemuMutex map_client_list_lock;
    QLIST_HEAD(, AddressSpaceMapClient) map_client_list;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;
typedef struct FlatRange FlatRange;

/* Flattened global view of current active memory hierarchy. Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
    return qatomic_rcu_read(&as->current_map);
}

/**
 * typedef flatview_cb: callback for flatview_for_each_range()
 *
 * @start: start address of the range within the FlatView
 * @len: length of the range in bytes
 * @mr: MemoryRegion covering this range
 * @offset_in_region: offset of the first byte of the range within @mr
 * @opaque: data pointer passed to flatview_for_each_range()
 *
 * Returns: true to stop the iteration, false to keep going.
 */
typedef bool (*flatview_cb)(Int128 start,
                            Int128 len,
                            const MemoryRegion *mr,
                            hwaddr offset_in_region,
                            void *opaque);

/**
 * flatview_for_each_range: Iterate through a FlatView
 * @fv: the FlatView to iterate through
 * @cb: function to call for each range
 * @opaque: opaque data pointer to pass to @cb
 *
 * A FlatView is made up of a list of non-overlapping ranges, each of
 * which is a slice of a MemoryRegion. This function iterates through
 * each range in @fv, calling @cb. The callback function can terminate
 * iteration early by returning 'true'.
 */
void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);
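
/*
 * For instance, every range of the current system memory view could be
 * dumped as follows (a sketch; hold the BQL or an RCU read lock so the
 * FlatView stays valid while iterating):
 *
 *     static bool dump_range(Int128 start, Int128 len,
 *                            const MemoryRegion *mr,
 *                            hwaddr offset_in_region, void *opaque)
 *     {
 *         printf("0x%" PRIx64 "+0x%" PRIx64 " -> %s\n",
 *                int128_get64(start), int128_get64(len),
 *                memory_region_name(mr));
 *         return false;    // keep iterating
 *     }
 *
 *     flatview_for_each_range(address_space_to_flatview(&address_space_memory),
 *                             dump_range, NULL);
 */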

static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
                                          MemoryRegionSection *b)
{
    return a->mr == b->mr &&
           a->fv == b->fv &&
           a->offset_within_region == b->offset_within_region &&
           a->offset_within_address_space == b->offset_within_address_space &&
           int128_eq(a->size, b->size) &&
           a->readonly == b->readonly &&
           a->nonvolatile == b->nonvolatile;
}

/**
 * memory_region_section_new_copy: Copy a memory region section
 *
 * Allocate memory for a new copy, copy the memory region section, and
 * properly take a reference on all relevant members.
 *
 * @s: the #MemoryRegionSection to copy
 */
MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s);

/**
 * memory_region_section_free_copy: Free a copied memory region section
 *
 * Free a copy of a memory section created via memory_region_section_new_copy(),
 * properly dropping references on all relevant members.
 *
 * @s: the #MemoryRegionSection to free
 */
void memory_region_section_free_copy(MemoryRegionSection *s);

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions. Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size);
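
/*
 * For example, a device with a small register block and a RAM window might
 * build its layout like this (a sketch; memory_region_add_subregion() and
 * get_system_memory() are declared elsewhere):
 *
 *     memory_region_init(&s->container, OBJECT(s), "mydev", 0x10000);
 *     memory_region_add_subregion(&s->container, 0x0, &s->regs);
 *     memory_region_add_subregion(&s->container, 0x1000, &s->ram);
 *     memory_region_add_subregion(get_system_memory(), 0x10000000,
 *                                 &s->container);
 */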

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug. MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL. This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug. MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);

/**
 * memory_region_init_ram_nomigrate: Initialize RAM memory region. Accesses
 *                                   into the region will modify memory
 *                                   directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_ram_flags_nomigrate: Initialize RAM memory region.
 *                                         Accesses into the region will
 *                                         modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE,
 *             RAM_GUEST_MEMFD.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
                                            Object *owner,
                                            const char *name,
                                            uint64_t size,
                                            uint32_t ram_flags,
                                            Error **errp);

/**
 * memory_region_init_resizeable_ram: Initialize memory region with resizable
 *                                    RAM. Accesses into the region will
 *                                    modify memory directly. Only an initial
 *                                    portion of this RAM is actually used.
 *                                    Changing the size while migrating
 *                                    can result in the migration being
 *                                    canceled.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef CONFIG_POSIX

/**
 * memory_region_init_ram_from_file: Initialize RAM memory region with a
 *                                   mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *             RAM_READONLY_FD, RAM_GUEST_MEMFD
 * @path: the path in which to allocate the RAM.
 * @offset: offset within the file referenced by path
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_from_file(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      ram_addr_t offset,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd: Initialize RAM memory region with a
 *                                 mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *             RAM_READONLY_FD, RAM_GUEST_MEMFD
 * @fd: the fd to mmap.
 * @offset: offset within the file referenced by fd
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    uint32_t ram_flags,
                                    int fd,
                                    ram_addr_t offset,
                                    Error **errp);
#endif

/**
 * memory_region_init_ram_ptr: Initialize RAM memory region from a
 *                             user-provided pointer. Accesses into the
 *                             region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram_device_ptr: Initialize RAM device memory region from
 *                                    a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device. The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly. However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided. Replaces
 * skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);
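
/*
 * For example, mirroring the first megabyte of a RAM region at a fixed
 * address could look like this (a sketch; MiB comes from "qemu/units.h"):
 *
 *     memory_region_init_alias(&s->ram_below_1m, OBJECT(s), "ram-below-1m",
 *                              ram_mr, 0, 1 * MiB);
 *     memory_region_add_subregion(sysmem, 0x0, &s->ram_below_1m);
 */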

/**
 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram_nomigrate()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_rom_device_nomigrate: Initialize a ROM memory region.
 *                                          Writes are handled via callbacks.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp);

/**
 * memory_region_init_iommu: Initialize a memory region of a custom type
 *                           that translates addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
 * @_iommu_mr should be a pointer to enough memory for an instance of
 * that subclass, @instance_size is the size of that subclass, and
 * @mrtypename is its name. This function will initialize @_iommu_mr as an
 * instance of the subclass, and its methods will then be called to handle
 * accesses to the memory region. See the documentation of
 * #IOMMUMemoryRegionClass for further details.
 *
 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
 * @instance_size: the IOMMUMemoryRegion subclass instance size
 * @mrtypename: the type name of the #IOMMUMemoryRegion
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_init_ram: Initialize RAM memory region. Accesses into the
 *                         region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count (must be
 *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
 * @name: name of the memory region
 * @size: size of the region in bytes
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * This function allocates RAM for a board model or device, and
 * arranges for it to be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);
1590
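/*
 * Illustrative sketch (not part of this header): typical device RAM setup.
 * "mydev.ram", the 16 MiB size and the mapping address are hypothetical;
 * system_memory would be the board's container region obtained elsewhere,
 * and MiB comes from "qemu/units.h".
 *
 *   MemoryRegion *ram = g_new0(MemoryRegion, 1);
 *
 *   if (!memory_region_init_ram(ram, OBJECT(dev), "mydev.ram",
 *                               16 * MiB, errp)) {
 *       return;
 *   }
 *   memory_region_add_subregion(system_memory, 0x80000000, ram);
 */
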
bool memory_region_init_ram_guest_memfd(MemoryRegion *mr,
                                        Object *owner,
                                        const char *name,
                                        uint64_t size,
                                        Error **errp);

/**
 * memory_region_init_rom: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly(). This includes arranging for the
 * contents to be migrated.
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_rom(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

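/*
 * Illustrative sketch (not part of this header): creating a ROM and filling
 * it with firmware before the guest starts. Names, sizes and the address
 * are hypothetical; the contents are loaded through the host pointer, which
 * is permitted even though guest writes to the region are ignored.
 *
 *   MemoryRegion *rom = g_new0(MemoryRegion, 1);
 *
 *   if (!memory_region_init_rom(rom, OBJECT(dev), "mydev.rom",
 *                               fw_size, errp)) {
 *       return;
 *   }
 *   memcpy(memory_region_get_ram_ptr(rom), fw_data, fw_size);
 *   memory_region_add_subregion(system_memory, 0xfff00000, rom);
 */
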
/**
 * memory_region_init_rom_device: Initialize a ROM memory region.
 *                                Writes are handled via callbacks.
 *
 * This function initializes a memory region backed by RAM for reads
 * and callbacks for writes, and arranges for the RAM backing to
 * be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);

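/*
 * Illustrative sketch (not part of this header): a flash-like device whose
 * reads are satisfied from RAM while writes go through a callback (e.g. to
 * decode a programming command set). All names here are hypothetical.
 *
 *   static void my_flash_write(void *opaque, hwaddr addr,
 *                              uint64_t data, unsigned size)
 *   {
 *       MyFlashState *s = opaque;
 *       // decode the command, update the backing RAM, then call
 *       // memory_region_flush_rom_device(&s->mem, addr, size);
 *   }
 *
 *   static const MemoryRegionOps my_flash_ops = {
 *       .write = my_flash_write,
 *       .endianness = DEVICE_LITTLE_ENDIAN,
 *   };
 *
 *   memory_region_init_rom_device(&s->mem, OBJECT(s), &my_flash_ops, s,
 *                                 "my-flash", size, errp);
 */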

/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device backed ram region
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_is_protected: check whether a memory region is protected
 *
 * Returns %true if a memory region is protected RAM and cannot be accessed
 * via standard mechanisms, e.g. DMA.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_protected(MemoryRegion *mr);

/**
 * memory_region_has_guest_memfd: check whether a memory region has guest_memfd
 * associated
 *
 * Returns %true if a memory region's ram_block has valid guest_memfd assigned.
 *
 * @mr: the memory region being queried
 */
bool memory_region_has_guest_memfd(MemoryRegion *mr);

/**
 * memory_region_get_iommu: check whether a memory region is an iommu
 *
 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
 * otherwise NULL.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_iommu(mr->alias);
    }
    if (mr->is_iommu) {
        return (IOMMUMemoryRegion *) mr;
    }
    return NULL;
}

/**
 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
 * if an iommu or NULL if not
 *
 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
 * otherwise NULL. This is a fast path avoiding QOM checks; use with caution.
 *
 * @iommu_mr: the memory region being queried
 */
static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
    IOMMUMemoryRegion *iommu_mr)
{
    return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
}

#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)


/**
 * memory_region_iommu_get_min_page_size: get minimum supported page size
 * for an iommu
 *
 * Returns minimum supported page size for an iommu.
 *
 * @iommu_mr: the memory region being queried
 */
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * Note: for any IOMMU implementation, an in-place mapping change
 * should be notified with an UNMAP followed by a MAP.
 *
 * @iommu_mr: the memory region that was changed
 * @iommu_idx: the IOMMU index for the translation table which has changed
 * @event: TLB event with the new entry in the IOMMU translation table.
 *         The entry replaces all old entries for the same virtual I/O address
 *         range.
 */
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                const IOMMUTLBEvent event);

/**
 * memory_region_notify_iommu_one: notify a change in an IOMMU translation
 * entry to a single notifier
 *
 * This works just like memory_region_notify_iommu(), but it only
 * notifies a specific notifier, not all of them.
 *
 * @notifier: the notifier to be notified
 * @event: TLB event with the new entry in the IOMMU translation table.
 *         The entry replaces all old entries for the same virtual I/O address
 *         range.
 */
void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
                                    const IOMMUTLBEvent *event);

/**
 * memory_region_unmap_iommu_notifier_range: notify an unmap for an IOMMU
 *                                           translation that covers the
 *                                           range of a notifier
 *
 * @notifier: the notifier to be notified
 */
void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier);


/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * Returns 0 on success, or a negative errno otherwise. In particular,
 * -EINVAL indicates that at least one of the attributes of the notifier
 * is not supported (flag/range) by the IOMMU memory region. In case of
 * error, the error object must be created and returned via @errp.
 *
 * @mr: the memory region to observe
 * @n: the IOMMUNotifier to be added; the notify callback receives a
 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
 *     ceases to be valid on exit from the notifier.
 * @errp: pointer to Error*, to store an error if it happens.
 */
int memory_region_register_iommu_notifier(MemoryRegion *mr,
                                          IOMMUNotifier *n, Error **errp);

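/*
 * Illustrative sketch (not part of this header): registering a MAP/UNMAP
 * notifier on an IOMMU region, as a VFIO- or vhost-like consumer might.
 * iommu_notifier_init() is declared earlier in this header; the callback
 * name and the full-address-space range used here are hypothetical.
 *
 *   static void my_iommu_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
 *   {
 *       // react to the new mapping; "entry" is only valid during the call
 *   }
 *
 *   IOMMUNotifier n;
 *
 *   iommu_notifier_init(&n, my_iommu_notify, IOMMU_NOTIFIER_ALL,
 *                       0, HWADDR_MAX, 0);
 *   if (memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr),
 *                                             &n, errp) < 0) {
 *       return;   // attributes not supported by this IOMMU
 *   }
 *   memory_region_iommu_replay(iommu_mr, &n);
 */
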
/**
 * memory_region_iommu_replay: replay existing IOMMU translations to
 * a notifier with the minimum page granularity returned by
 * the IOMMU's get_min_page_size() callback.
 *
 * Note: this is not related to record-and-replay functionality.
 *
 * @iommu_mr: the memory region to observe
 * @n: the notifier to which to replay iommu mappings
 */
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @mr: the memory region which was observed and for which notify_stopped()
 *      needs to be called
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n);

/**
 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
 * defined on the IOMMU.
 *
 * Returns 0 on success, or a negative errno otherwise. In particular,
 * -EINVAL indicates that the IOMMU does not support the requested
 * attribute.
 *
 * @iommu_mr: the memory region
 * @attr: the requested attribute
 * @data: a pointer to the requested attribute data
 */
int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data);

/**
 * memory_region_iommu_attrs_to_index: return the IOMMU index to
 * use for translations with the given memory transaction attributes.
 *
 * @iommu_mr: the memory region
 * @attrs: the memory transaction attributes
 */
int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs);

/**
 * memory_region_iommu_num_indexes: return the total number of IOMMU
 * indexes that this IOMMU supports.
 *
 * @iommu_mr: the memory region
 */
int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes for the given client
 *
 * @mr: the memory region being queried
 * @client: the client being queried
 */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);

/**
 * memory_region_get_dirty_log_mask: return the clients for which a
 * memory region is logging writes.
 *
 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 * are the bit indices.
 *
 * @mr: the memory region being queried
 */
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

/**
 * memory_region_is_nonvolatile: check whether a memory region is non-volatile
 *
 * Returns %true if a memory region is non-volatile memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
{
    return mr->nonvolatile;
}

/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_from_host: Convert a pointer into a RAM memory region
 * and an offset within it.
 *
 * Given a host pointer inside a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 * the MemoryRegion and the offset within it.
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore. If the caller is not within an RCU critical
 * section and does not hold the BQL, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @ptr: the host pointer to be converted
 * @offset: the offset within memory region
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore. If the caller is not within an RCU critical
 * section and does not hold the BQL, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/* memory_region_ram_resize: Resize a RAM region.
 *
 * Resizing RAM while migrating can result in the migration being canceled.
 * Care has to be taken if the guest might have already detected the memory.
 *
 * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size of the region
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
                              Error **errp);

/**
 * memory_region_msync: Synchronize selected address range of
 * a memory mapped region
 *
 * @mr: the memory region being synced
 * @addr: the initial address of the range to be synced
 * @size: the size of the range to be synced
 */
void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);

/**
 * memory_region_writeback: Trigger cache writeback for
 * selected address range
 *
 * @mr: the memory region to be updated
 * @addr: the initial address of the range to be written back
 * @size: the size of the range to be written back
 */
void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
 *
 * This function is called when the caller wants to clear the remote
 * dirty bitmap of a memory range within the memory region. This can
 * be used by e.g. KVM to manually clear the dirty log when
 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT support is declared by the host
 * kernel.
 *
 * @mr: the memory region to clear the dirty log upon
 * @start: start address offset within the memory region
 * @len: length of the range whose dirty bitmap is to be cleared
 */
void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
                                      hwaddr len);


/**
 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
 *                                         bitmap and clear it.
 *
 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
 * returns the snapshot. The snapshot can then be used to query dirty
 * status, using memory_region_snapshot_get_dirty. Snapshotting allows
 * querying the same page multiple times, which is especially useful for
 * display updates where the scanlines often are not page aligned.
 *
 * The dirty bitmap region which gets copied into the snapshot (and
 * cleared afterwards) can be larger than requested. The boundaries
 * are rounded up/down so complete bitmap longs (covering 64 pages on
 * 64-bit hosts) can be copied over into the bitmap snapshot. This
 * isn't a problem for display updates as the extra pages are outside
 * the visible area, and in case the visible area changes a full
 * display redraw is due anyway. Should other use cases for this
 * function emerge we might have to revisit this implementation
 * detail.
 *
 * Use g_free to release DirtyBitmapSnapshot.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client);

/**
 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
 *                                   in the specified dirty bitmap snapshot.
 *
 * @mr: the memory region being queried.
 * @snap: the dirty bitmap snapshot
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 */
bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
                                      DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size);

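/*
 * Illustrative sketch (not part of this header): a display-update loop
 * using a dirty bitmap snapshot. The "stride"/"height" scanline layout is
 * hypothetical; the VGA client is the typical user of this interface.
 *
 *   DirtyBitmapSnapshot *snap;
 *   int y;
 *
 *   snap = memory_region_snapshot_and_clear_dirty(mr, 0,
 *                                                 memory_region_size(mr),
 *                                                 DIRTY_MEMORY_VGA);
 *   for (y = 0; y < height; y++) {
 *       if (memory_region_snapshot_get_dirty(mr, snap, y * stride, stride)) {
 *           // redraw scanline y
 *       }
 *   }
 *   g_free(snap);
 */
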
/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 * client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);

/**
 * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
 *                                 TBs (for self-modifying code).
 *
 * The MemoryRegionOps->write() callback of a ROM device must use this function
 * to mark byte ranges that have been modified internally, such as by directly
 * accessing the memory returned by memory_region_get_ram_ptr().
 *
 * This function marks the range dirty and invalidates TBs so that TCG can
 * detect self-modifying code.
 *
 * @mr: the region being flushed.
 * @addr: the start, relative to the start of the region, of the range being
 *        flushed.
 * @size: the size, in bytes, of the range being flushed.
 */
void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_set_nonvolatile: Turn a memory region non-volatile
 *
 * Allows a memory region to be marked as non-volatile.
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @nonvolatile: whether the region is to be non-volatile.
 */
void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);


/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing. MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions. Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);


/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed. This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced. Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves. For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event. The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 **/
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

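/*
 * Illustrative sketch (not part of this header): wiring a doorbell register
 * to an eventfd, virtio-style. The 0x10 offset, the 4-byte value-matched
 * write and the QUEUE_ID constant are hypothetical.
 *
 *   EventNotifier notifier;
 *
 *   event_notifier_init(&notifier, 0);
 *   memory_region_add_eventfd(&s->mmio, 0x10, 4,
 *                             true, QUEUE_ID,   // only this value triggers
 *                             &notifier);
 *   // ...later, undo with matching arguments:
 *   memory_region_del_eventfd(&s->mmio, 0x10, 4, true, QUEUE_ID, &notifier);
 */
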
/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset. The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping). A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset. The subregion may overlap with other
 * subregions. Conflicts are resolved by having a higher @priority hide a
 * lower @priority. Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);

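/*
 * Illustrative sketch (not part of this header): building a small hierarchy.
 * "sysmem" is a container region and the addresses are hypothetical. The
 * overlapping region is mapped with higher priority so that, while enabled,
 * it hides the portion of RAM underneath it.
 *
 *   memory_region_add_subregion(sysmem, 0x00000000, ram);
 *   memory_region_add_subregion_overlap(sysmem, 0x000a0000, smram, 1);
 *   memory_region_set_enabled(smram, false);   // RAM visible again
 */
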
/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 *
 * @mr: the region to be queried
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

uint64_t memory_region_get_alignment(const MemoryRegion *mr);
/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region. A disabled memory region
 * ignores all accesses to itself and its subregions. It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to container region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

/*
 * memory_region_set_size: dynamically update the size of a region.
 *
 * Dynamically updates the size of a region.
 *
 * @mr: the region to be updated
 * @size: the new size of the region.
 */
void memory_region_set_size(MemoryRegion *mr, uint64_t size);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() had changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);


/*
 * memory_region_set_unmergeable: Set a memory region unmergeable
 *
 * Mark a memory region unmergeable, resulting in the memory region (or
 * everything contained in a memory region container) not getting merged when
 * simplifying the address space and notifying memory listeners. Consequently,
 * memory listeners will never get notified about ranges that are larger than
 * the original memory regions.
 *
 * This is primarily useful when multiple aliases to a RAM memory region are
 * mapped into a memory region container, and updates (e.g., enable/disable or
 * map/unmap) of individual memory region aliases are not supposed to affect
 * other memory regions in the same container.
 *
 * @mr: the #MemoryRegion to be updated
 * @unmergeable: whether to mark the #MemoryRegion unmergeable
 */
void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable);

/**
 * memory_region_present: checks if an address relative to a @container
 * translates into a #MemoryRegion within @container
 *
 * Answers whether a #MemoryRegion within @container covers the address
 * @addr.
 *
 * @container: a #MemoryRegion within which @addr is a relative address
 * @addr: the area within @container to be searched
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr);

/**
 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
 * into another memory region, which does not necessarily imply that it is
 * mapped into an address space.
 *
 * @mr: a #MemoryRegion which should be checked if it's mapped
 */
bool memory_region_is_mapped(MemoryRegion *mr);

/**
 * memory_region_get_ram_discard_manager: get the #RamDiscardManager for a
 * #MemoryRegion
 *
 * The #RamDiscardManager cannot change while a memory region is mapped.
 *
 * @mr: the #MemoryRegion
 */
RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr);

/**
 * memory_region_has_ram_discard_manager: check whether a #MemoryRegion has a
 * #RamDiscardManager assigned
 *
 * @mr: the #MemoryRegion
 */
static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr)
{
    return !!memory_region_get_ram_discard_manager(mr);
}


/**
 * memory_region_set_ram_discard_manager: set the #RamDiscardManager for a
 * #MemoryRegion
 *
 * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion
 * that does not cover RAM, or a #MemoryRegion that already has a
 * #RamDiscardManager assigned.
 *
 * @mr: the #MemoryRegion
 * @rdm: #RamDiscardManager to set
 */
void memory_region_set_ram_discard_manager(MemoryRegion *mr,
                                           RamDiscardManager *rdm);

/**
 * memory_region_find: translate an address/size relative to a
 * MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 * - @size = 0 iff no overlap was found
 * - @mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one. However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
 * - @offset_within_address_space >= @addr
 * - @offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @mr to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);

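/*
 * Illustrative sketch (not part of this header): probing what backs a given
 * range of a root region. Note the assumption here that the returned section
 * holds a reference on .mr which should be dropped with memory_region_unref()
 * when done; verify this at the call site.
 *
 *   MemoryRegionSection section = memory_region_find(sysmem, addr, 4);
 *
 *   if (section.mr) {
 *       // the range starting at section.offset_within_region inside
 *       // section.mr backs guest address addr
 *       memory_region_unref(section.mr);
 *   }
 */
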
/**
 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for all address spaces.
 *
 * @last_stage: whether this is the last stage of live migration
 */
void memory_global_dirty_log_sync(bool last_stage);

/**
 * memory_global_after_dirty_log_sync: complete a dirty log synchronization
 *
 * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
 * This function must be called after the dirty log bitmap is cleared, and
 * before dirty guest memory pages are read. If you are using
 * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
 * care of doing this.
 */
void memory_global_after_dirty_log_sync(void);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
 */
void memory_region_transaction_commit(void);

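/*
 * Illustrative sketch (not part of this header): batching topology changes
 * so listeners see a single atomic update. The region and address names are
 * hypothetical.
 *
 *   memory_region_transaction_begin();
 *   memory_region_set_enabled(bar_mr, false);
 *   memory_region_set_address(bar_mr, new_base);
 *   memory_region_set_enabled(bar_mr, true);
 *   memory_region_transaction_commit();   // one flat view rebuild
 */
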
/**
 * memory_listener_register: register callbacks to be called when memory
 *                           sections are mapped into or unmapped from an
 *                           address space
 *
 * @listener: an object containing the callbacks to be called
 * @filter: if non-%NULL, only regions in this address space will be observed
 */
void memory_listener_register(MemoryListener *listener, AddressSpace *filter);

/**
 * memory_listener_unregister: undo the effect of memory_listener_register()
 *
 * @listener: an object containing the callbacks to be removed
 */
void memory_listener_unregister(MemoryListener *listener);

/**
 * memory_global_dirty_log_start: begin dirty logging for all regions
 *
 * @flags: purpose of starting dirty log, migration or dirty rate
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_global_dirty_log_start(unsigned int flags, Error **errp);

/**
 * memory_global_dirty_log_stop: end dirty logging for all regions
 *
 * @flags: purpose of stopping dirty log, migration or dirty rate
 */
void memory_global_dirty_log_stop(unsigned int flags);

void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);

bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
                                unsigned size, bool is_write,
                                MemTxAttrs attrs);

/**
 * memory_region_dispatch_read: perform a read directly to the specified
 * MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @pval: pointer to uint64_t which the data is written to
 * @op: size, sign, and endianness of the memory operation
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        MemOp op,
                                        MemTxAttrs attrs);
/**
 * memory_region_dispatch_write: perform a write directly to the specified
 * MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @data: data to write
 * @op: size, sign, and endianness of the memory operation
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         MemOp op,
                                         MemTxAttrs attrs);

/**
 * address_space_init: initializes an address space
 *
 * @as: an uninitialized #AddressSpace
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name. The name is only used for debugging
 *        output.
 */
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);

/**
 * address_space_destroy: destroy an address space
 *
 * Releases all resources associated with an address space. After an address space
 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
 * as well.
 *
 * @as: address space to be destroyed
 */
void address_space_destroy(AddressSpace *as);

/**
 * address_space_remove_listeners: unregister all listeners of an address space
 *
 * Removes all callbacks previously registered with memory_listener_register()
 * for @as.
 *
 * @as: an initialized #AddressSpace
 */
void address_space_remove_listeners(AddressSpace *as);

/**
 * address_space_rw: read from or write to an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to read or write
 * @is_write: indicates the transfer direction
 */
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
                             MemTxAttrs attrs, void *buf,
                             hwaddr len, bool is_write);

/**
 * address_space_write: write to address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 */
MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs,
                                const void *buf, hwaddr len);

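/*
 * Illustrative sketch (not part of this header): a device performing DMA
 * into guest memory and checking the result. "as" would typically be the
 * device's DMA address space; the buffer and dma_addr are hypothetical.
 *
 *   uint8_t buf[64];
 *
 *   // ... fill buf with the payload ...
 *   if (address_space_write(as, dma_addr, MEMTXATTRS_UNSPECIFIED,
 *                           buf, sizeof(buf)) != MEMTX_OK) {
 *       // report a DMA error to the guest
 *   }
 */
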
/**
 * address_space_write_rom: write to address space, including ROM.
 *
 * This function writes to the specified address space, but will
 * write data to both ROM and RAM. This is used for non-guest
 * writes like writes from the gdb debug stub or initial loading
 * of ROM contents.
 *
 * Note that portions of the write which attempt to write data to
 * a device will be silently ignored -- only real RAM and ROM will
 * be written to.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 */
MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs,
                                    const void *buf, hwaddr len);

/* address_space_ld*: load from an address space
 * address_space_st*: store to an address space
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address within the AddressSpace.
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *          if NULL, this information is discarded
 */

#define SUFFIX
#define ARG1 as
#define ARG1_DECL AddressSpace *as
#include "exec/memory_ldst.h.inc"

#define SUFFIX
#define ARG1 as
#define ARG1_DECL AddressSpace *as
#include "exec/memory_ldst_phys.h.inc"


struct MemoryRegionCache {
    uint8_t *ptr;
    hwaddr xlat;
    hwaddr len;
    FlatView *fv;
    MemoryRegionSection mrs;
    bool is_write;
};

/* address_space_ld*_cached: load from a cached #MemoryRegion
 * address_space_st*_cached: store into a cached #MemoryRegion
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address. The address is
 * a physical address in the AddressSpace, but it must lie within
 * a #MemoryRegion that was mapped with address_space_cache_init.
 *
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @cache: previously initialized #MemoryRegionCache to be accessed
 * @addr: address within the address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *          if NULL, this information is discarded
 */

#define SUFFIX _cached_slow
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
#include "exec/memory_ldst.h.inc"

/* Inline fast path for direct RAM access. */
static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len);
    if (likely(cache->ptr)) {
        return ldub_p(cache->ptr + addr);
    } else {
        return address_space_ldub_cached_slow(cache, addr, attrs, result);
    }
}


static inline void address_space_stb_cached(MemoryRegionCache *cache,
    hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len);
    if (likely(cache->ptr)) {
        stb_p(cache->ptr + addr, val);
    } else {
        address_space_stb_cached_slow(cache, addr, val, attrs, result);
    }
}

#define ENDIANNESS _le
#include "exec/memory_ldst_cached.h.inc"

#define ENDIANNESS _be
#include "exec/memory_ldst_cached.h.inc"

#define SUFFIX _cached
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
#include "exec/memory_ldst_phys.h.inc"

/* address_space_cache_init: prepare for repeated access to a physical
 * memory region
 *
 * @cache: #MemoryRegionCache to be filled
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of buffer
 * @is_write: indicates the transfer direction
 *
 * Will only work with RAM, and may map a subset of the requested range by
 * returning a value that is less than @len. On failure, returns a negative
 * errno value.
 *
 * Because it only works with RAM, this function can be used for
 * read-modify-write operations. In this case, is_write should be %true.
 *
 * Note that addresses passed to the address_space_*_cached functions
 * are relative to @addr.
 */
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write);

/**
 * address_space_cache_init_empty: Initialize empty #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache to operate on.
 *
 * Initializes a #MemoryRegionCache structure without a memory region
 * attached. A cache initialized this way can only be safely destroyed,
 * not used.
 */
static inline void address_space_cache_init_empty(MemoryRegionCache *cache)
{
    cache->mrs.mr = NULL;
    /* There is no real need to initialize fv, but it makes Coverity happy. */
    cache->fv = NULL;
}

/**
 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache to operate on.
 * @addr: The first physical address that was written, relative to the
 *        address that was passed to @address_space_cache_init.
 * @access_len: The number of bytes that were written starting at @addr.
 */
void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len);

/**
 * address_space_cache_destroy: free a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache whose memory should be released.
 */
void address_space_cache_destroy(MemoryRegionCache *cache);

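/*
 * Illustrative sketch (not part of this header): repeated accesses to a
 * guest data structure (e.g. a ring) through a MemoryRegionCache. The
 * ring_addr/ring_len names are hypothetical.
 *
 *   MemoryRegionCache cache;
 *   uint16_t idx;
 *
 *   if (address_space_cache_init(&cache, as, ring_addr, ring_len, true) < 0) {
 *       return;                      // not RAM, or not mappable
 *   }
 *   address_space_read_cached(&cache, 0, &idx, sizeof(idx));
 *   idx++;
 *   address_space_write_cached(&cache, 0, &idx, sizeof(idx));
 *   address_space_cache_invalidate(&cache, 0, sizeof(idx));
 *   address_space_cache_destroy(&cache);
 */
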
/* address_space_get_iotlb_entry: translate an address into an IOTLB
 * entry. Should be called from an RCU critical section.
 */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write, MemTxAttrs attrs);

/* address_space_translate: translate an address range within an address
 * space into a MemoryRegion and an address range within that region. Should
 * be called from an RCU critical section, to avoid that the last reference
 * to the returned region disappears after address_space_translate returns.
 *
 * @fv: #FlatView to be accessed
 * @addr: address within that address space
 * @xlat: pointer to address within the returned memory region section's
 *        #MemoryRegion.
 * @len: pointer to length
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
MemoryRegion *flatview_translate(FlatView *fv,
                                 hwaddr addr, hwaddr *xlat,
                                 hwaddr *len, bool is_write,
                                 MemTxAttrs attrs);

static inline MemoryRegion *address_space_translate(AddressSpace *as,
                                                    hwaddr addr, hwaddr *xlat,
                                                    hwaddr *len, bool is_write,
                                                    MemTxAttrs attrs)
{
    return flatview_translate(address_space_to_flatview(as),
                              addr, xlat, len, is_write, attrs);
}

/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, addr and len should be aligned to a page size. This limitation
 * will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
                                bool is_write, MemTxAttrs attrs);

/* address_space_map: map a physical memory region into a host virtual address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL and set *@plen to zero, if resources needed to perform
 * the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use address_space_register_map_client() to know when retrying the map
 * operation is likely to succeed.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
void *address_space_map(AddressSpace *as, hwaddr addr,
                        hwaddr *plen, bool is_write, MemTxAttrs attrs);

/* address_space_unmap: Unmaps a memory region previously mapped by
 * address_space_map()
 *
 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
 * the amount of memory that was actually read or written by the caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by address_space_map()
 * @len: buffer length as returned by address_space_map()
 * @access_len: amount of data actually transferred
 * @is_write: indicates the transfer direction
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         bool is_write, hwaddr access_len);

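/*
 * Illustrative sketch (not part of this header): the map/unmap pattern for
 * zero-copy access to guest memory. The fill_buffer() producer is
 * hypothetical, and the fallback path for a NULL return (bounce-buffer
 * exhaustion) is elided here.
 *
 *   hwaddr plen = len;
 *   void *p = address_space_map(as, addr, &plen, true,
 *                               MEMTXATTRS_UNSPECIFIED);
 *
 *   if (p) {
 *       fill_buffer(p, plen);        // plen may be less than len
 *       address_space_unmap(as, p, plen, true, plen);
 *   }
 */
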
/*
 * address_space_register_map_client: Register a callback to invoke when
 * resources for address_space_map() are available again.
 *
 * address_space_map may fail when there are not enough resources available,
 * such as when bounce buffer memory would exceed the limit. The callback can
 * be used to retry the address_space_map operation. Note that the callback
 * gets automatically removed after firing.
 *
 * @as: #AddressSpace to be accessed
 * @bh: callback to invoke when address_space_map() retry is appropriate
 */
void address_space_register_map_client(AddressSpace *as, QEMUBH *bh);

/*
 * address_space_unregister_map_client: Unregister a callback that has
 * previously been registered and not fired yet.
 *
 * @as: #AddressSpace to be accessed
 * @bh: callback to unregister
 */
void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh);

/* Internal functions, part of the implementation of address_space_read. */
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, void *buf, hwaddr len);
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
                                   MemTxAttrs attrs, void *buf,
                                   hwaddr len, hwaddr addr1, hwaddr l,
                                   MemoryRegion *mr);
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);

/* Internal functions, part of the implementation of address_space_read_cached
 * and address_space_write_cached. */
MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
                                           hwaddr addr, void *buf, hwaddr len);
MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
                                            hwaddr addr, const void *buf,
                                            hwaddr len);

int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr);
bool prepare_mmio_access(MemoryRegion *mr);

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (is_write) {
        return memory_region_is_ram(mr) && !mr->readonly &&
               !mr->rom_device && !memory_region_is_ram_device(mr);
    } else {
        return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
               memory_region_is_romd(mr);
    }
}


/**
 * address_space_read: read from an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault). Called within RCU critical section.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline __attribute__((__always_inline__))
MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, void *buf,
                               hwaddr len)
{
    MemTxResult result = MEMTX_OK;
    hwaddr l, addr1;
    void *ptr;
    MemoryRegion *mr;
    FlatView *fv;

    if (__builtin_constant_p(len)) {
        if (len) {
            RCU_READ_LOCK_GUARD();
            fv = address_space_to_flatview(as);
            l = len;
            mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
            if (len == l && memory_access_is_direct(mr, false)) {
                ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
                memcpy(buf, ptr, len);
            } else {
                result = flatview_read_continue(fv, addr, attrs, buf, len,
                                                addr1, l, mr);
            }
        }
    } else {
        result = address_space_read_full(as, addr, attrs, buf, len);
    }
    return result;
}


/**
 * address_space_read_cached: read from a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline MemTxResult
address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
                          void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
    if (likely(cache->ptr)) {
        memcpy(buf, cache->ptr + addr, len);
        return MEMTX_OK;
    } else {
        return address_space_read_cached_slow(cache, addr, buf, len);
    }
}

/**
 * address_space_write_cached: write to a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline MemTxResult
address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
                           const void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    if (likely(cache->ptr)) {
        memcpy(cache->ptr + addr, buf, len);
        return MEMTX_OK;
    } else {
        return address_space_write_cached_slow(cache, addr, buf, len);
    }
}
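
/*
 * Example (illustrative sketch): mapping a range once with
 * address_space_cache_init() and then issuing cheap repeated accesses
 * against the cache; as, base and buf are placeholders. The init call
 * may map less than requested, or fail with a negative value, so the
 * result is checked before use:
 *
 *   MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *   int64_t len;
 *
 *   len = address_space_cache_init(&cache, as, base, 16, true);
 *   if (len < 16) {
 *       ... mapping failed or was truncated ...
 *   }
 *   address_space_read_cached(&cache, 0, buf, 8);
 *   address_space_write_cached(&cache, 8, buf, 8);
 *   address_space_cache_invalidate(&cache, 8, 8);
 *   address_space_cache_destroy(&cache);
 */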

/**
 * address_space_set: Fill address space with a constant byte.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @c: constant byte to fill the memory
 * @len: the number of bytes to fill with the constant byte
 * @attrs: memory transaction attributes
 */
MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
                              uint8_t c, hwaddr len, MemTxAttrs attrs);
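
/*
 * Example (illustrative sketch): zero-filling a page-sized range, as a
 * device model might do to clear a guest buffer; as and addr are
 * placeholders:
 *
 *   if (address_space_set(as, addr, 0, 4096,
 *                         MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
 *       ... handle the failed transaction ...
 *   }
 */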

#ifdef COMPILING_PER_TARGET
/* enum device_endian to MemOp. */
static inline MemOp devend_memop(enum device_endian end)
{
    QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
                      DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);

#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
    /* Swap if non-host endianness or native (target) endianness */
    return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
#else
    const int non_host_endianness =
        DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN;

    /* In this case, native (target) endianness needs no swap. */
    return (end == non_host_endianness) ? MO_BSWAP : 0;
#endif
}
#endif /* COMPILING_PER_TARGET */
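
/*
 * Example (illustrative sketch): combining devend_memop() with
 * size_memop() to build the MemOp for a dispatch call, as a per-target
 * device model might do; mr, addr and attrs are placeholders:
 *
 *   uint64_t data;
 *   MemOp op = size_memop(4) | devend_memop(DEVICE_LITTLE_ENDIAN);
 *
 *   memory_region_dispatch_read(mr, addr, &data, op, attrs);
 */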

/*
 * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
 * to manage the actual amount of memory consumed by the VM (in which case the
 * memory provided by RAM blocks might be bigger than the desired memory
 * consumption). This *must* be set if:
 * - Discarding parts of a RAM block does not result in the change being
 *   reflected in the VM and the pages getting freed.
 * - All memory in RAM blocks is pinned or duplicated, blindly invalidating
 *   any previous discards.
 * - Discarding parts of a RAM block will result in integrity issues (e.g.,
 *   encrypted VMs).
 * Technologies that only temporarily pin the current working set of a
 * driver are fine, because we don't expect such pages to be discarded
 * (esp. based on guest action like balloon inflation).
 *
 * This is *not* to be used to protect from concurrent discards (esp.,
 * postcopy).
 *
 * Returns 0 if successful. Returns -EBUSY if a technology that relies on
 * discards to work reliably is active.
 */
int ram_block_discard_disable(bool state);
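
/*
 * Example (illustrative sketch): a device that pins all guest memory,
 * e.g. for DMA, would typically inhibit discards while realized and
 * re-enable them on teardown; error propagation is elided:
 *
 *   if (ram_block_discard_disable(true)) {
 *       ... -EBUSY: a technology relying on discards is active ...
 *   }
 *   ... pin guest memory ...
 *   ram_block_discard_disable(false);
 */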

/*
 * See ram_block_discard_disable(): only disable uncoordinated discards,
 * keeping coordinated discards (via the RamDiscardManager) enabled.
 */
int ram_block_uncoordinated_discard_disable(bool state);

/*
 * Inhibit technologies that disable discarding of pages in RAM blocks.
 *
 * Returns 0 if successful. Returns -EBUSY if discards have already been
 * disabled (set to broken).
 */
int ram_block_discard_require(bool state);

/*
 * See ram_block_discard_require(): only inhibit technologies that disable
 * uncoordinated discarding of pages in RAM blocks, allowing co-existence with
 * technologies that only inhibit uncoordinated discards (via the
 * RamDiscardManager).
 */
int ram_block_coordinated_discard_require(bool state);

/*
 * Test if any discarding of memory in ram blocks is disabled.
 */
bool ram_block_discard_is_disabled(void);

/*
 * Test if any discarding of memory in ram blocks is required to work reliably.
 */
bool ram_block_discard_is_required(void);

#endif /* !CONFIG_USER_ONLY */

#endif /* MEMORY_H */