xref: /openbmc/qemu/include/hw/mem/memory-device.h (revision e0c72452)
/*
 * Memory Device Interface
 *
 * Copyright (c) 2018 Red Hat, Inc.
 *
 * Authors:
 *  David Hildenbrand <david@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef MEMORY_DEVICE_H
#define MEMORY_DEVICE_H

#include "hw/qdev-core.h"
#include "qemu/typedefs.h"
#include "qapi/qapi-types-machine.h"
#include "qom/object.h"

#define TYPE_MEMORY_DEVICE "memory-device"

typedef struct MemoryDeviceClass MemoryDeviceClass;
DECLARE_CLASS_CHECKERS(MemoryDeviceClass, MEMORY_DEVICE,
                       TYPE_MEMORY_DEVICE)
#define MEMORY_DEVICE(obj) \
     INTERFACE_CHECK(MemoryDeviceState, (obj), TYPE_MEMORY_DEVICE)

typedef struct MemoryDeviceState MemoryDeviceState;
/**
 * MemoryDeviceClass:
 *
 * All memory devices need to implement TYPE_MEMORY_DEVICE as an interface.
 *
 * A memory device is a device that owns a memory region which is
 * mapped into guest physical address space at a certain address. The
 * address in guest physical memory can either be specified explicitly
 * or be assigned automatically.
 *
 * Conceptually, memory devices only span one memory region. If multiple
 * successive memory regions are used, a covering memory region has to
 * be provided. Scattered memory regions are not supported for single
 * devices.
 *
 * The device memory region returned via @get_memory_region may either be a
 * single RAM memory region or a memory region container with subregions
 * that are RAM memory regions or aliases to RAM memory regions. Other
 * memory regions or subregions are not supported.
 *
 * If the device memory region returned via @get_memory_region is a
 * memory region container, subregions may be dynamically (un)mapped
 * as long as the number of memslots returned by @get_memslots() is not
 * exceeded and as long as all memory regions are of the same kind (e.g.,
 * all RAM or all ROM).
 */
struct MemoryDeviceClass {
    /* private */
    InterfaceClass parent_class;

    /*
     * Return the address of the memory device in guest physical memory.
     *
     * Called when (un)plugging a memory device or when iterating over
     * all memory devices mapped into guest physical address space.
     *
     * If "0" is returned, no address has been specified by the user and
     * no address has been assigned to this memory device yet.
     */
    uint64_t (*get_addr)(const MemoryDeviceState *md);

    /*
     * Set the address of the memory device in guest physical memory.
     *
     * Called when plugging the memory device to configure the determined
     * address in guest physical memory.
     */
    void (*set_addr)(MemoryDeviceState *md, uint64_t addr, Error **errp);

    /*
     * Return the amount of memory provided by the memory device that is
     * currently usable ("plugged") by the VM.
     *
     * Called when calculating the total amount of RAM available to the
     * VM (e.g., to report memory stats to the user).
     *
     * This is helpful for devices that dynamically manage the amount of
     * memory accessible by the guest via the reserved memory region. For
     * most devices, this corresponds to the size of the memory region.
     */
    uint64_t (*get_plugged_size)(const MemoryDeviceState *md, Error **errp);

    /*
     * Return the memory region of the memory device.
     *
     * Called when (un)plugging the memory device, to (un)map the
     * memory region in guest physical memory, but also to detect the
     * required alignment during address assignment or when the size of the
     * memory region is required.
     */
    MemoryRegion *(*get_memory_region)(MemoryDeviceState *md, Error **errp);

    /*
     * Optional: Instruct the memory device to decide how many memory slots
     * it requires, not exceeding the given limit.
     *
     * Called exactly once when pre-plugging the memory device, before
     * querying the number of memslots using @get_memslots the first time.
     */
    void (*decide_memslots)(MemoryDeviceState *md, unsigned int limit);

    /*
     * Optional for memory devices that require only a single memslot,
     * required for all other memory devices: Return the number of memslots
     * (distinct RAM memory regions in the device memory region) that are
     * required by the device.
     *
     * If this function is not implemented, the assumption is "1".
     *
     * Called when (un)plugging the memory device, to check if the
     * requirements can be satisfied, and to do proper accounting.
     */
    unsigned int (*get_memslots)(MemoryDeviceState *md);

    /*
     * Optional: Return the desired minimum alignment of the device in guest
     * physical address space. The final alignment is computed based on this
     * alignment and the alignment requirements of the memory region.
     *
     * Called when plugging the memory device to detect the required alignment
     * during address assignment.
     */
    uint64_t (*get_min_alignment)(const MemoryDeviceState *md);

    /*
     * Translate the memory device into #MemoryDeviceInfo.
     */
    void (*fill_device_info)(const MemoryDeviceState *md,
                             MemoryDeviceInfo *info);
};
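
/*
 * Example: a minimal, hypothetical sketch of how a device might implement
 * this interface; "my-mem-device", MyMemDevice, MY_MEM_DEVICE() and the
 * my_md_*() callbacks are made-up names, not part of this header. Real
 * implementations (e.g., hw/mem/pc-dimm.c) look similar: they fill in the
 * MemoryDeviceClass callbacks in their class_init and list
 * TYPE_MEMORY_DEVICE in their TypeInfo's .interfaces (a complete device
 * also implements set_addr, fill_device_info, etc.):
 *
 *     static uint64_t my_md_get_addr(const MemoryDeviceState *md)
 *     {
 *         return MY_MEM_DEVICE(md)->addr;
 *     }
 *
 *     static MemoryRegion *my_md_get_memory_region(MemoryDeviceState *md,
 *                                                  Error **errp)
 *     {
 *         return &MY_MEM_DEVICE(md)->mr;
 *     }
 *
 *     static void my_mem_device_class_init(ObjectClass *oc, void *data)
 *     {
 *         MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(oc);
 *
 *         mdc->get_addr = my_md_get_addr;
 *         mdc->get_memory_region = my_md_get_memory_region;
 *     }
 *
 *     static const TypeInfo my_mem_device_info = {
 *         .name = "my-mem-device",
 *         .parent = TYPE_DEVICE,
 *         .instance_size = sizeof(MyMemDevice),
 *         .class_init = my_mem_device_class_init,
 *         .interfaces = (InterfaceInfo[]) {
 *             { TYPE_MEMORY_DEVICE },
 *             { }
 *         },
 *     };
 */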

/*
 * Traditionally, KVM/vhost in many setups supported 509 memslots, whereby
 * 253 memslots were "reserved" for boot memory and other devices (such
 * as PCI BARs, which can get mapped dynamically) and 256 memslots were
 * dedicated to DIMMs. These magic numbers worked reliably in the past.
 *
 * Further, using many memslots can negatively affect performance, so setting
 * the soft limit of memslots used by memory devices to the traditional
 * DIMM limit of 256 sounds reasonable.
 *
 * If we have fewer than 509 memslots, we will instruct memory devices that
 * support automatically deciding how many memslots to use to only use a
 * single one.
 *
 * Hotplugging vhost devices with at least 509 memslots is not expected to
 * cause problems, not even when devices automatically decide how many
 * memslots to use.
 */
#define MEMORY_DEVICES_SOFT_MEMSLOT_LIMIT 256
#define MEMORY_DEVICES_SAFE_MAX_MEMSLOTS 509

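/*
 * For illustration: with the traditional setup described above, the 509
 * safe memslots split into the 253 "reserved" ones and the 256 dedicated
 * to memory devices (509 - 253 = 256). A hypothetical sketch of the
 * single-memslot policy (the actual logic lives in hw/mem/memory-device.c;
 * "total_memslots" is an assumed variable for the memslots offered by the
 * current KVM/vhost setup):
 *
 *     unsigned int limit = MEMORY_DEVICES_SOFT_MEMSLOT_LIMIT;
 *
 *     if (total_memslots < MEMORY_DEVICES_SAFE_MAX_MEMSLOTS) {
 *         limit = 1;
 *     }
 *     if (mdc->decide_memslots) {
 *         mdc->decide_memslots(md, limit);
 *     }
 */
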
MemoryDeviceInfoList *qmp_memory_device_list(void);
uint64_t get_plugged_memory_size(void);
unsigned int memory_devices_get_reserved_memslots(void);
bool memory_devices_memslot_auto_decision_active(void);
void memory_device_pre_plug(MemoryDeviceState *md, MachineState *ms,
                            const uint64_t *legacy_align, Error **errp);
void memory_device_plug(MemoryDeviceState *md, MachineState *ms);
void memory_device_unplug(MemoryDeviceState *md, MachineState *ms);
uint64_t memory_device_get_region_size(const MemoryDeviceState *md,
                                       Error **errp);
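
/*
 * Typical usage from (pre-)plug handlers, sketched under the assumption
 * of a DeviceState *dev implementing TYPE_MEMORY_DEVICE and a MachineState
 * *ms (compare hw/mem/pc-dimm.c, which calls these for pc-dimm devices);
 * passing NULL for legacy_align lets the address assignment use the
 * device's own alignment requirements:
 *
 *     memory_device_pre_plug(MEMORY_DEVICE(dev), ms, NULL, errp);
 *
 *     memory_device_plug(MEMORY_DEVICE(dev), ms);
 *
 *     memory_device_unplug(MEMORY_DEVICE(dev), ms);
 */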

#endif