/*
 * Memory Device Interface
 *
 * Copyright (c) 2018 Red Hat, Inc.
 *
 * Authors:
 *  David Hildenbrand <david@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
12 
#ifndef MEMORY_DEVICE_H
#define MEMORY_DEVICE_H

#include "hw/qdev-core.h"
#include "qapi/qapi-types-machine.h"
#include "qom/object.h"

/* QOM interface name for memory devices. */
#define TYPE_MEMORY_DEVICE "memory-device"

typedef struct MemoryDeviceClass MemoryDeviceClass;
/* Generates MEMORY_DEVICE_CLASS()/MEMORY_DEVICE_GET_CLASS() cast helpers. */
DECLARE_CLASS_CHECKERS(MemoryDeviceClass, MEMORY_DEVICE,
                       TYPE_MEMORY_DEVICE)
/* Instance cast helper: checks that @obj implements TYPE_MEMORY_DEVICE. */
#define MEMORY_DEVICE(obj) \
     INTERFACE_CHECK(MemoryDeviceState, (obj), TYPE_MEMORY_DEVICE)

/* Opaque instance type; devices implement the interface, no shared state. */
typedef struct MemoryDeviceState MemoryDeviceState;
29 
/**
 * MemoryDeviceClass:
 *
 * All memory devices need to implement TYPE_MEMORY_DEVICE as an interface.
 *
 * A memory device is a device that owns a memory region which is
 * mapped into guest physical address space at a certain address. The
 * address in guest physical memory can either be specified explicitly
 * or get assigned automatically.
 *
 * Some memory devices might not own a memory region in certain device
 * configurations. Such devices can logically get (un)plugged, however,
 * empty memory devices are mostly ignored by the memory device code.
 *
 * Conceptually, memory devices only span one memory region. If multiple
 * successive memory regions are used, a covering memory region has to
 * be provided. Scattered memory regions are not supported for single
 * devices.
 *
 * The device memory region returned via @get_memory_region may either be a
 * single RAM memory region or a memory region container with subregions
 * that are RAM memory regions or aliases to RAM memory regions. Other
 * memory regions or subregions are not supported.
 *
 * If the device memory region returned via @get_memory_region is a
 * memory region container, it's supported to dynamically (un)map subregions
 * as long as the number of memslots returned by @get_memslots() won't
 * be exceeded and as long as all memory regions are of the same kind (e.g.,
 * all RAM or all ROM).
 */
struct MemoryDeviceClass {
    /* private */
    InterfaceClass parent_class;

    /*
     * Return the address of the memory device in guest physical memory.
     *
     * Called when (un)plugging a memory device or when iterating over
     * all memory devices mapped into guest physical address space.
     *
     * If "0" is returned, no address has been specified by the user and
     * no address has been assigned to this memory device yet.
     */
    uint64_t (*get_addr)(const MemoryDeviceState *md);

    /*
     * Set the address of the memory device in guest physical memory.
     *
     * Called when plugging the memory device to configure the determined
     * address in guest physical memory.
     */
    void (*set_addr)(MemoryDeviceState *md, uint64_t addr, Error **errp);

    /*
     * Return the amount of memory provided by the memory device currently
     * usable ("plugged") by the VM.
     *
     * Called when calculating the total amount of ram available to the
     * VM (e.g. to report memory stats to the user).
     *
     * This is helpful for devices that dynamically manage the amount of
     * memory accessible by the guest via the reserved memory region. For
     * most devices, this corresponds to the size of the memory region.
     */
    uint64_t (*get_plugged_size)(const MemoryDeviceState *md, Error **errp);

    /*
     * Return the memory region of the memory device. If the device is
     * completely empty, returns NULL without an error.
     *
     * Called when (un)plugging the memory device, to (un)map the
     * memory region in guest physical memory, but also to detect the
     * required alignment during address assignment or when the size of the
     * memory region is required.
     */
    MemoryRegion *(*get_memory_region)(MemoryDeviceState *md, Error **errp);

    /*
     * Optional: Instruct the memory device to decide how many memory slots
     * it requires, not exceeding the given limit.
     *
     * Called exactly once when pre-plugging the memory device, before
     * querying the number of memslots using @get_memslots the first time.
     */
    void (*decide_memslots)(MemoryDeviceState *md, unsigned int limit);

    /*
     * Optional for memory devices that require only a single memslot,
     * required for all other memory devices: Return the number of memslots
     * (distinct RAM memory regions in the device memory region) that are
     * required by the device.
     *
     * If this function is not implemented, the assumption is "1".
     *
     * Called when (un)plugging the memory device, to check if the requirements
     * can be satisfied, and to do proper accounting.
     */
    unsigned int (*get_memslots)(MemoryDeviceState *md);

    /*
     * Optional: Return the desired minimum alignment of the device in guest
     * physical address space. The final alignment is computed based on this
     * alignment and the alignment requirements of the memory region.
     *
     * Called when plugging the memory device to detect the required alignment
     * during address assignment.
     */
    uint64_t (*get_min_alignment)(const MemoryDeviceState *md);

    /*
     * Translate the memory device into #MemoryDeviceInfo.
     */
    void (*fill_device_info)(const MemoryDeviceState *md,
                             MemoryDeviceInfo *info);
};
145 
/*
 * Traditionally, KVM/vhost in many setups supported 509 memslots, whereby
 * 253 memslots were "reserved" for boot memory and other devices (such
 * as PCI BARs, which can get mapped dynamically) and 256 memslots were
 * dedicated for DIMMs. These magic numbers worked reliably in the past.
 *
 * Further, using many memslots can negatively affect performance, so setting
 * the soft-limit of memslots used by memory devices to the traditional
 * DIMM limit of 256 sounds reasonable.
 *
 * If we have less than 509 memslots, we will instruct memory devices that
 * support automatically deciding how many memslots to use to only use a single
 * one.
 *
 * Hotplugging vhost devices with at least 509 memslots is not expected to
 * cause problems, not even when devices automatically decided how many memslots
 * to use.
 */
/* Soft limit on memslots consumed by memory devices (traditional DIMM count). */
#define MEMORY_DEVICES_SOFT_MEMSLOT_LIMIT 256
/* Total memslot count below which auto-deciding devices are limited to one. */
#define MEMORY_DEVICES_SAFE_MAX_MEMSLOTS 509
166 
/* Build a list of #MemoryDeviceInfo describing all memory devices (QMP). */
MemoryDeviceInfoList *qmp_memory_device_list(void);
/*
 * Total plugged memory size; presumably aggregates @get_plugged_size over
 * all memory devices — confirm against the implementation.
 */
uint64_t get_plugged_memory_size(void);
/* Number of memslots currently reserved/consumed by memory devices. */
unsigned int memory_devices_get_reserved_memslots(void);
/* True if any plugged device used @decide_memslots (auto memslot decision). */
bool memory_devices_memslot_auto_decision_active(void);
/*
 * Pre-plug/plug/unplug hooks, called by the machine hotplug handler.
 * pre_plug validates and assigns the address (reports via @errp); plug and
 * unplug (un)map the device memory region — see memory-device.c for details.
 */
void memory_device_pre_plug(MemoryDeviceState *md, MachineState *ms,
                            Error **errp);
void memory_device_plug(MemoryDeviceState *md, MachineState *ms);
void memory_device_unplug(MemoryDeviceState *md, MachineState *ms);
/* Size of the device memory region (0 and @errp set on error). */
uint64_t memory_device_get_region_size(const MemoryDeviceState *md,
                                       Error **errp);

#endif