/*
 * Memory Device Interface
 *
 * Copyright ProfitBricks GmbH 2012
 * Copyright (C) 2014 Red Hat Inc
 * Copyright (c) 2018 Red Hat Inc
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/mem/memory-device.h"
#include "hw/qdev.h"
#include "qapi/error.h"
#include "hw/boards.h"
#include "qemu/range.h"
#include "hw/virtio/vhost.h"
#include "sysemu/kvm.h"

static gint memory_device_addr_sort(gconstpointer a, gconstpointer b)
{
    const MemoryDeviceState *md_a = MEMORY_DEVICE(a);
    const MemoryDeviceState *md_b = MEMORY_DEVICE(b);
    const MemoryDeviceClass *mdc_a = MEMORY_DEVICE_GET_CLASS(a);
    const MemoryDeviceClass *mdc_b = MEMORY_DEVICE_GET_CLASS(b);
    const uint64_t addr_a = mdc_a->get_addr(md_a);
    const uint64_t addr_b = mdc_b->get_addr(md_b);

    if (addr_a > addr_b) {
        return 1;
    } else if (addr_a < addr_b) {
        return -1;
    }
    return 0;
}

static int memory_device_build_list(Object *obj, void *opaque)
{
    GSList **list = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
        DeviceState *dev = DEVICE(obj);

        if (dev->realized) { /* only realized memory devices matter */
            *list = g_slist_insert_sorted(*list, dev, memory_device_addr_sort);
        }
    }

    object_child_foreach(obj, memory_device_build_list, opaque);
    return 0;
}

static int memory_device_used_region_size(Object *obj, void *opaque)
{
    uint64_t *size = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
        const DeviceState *dev = DEVICE(obj);
        const MemoryDeviceState *md = MEMORY_DEVICE(obj);
        const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(obj);

        if (dev->realized) {
            *size += mdc->get_region_size(md);
        }
    }

    object_child_foreach(obj, memory_device_used_region_size, opaque);
    return 0;
}

static void memory_device_check_addable(MachineState *ms, uint64_t size,
                                        Error **errp)
{
    uint64_t used_region_size = 0;

    /* we will need a new memory slot for kvm and vhost */
    if (kvm_enabled() && !kvm_has_free_slot(ms)) {
        error_setg(errp, "hypervisor has no free memory slots left");
        return;
    }
    if (!vhost_has_free_slot()) {
        error_setg(errp, "a used vhost backend has no free memory slots left");
        return;
    }

    /* will we exceed the total amount of memory specified */
    memory_device_used_region_size(OBJECT(ms), &used_region_size);
    if (used_region_size + size > ms->maxram_size - ms->ram_size) {
        error_setg(errp, "not enough space, currently 0x%" PRIx64
                   " in use of total hot pluggable 0x" RAM_ADDR_FMT,
                   used_region_size, ms->maxram_size - ms->ram_size);
        return;
    }
}

uint64_t memory_device_get_free_addr(MachineState *ms, const uint64_t *hint,
                                     uint64_t align, uint64_t size,
                                     Error **errp)
{
    uint64_t address_space_start, address_space_end;
    GSList *list = NULL, *item;
    uint64_t new_addr = 0;

    if (!ms->device_memory) {
        error_setg(errp, "memory devices (e.g. for memory hotplug) are not "
                   "supported by the machine");
        return 0;
    }

    if (!memory_region_size(&ms->device_memory->mr)) {
        error_setg(errp, "memory devices (e.g. for memory hotplug) are not "
                   "enabled, please specify the maxmem option");
        return 0;
    }
    address_space_start = ms->device_memory->base;
    address_space_end = address_space_start +
                        memory_region_size(&ms->device_memory->mr);
    g_assert(address_space_end >= address_space_start);

    /* address_space_start indicates the maximum alignment we expect */
    if (QEMU_ALIGN_UP(address_space_start, align) != address_space_start) {
        error_setg(errp, "the alignment (0x%" PRIx64 ") is not supported",
                   align);
        return 0;
    }

    memory_device_check_addable(ms, size, errp);
    if (*errp) {
        return 0;
    }

    if (hint && QEMU_ALIGN_UP(*hint, align) != *hint) {
        error_setg(errp, "address must be aligned to 0x%" PRIx64 " bytes",
                   align);
        return 0;
    }

    if (QEMU_ALIGN_UP(size, align) != size) {
        error_setg(errp, "backend memory size must be multiple of 0x%"
                   PRIx64, align);
        return 0;
    }

    if (hint) {
        new_addr = *hint;
        if (new_addr < address_space_start) {
            error_setg(errp, "can't add memory [0x%" PRIx64 ":0x%" PRIx64
                       "] at 0x%" PRIx64, new_addr, size, address_space_start);
            return 0;
        } else if ((new_addr + size) > address_space_end) {
            error_setg(errp, "can't add memory [0x%" PRIx64 ":0x%" PRIx64
                       "] beyond 0x%" PRIx64, new_addr, size,
                       address_space_end);
            return 0;
        }
    } else {
        new_addr = address_space_start;
    }

    /* find address range that will fit new memory device */
    object_child_foreach(OBJECT(ms), memory_device_build_list, &list);
    for (item = list; item; item = g_slist_next(item)) {
        const MemoryDeviceState *md = item->data;
        const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(OBJECT(md));
        uint64_t md_size, md_addr;

        md_addr = mdc->get_addr(md);
        md_size = mdc->get_region_size(md);

        if (ranges_overlap(md_addr, md_size, new_addr, size)) {
            if (hint) {
                const DeviceState *d = DEVICE(md);
                error_setg(errp, "address range conflicts with '%s'", d->id);
                goto out;
            }
            new_addr = QEMU_ALIGN_UP(md_addr + md_size, align);
        }
    }

    if (new_addr + size > address_space_end) {
        error_setg(errp, "could not find position in guest address space for "
                   "memory device - memory fragmented due to alignments");
        goto out;
    }
out:
    g_slist_free(list);
    return new_addr;
}

MemoryDeviceInfoList *qmp_memory_device_list(void)
{
    GSList *devices = NULL, *item;
    MemoryDeviceInfoList *list = NULL, *prev = NULL;

    object_child_foreach(qdev_get_machine(), memory_device_build_list,
                         &devices);

    for (item = devices; item; item = g_slist_next(item)) {
        const MemoryDeviceState *md = MEMORY_DEVICE(item->data);
        const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(item->data);
        MemoryDeviceInfoList *elem = g_new0(MemoryDeviceInfoList, 1);
        MemoryDeviceInfo *info = g_new0(MemoryDeviceInfo, 1);

        mdc->fill_device_info(md, info);

        elem->value = info;
        elem->next = NULL;
        if (prev) {
            prev->next = elem;
        } else {
            list = elem;
        }
        prev = elem;
    }

    g_slist_free(devices);

    return list;
}

static int memory_device_plugged_size(Object *obj, void *opaque)
{
    uint64_t *size = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
        const DeviceState *dev = DEVICE(obj);
        const MemoryDeviceState *md = MEMORY_DEVICE(obj);
        const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(obj);

        if (dev->realized) {
            *size += mdc->get_plugged_size(md);
        }
    }

    object_child_foreach(obj, memory_device_plugged_size, opaque);
    return 0;
}

uint64_t get_plugged_memory_size(void)
{
    uint64_t size = 0;

    memory_device_plugged_size(qdev_get_machine(), &size);

    return size;
}

void memory_device_plug_region(MachineState *ms, MemoryRegion *mr,
                               uint64_t addr)
{
    /* we expect a previous call to memory_device_get_free_addr() */
    g_assert(ms->device_memory);

    memory_region_add_subregion(&ms->device_memory->mr,
                                addr - ms->device_memory->base, mr);
}

void memory_device_unplug_region(MachineState *ms, MemoryRegion *mr)
{
    /* we expect a previous call to memory_device_get_free_addr() */
    g_assert(ms->device_memory);

    memory_region_del_subregion(&ms->device_memory->mr, mr);
}

static const TypeInfo memory_device_info = {
    .name          = TYPE_MEMORY_DEVICE,
    .parent        = TYPE_INTERFACE,
    .class_size    = sizeof(MemoryDeviceClass),
};

static void memory_device_register_types(void)
{
    type_register_static(&memory_device_info);
}

type_init(memory_device_register_types)