/*
 * Memory Device Interface
 *
 * Copyright ProfitBricks GmbH 2012
 * Copyright (C) 2014 Red Hat Inc
 * Copyright (c) 2018 Red Hat Inc
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/mem/memory-device.h"
#include "hw/qdev.h"
#include "qapi/error.h"
#include "hw/boards.h"
#include "qemu/range.h"
#include "hw/virtio/vhost.h"
#include "sysemu/kvm.h"
#include "trace.h"

static gint memory_device_addr_sort(gconstpointer a, gconstpointer b)
{
    const MemoryDeviceState *md_a = MEMORY_DEVICE(a);
    const MemoryDeviceState *md_b = MEMORY_DEVICE(b);
    const MemoryDeviceClass *mdc_a = MEMORY_DEVICE_GET_CLASS(a);
    const MemoryDeviceClass *mdc_b = MEMORY_DEVICE_GET_CLASS(b);
    const uint64_t addr_a = mdc_a->get_addr(md_a);
    const uint64_t addr_b = mdc_b->get_addr(md_b);

    if (addr_a > addr_b) {
        return 1;
    } else if (addr_a < addr_b) {
        return -1;
    }
    return 0;
}

static int memory_device_build_list(Object *obj, void *opaque)
{
    GSList **list = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
        DeviceState *dev = DEVICE(obj);

        if (dev->realized) { /* only realized memory devices matter */
            *list = g_slist_insert_sorted(*list, dev, memory_device_addr_sort);
        }
    }

    object_child_foreach(obj, memory_device_build_list, opaque);
    return 0;
}

static int memory_device_used_region_size(Object *obj, void *opaque)
{
    uint64_t *size = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
        const DeviceState *dev = DEVICE(obj);
        const MemoryDeviceState *md = MEMORY_DEVICE(obj);

        if (dev->realized) {
            *size += memory_device_get_region_size(md, &error_abort);
        }
    }

    object_child_foreach(obj, memory_device_used_region_size, opaque);
    return 0;
}

static void memory_device_check_addable(MachineState *ms, uint64_t size,
                                        Error **errp)
{
    uint64_t used_region_size = 0;

    /* we will need a new memory slot for kvm and vhost */
    if (kvm_enabled() && !kvm_has_free_slot(ms)) {
        error_setg(errp, "hypervisor has no free memory slots left");
        return;
    }
    if (!vhost_has_free_slot()) {
        error_setg(errp, "a used vhost backend has no free memory slots left");
        return;
    }

    /* will we exceed the total amount of memory specified */
    memory_device_used_region_size(OBJECT(ms), &used_region_size);
    if (used_region_size + size < used_region_size ||
        used_region_size + size > ms->maxram_size - ms->ram_size) {
        error_setg(errp, "not enough space, currently 0x%" PRIx64
                   " in use of total space for memory devices 0x" RAM_ADDR_FMT,
                   used_region_size, ms->maxram_size - ms->ram_size);
        return;
    }
}

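/*
 * Find a free address range of @size bytes within the device memory region.
 *
 * All realized memory devices are collected into a list sorted by address.
 * With a @hint, the hinted range only has to lie inside the device memory
 * region and be free of overlaps. Without a hint, we start at the bottom of
 * the device memory region and, whenever the candidate range overlaps a
 * plugged device, move the candidate just past that device (aligned up to
 * @align), until a sufficiently large gap is found or we run out of space.
 */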
for memory hotplug) are not " 115 "enabled, please specify the maxmem option"); 116 return 0; 117 } 118 address_space_start = ms->device_memory->base; 119 address_space_end = address_space_start + 120 memory_region_size(&ms->device_memory->mr); 121 g_assert(address_space_end >= address_space_start); 122 123 /* address_space_start indicates the maximum alignment we expect */ 124 if (!QEMU_IS_ALIGNED(address_space_start, align)) { 125 error_setg(errp, "the alignment (0x%" PRIx64 ") is not supported", 126 align); 127 return 0; 128 } 129 130 memory_device_check_addable(ms, size, errp); 131 if (*errp) { 132 return 0; 133 } 134 135 if (hint && !QEMU_IS_ALIGNED(*hint, align)) { 136 error_setg(errp, "address must be aligned to 0x%" PRIx64 " bytes", 137 align); 138 return 0; 139 } 140 141 if (!QEMU_IS_ALIGNED(size, align)) { 142 error_setg(errp, "backend memory size must be multiple of 0x%" 143 PRIx64, align); 144 return 0; 145 } 146 147 if (hint) { 148 new_addr = *hint; 149 if (new_addr < address_space_start) { 150 error_setg(errp, "can't add memory device [0x%" PRIx64 ":0x%" PRIx64 151 "] before 0x%" PRIx64, new_addr, size, 152 address_space_start); 153 return 0; 154 } else if ((new_addr + size) > address_space_end) { 155 error_setg(errp, "can't add memory device [0x%" PRIx64 ":0x%" PRIx64 156 "] beyond 0x%" PRIx64, new_addr, size, 157 address_space_end); 158 return 0; 159 } 160 } else { 161 new_addr = address_space_start; 162 } 163 164 /* find address range that will fit new memory device */ 165 object_child_foreach(OBJECT(ms), memory_device_build_list, &list); 166 for (item = list; item; item = g_slist_next(item)) { 167 const MemoryDeviceState *md = item->data; 168 const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(OBJECT(md)); 169 uint64_t md_size, md_addr; 170 171 md_addr = mdc->get_addr(md); 172 md_size = memory_device_get_region_size(md, &error_abort); 173 174 if (ranges_overlap(md_addr, md_size, new_addr, size)) { 175 if (hint) { 176 const DeviceState *d = DEVICE(md); 177 error_setg(errp, "address range conflicts with memory device" 178 " id='%s'", d->id ? 
d->id : "(unnamed)"); 179 goto out; 180 } 181 new_addr = QEMU_ALIGN_UP(md_addr + md_size, align); 182 } 183 } 184 185 if (new_addr + size > address_space_end) { 186 error_setg(errp, "could not find position in guest address space for " 187 "memory device - memory fragmented due to alignments"); 188 goto out; 189 } 190 out: 191 g_slist_free(list); 192 return new_addr; 193 } 194 195 MemoryDeviceInfoList *qmp_memory_device_list(void) 196 { 197 GSList *devices = NULL, *item; 198 MemoryDeviceInfoList *list = NULL, *prev = NULL; 199 200 object_child_foreach(qdev_get_machine(), memory_device_build_list, 201 &devices); 202 203 for (item = devices; item; item = g_slist_next(item)) { 204 const MemoryDeviceState *md = MEMORY_DEVICE(item->data); 205 const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(item->data); 206 MemoryDeviceInfoList *elem = g_new0(MemoryDeviceInfoList, 1); 207 MemoryDeviceInfo *info = g_new0(MemoryDeviceInfo, 1); 208 209 mdc->fill_device_info(md, info); 210 211 elem->value = info; 212 elem->next = NULL; 213 if (prev) { 214 prev->next = elem; 215 } else { 216 list = elem; 217 } 218 prev = elem; 219 } 220 221 g_slist_free(devices); 222 223 return list; 224 } 225 226 static int memory_device_plugged_size(Object *obj, void *opaque) 227 { 228 uint64_t *size = opaque; 229 230 if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) { 231 const DeviceState *dev = DEVICE(obj); 232 const MemoryDeviceState *md = MEMORY_DEVICE(obj); 233 const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(obj); 234 235 if (dev->realized) { 236 *size += mdc->get_plugged_size(md, &error_abort); 237 } 238 } 239 240 object_child_foreach(obj, memory_device_plugged_size, opaque); 241 return 0; 242 } 243 244 uint64_t get_plugged_memory_size(void) 245 { 246 uint64_t size = 0; 247 248 memory_device_plugged_size(qdev_get_machine(), &size); 249 250 return size; 251 } 252 253 void memory_device_pre_plug(MemoryDeviceState *md, MachineState *ms, 254 const uint64_t *legacy_align, Error **errp) 255 { 256 const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md); 257 Error *local_err = NULL; 258 uint64_t addr, align; 259 MemoryRegion *mr; 260 261 mr = mdc->get_memory_region(md, &local_err); 262 if (local_err) { 263 goto out; 264 } 265 266 align = legacy_align ? *legacy_align : memory_region_get_alignment(mr); 267 addr = mdc->get_addr(md); 268 addr = memory_device_get_free_addr(ms, !addr ? NULL : &addr, align, 269 memory_region_size(mr), &local_err); 270 if (local_err) { 271 goto out; 272 } 273 mdc->set_addr(md, addr, &local_err); 274 if (!local_err) { 275 trace_memory_device_pre_plug(DEVICE(md)->id ? DEVICE(md)->id : "", 276 addr); 277 } 278 out: 279 error_propagate(errp, local_err); 280 } 281 282 void memory_device_plug(MemoryDeviceState *md, MachineState *ms) 283 { 284 const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md); 285 const uint64_t addr = mdc->get_addr(md); 286 MemoryRegion *mr; 287 288 /* 289 * We expect that a previous call to memory_device_pre_plug() succeeded, so 290 * it can't fail at this point. 291 */ 292 mr = mdc->get_memory_region(md, &error_abort); 293 g_assert(ms->device_memory); 294 295 memory_region_add_subregion(&ms->device_memory->mr, 296 addr - ms->device_memory->base, mr); 297 trace_memory_device_plug(DEVICE(md)->id ? 
DEVICE(md)->id : "", addr); 298 } 299 300 void memory_device_unplug(MemoryDeviceState *md, MachineState *ms) 301 { 302 const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md); 303 MemoryRegion *mr; 304 305 /* 306 * We expect that a previous call to memory_device_pre_plug() succeeded, so 307 * it can't fail at this point. 308 */ 309 mr = mdc->get_memory_region(md, &error_abort); 310 g_assert(ms->device_memory); 311 312 memory_region_del_subregion(&ms->device_memory->mr, mr); 313 trace_memory_device_unplug(DEVICE(md)->id ? DEVICE(md)->id : "", 314 mdc->get_addr(md)); 315 } 316 317 uint64_t memory_device_get_region_size(const MemoryDeviceState *md, 318 Error **errp) 319 { 320 const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md); 321 MemoryRegion *mr; 322 323 /* dropping const here is fine as we don't touch the memory region */ 324 mr = mdc->get_memory_region((MemoryDeviceState *)md, errp); 325 if (!mr) { 326 return 0; 327 } 328 329 return memory_region_size(mr); 330 } 331 332 static const TypeInfo memory_device_info = { 333 .name = TYPE_MEMORY_DEVICE, 334 .parent = TYPE_INTERFACE, 335 .class_size = sizeof(MemoryDeviceClass), 336 }; 337 338 static void memory_device_register_types(void) 339 { 340 type_register_static(&memory_device_info); 341 } 342 343 type_init(memory_device_register_types) 344