/*
 * Virtio MMIO bindings
 *
 * Copyright (c) 2011 Linaro Limited
 *
 * Author:
 *  Peter Maydell <peter.maydell@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "standard-headers/linux/virtio_mmio.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "qemu/host-utils.h"
#include "qemu/module.h"
#include "sysemu/kvm.h"
#include "hw/virtio/virtio-bus.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "trace.h"

/* QOM macros */
/* virtio-mmio-bus */
#define TYPE_VIRTIO_MMIO_BUS "virtio-mmio-bus"
#define VIRTIO_MMIO_BUS(obj) \
        OBJECT_CHECK(VirtioBusState, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_GET_CLASS(obj) \
        OBJECT_GET_CLASS(VirtioBusClass, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_CLASS(klass) \
        OBJECT_CLASS_CHECK(VirtioBusClass, (klass), TYPE_VIRTIO_MMIO_BUS)

/* virtio-mmio */
#define TYPE_VIRTIO_MMIO "virtio-mmio"
#define VIRTIO_MMIO(obj) \
        OBJECT_CHECK(VirtIOMMIOProxy, (obj), TYPE_VIRTIO_MMIO)

/* Constants the guest reads back from the ID registers */
#define VIRT_MAGIC 0x74726976 /* 'virt' */
#define VIRT_VERSION 1        /* legacy (pre-virtio-1.0) MMIO transport */
#define VIRT_VENDOR 0x554D4551 /* 'QEMU' */

typedef struct {
    /* Generic */
    SysBusDevice parent_obj;
    MemoryRegion iomem;
    qemu_irq irq;
    /* Guest accessible state needing migration and reset */
    uint32_t host_features_sel;   /* DEVICE_FEATURES_SEL register value */
    uint32_t guest_features_sel;  /* DRIVER_FEATURES_SEL register value */
    uint32_t guest_page_shift;    /* log2 of the GUEST_PAGE_SIZE register */
    /* virtio-bus */
    VirtioBusState bus;
    /* If true, append the MMIO base address to the qdev device path */
    bool format_transport_address;
} VirtIOMMIOProxy;

/* ioeventfd acceleration is only worthwhile when KVM can signal eventfds */
static bool virtio_mmio_ioeventfd_enabled(DeviceState *d)
{
    return kvm_eventfds_enabled();
}

/*
 * Attach/detach @notifier to the QUEUE_NOTIFY register, so a 4-byte
 * guest write of queue index @n kicks the eventfd directly instead of
 * trapping into virtio_mmio_write() (datamatch on @n).
 */
static int virtio_mmio_ioeventfd_assign(DeviceState *d,
                                        EventNotifier *notifier,
                                        int n, bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    if (assign) {
        memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    } else {
        memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    }
    return 0;
}

static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}

/* Guest read of a transport register or of the device config space. */
static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_read(offset);

    if (!vdev) {
        /* If no backend is present, we treat most registers as
         * read-as-zero, except for the magic number, version and
         * vendor ID. This is not strictly sanctioned by the virtio
         * spec, but it allows us to provide transports with no backend
         * plugged in which don't confuse Linux's virtio code: the
         * probe won't complain about the bad magic number, but the
         * device ID of zero means no backend will claim it.
         */
        switch (offset) {
        case VIRTIO_MMIO_MAGIC_VALUE:
            return VIRT_MAGIC;
        case VIRTIO_MMIO_VERSION:
            return VIRT_VERSION;
        case VIRTIO_MMIO_VENDOR_ID:
            return VIRT_VENDOR;
        default:
            return 0;
        }
    }

    /* Offsets at or above VIRTIO_MMIO_CONFIG are the device config space,
     * which (unlike the registers below) allows 1/2/4-byte accesses.
     */
    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            return virtio_config_readb(vdev, offset);
        case 2:
            return virtio_config_readw(vdev, offset);
        case 4:
            return virtio_config_readl(vdev, offset);
        default:
            abort();
        }
    }
    /* Transport registers are strictly 32-bit wide */
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return 0;
    }
    switch (offset) {
    case VIRTIO_MMIO_MAGIC_VALUE:
        return VIRT_MAGIC;
    case VIRTIO_MMIO_VERSION:
        return VIRT_VERSION;
    case VIRTIO_MMIO_DEVICE_ID:
        return vdev->device_id;
    case VIRTIO_MMIO_VENDOR_ID:
        return VIRT_VENDOR;
    case VIRTIO_MMIO_DEVICE_FEATURES:
        /* Legacy devices only have feature word 0; any other selector
         * reads as zero.
         */
        if (proxy->host_features_sel) {
            return 0;
        }
        return vdev->host_features;
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
        /* An unconfigured queue reports a maximum size of 0 */
        if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
            return 0;
        }
        return VIRTQUEUE_MAX_SIZE;
    case VIRTIO_MMIO_QUEUE_PFN:
        /* Ring address expressed in guest page frames (legacy layout) */
        return virtio_queue_get_addr(vdev, vdev->queue_sel)
            >> proxy->guest_page_shift;
    case VIRTIO_MMIO_INTERRUPT_STATUS:
        return atomic_read(&vdev->isr);
    case VIRTIO_MMIO_STATUS:
        return vdev->status;
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
    case VIRTIO_MMIO_DRIVER_FEATURES:
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
    case VIRTIO_MMIO_QUEUE_SEL:
    case VIRTIO_MMIO_QUEUE_NUM:
    case VIRTIO_MMIO_QUEUE_ALIGN:
    case VIRTIO_MMIO_QUEUE_NOTIFY:
    case VIRTIO_MMIO_INTERRUPT_ACK:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: read of write-only register\n",
                      __func__);
        return 0;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: bad register offset\n", __func__);
        return 0;
    }
    /* not reached: every switch arm above returns */
    return 0;
}

/* Guest write of a transport register or of the device config space. */
static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_write_offset(offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            virtio_config_writeb(vdev, offset, value);
            break;
        case 2:
            virtio_config_writew(vdev, offset, value);
            break;
        case 4:
            virtio_config_writel(vdev, offset, value);
            break;
        default:
            abort();
        }
        return;
    }
    /* Transport registers are strictly 32-bit wide */
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return;
    }
    switch (offset) {
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
        proxy->host_features_sel = value;
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES:
        /* Only feature word 0 exists on a legacy device; writes with any
         * other selector are silently discarded.
         */
        if (!proxy->guest_features_sel) {
            virtio_set_features(vdev, value);
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
        proxy->guest_features_sel = value;
        break;
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
        proxy->guest_page_shift = ctz32(value);
        /* ctz32(0) is 32; treat a bogus page size as shift 0 */
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        trace_virtio_mmio_guest_page(value, proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUE_SEL:
        /* Out-of-range queue selectors are ignored */
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NUM:
        trace_virtio_mmio_queue_write(value, VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);
        /* Note: only call this function for legacy devices */
        virtio_queue_update_rings(vdev, vdev->queue_sel);
        break;
    case VIRTIO_MMIO_QUEUE_ALIGN:
        /* Note: this is only valid for legacy devices */
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUE_PFN:
        /* Writing 0 releases the ring; per the legacy spec this also
         * resets the device.
         */
        if (value == 0) {
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
    case VIRTIO_MMIO_QUEUE_NOTIFY:
        if (value < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case VIRTIO_MMIO_INTERRUPT_ACK:
        atomic_and(&vdev->isr, ~value);
        virtio_update_irq(vdev);
        break;
    case VIRTIO_MMIO_STATUS:
        /* Stop ioeventfd before the device sees DRIVER_OK drop, and only
         * start it after the new status has been set.
         */
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, value & 0xff);

        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }

        /* Writing status 0 means a device reset */
        if (vdev->status == 0) {
            virtio_reset(vdev);
        }
        break;
    case VIRTIO_MMIO_MAGIC_VALUE:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICE_ID:
    case VIRTIO_MMIO_VENDOR_ID:
    case VIRTIO_MMIO_DEVICE_FEATURES:
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
    case VIRTIO_MMIO_INTERRUPT_STATUS:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: write to readonly register\n",
                      __func__);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: bad register offset\n", __func__);
    }
}

static const MemoryRegionOps virtio_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/*
 * VirtioBusClass notify hook: raise the (single, level-triggered) IRQ
 * line whenever any ISR bit is set; @vector is unused on this transport.
 */
static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int level;

    if (!vdev) {
        return;
    }
    level = (atomic_read(&vdev->isr) != 0);
    trace_virtio_mmio_setting_irq(level);
    qemu_set_irq(proxy->irq, level);
}

/* Incoming migration: field order must match virtio_mmio_save_config() */
static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    proxy->host_features_sel = qemu_get_be32(f);
    proxy->guest_features_sel = qemu_get_be32(f);
    proxy->guest_page_shift = qemu_get_be32(f);
    return 0;
}

/* Outgoing migration of the guest-visible transport state */
static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    qemu_put_be32(f, proxy->host_features_sel);
    qemu_put_be32(f, proxy->guest_features_sel);
    qemu_put_be32(f, proxy->guest_page_shift);
}

/* Device reset: quiesce ioeventfd, reset the backend, clear register state */
static void virtio_mmio_reset(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    virtio_mmio_stop_ioeventfd(proxy);
    virtio_bus_reset(&proxy->bus);
    proxy->host_features_sel = 0;
    proxy->guest_features_sel = 0;
    proxy->guest_page_shift = 0;
}

/*
 * (Un)wire the guest notifier for virtqueue @n.  On assign, initialise
 * the event notifier and install its fd handler; on deassign, tear both
 * down again.  The device's mask hook, if present, is kept in sync.
 */
static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

/*
 * VirtioBusClass set_guest_notifiers hook: (un)wire notifiers for the
 * first @nvqs configured queues, rolling back on partial failure.
 */
static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    /* TODO: need to check if kvm-arm supports irqfd */
    bool with_irqfd = false;
    int r, n;

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    for (n = 0; n < nvqs; n++) {
        /* Queues are configured contiguously; stop at the first empty one */
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_mmio_set_guest_notifier(d, n, !assign, false);
    }
    return r;
}

/* virtio-mmio device */

static Property virtio_mmio_properties[] = {
    DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy,
                     format_transport_address, true),
    DEFINE_PROP_END_OF_LIST(),
};

/* Realize: create the inner virtio bus, the IRQ line and the 0x200-byte
 * MMIO register window.
 */
static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    SysBusDevice *sbd = SYS_BUS_DEVICE(d);

    qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
                        d, NULL);
    sysbus_init_irq(sbd, &proxy->irq);
    memory_region_init_io(&proxy->iomem, OBJECT(d), &virtio_mem_ops, proxy,
                          TYPE_VIRTIO_MMIO, 0x200);
    sysbus_init_mmio(sbd, &proxy->iomem);
}

static void virtio_mmio_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_mmio_realizefn;
    dc->reset = virtio_mmio_reset;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->props = virtio_mmio_properties;
}

static const TypeInfo virtio_mmio_info = {
    .name          = TYPE_VIRTIO_MMIO,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VirtIOMMIOProxy),
    .class_init    = virtio_mmio_class_init,
};

/* virtio-mmio-bus.
*/ 464 465 static char *virtio_mmio_bus_get_dev_path(DeviceState *dev) 466 { 467 BusState *virtio_mmio_bus; 468 VirtIOMMIOProxy *virtio_mmio_proxy; 469 char *proxy_path; 470 SysBusDevice *proxy_sbd; 471 char *path; 472 473 virtio_mmio_bus = qdev_get_parent_bus(dev); 474 virtio_mmio_proxy = VIRTIO_MMIO(virtio_mmio_bus->parent); 475 proxy_path = qdev_get_dev_path(DEVICE(virtio_mmio_proxy)); 476 477 /* 478 * If @format_transport_address is false, then we just perform the same as 479 * virtio_bus_get_dev_path(): we delegate the address formatting for the 480 * device on the virtio-mmio bus to the bus that the virtio-mmio proxy 481 * (i.e., the device that implements the virtio-mmio bus) resides on. In 482 * this case the base address of the virtio-mmio transport will be 483 * invisible. 484 */ 485 if (!virtio_mmio_proxy->format_transport_address) { 486 return proxy_path; 487 } 488 489 /* Otherwise, we append the base address of the transport. */ 490 proxy_sbd = SYS_BUS_DEVICE(virtio_mmio_proxy); 491 assert(proxy_sbd->num_mmio == 1); 492 assert(proxy_sbd->mmio[0].memory == &virtio_mmio_proxy->iomem); 493 494 if (proxy_path) { 495 path = g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx, proxy_path, 496 proxy_sbd->mmio[0].addr); 497 } else { 498 path = g_strdup_printf("virtio-mmio@" TARGET_FMT_plx, 499 proxy_sbd->mmio[0].addr); 500 } 501 g_free(proxy_path); 502 return path; 503 } 504 505 static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data) 506 { 507 BusClass *bus_class = BUS_CLASS(klass); 508 VirtioBusClass *k = VIRTIO_BUS_CLASS(klass); 509 510 k->notify = virtio_mmio_update_irq; 511 k->save_config = virtio_mmio_save_config; 512 k->load_config = virtio_mmio_load_config; 513 k->set_guest_notifiers = virtio_mmio_set_guest_notifiers; 514 k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled; 515 k->ioeventfd_assign = virtio_mmio_ioeventfd_assign; 516 k->has_variable_vring_alignment = true; 517 bus_class->max_dev = 1; 518 bus_class->get_dev_path = 
virtio_mmio_bus_get_dev_path; 519 } 520 521 static const TypeInfo virtio_mmio_bus_info = { 522 .name = TYPE_VIRTIO_MMIO_BUS, 523 .parent = TYPE_VIRTIO_BUS, 524 .instance_size = sizeof(VirtioBusState), 525 .class_init = virtio_mmio_bus_class_init, 526 }; 527 528 static void virtio_mmio_register_types(void) 529 { 530 type_register_static(&virtio_mmio_bus_info); 531 type_register_static(&virtio_mmio_info); 532 } 533 534 type_init(virtio_mmio_register_types) 535