1 /* 2 * Virtio PCI Bindings 3 * 4 * Copyright IBM, Corp. 2007 5 * Copyright (c) 2009 CodeSourcery 6 * 7 * Authors: 8 * Anthony Liguori <aliguori@us.ibm.com> 9 * Paul Brook <paul@codesourcery.com> 10 * 11 * This work is licensed under the terms of the GNU GPL, version 2. See 12 * the COPYING file in the top-level directory. 13 * 14 * Contributions after 2012-01-13 are licensed under the terms of the 15 * GNU GPL, version 2 or (at your option) any later version. 16 */ 17 18 #include "qemu/osdep.h" 19 20 #include "exec/memop.h" 21 #include "standard-headers/linux/virtio_pci.h" 22 #include "standard-headers/linux/virtio_ids.h" 23 #include "hw/boards.h" 24 #include "hw/virtio/virtio.h" 25 #include "migration/qemu-file-types.h" 26 #include "hw/pci/pci.h" 27 #include "hw/pci/pci_bus.h" 28 #include "hw/qdev-properties.h" 29 #include "qapi/error.h" 30 #include "qemu/error-report.h" 31 #include "qemu/log.h" 32 #include "qemu/module.h" 33 #include "hw/pci/msi.h" 34 #include "hw/pci/msix.h" 35 #include "hw/loader.h" 36 #include "sysemu/kvm.h" 37 #include "hw/virtio/virtio-pci.h" 38 #include "qemu/range.h" 39 #include "hw/virtio/virtio-bus.h" 40 #include "qapi/visitor.h" 41 #include "sysemu/replay.h" 42 #include "trace.h" 43 44 #define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev)) 45 46 #undef VIRTIO_PCI_CONFIG 47 48 /* The remaining space is defined by each driver as the per-driver 49 * configuration space */ 50 #define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev)) 51 52 static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size, 53 VirtIOPCIProxy *dev); 54 static void virtio_pci_reset(DeviceState *qdev); 55 56 /* virtio device */ 57 /* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */ 58 static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d) 59 { 60 return container_of(d, VirtIOPCIProxy, pci_dev.qdev); 61 } 62 63 /* DeviceState to VirtIOPCIProxy. Note: used on datapath, 64 * be careful and test performance if you change this. 65 */ 66 static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d) 67 { 68 return container_of(d, VirtIOPCIProxy, pci_dev.qdev); 69 } 70 71 static void virtio_pci_notify(DeviceState *d, uint16_t vector) 72 { 73 VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d); 74 75 if (msix_enabled(&proxy->pci_dev)) { 76 if (vector != VIRTIO_NO_VECTOR) { 77 msix_notify(&proxy->pci_dev, vector); 78 } 79 } else { 80 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 81 pci_set_irq(&proxy->pci_dev, qatomic_read(&vdev->isr) & 1); 82 } 83 } 84 85 static void virtio_pci_save_config(DeviceState *d, QEMUFile *f) 86 { 87 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 88 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 89 90 pci_device_save(&proxy->pci_dev, f); 91 msix_save(&proxy->pci_dev, f); 92 if (msix_present(&proxy->pci_dev)) 93 qemu_put_be16(f, vdev->config_vector); 94 } 95 96 static const VMStateDescription vmstate_virtio_pci_modern_queue_state = { 97 .name = "virtio_pci/modern_queue_state", 98 .version_id = 1, 99 .minimum_version_id = 1, 100 .fields = (VMStateField[]) { 101 VMSTATE_UINT16(num, VirtIOPCIQueue), 102 VMSTATE_UNUSED(1), /* enabled was stored as be16 */ 103 VMSTATE_BOOL(enabled, VirtIOPCIQueue), 104 VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2), 105 VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2), 106 VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2), 107 VMSTATE_END_OF_LIST() 108 } 109 }; 110 111 static bool virtio_pci_modern_state_needed(void *opaque) 112 { 113 VirtIOPCIProxy *proxy = opaque; 114 115 return virtio_pci_modern(proxy); 116 } 117 118 static const VMStateDescription vmstate_virtio_pci_modern_state_sub = { 119 .name = "virtio_pci/modern_state", 120 .version_id = 1, 121 .minimum_version_id = 1, 122 .needed = &virtio_pci_modern_state_needed, 123 .fields = (VMStateField[]) { 124 VMSTATE_UINT32(dfselect, VirtIOPCIProxy), 125 VMSTATE_UINT32(gfselect, VirtIOPCIProxy), 126 VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2), 127 VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0, 128 vmstate_virtio_pci_modern_queue_state, 129 VirtIOPCIQueue), 130 VMSTATE_END_OF_LIST() 131 } 132 }; 133 134 static const VMStateDescription vmstate_virtio_pci = { 135 .name = "virtio_pci", 136 .version_id = 1, 137 .minimum_version_id = 1, 138 .fields = (VMStateField[]) { 139 VMSTATE_END_OF_LIST() 140 }, 141 .subsections = (const VMStateDescription*[]) { 142 &vmstate_virtio_pci_modern_state_sub, 143 NULL 144 } 145 }; 146 147 static bool virtio_pci_has_extra_state(DeviceState *d) 148 { 149 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 150 151 return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA; 152 } 153 154 static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f) 155 { 156 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 157 158 vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL); 159 } 160 161 static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f) 162 { 163 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 164 165 return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1); 166 } 167 168 static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f) 169 { 170 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 171 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 172 173 if (msix_present(&proxy->pci_dev)) 174 qemu_put_be16(f, virtio_queue_vector(vdev, n)); 175 } 176 177 static int virtio_pci_load_config(DeviceState *d, QEMUFile *f) 178 { 179 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 180 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 181 uint16_t vector; 182 183 int ret; 184 ret = pci_device_load(&proxy->pci_dev, f); 185 if (ret) { 186 return ret; 187 } 188 msix_unuse_all_vectors(&proxy->pci_dev); 189 msix_load(&proxy->pci_dev, f); 190 if (msix_present(&proxy->pci_dev)) { 191 qemu_get_be16s(f, &vector); 192 193 if (vector != VIRTIO_NO_VECTOR && vector >= proxy->nvectors) { 194 return -EINVAL; 195 } 196 } else { 197 vector = VIRTIO_NO_VECTOR; 198 } 199 vdev->config_vector = vector; 200 if (vector != VIRTIO_NO_VECTOR) { 201 msix_vector_use(&proxy->pci_dev, vector); 202 } 203 return 0; 204 } 205 206 static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f) 207 { 208 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 209 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 210 211 uint16_t vector; 212 if (msix_present(&proxy->pci_dev)) { 213 qemu_get_be16s(f, &vector); 214 if (vector != VIRTIO_NO_VECTOR && vector >= proxy->nvectors) { 215 return -EINVAL; 216 } 217 } else { 218 vector = VIRTIO_NO_VECTOR; 219 } 220 virtio_queue_set_vector(vdev, n, vector); 221 if (vector != VIRTIO_NO_VECTOR) { 222 msix_vector_use(&proxy->pci_dev, vector); 223 } 224 225 return 0; 226 } 227 228 typedef struct VirtIOPCIIDInfo { 229 /* virtio id */ 230 uint16_t vdev_id; 231 /* pci device id for the transitional device */ 232 uint16_t trans_devid; 233 uint16_t class_id; 234 } VirtIOPCIIDInfo; 235 236 static const VirtIOPCIIDInfo virtio_pci_id_info[] = { 237 { 238 .vdev_id = VIRTIO_ID_CRYPTO, 239 .class_id = PCI_CLASS_OTHERS, 240 }, { 241 .vdev_id = VIRTIO_ID_FS, 242 .class_id = PCI_CLASS_STORAGE_OTHER, 243 }, { 244 .vdev_id = VIRTIO_ID_NET, 245 .trans_devid = PCI_DEVICE_ID_VIRTIO_NET, 246 .class_id = PCI_CLASS_NETWORK_ETHERNET, 247 }, { 248 .vdev_id = VIRTIO_ID_BLOCK, 249 .trans_devid = PCI_DEVICE_ID_VIRTIO_BLOCK, 250 .class_id = PCI_CLASS_STORAGE_SCSI, 251 }, { 252 .vdev_id = VIRTIO_ID_CONSOLE, 253 .trans_devid = PCI_DEVICE_ID_VIRTIO_CONSOLE, 254 .class_id = PCI_CLASS_COMMUNICATION_OTHER, 255 }, { 256 .vdev_id = VIRTIO_ID_SCSI, 257 .trans_devid = PCI_DEVICE_ID_VIRTIO_SCSI, 258 .class_id = PCI_CLASS_STORAGE_SCSI 259 }, { 260 .vdev_id = VIRTIO_ID_9P, 261 .trans_devid = PCI_DEVICE_ID_VIRTIO_9P, 262 .class_id = PCI_BASE_CLASS_NETWORK, 263 }, { 264 .vdev_id = VIRTIO_ID_BALLOON, 265 .trans_devid = PCI_DEVICE_ID_VIRTIO_BALLOON, 266 .class_id = PCI_CLASS_OTHERS, 267 }, { 268 .vdev_id = VIRTIO_ID_RNG, 269 .trans_devid = PCI_DEVICE_ID_VIRTIO_RNG, 270 .class_id = PCI_CLASS_OTHERS, 271 }, 272 }; 273 274 static const VirtIOPCIIDInfo *virtio_pci_get_id_info(uint16_t vdev_id) 275 { 276 const VirtIOPCIIDInfo *info = NULL; 277 int i; 278 279 for (i = 0; i < ARRAY_SIZE(virtio_pci_id_info); i++) { 280 if (virtio_pci_id_info[i].vdev_id == vdev_id) { 281 info = &virtio_pci_id_info[i]; 282 break; 283 } 284 } 285 286 if (!info) { 287 /* The device id is invalid or not added to the id_info yet. */ 288 error_report("Invalid virtio device(id %u)", vdev_id); 289 abort(); 290 } 291 292 return info; 293 } 294 295 /* 296 * Get the Transitional Device ID for the specific device, return 297 * zero if the device is non-transitional. 298 */ 299 uint16_t virtio_pci_get_trans_devid(uint16_t device_id) 300 { 301 return virtio_pci_get_id_info(device_id)->trans_devid; 302 } 303 304 /* 305 * Get the Class ID for the specific device. 306 */ 307 uint16_t virtio_pci_get_class_id(uint16_t device_id) 308 { 309 return virtio_pci_get_id_info(device_id)->class_id; 310 } 311 312 static bool virtio_pci_ioeventfd_enabled(DeviceState *d) 313 { 314 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 315 316 return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0; 317 } 318 319 #define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000 320 321 static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy) 322 { 323 return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ? 324 QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4; 325 } 326 327 static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier, 328 int n, bool assign) 329 { 330 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 331 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 332 VirtQueue *vq = virtio_get_queue(vdev, n); 333 bool legacy = virtio_pci_legacy(proxy); 334 bool modern = virtio_pci_modern(proxy); 335 bool fast_mmio = kvm_ioeventfd_any_length_enabled(); 336 bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; 337 MemoryRegion *modern_mr = &proxy->notify.mr; 338 MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr; 339 MemoryRegion *legacy_mr = &proxy->bar; 340 hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) * 341 virtio_get_queue_index(vq); 342 hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY; 343 344 if (assign) { 345 if (modern) { 346 if (fast_mmio) { 347 memory_region_add_eventfd(modern_mr, modern_addr, 0, 348 false, n, notifier); 349 } else { 350 memory_region_add_eventfd(modern_mr, modern_addr, 2, 351 false, n, notifier); 352 } 353 if (modern_pio) { 354 memory_region_add_eventfd(modern_notify_mr, 0, 2, 355 true, n, notifier); 356 } 357 } 358 if (legacy) { 359 memory_region_add_eventfd(legacy_mr, legacy_addr, 2, 360 true, n, notifier); 361 } 362 } else { 363 if (modern) { 364 if (fast_mmio) { 365 memory_region_del_eventfd(modern_mr, modern_addr, 0, 366 false, n, notifier); 367 } else { 368 memory_region_del_eventfd(modern_mr, modern_addr, 2, 369 false, n, notifier); 370 } 371 if (modern_pio) { 372 memory_region_del_eventfd(modern_notify_mr, 0, 2, 373 true, n, notifier); 374 } 375 } 376 if (legacy) { 377 memory_region_del_eventfd(legacy_mr, legacy_addr, 2, 378 true, n, notifier); 379 } 380 } 381 return 0; 382 } 383 384 static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy) 385 { 386 virtio_bus_start_ioeventfd(&proxy->bus); 387 } 388 389 static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy) 390 { 391 virtio_bus_stop_ioeventfd(&proxy->bus); 392 } 393 394 static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val) 395 { 396 VirtIOPCIProxy *proxy = opaque; 397 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 398 uint16_t vector; 399 hwaddr pa; 400 401 switch (addr) { 402 case VIRTIO_PCI_GUEST_FEATURES: 403 /* Guest does not negotiate properly? We have to assume nothing. */ 404 if (val & (1 << VIRTIO_F_BAD_FEATURE)) { 405 val = virtio_bus_get_vdev_bad_features(&proxy->bus); 406 } 407 virtio_set_features(vdev, val); 408 break; 409 case VIRTIO_PCI_QUEUE_PFN: 410 pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT; 411 if (pa == 0) { 412 virtio_pci_reset(DEVICE(proxy)); 413 } 414 else 415 virtio_queue_set_addr(vdev, vdev->queue_sel, pa); 416 break; 417 case VIRTIO_PCI_QUEUE_SEL: 418 if (val < VIRTIO_QUEUE_MAX) 419 vdev->queue_sel = val; 420 break; 421 case VIRTIO_PCI_QUEUE_NOTIFY: 422 if (val < VIRTIO_QUEUE_MAX) { 423 virtio_queue_notify(vdev, val); 424 } 425 break; 426 case VIRTIO_PCI_STATUS: 427 if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) { 428 virtio_pci_stop_ioeventfd(proxy); 429 } 430 431 virtio_set_status(vdev, val & 0xFF); 432 433 if (val & VIRTIO_CONFIG_S_DRIVER_OK) { 434 virtio_pci_start_ioeventfd(proxy); 435 } 436 437 if (vdev->status == 0) { 438 virtio_pci_reset(DEVICE(proxy)); 439 } 440 441 /* Linux before 2.6.34 drives the device without enabling 442 the PCI device bus master bit. Enable it automatically 443 for the guest. This is a PCI spec violation but so is 444 initiating DMA with bus master bit clear. */ 445 if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) { 446 pci_default_write_config(&proxy->pci_dev, PCI_COMMAND, 447 proxy->pci_dev.config[PCI_COMMAND] | 448 PCI_COMMAND_MASTER, 1); 449 } 450 break; 451 case VIRTIO_MSI_CONFIG_VECTOR: 452 if (vdev->config_vector != VIRTIO_NO_VECTOR) { 453 msix_vector_unuse(&proxy->pci_dev, vdev->config_vector); 454 } 455 /* Make it possible for guest to discover an error took place. */ 456 if (val < proxy->nvectors) { 457 msix_vector_use(&proxy->pci_dev, val); 458 } else { 459 val = VIRTIO_NO_VECTOR; 460 } 461 vdev->config_vector = val; 462 break; 463 case VIRTIO_MSI_QUEUE_VECTOR: 464 vector = virtio_queue_vector(vdev, vdev->queue_sel); 465 if (vector != VIRTIO_NO_VECTOR) { 466 msix_vector_unuse(&proxy->pci_dev, vector); 467 } 468 /* Make it possible for guest to discover an error took place. */ 469 if (val < proxy->nvectors) { 470 msix_vector_use(&proxy->pci_dev, val); 471 } else { 472 val = VIRTIO_NO_VECTOR; 473 } 474 virtio_queue_set_vector(vdev, vdev->queue_sel, val); 475 break; 476 default: 477 qemu_log_mask(LOG_GUEST_ERROR, 478 "%s: unexpected address 0x%x value 0x%x\n", 479 __func__, addr, val); 480 break; 481 } 482 } 483 484 static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr) 485 { 486 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 487 uint32_t ret = 0xFFFFFFFF; 488 489 switch (addr) { 490 case VIRTIO_PCI_HOST_FEATURES: 491 ret = vdev->host_features; 492 break; 493 case VIRTIO_PCI_GUEST_FEATURES: 494 ret = vdev->guest_features; 495 break; 496 case VIRTIO_PCI_QUEUE_PFN: 497 ret = virtio_queue_get_addr(vdev, vdev->queue_sel) 498 >> VIRTIO_PCI_QUEUE_ADDR_SHIFT; 499 break; 500 case VIRTIO_PCI_QUEUE_NUM: 501 ret = virtio_queue_get_num(vdev, vdev->queue_sel); 502 break; 503 case VIRTIO_PCI_QUEUE_SEL: 504 ret = vdev->queue_sel; 505 break; 506 case VIRTIO_PCI_STATUS: 507 ret = vdev->status; 508 break; 509 case VIRTIO_PCI_ISR: 510 /* reading from the ISR also clears it. */ 511 ret = qatomic_xchg(&vdev->isr, 0); 512 pci_irq_deassert(&proxy->pci_dev); 513 break; 514 case VIRTIO_MSI_CONFIG_VECTOR: 515 ret = vdev->config_vector; 516 break; 517 case VIRTIO_MSI_QUEUE_VECTOR: 518 ret = virtio_queue_vector(vdev, vdev->queue_sel); 519 break; 520 default: 521 break; 522 } 523 524 return ret; 525 } 526 527 static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr, 528 unsigned size) 529 { 530 VirtIOPCIProxy *proxy = opaque; 531 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 532 uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev); 533 uint64_t val = 0; 534 535 if (vdev == NULL) { 536 return UINT64_MAX; 537 } 538 539 if (addr < config) { 540 return virtio_ioport_read(proxy, addr); 541 } 542 addr -= config; 543 544 switch (size) { 545 case 1: 546 val = virtio_config_readb(vdev, addr); 547 break; 548 case 2: 549 val = virtio_config_readw(vdev, addr); 550 if (virtio_is_big_endian(vdev)) { 551 val = bswap16(val); 552 } 553 break; 554 case 4: 555 val = virtio_config_readl(vdev, addr); 556 if (virtio_is_big_endian(vdev)) { 557 val = bswap32(val); 558 } 559 break; 560 } 561 return val; 562 } 563 564 static void virtio_pci_config_write(void *opaque, hwaddr addr, 565 uint64_t val, unsigned size) 566 { 567 VirtIOPCIProxy *proxy = opaque; 568 uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev); 569 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 570 571 if (vdev == NULL) { 572 return; 573 } 574 575 if (addr < config) { 576 virtio_ioport_write(proxy, addr, val); 577 return; 578 } 579 addr -= config; 580 /* 581 * Virtio-PCI is odd. Ioports are LE but config space is target native 582 * endian. 583 */ 584 switch (size) { 585 case 1: 586 virtio_config_writeb(vdev, addr, val); 587 break; 588 case 2: 589 if (virtio_is_big_endian(vdev)) { 590 val = bswap16(val); 591 } 592 virtio_config_writew(vdev, addr, val); 593 break; 594 case 4: 595 if (virtio_is_big_endian(vdev)) { 596 val = bswap32(val); 597 } 598 virtio_config_writel(vdev, addr, val); 599 break; 600 } 601 } 602 603 static const MemoryRegionOps virtio_pci_config_ops = { 604 .read = virtio_pci_config_read, 605 .write = virtio_pci_config_write, 606 .impl = { 607 .min_access_size = 1, 608 .max_access_size = 4, 609 }, 610 .endianness = DEVICE_LITTLE_ENDIAN, 611 }; 612 613 static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy, 614 hwaddr *off, int len) 615 { 616 int i; 617 VirtIOPCIRegion *reg; 618 619 for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) { 620 reg = &proxy->regs[i]; 621 if (*off >= reg->offset && 622 *off + len <= reg->offset + reg->size) { 623 *off -= reg->offset; 624 return ®->mr; 625 } 626 } 627 628 return NULL; 629 } 630 631 /* Below are generic functions to do memcpy from/to an address space, 632 * without byteswaps, with input validation. 633 * 634 * As regular address_space_* APIs all do some kind of byteswap at least for 635 * some host/target combinations, we are forced to explicitly convert to a 636 * known-endianness integer value. 637 * It doesn't really matter which endian format to go through, so the code 638 * below selects the endian that causes the least amount of work on the given 639 * host. 640 * 641 * Note: host pointer must be aligned. 642 */ 643 static 644 void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr, 645 const uint8_t *buf, int len) 646 { 647 uint64_t val; 648 MemoryRegion *mr; 649 650 /* address_space_* APIs assume an aligned address. 651 * As address is under guest control, handle illegal values. 652 */ 653 addr &= ~(len - 1); 654 655 mr = virtio_address_space_lookup(proxy, &addr, len); 656 if (!mr) { 657 return; 658 } 659 660 /* Make sure caller aligned buf properly */ 661 assert(!(((uintptr_t)buf) & (len - 1))); 662 663 switch (len) { 664 case 1: 665 val = pci_get_byte(buf); 666 break; 667 case 2: 668 val = pci_get_word(buf); 669 break; 670 case 4: 671 val = pci_get_long(buf); 672 break; 673 default: 674 /* As length is under guest control, handle illegal values. */ 675 return; 676 } 677 memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE, 678 MEMTXATTRS_UNSPECIFIED); 679 } 680 681 static void 682 virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr, 683 uint8_t *buf, int len) 684 { 685 uint64_t val; 686 MemoryRegion *mr; 687 688 /* address_space_* APIs assume an aligned address. 689 * As address is under guest control, handle illegal values. 690 */ 691 addr &= ~(len - 1); 692 693 mr = virtio_address_space_lookup(proxy, &addr, len); 694 if (!mr) { 695 return; 696 } 697 698 /* Make sure caller aligned buf properly */ 699 assert(!(((uintptr_t)buf) & (len - 1))); 700 701 memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE, 702 MEMTXATTRS_UNSPECIFIED); 703 switch (len) { 704 case 1: 705 pci_set_byte(buf, val); 706 break; 707 case 2: 708 pci_set_word(buf, val); 709 break; 710 case 4: 711 pci_set_long(buf, val); 712 break; 713 default: 714 /* As length is under guest control, handle illegal values. */ 715 break; 716 } 717 } 718 719 static void virtio_write_config(PCIDevice *pci_dev, uint32_t address, 720 uint32_t val, int len) 721 { 722 VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); 723 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 724 struct virtio_pci_cfg_cap *cfg; 725 726 pci_default_write_config(pci_dev, address, val, len); 727 728 if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) { 729 pcie_cap_flr_write_config(pci_dev, address, val, len); 730 } 731 732 if (range_covers_byte(address, len, PCI_COMMAND)) { 733 if (!(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) { 734 virtio_set_disabled(vdev, true); 735 virtio_pci_stop_ioeventfd(proxy); 736 virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK); 737 } else { 738 virtio_set_disabled(vdev, false); 739 } 740 } 741 742 if (proxy->config_cap && 743 ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap, 744 pci_cfg_data), 745 sizeof cfg->pci_cfg_data)) { 746 uint32_t off; 747 uint32_t len; 748 749 cfg = (void *)(proxy->pci_dev.config + proxy->config_cap); 750 off = le32_to_cpu(cfg->cap.offset); 751 len = le32_to_cpu(cfg->cap.length); 752 753 if (len == 1 || len == 2 || len == 4) { 754 assert(len <= sizeof cfg->pci_cfg_data); 755 virtio_address_space_write(proxy, off, cfg->pci_cfg_data, len); 756 } 757 } 758 } 759 760 static uint32_t virtio_read_config(PCIDevice *pci_dev, 761 uint32_t address, int len) 762 { 763 VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); 764 struct virtio_pci_cfg_cap *cfg; 765 766 if (proxy->config_cap && 767 ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap, 768 pci_cfg_data), 769 sizeof cfg->pci_cfg_data)) { 770 uint32_t off; 771 uint32_t len; 772 773 cfg = (void *)(proxy->pci_dev.config + proxy->config_cap); 774 off = le32_to_cpu(cfg->cap.offset); 775 len = le32_to_cpu(cfg->cap.length); 776 777 if (len == 1 || len == 2 || len == 4) { 778 assert(len <= sizeof cfg->pci_cfg_data); 779 virtio_address_space_read(proxy, off, cfg->pci_cfg_data, len); 780 } 781 } 782 783 return pci_default_read_config(pci_dev, address, len); 784 } 785 786 static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy, 787 unsigned int vector) 788 { 789 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; 790 int ret; 791 792 if (irqfd->users == 0) { 793 KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state); 794 ret = kvm_irqchip_add_msi_route(&c, vector, &proxy->pci_dev); 795 if (ret < 0) { 796 return ret; 797 } 798 kvm_irqchip_commit_route_changes(&c); 799 irqfd->virq = ret; 800 } 801 irqfd->users++; 802 return 0; 803 } 804 805 static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy, 806 unsigned int vector) 807 { 808 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; 809 if (--irqfd->users == 0) { 810 kvm_irqchip_release_virq(kvm_state, irqfd->virq); 811 } 812 } 813 814 static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy, 815 EventNotifier *n, 816 unsigned int vector) 817 { 818 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; 819 return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq); 820 } 821 822 static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy, 823 EventNotifier *n , 824 unsigned int vector) 825 { 826 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; 827 int ret; 828 829 ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq); 830 assert(ret == 0); 831 } 832 static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no, 833 EventNotifier **n, unsigned int *vector) 834 { 835 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 836 VirtQueue *vq; 837 838 if (queue_no == VIRTIO_CONFIG_IRQ_IDX) { 839 return -1; 840 } else { 841 if (!virtio_queue_get_num(vdev, queue_no)) { 842 return -1; 843 } 844 *vector = virtio_queue_vector(vdev, queue_no); 845 vq = virtio_get_queue(vdev, queue_no); 846 *n = virtio_queue_get_guest_notifier(vq); 847 } 848 return 0; 849 } 850 851 static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no) 852 { 853 unsigned int vector; 854 int ret; 855 EventNotifier *n; 856 PCIDevice *dev = &proxy->pci_dev; 857 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 858 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 859 860 ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector); 861 if (ret < 0) { 862 return ret; 863 } 864 if (vector >= msix_nr_vectors_allocated(dev)) { 865 return 0; 866 } 867 ret = kvm_virtio_pci_vq_vector_use(proxy, vector); 868 if (ret < 0) { 869 goto undo; 870 } 871 /* 872 * If guest supports masking, set up irqfd now. 873 * Otherwise, delay until unmasked in the frontend. 874 */ 875 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { 876 ret = kvm_virtio_pci_irqfd_use(proxy, n, vector); 877 if (ret < 0) { 878 kvm_virtio_pci_vq_vector_release(proxy, vector); 879 goto undo; 880 } 881 } 882 883 return 0; 884 undo: 885 886 vector = virtio_queue_vector(vdev, queue_no); 887 if (vector >= msix_nr_vectors_allocated(dev)) { 888 return ret; 889 } 890 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { 891 ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector); 892 if (ret < 0) { 893 return ret; 894 } 895 kvm_virtio_pci_irqfd_release(proxy, n, vector); 896 } 897 return ret; 898 } 899 static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs) 900 { 901 int queue_no; 902 int ret = 0; 903 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 904 905 for (queue_no = 0; queue_no < nvqs; queue_no++) { 906 if (!virtio_queue_get_num(vdev, queue_no)) { 907 return -1; 908 } 909 ret = kvm_virtio_pci_vector_use_one(proxy, queue_no); 910 } 911 return ret; 912 } 913 914 915 static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy, 916 int queue_no) 917 { 918 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 919 unsigned int vector; 920 EventNotifier *n; 921 int ret; 922 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 923 PCIDevice *dev = &proxy->pci_dev; 924 925 ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector); 926 if (ret < 0) { 927 return; 928 } 929 if (vector >= msix_nr_vectors_allocated(dev)) { 930 return; 931 } 932 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { 933 kvm_virtio_pci_irqfd_release(proxy, n, vector); 934 } 935 kvm_virtio_pci_vq_vector_release(proxy, vector); 936 } 937 938 static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs) 939 { 940 int queue_no; 941 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 942 943 for (queue_no = 0; queue_no < nvqs; queue_no++) { 944 if (!virtio_queue_get_num(vdev, queue_no)) { 945 break; 946 } 947 kvm_virtio_pci_vector_release_one(proxy, queue_no); 948 } 949 } 950 951 static int virtio_pci_one_vector_unmask(VirtIOPCIProxy *proxy, 952 unsigned int queue_no, 953 unsigned int vector, 954 MSIMessage msg, 955 EventNotifier *n) 956 { 957 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 958 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 959 VirtIOIRQFD *irqfd; 960 int ret = 0; 961 962 if (proxy->vector_irqfd) { 963 irqfd = &proxy->vector_irqfd[vector]; 964 if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) { 965 ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg, 966 &proxy->pci_dev); 967 if (ret < 0) { 968 return ret; 969 } 970 kvm_irqchip_commit_routes(kvm_state); 971 } 972 } 973 974 /* If guest supports masking, irqfd is already setup, unmask it. 975 * Otherwise, set it up now. 976 */ 977 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { 978 k->guest_notifier_mask(vdev, queue_no, false); 979 /* Test after unmasking to avoid losing events. */ 980 if (k->guest_notifier_pending && 981 k->guest_notifier_pending(vdev, queue_no)) { 982 event_notifier_set(n); 983 } 984 } else { 985 ret = kvm_virtio_pci_irqfd_use(proxy, n, vector); 986 } 987 return ret; 988 } 989 990 static void virtio_pci_one_vector_mask(VirtIOPCIProxy *proxy, 991 unsigned int queue_no, 992 unsigned int vector, 993 EventNotifier *n) 994 { 995 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 996 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 997 998 /* If guest supports masking, keep irqfd but mask it. 999 * Otherwise, clean it up now. 1000 */ 1001 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { 1002 k->guest_notifier_mask(vdev, queue_no, true); 1003 } else { 1004 kvm_virtio_pci_irqfd_release(proxy, n, vector); 1005 } 1006 } 1007 1008 static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector, 1009 MSIMessage msg) 1010 { 1011 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev); 1012 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1013 VirtQueue *vq = virtio_vector_first_queue(vdev, vector); 1014 EventNotifier *n; 1015 int ret, index, unmasked = 0; 1016 1017 while (vq) { 1018 index = virtio_get_queue_index(vq); 1019 if (!virtio_queue_get_num(vdev, index)) { 1020 break; 1021 } 1022 if (index < proxy->nvqs_with_notifiers) { 1023 n = virtio_queue_get_guest_notifier(vq); 1024 ret = virtio_pci_one_vector_unmask(proxy, index, vector, msg, n); 1025 if (ret < 0) { 1026 goto undo; 1027 } 1028 ++unmasked; 1029 } 1030 vq = virtio_vector_next_queue(vq); 1031 } 1032 1033 return 0; 1034 1035 undo: 1036 vq = virtio_vector_first_queue(vdev, vector); 1037 while (vq && unmasked >= 0) { 1038 index = virtio_get_queue_index(vq); 1039 if (index < proxy->nvqs_with_notifiers) { 1040 n = virtio_queue_get_guest_notifier(vq); 1041 virtio_pci_one_vector_mask(proxy, index, vector, n); 1042 --unmasked; 1043 } 1044 vq = virtio_vector_next_queue(vq); 1045 } 1046 return ret; 1047 } 1048 1049 static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector) 1050 { 1051 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev); 1052 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1053 VirtQueue *vq = virtio_vector_first_queue(vdev, vector); 1054 EventNotifier *n; 1055 int index; 1056 1057 while (vq) { 1058 index = virtio_get_queue_index(vq); 1059 n = virtio_queue_get_guest_notifier(vq); 1060 if (!virtio_queue_get_num(vdev, index)) { 1061 break; 1062 } 1063 if (index < proxy->nvqs_with_notifiers) { 1064 virtio_pci_one_vector_mask(proxy, index, vector, n); 1065 } 1066 vq = virtio_vector_next_queue(vq); 1067 } 1068 } 1069 1070 static void virtio_pci_vector_poll(PCIDevice *dev, 1071 unsigned int vector_start, 1072 unsigned int vector_end) 1073 { 1074 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev); 1075 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1076 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 1077 int queue_no; 1078 unsigned int vector; 1079 EventNotifier *notifier; 1080 int ret; 1081 1082 for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) { 1083 ret = virtio_pci_get_notifier(proxy, queue_no, ¬ifier, &vector); 1084 if (ret < 0) { 1085 break; 1086 } 1087 if (vector < vector_start || vector >= vector_end || 1088 !msix_is_masked(dev, vector)) { 1089 continue; 1090 } 1091 if (k->guest_notifier_pending) { 1092 if (k->guest_notifier_pending(vdev, queue_no)) { 1093 msix_set_pending(dev, vector); 1094 } 1095 } else if (event_notifier_test_and_clear(notifier)) { 1096 msix_set_pending(dev, vector); 1097 } 1098 } 1099 } 1100 1101 static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign, 1102 bool with_irqfd) 1103 { 1104 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 1105 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1106 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); 1107 VirtQueue *vq = virtio_get_queue(vdev, n); 1108 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq); 1109 1110 if (assign) { 1111 int r = event_notifier_init(notifier, 0); 1112 if (r < 0) { 1113 return r; 1114 } 1115 virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd); 1116 } else { 1117 virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd); 1118 event_notifier_cleanup(notifier); 1119 } 1120 1121 if (!msix_enabled(&proxy->pci_dev) && 1122 vdev->use_guest_notifier_mask && 1123 vdc->guest_notifier_mask) { 1124 vdc->guest_notifier_mask(vdev, n, !assign); 1125 } 1126 1127 return 0; 1128 } 1129 1130 static bool virtio_pci_query_guest_notifiers(DeviceState *d) 1131 { 1132 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 1133 return msix_enabled(&proxy->pci_dev); 1134 } 1135 1136 static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign) 1137 { 1138 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 1139 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1140 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 1141 int r, n; 1142 bool with_irqfd = msix_enabled(&proxy->pci_dev) && 1143 kvm_msi_via_irqfd_enabled(); 1144 1145 nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX); 1146 1147 /* 1148 * When deassigning, pass a consistent nvqs value to avoid leaking 1149 * notifiers. But first check we've actually been configured, exit 1150 * early if we haven't. 1151 */ 1152 if (!assign && !proxy->nvqs_with_notifiers) { 1153 return 0; 1154 } 1155 assert(assign || nvqs == proxy->nvqs_with_notifiers); 1156 1157 proxy->nvqs_with_notifiers = nvqs; 1158 1159 /* Must unset vector notifier while guest notifier is still assigned */ 1160 if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) { 1161 msix_unset_vector_notifiers(&proxy->pci_dev); 1162 if (proxy->vector_irqfd) { 1163 kvm_virtio_pci_vector_release(proxy, nvqs); 1164 g_free(proxy->vector_irqfd); 1165 proxy->vector_irqfd = NULL; 1166 } 1167 } 1168 1169 for (n = 0; n < nvqs; n++) { 1170 if (!virtio_queue_get_num(vdev, n)) { 1171 break; 1172 } 1173 1174 r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd); 1175 if (r < 0) { 1176 goto assign_error; 1177 } 1178 } 1179 1180 /* Must set vector notifier after guest notifier has been assigned */ 1181 if ((with_irqfd || k->guest_notifier_mask) && assign) { 1182 if (with_irqfd) { 1183 proxy->vector_irqfd = 1184 g_malloc0(sizeof(*proxy->vector_irqfd) * 1185 msix_nr_vectors_allocated(&proxy->pci_dev)); 1186 r = kvm_virtio_pci_vector_use(proxy, nvqs); 1187 if (r < 0) { 1188 goto assign_error; 1189 } 1190 } 1191 r = msix_set_vector_notifiers(&proxy->pci_dev, 1192 virtio_pci_vector_unmask, 1193 virtio_pci_vector_mask, 1194 virtio_pci_vector_poll); 1195 if (r < 0) { 1196 goto notifiers_error; 1197 } 1198 } 1199 1200 return 0; 1201 1202 notifiers_error: 1203 if (with_irqfd) { 1204 assert(assign); 1205 kvm_virtio_pci_vector_release(proxy, nvqs); 1206 } 1207 1208 assign_error: 1209 /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */ 1210 assert(assign); 1211 while (--n >= 0) { 1212 virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd); 1213 } 1214 return r; 1215 } 1216 1217 static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n, 1218 MemoryRegion *mr, bool assign) 1219 { 1220 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 1221 int offset; 1222 1223 if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) || 1224 virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) { 1225 return -1; 1226 } 1227 1228 if (assign) { 1229 offset = virtio_pci_queue_mem_mult(proxy) * n; 1230 memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1); 1231 } else { 1232 memory_region_del_subregion(&proxy->notify.mr, mr); 1233 } 1234 1235 return 0; 1236 } 1237 1238 static void virtio_pci_vmstate_change(DeviceState *d, bool running) 1239 { 1240 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 1241 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1242 1243 if (running) { 1244 /* Old QEMU versions did not set bus master enable on status write. 1245 * Detect DRIVER set and enable it. 1246 */ 1247 if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) && 1248 (vdev->status & VIRTIO_CONFIG_S_DRIVER) && 1249 !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) { 1250 pci_default_write_config(&proxy->pci_dev, PCI_COMMAND, 1251 proxy->pci_dev.config[PCI_COMMAND] | 1252 PCI_COMMAND_MASTER, 1); 1253 } 1254 virtio_pci_start_ioeventfd(proxy); 1255 } else { 1256 virtio_pci_stop_ioeventfd(proxy); 1257 } 1258 } 1259 1260 /* 1261 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus. 1262 */ 1263 1264 static int virtio_pci_query_nvectors(DeviceState *d) 1265 { 1266 VirtIOPCIProxy *proxy = VIRTIO_PCI(d); 1267 1268 return proxy->nvectors; 1269 } 1270 1271 static AddressSpace *virtio_pci_get_dma_as(DeviceState *d) 1272 { 1273 VirtIOPCIProxy *proxy = VIRTIO_PCI(d); 1274 PCIDevice *dev = &proxy->pci_dev; 1275 1276 return pci_get_address_space(dev); 1277 } 1278 1279 static bool virtio_pci_iommu_enabled(DeviceState *d) 1280 { 1281 VirtIOPCIProxy *proxy = VIRTIO_PCI(d); 1282 PCIDevice *dev = &proxy->pci_dev; 1283 AddressSpace *dma_as = pci_device_iommu_address_space(dev); 1284 1285 if (dma_as == &address_space_memory) { 1286 return false; 1287 } 1288 1289 return true; 1290 } 1291 1292 static bool virtio_pci_queue_enabled(DeviceState *d, int n) 1293 { 1294 VirtIOPCIProxy *proxy = VIRTIO_PCI(d); 1295 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1296 1297 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { 1298 return proxy->vqs[n].enabled; 1299 } 1300 1301 return virtio_queue_enabled_legacy(vdev, n); 1302 } 1303 1304 static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy, 1305 struct virtio_pci_cap *cap) 1306 { 1307 PCIDevice *dev = &proxy->pci_dev; 1308 int offset; 1309 1310 offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0, 1311 cap->cap_len, &error_abort); 1312 1313 assert(cap->cap_len >= sizeof *cap); 1314 memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len, 1315 cap->cap_len - PCI_CAP_FLAGS); 1316 1317 return offset; 1318 } 1319 1320 static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr, 1321 unsigned size) 1322 { 1323 VirtIOPCIProxy *proxy = opaque; 1324 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1325 uint32_t val = 0; 1326 int i; 1327 1328 if (vdev == NULL) { 1329 return UINT64_MAX; 1330 } 1331 1332 switch (addr) { 1333 case VIRTIO_PCI_COMMON_DFSELECT: 1334 val = proxy->dfselect; 1335 break; 1336 case VIRTIO_PCI_COMMON_DF: 1337 if (proxy->dfselect <= 1) { 1338 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); 1339 1340 val = (vdev->host_features & ~vdc->legacy_features) >> 1341 (32 * proxy->dfselect); 1342 } 1343 break; 1344 case VIRTIO_PCI_COMMON_GFSELECT: 1345 val = proxy->gfselect; 1346 break; 1347 case VIRTIO_PCI_COMMON_GF: 1348 if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) { 1349 val = proxy->guest_features[proxy->gfselect]; 1350 } 1351 break; 1352 case VIRTIO_PCI_COMMON_MSIX: 1353 val = vdev->config_vector; 1354 break; 1355 case VIRTIO_PCI_COMMON_NUMQ: 1356 for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) { 1357 if (virtio_queue_get_num(vdev, i)) { 1358 val = i + 1; 1359 } 1360 } 1361 break; 1362 case VIRTIO_PCI_COMMON_STATUS: 1363 val = vdev->status; 1364 break; 1365 case VIRTIO_PCI_COMMON_CFGGENERATION: 1366 val = vdev->generation; 1367 break; 1368 case VIRTIO_PCI_COMMON_Q_SELECT: 1369 val = vdev->queue_sel; 1370 break; 1371 case VIRTIO_PCI_COMMON_Q_SIZE: 1372 val = virtio_queue_get_num(vdev, vdev->queue_sel); 1373 break; 1374 case VIRTIO_PCI_COMMON_Q_MSIX: 1375 val = virtio_queue_vector(vdev, vdev->queue_sel); 1376 break; 1377 case VIRTIO_PCI_COMMON_Q_ENABLE: 1378 val = proxy->vqs[vdev->queue_sel].enabled; 1379 break; 1380 case VIRTIO_PCI_COMMON_Q_NOFF: 1381 /* Simply map queues in order */ 1382 val = vdev->queue_sel; 1383 break; 1384 case VIRTIO_PCI_COMMON_Q_DESCLO: 1385 val = proxy->vqs[vdev->queue_sel].desc[0]; 1386 break; 1387 case VIRTIO_PCI_COMMON_Q_DESCHI: 1388 val = proxy->vqs[vdev->queue_sel].desc[1]; 1389 break; 1390 case VIRTIO_PCI_COMMON_Q_AVAILLO: 1391 val = proxy->vqs[vdev->queue_sel].avail[0]; 1392 break; 1393 case VIRTIO_PCI_COMMON_Q_AVAILHI: 1394 val = proxy->vqs[vdev->queue_sel].avail[1]; 1395 break; 1396 case VIRTIO_PCI_COMMON_Q_USEDLO: 1397 val = proxy->vqs[vdev->queue_sel].used[0]; 1398 break; 1399 case VIRTIO_PCI_COMMON_Q_USEDHI: 1400 val = proxy->vqs[vdev->queue_sel].used[1]; 1401 break; 1402 case VIRTIO_PCI_COMMON_Q_RESET: 1403 val = proxy->vqs[vdev->queue_sel].reset; 1404 break; 1405 default: 1406 val = 0; 1407 } 1408 1409 return val; 1410 } 1411 1412 static void virtio_pci_common_write(void *opaque, hwaddr addr, 1413 uint64_t val, unsigned size) 1414 { 1415 VirtIOPCIProxy *proxy = opaque; 1416 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1417 uint16_t vector; 1418 1419 if (vdev == NULL) { 1420 return; 1421 } 1422 1423 switch (addr) { 1424 case VIRTIO_PCI_COMMON_DFSELECT: 1425 proxy->dfselect = val; 1426 break; 1427 case VIRTIO_PCI_COMMON_GFSELECT: 1428 proxy->gfselect = val; 1429 break; 1430 case VIRTIO_PCI_COMMON_GF: 1431 if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) { 1432 proxy->guest_features[proxy->gfselect] = val; 1433 virtio_set_features(vdev, 1434 (((uint64_t)proxy->guest_features[1]) << 32) | 1435 proxy->guest_features[0]); 1436 } 1437 break; 1438 case VIRTIO_PCI_COMMON_MSIX: 1439 if (vdev->config_vector != VIRTIO_NO_VECTOR) { 1440 msix_vector_unuse(&proxy->pci_dev, vdev->config_vector); 1441 } 1442 /* Make it possible for guest to discover an error took place. */ 1443 if (val < proxy->nvectors) { 1444 msix_vector_use(&proxy->pci_dev, val); 1445 } else { 1446 val = VIRTIO_NO_VECTOR; 1447 } 1448 vdev->config_vector = val; 1449 break; 1450 case VIRTIO_PCI_COMMON_STATUS: 1451 if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) { 1452 virtio_pci_stop_ioeventfd(proxy); 1453 } 1454 1455 virtio_set_status(vdev, val & 0xFF); 1456 1457 if (val & VIRTIO_CONFIG_S_DRIVER_OK) { 1458 virtio_pci_start_ioeventfd(proxy); 1459 } 1460 1461 if (vdev->status == 0) { 1462 virtio_pci_reset(DEVICE(proxy)); 1463 } 1464 1465 break; 1466 case VIRTIO_PCI_COMMON_Q_SELECT: 1467 if (val < VIRTIO_QUEUE_MAX) { 1468 vdev->queue_sel = val; 1469 } 1470 break; 1471 case VIRTIO_PCI_COMMON_Q_SIZE: 1472 proxy->vqs[vdev->queue_sel].num = val; 1473 virtio_queue_set_num(vdev, vdev->queue_sel, 1474 proxy->vqs[vdev->queue_sel].num); 1475 break; 1476 case VIRTIO_PCI_COMMON_Q_MSIX: 1477 vector = virtio_queue_vector(vdev, vdev->queue_sel); 1478 if (vector != VIRTIO_NO_VECTOR) { 1479 msix_vector_unuse(&proxy->pci_dev, vector); 1480 } 1481 /* Make it possible for guest to discover an error took place. */ 1482 if (val < proxy->nvectors) { 1483 msix_vector_use(&proxy->pci_dev, val); 1484 } else { 1485 val = VIRTIO_NO_VECTOR; 1486 } 1487 virtio_queue_set_vector(vdev, vdev->queue_sel, val); 1488 break; 1489 case VIRTIO_PCI_COMMON_Q_ENABLE: 1490 if (val == 1) { 1491 virtio_queue_set_num(vdev, vdev->queue_sel, 1492 proxy->vqs[vdev->queue_sel].num); 1493 virtio_queue_set_rings(vdev, vdev->queue_sel, 1494 ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 | 1495 proxy->vqs[vdev->queue_sel].desc[0], 1496 ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 | 1497 proxy->vqs[vdev->queue_sel].avail[0], 1498 ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 | 1499 proxy->vqs[vdev->queue_sel].used[0]); 1500 proxy->vqs[vdev->queue_sel].enabled = 1; 1501 proxy->vqs[vdev->queue_sel].reset = 0; 1502 virtio_queue_enable(vdev, vdev->queue_sel); 1503 } else { 1504 virtio_error(vdev, "wrong value for queue_enable %"PRIx64, val); 1505 } 1506 break; 1507 case VIRTIO_PCI_COMMON_Q_DESCLO: 1508 proxy->vqs[vdev->queue_sel].desc[0] = val; 1509 break; 1510 case VIRTIO_PCI_COMMON_Q_DESCHI: 1511 proxy->vqs[vdev->queue_sel].desc[1] = val; 1512 break; 1513 case VIRTIO_PCI_COMMON_Q_AVAILLO: 1514 proxy->vqs[vdev->queue_sel].avail[0] = val; 1515 break; 1516 case VIRTIO_PCI_COMMON_Q_AVAILHI: 1517 proxy->vqs[vdev->queue_sel].avail[1] = val; 1518 break; 1519 case VIRTIO_PCI_COMMON_Q_USEDLO: 1520 proxy->vqs[vdev->queue_sel].used[0] = val; 1521 break; 1522 case VIRTIO_PCI_COMMON_Q_USEDHI: 1523 proxy->vqs[vdev->queue_sel].used[1] = val; 1524 break; 1525 case VIRTIO_PCI_COMMON_Q_RESET: 1526 if (val == 1) { 1527 proxy->vqs[vdev->queue_sel].reset = 1; 1528 1529 virtio_queue_reset(vdev, vdev->queue_sel); 1530 1531 proxy->vqs[vdev->queue_sel].reset = 0; 1532 proxy->vqs[vdev->queue_sel].enabled = 0; 1533 } 1534 break; 1535 default: 1536 break; 1537 } 1538 } 1539 1540 1541 static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr, 1542 unsigned size) 1543 { 1544 VirtIOPCIProxy *proxy = opaque; 1545 if (virtio_bus_get_device(&proxy->bus) == NULL) { 1546 return UINT64_MAX; 1547 } 1548 1549 return 0; 1550 } 1551 1552 static void virtio_pci_notify_write(void *opaque, hwaddr addr, 1553 uint64_t val, unsigned size) 1554 { 1555 VirtIOPCIProxy *proxy = opaque; 1556 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1557 1558 unsigned queue = addr / virtio_pci_queue_mem_mult(proxy); 1559 1560 if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) { 1561 trace_virtio_pci_notify_write(addr, val, size); 1562 virtio_queue_notify(vdev, queue); 1563 } 1564 } 1565 1566 static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr, 1567 uint64_t val, unsigned size) 1568 { 1569 VirtIOPCIProxy *proxy = opaque; 1570 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1571 1572 unsigned queue = val; 1573 1574 if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) { 1575 trace_virtio_pci_notify_write_pio(addr, val, size); 1576 virtio_queue_notify(vdev, queue); 1577 } 1578 } 1579 1580 static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr, 1581 unsigned size) 1582 { 1583 VirtIOPCIProxy *proxy = opaque; 1584 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1585 uint64_t val; 1586 1587 if (vdev == NULL) { 1588 return UINT64_MAX; 1589 } 1590 1591 val = qatomic_xchg(&vdev->isr, 0); 1592 pci_irq_deassert(&proxy->pci_dev); 1593 return val; 1594 } 1595 1596 static void virtio_pci_isr_write(void *opaque, hwaddr addr, 1597 uint64_t val, unsigned size) 1598 { 1599 } 1600 1601 static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr, 1602 unsigned size) 1603 { 1604 VirtIOPCIProxy *proxy = opaque; 1605 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1606 uint64_t val; 1607 1608 if (vdev == NULL) { 1609 return UINT64_MAX; 1610 } 1611 1612 switch (size) { 1613 case 1: 1614 val = virtio_config_modern_readb(vdev, addr); 1615 break; 1616 case 2: 1617 val = virtio_config_modern_readw(vdev, addr); 1618 break; 1619 case 4: 1620 val = virtio_config_modern_readl(vdev, addr); 1621 break; 1622 default: 1623 val = 0; 1624 break; 1625 } 1626 return val; 1627 } 1628 1629 static void virtio_pci_device_write(void *opaque, hwaddr addr, 1630 uint64_t val, unsigned size) 1631 { 1632 VirtIOPCIProxy *proxy = opaque; 1633 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1634 1635 if (vdev == NULL) { 1636 return; 1637 } 1638 1639 switch (size) { 1640 case 1: 1641 virtio_config_modern_writeb(vdev, addr, val); 1642 break; 1643 case 2: 1644 virtio_config_modern_writew(vdev, addr, val); 1645 break; 1646 case 4: 1647 virtio_config_modern_writel(vdev, addr, val); 1648 break; 1649 } 1650 } 1651 1652 static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy, 1653 const char *vdev_name) 1654 { 1655 static const MemoryRegionOps common_ops = { 1656 .read = virtio_pci_common_read, 1657 .write = virtio_pci_common_write, 1658 .impl = { 1659 .min_access_size = 1, 1660 .max_access_size = 4, 1661 }, 1662 .endianness = DEVICE_LITTLE_ENDIAN, 1663 }; 1664 static const MemoryRegionOps isr_ops = { 1665 .read = virtio_pci_isr_read, 1666 .write = virtio_pci_isr_write, 1667 .impl = { 1668 .min_access_size = 1, 1669 .max_access_size = 4, 1670 }, 1671 .endianness = DEVICE_LITTLE_ENDIAN, 1672 }; 1673 static const MemoryRegionOps device_ops = { 1674 .read = virtio_pci_device_read, 1675 .write = virtio_pci_device_write, 1676 .impl = { 1677 .min_access_size = 1, 1678 .max_access_size = 4, 1679 }, 1680 .endianness = DEVICE_LITTLE_ENDIAN, 1681 }; 1682 static const MemoryRegionOps notify_ops = { 1683 .read = virtio_pci_notify_read, 1684 .write = virtio_pci_notify_write, 1685 .impl = { 1686 .min_access_size = 1, 1687 .max_access_size = 4, 1688 }, 1689 .endianness = DEVICE_LITTLE_ENDIAN, 1690 }; 1691 static const MemoryRegionOps notify_pio_ops = { 1692 .read = virtio_pci_notify_read, 1693 .write = virtio_pci_notify_write_pio, 1694 .impl = { 1695 .min_access_size = 1, 1696 .max_access_size = 4, 1697 }, 1698 .endianness = DEVICE_LITTLE_ENDIAN, 1699 }; 1700 g_autoptr(GString) name = g_string_new(NULL); 1701 1702 g_string_printf(name, "virtio-pci-common-%s", vdev_name); 1703 memory_region_init_io(&proxy->common.mr, OBJECT(proxy), 1704 &common_ops, 1705 proxy, 1706 name->str, 1707 proxy->common.size); 1708 1709 g_string_printf(name, "virtio-pci-isr-%s", vdev_name); 1710 memory_region_init_io(&proxy->isr.mr, OBJECT(proxy), 1711 &isr_ops, 1712 proxy, 1713 name->str, 1714 proxy->isr.size); 1715 1716 g_string_printf(name, "virtio-pci-device-%s", vdev_name); 1717 memory_region_init_io(&proxy->device.mr, OBJECT(proxy), 1718 &device_ops, 1719 proxy, 1720 name->str, 1721 proxy->device.size); 1722 1723 g_string_printf(name, "virtio-pci-notify-%s", vdev_name); 1724 memory_region_init_io(&proxy->notify.mr, OBJECT(proxy), 1725 ¬ify_ops, 1726 proxy, 1727 name->str, 1728 proxy->notify.size); 1729 1730 g_string_printf(name, "virtio-pci-notify-pio-%s", vdev_name); 1731 memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy), 1732 ¬ify_pio_ops, 1733 proxy, 1734 name->str, 1735 proxy->notify_pio.size); 1736 } 1737 1738 static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy, 1739 VirtIOPCIRegion *region, 1740 struct virtio_pci_cap *cap, 1741 MemoryRegion *mr, 1742 uint8_t bar) 1743 { 1744 memory_region_add_subregion(mr, region->offset, ®ion->mr); 1745 1746 cap->cfg_type = region->type; 1747 cap->bar = bar; 1748 cap->offset = cpu_to_le32(region->offset); 1749 cap->length = cpu_to_le32(region->size); 1750 virtio_pci_add_mem_cap(proxy, cap); 1751 1752 } 1753 1754 static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy, 1755 VirtIOPCIRegion *region, 1756 struct virtio_pci_cap *cap) 1757 { 1758 virtio_pci_modern_region_map(proxy, region, cap, 1759 &proxy->modern_bar, proxy->modern_mem_bar_idx); 1760 } 1761 1762 static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy, 1763 VirtIOPCIRegion *region, 1764 struct virtio_pci_cap *cap) 1765 { 1766 virtio_pci_modern_region_map(proxy, region, cap, 1767 &proxy->io_bar, proxy->modern_io_bar_idx); 1768 } 1769 1770 static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy, 1771 VirtIOPCIRegion *region) 1772 { 1773 memory_region_del_subregion(&proxy->modern_bar, 1774 ®ion->mr); 1775 } 1776 1777 static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy, 1778 VirtIOPCIRegion *region) 1779 { 1780 memory_region_del_subregion(&proxy->io_bar, 1781 ®ion->mr); 1782 } 1783 1784 static void virtio_pci_pre_plugged(DeviceState *d, Error **errp) 1785 { 1786 VirtIOPCIProxy *proxy = VIRTIO_PCI(d); 1787 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1788 1789 if (virtio_pci_modern(proxy)) { 1790 virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1); 1791 } 1792 1793 virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE); 1794 } 1795 1796 /* This is called by virtio-bus just after the device is plugged. */ 1797 static void virtio_pci_device_plugged(DeviceState *d, Error **errp) 1798 { 1799 VirtIOPCIProxy *proxy = VIRTIO_PCI(d); 1800 VirtioBusState *bus = &proxy->bus; 1801 bool legacy = virtio_pci_legacy(proxy); 1802 bool modern; 1803 bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; 1804 uint8_t *config; 1805 uint32_t size; 1806 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1807 1808 /* 1809 * Virtio capabilities present without 1810 * VIRTIO_F_VERSION_1 confuses guests 1811 */ 1812 if (!proxy->ignore_backend_features && 1813 !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) { 1814 virtio_pci_disable_modern(proxy); 1815 1816 if (!legacy) { 1817 error_setg(errp, "Device doesn't support modern mode, and legacy" 1818 " mode is disabled"); 1819 error_append_hint(errp, "Set disable-legacy to off\n"); 1820 1821 return; 1822 } 1823 } 1824 1825 modern = virtio_pci_modern(proxy); 1826 1827 config = proxy->pci_dev.config; 1828 if (proxy->class_code) { 1829 pci_config_set_class(config, proxy->class_code); 1830 } 1831 1832 if (legacy) { 1833 if (!virtio_legacy_allowed(vdev)) { 1834 /* 1835 * To avoid migration issues, we allow legacy mode when legacy 1836 * check is disabled in the old machine types (< 5.1). 1837 */ 1838 if (virtio_legacy_check_disabled(vdev)) { 1839 warn_report("device is modern-only, but for backward " 1840 "compatibility legacy is allowed"); 1841 } else { 1842 error_setg(errp, 1843 "device is modern-only, use disable-legacy=on"); 1844 return; 1845 } 1846 } 1847 if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) { 1848 error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by" 1849 " neither legacy nor transitional device"); 1850 return; 1851 } 1852 /* 1853 * Legacy and transitional devices use specific subsystem IDs. 1854 * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID) 1855 * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default. 1856 */ 1857 pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus)); 1858 if (proxy->trans_devid) { 1859 pci_config_set_device_id(config, proxy->trans_devid); 1860 } 1861 } else { 1862 /* pure virtio-1.0 */ 1863 pci_set_word(config + PCI_VENDOR_ID, 1864 PCI_VENDOR_ID_REDHAT_QUMRANET); 1865 pci_set_word(config + PCI_DEVICE_ID, 1866 PCI_DEVICE_ID_VIRTIO_10_BASE + virtio_bus_get_vdev_id(bus)); 1867 pci_config_set_revision(config, 1); 1868 } 1869 config[PCI_INTERRUPT_PIN] = 1; 1870 1871 1872 if (modern) { 1873 struct virtio_pci_cap cap = { 1874 .cap_len = sizeof cap, 1875 }; 1876 struct virtio_pci_notify_cap notify = { 1877 .cap.cap_len = sizeof notify, 1878 .notify_off_multiplier = 1879 cpu_to_le32(virtio_pci_queue_mem_mult(proxy)), 1880 }; 1881 struct virtio_pci_cfg_cap cfg = { 1882 .cap.cap_len = sizeof cfg, 1883 .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG, 1884 }; 1885 struct virtio_pci_notify_cap notify_pio = { 1886 .cap.cap_len = sizeof notify, 1887 .notify_off_multiplier = cpu_to_le32(0x0), 1888 }; 1889 1890 struct virtio_pci_cfg_cap *cfg_mask; 1891 1892 virtio_pci_modern_regions_init(proxy, vdev->name); 1893 1894 virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap); 1895 virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap); 1896 virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap); 1897 virtio_pci_modern_mem_region_map(proxy, &proxy->notify, ¬ify.cap); 1898 1899 if (modern_pio) { 1900 memory_region_init(&proxy->io_bar, OBJECT(proxy), 1901 "virtio-pci-io", 0x4); 1902 1903 pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx, 1904 PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar); 1905 1906 virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio, 1907 ¬ify_pio.cap); 1908 } 1909 1910 pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx, 1911 PCI_BASE_ADDRESS_SPACE_MEMORY | 1912 PCI_BASE_ADDRESS_MEM_PREFETCH | 1913 PCI_BASE_ADDRESS_MEM_TYPE_64, 1914 &proxy->modern_bar); 1915 1916 proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap); 1917 cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap); 1918 pci_set_byte(&cfg_mask->cap.bar, ~0x0); 1919 pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0); 1920 pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0); 1921 pci_set_long(cfg_mask->pci_cfg_data, ~0x0); 1922 } 1923 1924 if (proxy->nvectors) { 1925 int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1926 proxy->msix_bar_idx, NULL); 1927 if (err) { 1928 /* Notice when a system that supports MSIx can't initialize it */ 1929 if (err != -ENOTSUP) { 1930 warn_report("unable to init msix vectors to %" PRIu32, 1931 proxy->nvectors); 1932 } 1933 proxy->nvectors = 0; 1934 } 1935 } 1936 1937 proxy->pci_dev.config_write = virtio_write_config; 1938 proxy->pci_dev.config_read = virtio_read_config; 1939 1940 if (legacy) { 1941 size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev) 1942 + virtio_bus_get_vdev_config_len(bus); 1943 size = pow2ceil(size); 1944 1945 memory_region_init_io(&proxy->bar, OBJECT(proxy), 1946 &virtio_pci_config_ops, 1947 proxy, "virtio-pci", size); 1948 1949 pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx, 1950 PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar); 1951 } 1952 } 1953 1954 static void virtio_pci_device_unplugged(DeviceState *d) 1955 { 1956 VirtIOPCIProxy *proxy = VIRTIO_PCI(d); 1957 bool modern = virtio_pci_modern(proxy); 1958 bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; 1959 1960 virtio_pci_stop_ioeventfd(proxy); 1961 1962 if (modern) { 1963 virtio_pci_modern_mem_region_unmap(proxy, &proxy->common); 1964 virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr); 1965 virtio_pci_modern_mem_region_unmap(proxy, &proxy->device); 1966 virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify); 1967 if (modern_pio) { 1968 virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio); 1969 } 1970 } 1971 } 1972 1973 static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp) 1974 { 1975 VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); 1976 VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev); 1977 bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) && 1978 !pci_bus_is_root(pci_get_bus(pci_dev)); 1979 1980 if (kvm_enabled() && !kvm_has_many_ioeventfds()) { 1981 proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD; 1982 } 1983 1984 /* fd-based ioevents can't be synchronized in record/replay */ 1985 if (replay_mode != REPLAY_MODE_NONE) { 1986 proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD; 1987 } 1988 1989 /* 1990 * virtio pci bar layout used by default. 1991 * subclasses can re-arrange things if needed. 1992 * 1993 * region 0 -- virtio legacy io bar 1994 * region 1 -- msi-x bar 1995 * region 2 -- virtio modern io bar (off by default) 1996 * region 4+5 -- virtio modern memory (64bit) bar 1997 * 1998 */ 1999 proxy->legacy_io_bar_idx = 0; 2000 proxy->msix_bar_idx = 1; 2001 proxy->modern_io_bar_idx = 2; 2002 proxy->modern_mem_bar_idx = 4; 2003 2004 proxy->common.offset = 0x0; 2005 proxy->common.size = 0x1000; 2006 proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG; 2007 2008 proxy->isr.offset = 0x1000; 2009 proxy->isr.size = 0x1000; 2010 proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG; 2011 2012 proxy->device.offset = 0x2000; 2013 proxy->device.size = 0x1000; 2014 proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG; 2015 2016 proxy->notify.offset = 0x3000; 2017 proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX; 2018 proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG; 2019 2020 proxy->notify_pio.offset = 0x0; 2021 proxy->notify_pio.size = 0x4; 2022 proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG; 2023 2024 /* subclasses can enforce modern, so do this unconditionally */ 2025 memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci", 2026 /* PCI BAR regions must be powers of 2 */ 2027 pow2ceil(proxy->notify.offset + proxy->notify.size)); 2028 2029 if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) { 2030 proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; 2031 } 2032 2033 if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) { 2034 error_setg(errp, "device cannot work as neither modern nor legacy mode" 2035 " is enabled"); 2036 error_append_hint(errp, "Set either disable-modern or disable-legacy" 2037 " to off\n"); 2038 return; 2039 } 2040 2041 if (pcie_port && pci_is_express(pci_dev)) { 2042 int pos; 2043 uint16_t last_pcie_cap_offset = PCI_CONFIG_SPACE_SIZE; 2044 2045 pos = pcie_endpoint_cap_init(pci_dev, 0); 2046 assert(pos > 0); 2047 2048 pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0, 2049 PCI_PM_SIZEOF, errp); 2050 if (pos < 0) { 2051 return; 2052 } 2053 2054 pci_dev->exp.pm_cap = pos; 2055 2056 /* 2057 * Indicates that this function complies with revision 1.2 of the 2058 * PCI Power Management Interface Specification. 2059 */ 2060 pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3); 2061 2062 if (proxy->flags & VIRTIO_PCI_FLAG_AER) { 2063 pcie_aer_init(pci_dev, PCI_ERR_VER, last_pcie_cap_offset, 2064 PCI_ERR_SIZEOF, NULL); 2065 last_pcie_cap_offset += PCI_ERR_SIZEOF; 2066 } 2067 2068 if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) { 2069 /* Init error enabling flags */ 2070 pcie_cap_deverr_init(pci_dev); 2071 } 2072 2073 if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) { 2074 /* Init Link Control Register */ 2075 pcie_cap_lnkctl_init(pci_dev); 2076 } 2077 2078 if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) { 2079 /* Init Power Management Control Register */ 2080 pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL, 2081 PCI_PM_CTRL_STATE_MASK); 2082 } 2083 2084 if (proxy->flags & VIRTIO_PCI_FLAG_ATS) { 2085 pcie_ats_init(pci_dev, last_pcie_cap_offset, 2086 proxy->flags & VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED); 2087 last_pcie_cap_offset += PCI_EXT_CAP_ATS_SIZEOF; 2088 } 2089 2090 if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) { 2091 /* Set Function Level Reset capability bit */ 2092 pcie_cap_flr_init(pci_dev); 2093 } 2094 } else { 2095 /* 2096 * make future invocations of pci_is_express() return false 2097 * and pci_config_size() return PCI_CONFIG_SPACE_SIZE. 2098 */ 2099 pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS; 2100 } 2101 2102 virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy); 2103 if (k->realize) { 2104 k->realize(proxy, errp); 2105 } 2106 } 2107 2108 static void virtio_pci_exit(PCIDevice *pci_dev) 2109 { 2110 VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); 2111 bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) && 2112 !pci_bus_is_root(pci_get_bus(pci_dev)); 2113 2114 msix_uninit_exclusive_bar(pci_dev); 2115 if (proxy->flags & VIRTIO_PCI_FLAG_AER && pcie_port && 2116 pci_is_express(pci_dev)) { 2117 pcie_aer_exit(pci_dev); 2118 } 2119 } 2120 2121 static void virtio_pci_reset(DeviceState *qdev) 2122 { 2123 VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev); 2124 VirtioBusState *bus = VIRTIO_BUS(&proxy->bus); 2125 int i; 2126 2127 virtio_bus_reset(bus); 2128 msix_unuse_all_vectors(&proxy->pci_dev); 2129 2130 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 2131 proxy->vqs[i].enabled = 0; 2132 proxy->vqs[i].reset = 0; 2133 proxy->vqs[i].num = 0; 2134 proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0; 2135 proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0; 2136 proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0; 2137 } 2138 } 2139 2140 static void virtio_pci_bus_reset_hold(Object *obj) 2141 { 2142 PCIDevice *dev = PCI_DEVICE(obj); 2143 DeviceState *qdev = DEVICE(obj); 2144 2145 virtio_pci_reset(qdev); 2146 2147 if (pci_is_express(dev)) { 2148 pcie_cap_deverr_reset(dev); 2149 pcie_cap_lnkctl_reset(dev); 2150 2151 pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0); 2152 } 2153 } 2154 2155 static Property virtio_pci_properties[] = { 2156 DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags, 2157 VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false), 2158 DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags, 2159 VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true), 2160 DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags, 2161 VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false), 2162 DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags, 2163 VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false), 2164 DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags, 2165 VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false), 2166 DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy, 2167 ignore_backend_features, false), 2168 DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags, 2169 VIRTIO_PCI_FLAG_ATS_BIT, false), 2170 DEFINE_PROP_BIT("x-ats-page-aligned", VirtIOPCIProxy, flags, 2171 VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT, true), 2172 DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags, 2173 VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true), 2174 DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags, 2175 VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true), 2176 DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags, 2177 VIRTIO_PCI_FLAG_INIT_PM_BIT, true), 2178 DEFINE_PROP_BIT("x-pcie-flr-init", VirtIOPCIProxy, flags, 2179 VIRTIO_PCI_FLAG_INIT_FLR_BIT, true), 2180 DEFINE_PROP_BIT("aer", VirtIOPCIProxy, flags, 2181 VIRTIO_PCI_FLAG_AER_BIT, false), 2182 DEFINE_PROP_END_OF_LIST(), 2183 }; 2184 2185 static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp) 2186 { 2187 VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev); 2188 VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev); 2189 PCIDevice *pci_dev = &proxy->pci_dev; 2190 2191 if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) && 2192 virtio_pci_modern(proxy)) { 2193 pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; 2194 } 2195 2196 vpciklass->parent_dc_realize(qdev, errp); 2197 } 2198 2199 static void virtio_pci_class_init(ObjectClass *klass, void *data) 2200 { 2201 DeviceClass *dc = DEVICE_CLASS(klass); 2202 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); 2203 VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass); 2204 ResettableClass *rc = RESETTABLE_CLASS(klass); 2205 2206 device_class_set_props(dc, virtio_pci_properties); 2207 k->realize = virtio_pci_realize; 2208 k->exit = virtio_pci_exit; 2209 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; 2210 k->revision = VIRTIO_PCI_ABI_VERSION; 2211 k->class_id = PCI_CLASS_OTHERS; 2212 device_class_set_parent_realize(dc, virtio_pci_dc_realize, 2213 &vpciklass->parent_dc_realize); 2214 rc->phases.hold = virtio_pci_bus_reset_hold; 2215 } 2216 2217 static const TypeInfo virtio_pci_info = { 2218 .name = TYPE_VIRTIO_PCI, 2219 .parent = TYPE_PCI_DEVICE, 2220 .instance_size = sizeof(VirtIOPCIProxy), 2221 .class_init = virtio_pci_class_init, 2222 .class_size = sizeof(VirtioPCIClass), 2223 .abstract = true, 2224 }; 2225 2226 static Property virtio_pci_generic_properties[] = { 2227 DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy, 2228 ON_OFF_AUTO_AUTO), 2229 DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false), 2230 DEFINE_PROP_END_OF_LIST(), 2231 }; 2232 2233 static void virtio_pci_base_class_init(ObjectClass *klass, void *data) 2234 { 2235 const VirtioPCIDeviceTypeInfo *t = data; 2236 if (t->class_init) { 2237 t->class_init(klass, NULL); 2238 } 2239 } 2240 2241 static void virtio_pci_generic_class_init(ObjectClass *klass, void *data) 2242 { 2243 DeviceClass *dc = DEVICE_CLASS(klass); 2244 2245 device_class_set_props(dc, virtio_pci_generic_properties); 2246 } 2247 2248 static void virtio_pci_transitional_instance_init(Object *obj) 2249 { 2250 VirtIOPCIProxy *proxy = VIRTIO_PCI(obj); 2251 2252 proxy->disable_legacy = ON_OFF_AUTO_OFF; 2253 proxy->disable_modern = false; 2254 } 2255 2256 static void virtio_pci_non_transitional_instance_init(Object *obj) 2257 { 2258 VirtIOPCIProxy *proxy = VIRTIO_PCI(obj); 2259 2260 proxy->disable_legacy = ON_OFF_AUTO_ON; 2261 proxy->disable_modern = false; 2262 } 2263 2264 void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t) 2265 { 2266 char *base_name = NULL; 2267 TypeInfo base_type_info = { 2268 .name = t->base_name, 2269 .parent = t->parent ? t->parent : TYPE_VIRTIO_PCI, 2270 .instance_size = t->instance_size, 2271 .instance_init = t->instance_init, 2272 .class_size = t->class_size, 2273 .abstract = true, 2274 .interfaces = t->interfaces, 2275 }; 2276 TypeInfo generic_type_info = { 2277 .name = t->generic_name, 2278 .parent = base_type_info.name, 2279 .class_init = virtio_pci_generic_class_init, 2280 .interfaces = (InterfaceInfo[]) { 2281 { INTERFACE_PCIE_DEVICE }, 2282 { INTERFACE_CONVENTIONAL_PCI_DEVICE }, 2283 { } 2284 }, 2285 }; 2286 2287 if (!base_type_info.name) { 2288 /* No base type -> register a single generic device type */ 2289 /* use intermediate %s-base-type to add generic device props */ 2290 base_name = g_strdup_printf("%s-base-type", t->generic_name); 2291 base_type_info.name = base_name; 2292 base_type_info.class_init = virtio_pci_generic_class_init; 2293 2294 generic_type_info.parent = base_name; 2295 generic_type_info.class_init = virtio_pci_base_class_init; 2296 generic_type_info.class_data = (void *)t; 2297 2298 assert(!t->non_transitional_name); 2299 assert(!t->transitional_name); 2300 } else { 2301 base_type_info.class_init = virtio_pci_base_class_init; 2302 base_type_info.class_data = (void *)t; 2303 } 2304 2305 type_register(&base_type_info); 2306 if (generic_type_info.name) { 2307 type_register(&generic_type_info); 2308 } 2309 2310 if (t->non_transitional_name) { 2311 const TypeInfo non_transitional_type_info = { 2312 .name = t->non_transitional_name, 2313 .parent = base_type_info.name, 2314 .instance_init = virtio_pci_non_transitional_instance_init, 2315 .interfaces = (InterfaceInfo[]) { 2316 { INTERFACE_PCIE_DEVICE }, 2317 { INTERFACE_CONVENTIONAL_PCI_DEVICE }, 2318 { } 2319 }, 2320 }; 2321 type_register(&non_transitional_type_info); 2322 } 2323 2324 if (t->transitional_name) { 2325 const TypeInfo transitional_type_info = { 2326 .name = t->transitional_name, 2327 .parent = base_type_info.name, 2328 .instance_init = virtio_pci_transitional_instance_init, 2329 .interfaces = (InterfaceInfo[]) { 2330 /* 2331 * Transitional virtio devices work only as Conventional PCI 2332 * devices because they require PIO ports. 2333 */ 2334 { INTERFACE_CONVENTIONAL_PCI_DEVICE }, 2335 { } 2336 }, 2337 }; 2338 type_register(&transitional_type_info); 2339 } 2340 g_free(base_name); 2341 } 2342 2343 unsigned virtio_pci_optimal_num_queues(unsigned fixed_queues) 2344 { 2345 /* 2346 * 1:1 vq to vCPU mapping is ideal because the same vCPU that submitted 2347 * virtqueue buffers can handle their completion. When a different vCPU 2348 * handles completion it may need to IPI the vCPU that submitted the 2349 * request and this adds overhead. 2350 * 2351 * Virtqueues consume guest RAM and MSI-X vectors. This is wasteful in 2352 * guests with very many vCPUs and a device that is only used by a few 2353 * vCPUs. Unfortunately optimizing that case requires manual pinning inside 2354 * the guest, so those users might as well manually set the number of 2355 * queues. There is no upper limit that can be applied automatically and 2356 * doing so arbitrarily would result in a sudden performance drop once the 2357 * threshold number of vCPUs is exceeded. 2358 */ 2359 unsigned num_queues = current_machine->smp.cpus; 2360 2361 /* 2362 * The maximum number of MSI-X vectors is PCI_MSIX_FLAGS_QSIZE + 1, but the 2363 * config change interrupt and the fixed virtqueues must be taken into 2364 * account too. 2365 */ 2366 num_queues = MIN(num_queues, PCI_MSIX_FLAGS_QSIZE - fixed_queues); 2367 2368 /* 2369 * There is a limit to how many virtqueues a device can have. 2370 */ 2371 return MIN(num_queues, VIRTIO_QUEUE_MAX - fixed_queues); 2372 } 2373 2374 /* virtio-pci-bus */ 2375 2376 static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size, 2377 VirtIOPCIProxy *dev) 2378 { 2379 DeviceState *qdev = DEVICE(dev); 2380 char virtio_bus_name[] = "virtio-bus"; 2381 2382 qbus_init(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev, virtio_bus_name); 2383 } 2384 2385 static void virtio_pci_bus_class_init(ObjectClass *klass, void *data) 2386 { 2387 BusClass *bus_class = BUS_CLASS(klass); 2388 VirtioBusClass *k = VIRTIO_BUS_CLASS(klass); 2389 bus_class->max_dev = 1; 2390 k->notify = virtio_pci_notify; 2391 k->save_config = virtio_pci_save_config; 2392 k->load_config = virtio_pci_load_config; 2393 k->save_queue = virtio_pci_save_queue; 2394 k->load_queue = virtio_pci_load_queue; 2395 k->save_extra_state = virtio_pci_save_extra_state; 2396 k->load_extra_state = virtio_pci_load_extra_state; 2397 k->has_extra_state = virtio_pci_has_extra_state; 2398 k->query_guest_notifiers = virtio_pci_query_guest_notifiers; 2399 k->set_guest_notifiers = virtio_pci_set_guest_notifiers; 2400 k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr; 2401 k->vmstate_change = virtio_pci_vmstate_change; 2402 k->pre_plugged = virtio_pci_pre_plugged; 2403 k->device_plugged = virtio_pci_device_plugged; 2404 k->device_unplugged = virtio_pci_device_unplugged; 2405 k->query_nvectors = virtio_pci_query_nvectors; 2406 k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled; 2407 k->ioeventfd_assign = virtio_pci_ioeventfd_assign; 2408 k->get_dma_as = virtio_pci_get_dma_as; 2409 k->iommu_enabled = virtio_pci_iommu_enabled; 2410 k->queue_enabled = virtio_pci_queue_enabled; 2411 } 2412 2413 static const TypeInfo virtio_pci_bus_info = { 2414 .name = TYPE_VIRTIO_PCI_BUS, 2415 .parent = TYPE_VIRTIO_BUS, 2416 .instance_size = sizeof(VirtioPCIBusState), 2417 .class_size = sizeof(VirtioPCIBusClass), 2418 .class_init = virtio_pci_bus_class_init, 2419 }; 2420 2421 static void virtio_pci_register_types(void) 2422 { 2423 /* Base types: */ 2424 type_register_static(&virtio_pci_bus_info); 2425 type_register_static(&virtio_pci_info); 2426 } 2427 2428 type_init(virtio_pci_register_types) 2429 2430