/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include "standard-headers/linux/virtio_pci.h"
#include "hw/virtio/virtio.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
#include "sysemu/kvm.h"
#include "virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/visitor.h"

#define VIRTIO_PCI_REGION_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_present(dev))

#undef VIRTIO_PCI_CONFIG

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))

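/*
 * Illustrative sketch (not used by the code below): for a legacy BAR0
 * access past the common registers, the per-driver config offset is the
 * remainder after the region prefix; virtio_pci_config_read() and
 * virtio_pci_config_write() further down do this same subtraction inline.
 */
static inline uint32_t virtio_pci_legacy_config_offset(PCIDevice *dev,
                                                       uint32_t addr)
{
    assert(addr >= VIRTIO_PCI_CONFIG_SIZE(dev));
    return addr - VIRTIO_PCI_CONFIG_SIZE(dev);
}
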
static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev);
static void virtio_pci_reset(DeviceState *qdev);

/* virtio device */
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);

    if (msix_enabled(&proxy->pci_dev)) {
        msix_notify(&proxy->pci_dev, vector);
    } else {
        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
        pci_set_irq(&proxy->pci_dev, atomic_read(&vdev->isr) & 1);
    }
}

static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, vdev->config_vector);
    }
}

static const VMStateDescription vmstate_virtio_pci_modern_queue_state = {
    .name = "virtio_pci/modern_queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOPCIQueue),
        VMSTATE_UNUSED(1), /* enabled was stored as be16 */
        VMSTATE_BOOL(enabled, VirtIOPCIQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

static bool virtio_pci_modern_state_needed(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;

    return virtio_pci_modern(proxy);
}

static const VMStateDescription vmstate_virtio_pci_modern_state_sub = {
    .name = "virtio_pci/modern_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_pci_modern_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(dfselect, VirtIOPCIProxy),
        VMSTATE_UINT32(gfselect, VirtIOPCIProxy),
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_pci_modern_queue_state,
                             VirtIOPCIQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_pci = {
    .name = "virtio_pci",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_pci_modern_state_sub,
        NULL
    }
};

static bool virtio_pci_has_extra_state(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
}

static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
}

static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
}

static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, virtio_queue_vector(vdev, n));
    }
}

static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int ret;

    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vdev->config_vector);
    } else {
        vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
    }
    return 0;
}

static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vector;

    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }

    return 0;
}

static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
}

#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000

static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy)
{
    return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ?
        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4;
}

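/*
 * Illustrative sketch (not used by the code below): a queue's notify
 * address inside the notify region is its index scaled by the multiplier
 * above, i.e. 4 bytes per queue normally, or a full page per queue when
 * the page-per-vq compatibility flag is set.
 */
static inline hwaddr virtio_pci_example_notify_offset(VirtIOPCIProxy *proxy,
                                                      unsigned queue_no)
{
    return (hwaddr)virtio_pci_queue_mem_mult(proxy) * queue_no;
}
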
static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    bool legacy = virtio_pci_legacy(proxy);
    bool modern = virtio_pci_modern(proxy);
    bool fast_mmio = kvm_ioeventfd_any_length_enabled();
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    MemoryRegion *modern_mr = &proxy->notify.mr;
    MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
    MemoryRegion *legacy_mr = &proxy->bar;
    hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) *
                         virtio_get_queue_index(vq);
    hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;

    if (assign) {
        if (modern) {
            if (fast_mmio) {
                memory_region_add_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_add_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_add_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    } else {
        if (modern) {
            if (fast_mmio) {
                memory_region_del_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_del_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_del_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    }
    return 0;
}

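/*
 * Note on the eventfd sizes used above: when KVM supports any-length
 * ioeventfds (kvm_ioeventfd_any_length_enabled()), a zero-size match is
 * registered so that a write of any width to the notify address kicks
 * the queue (fast MMIO); otherwise the match is restricted to the 2-byte
 * queue-notify write that the virtio spec defines.
 */
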
static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}

static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly?  We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = virtio_bus_get_vdev_bad_features(&proxy->bus);
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_reset(DEVICE(proxy));
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        }
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        /* Linux before 2.6.34 drives the device without enabling
           the PCI device bus master bit. Enable it automatically
           for the guest. This is a PCI spec violation but so is
           initiating DMA with bus master bit clear. */
        if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for the guest to discover that an error
         * took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for the guest to discover that an error
         * took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        error_report("%s: unexpected address 0x%x value 0x%x",
                     __func__, addr, val);
        break;
    }
}

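/*
 * Illustrative sketch (not used by the code below): a legacy driver
 * passes ring addresses as page frame numbers, so the address recovered
 * in the VIRTIO_PCI_QUEUE_PFN case above round-trips like this; writing
 * a PFN of 0 is the legacy way of resetting the device.
 */
static inline hwaddr virtio_pci_example_pfn_to_addr(uint32_t pfn)
{
    return (hwaddr)pfn << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
}
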
static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = atomic_xchg(&vdev->isr, 0);
        pci_irq_deassert(&proxy->pci_dev);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}

static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    uint64_t val = 0;

    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}

static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        virtio_config_writew(vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        virtio_config_writel(vdev, addr, val);
        break;
    }
}

static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy,
                                                 hwaddr *off, int len)
{
    int i;
    VirtIOPCIRegion *reg;

    for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) {
        reg = &proxy->regs[i];
        if (*off >= reg->offset &&
            *off + len <= reg->offset + reg->size) {
            *off -= reg->offset;
            return &reg->mr;
        }
    }

    return NULL;
}

/* Below are generic functions to do memcpy from/to an address space,
 * without byteswaps, with input validation.
 *
 * As regular address_space_* APIs all do some kind of byteswap at least for
 * some host/target combinations, we are forced to explicitly convert to a
 * known-endianness integer value.
 * It doesn't really matter which endian format to go through, so the code
 * below selects the endian that causes the least amount of work on the given
 * host.
 *
 * Note: host pointer must be aligned.
 */
static
void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
                                const uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = pci_get_byte(buf);
        break;
    case 2:
        val = cpu_to_le16(pci_get_word(buf));
        break;
    case 4:
        val = cpu_to_le32(pci_get_long(buf));
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        return;
    }
    memory_region_dispatch_write(mr, addr, val, len, MEMTXATTRS_UNSPECIFIED);
}

static void
virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
                          uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    memory_region_dispatch_read(mr, addr, &val, len, MEMTXATTRS_UNSPECIFIED);
    switch (len) {
    case 1:
        pci_set_byte(buf, val);
        break;
    case 2:
        pci_set_word(buf, le16_to_cpu(val));
        break;
    case 4:
        pci_set_long(buf, le32_to_cpu(val));
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}

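/*
 * Illustrative usage sketch (not used by the code below), assuming a
 * 2-byte access at offset 0 of some modern region: the buffer must be at
 * least as strictly aligned as the access width, and both address and
 * length are validated by the helpers because they are guest-controlled.
 */
static inline void virtio_address_space_rw_example(VirtIOPCIProxy *proxy)
{
    uint16_t buf = 0;

    virtio_address_space_write(proxy, 0x0, (const uint8_t *)&buf,
                               sizeof(buf));
    virtio_address_space_read(proxy, 0x0, (uint8_t *)&buf, sizeof(buf));
}
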
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    struct virtio_pci_cfg_cap *cfg;

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
        virtio_pci_stop_ioeventfd(proxy);
        virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
    }

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap +
                       offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_write(proxy, off, cfg->pci_cfg_data, len);
        }
    }
}

static uint32_t virtio_read_config(PCIDevice *pci_dev,
                                   uint32_t address, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    struct virtio_pci_cfg_cap *cfg;

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap +
                       offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_read(proxy, off, cfg->pci_cfg_data, len);
        }
    }

    return pci_default_read_config(pci_dev, address, len);
}

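/*
 * Taken together, the two hooks above implement the VIRTIO_PCI_CAP_PCI_CFG
 * window: the guest programs cap.offset and cap.length in the capability
 * and then reads or writes cap.pci_cfg_data, and the proxy forwards the
 * access to whichever modern region contains that offset (see
 * virtio_address_space_lookup()).  Only 1-, 2- and 4-byte lengths are
 * honoured, since both fields are guest-controlled.
 */
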
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        ret = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev);
        if (ret < 0) {
            return ret;
        }
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}

static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];

    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}

static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                    unsigned int queue_no,
                                    unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);

    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
}

static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                         unsigned int queue_no,
                                         unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}

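/*
 * The ->users counter above means that queues sharing one MSI-X vector
 * share a single KVM MSI route: the first user allocates the virq, later
 * users only take a reference, and the route is released once the last
 * reference is dropped.
 */
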
static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    unsigned int vector;
    int ret, queue_no;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
        if (ret < 0) {
            goto undo;
        }
        /* If guest supports masking, set up irqfd now.
         * Otherwise, delay until unmasked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
            if (ret < 0) {
                kvm_virtio_pci_vq_vector_release(proxy, vector);
                goto undo;
            }
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
    return ret;
}

static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    unsigned int vector;
    int queue_no;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        /* If guest supports masking, clean up irqfd now.
         * Otherwise, it was cleaned when masked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
}

static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       MSIMessage msg)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
            kvm_irqchip_commit_routes(kvm_state);
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
    }
    return ret;
}

static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                      unsigned int queue_no,
                                      unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
    }
}

static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }

    return 0;

undo:
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}

static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int index;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
        }
        vq = virtio_vector_next_queue(vq);
    }
}

static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}

static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (!msix_enabled(&proxy->pci_dev) &&
        vdev->use_guest_notifier_mask &&
        vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return msix_enabled(&proxy->pci_dev);
}

static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /* When deassigning, pass a consistent nvqs value
     * to avoid leaking notifiers.
     */
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_release(proxy, nvqs);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd || k->guest_notifier_mask) && assign) {
        if (with_irqfd) {
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_use(proxy, nvqs);
            if (r < 0) {
                goto assign_error;
            }
        }
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_release(proxy, nvqs);
    }

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}

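/*
 * Rough summary of the delivery paths configured above: without MSI-X,
 * interrupts fall back to the legacy INTx pin driven by the ISR byte;
 * with MSI-X, msix_notify() injects the vector from user space; and when
 * kvm_msi_via_irqfd_enabled(), the guest notifier eventfd is wired
 * directly to a KVM MSI route so the kernel injects the interrupt
 * without a return to QEMU.
 */
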
static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n,
                                           MemoryRegion *mr, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    int offset;

    if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) ||
        virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) {
        return -1;
    }

    if (assign) {
        offset = virtio_pci_queue_mem_mult(proxy) * n;
        memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1);
    } else {
        memory_region_del_subregion(&proxy->notify.mr, mr);
    }

    return 0;
}

static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (running) {
        /* Old QEMU versions did not set bus master enable on status write.
         * Detect DRIVER set and enable it.
         */
        if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
            (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}

/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
 */

static int virtio_pci_query_nvectors(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);

    return proxy->nvectors;
}

static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    PCIDevice *dev = &proxy->pci_dev;

    return pci_get_address_space(dev);
}

static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
                                  struct virtio_pci_cap *cap)
{
    PCIDevice *dev = &proxy->pci_dev;
    int offset;

    offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0,
                                cap->cap_len, &error_abort);

    assert(cap->cap_len >= sizeof *cap);
    memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
           cap->cap_len - PCI_CAP_FLAGS);

    return offset;
}

static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        if (proxy->dfselect <= 1) {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            val = (vdev->host_features & ~vdc->legacy_features) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    default:
        val = 0;
    }

    return val;
}

static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for the guest to discover that an error
         * took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for the guest to discover that an error
         * took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        virtio_queue_set_rings(vdev, vdev->queue_sel,
                       ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].desc[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].avail[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].used[0]);
        proxy->vqs[vdev->queue_sel].enabled = 1;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    default:
        break;
    }
}

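/*
 * Illustrative sketch (not used by the code below): the guest negotiates
 * 64-bit feature words through two 32-bit windows selected by gfselect,
 * so the full word is reassembled exactly the way
 * virtio_pci_common_write() does above:
 */
static inline uint64_t virtio_pci_example_features(VirtIOPCIProxy *proxy)
{
    return ((uint64_t)proxy->guest_features[1] << 32) |
           proxy->guest_features[0];
}
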
static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    return 0;
}

static void virtio_pci_notify_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    VirtIOPCIProxy *proxy = VIRTIO_PCI(DEVICE(vdev)->parent_bus->parent);
    unsigned queue = addr / virtio_pci_queue_mem_mult(proxy);

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    unsigned queue = val;

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val = atomic_xchg(&vdev->isr, 0);

    pci_irq_deassert(&proxy->pci_dev);

    return val;
}

static void virtio_pci_isr_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}

static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIODevice *vdev = opaque;
    uint64_t val = 0;

    switch (size) {
    case 1:
        val = virtio_config_modern_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_modern_readw(vdev, addr);
        break;
    case 4:
        val = virtio_config_modern_readl(vdev, addr);
        break;
    }
    return val;
}

static void virtio_pci_device_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;

    switch (size) {
    case 1:
        virtio_config_modern_writeb(vdev, addr, val);
        break;
    case 2:
        virtio_config_modern_writew(vdev, addr, val);
        break;
    case 4:
        virtio_config_modern_writel(vdev, addr, val);
        break;
    }
}

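/*
 * Note the asymmetry between the two notify handlers above: for
 * memory-mapped notify the queue index is derived from the write
 * *address* (one slot per queue), while for the PIO variant it is the
 * written *value*, which is what lets the PIO window stay 4 bytes wide.
 */
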
static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
{
    static const MemoryRegionOps common_ops = {
        .read = virtio_pci_common_read,
        .write = virtio_pci_common_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps isr_ops = {
        .read = virtio_pci_isr_read,
        .write = virtio_pci_isr_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps device_ops = {
        .read = virtio_pci_device_read,
        .write = virtio_pci_device_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_pio_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write_pio,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };

    memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                          &common_ops,
                          proxy,
                          "virtio-pci-common",
                          proxy->common.size);

    memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
                          &isr_ops,
                          proxy,
                          "virtio-pci-isr",
                          proxy->isr.size);

    memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
                          &device_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-device",
                          proxy->device.size);

    memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
                          &notify_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify",
                          proxy->notify.size);

    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
                          &notify_pio_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify-pio",
                          proxy->notify_pio.size);
}

static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length = cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);
}

static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
                                             VirtIOPCIRegion *region,
                                             struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->modern_bar, proxy->modern_mem_bar_idx);
}

static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
                                            VirtIOPCIRegion *region,
                                            struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->io_bar, proxy->modern_io_bar_idx);
}

static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
                                               VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->modern_bar,
                                &region->mr);
}

static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
                                              VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->io_bar,
                                &region->mr);
}

static void virtio_pci_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_pci_modern(proxy)) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }

    virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
}

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    bool legacy = virtio_pci_legacy(proxy);
    bool modern;
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    /*
     * Virtio capabilities present without
     * VIRTIO_F_VERSION_1 confuse guests
     */
    if (!proxy->ignore_backend_features &&
        !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        virtio_pci_disable_modern(proxy);

        if (!legacy) {
            error_setg(errp, "Device doesn't support modern mode, and legacy"
                             " mode is disabled");
            error_append_hint(errp, "Set disable-legacy to off\n");

            return;
        }
    }

    modern = virtio_pci_modern(proxy);

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }

    if (legacy) {
        if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
            error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM is not supported by"
                       " legacy or transitional devices");
            return;
        }
        /*
         * Legacy and transitional devices use specific subsystem IDs.
         * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID)
         * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default.
         */
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    } else {
        /* pure virtio-1.0 */
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        pci_set_word(config + PCI_DEVICE_ID,
                     0x1040 + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }
    config[PCI_INTERRUPT_PIN] = 1;

    if (modern) {
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(virtio_pci_queue_mem_mult(proxy)),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        struct virtio_pci_notify_cap notify_pio = {
            .cap.cap_len = sizeof notify_pio,
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_pci_modern_regions_init(proxy);

        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar_idx, NULL);
        if (err) {
            /* Notice when a system that supports MSIx can't initialize it */
            if (err != -ENOTSUP) {
                warn_report("unable to init msix vectors to %" PRIu32,
                            proxy->nvectors);
            }
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

    if (legacy) {
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
            + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }
}

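/*
 * Illustrative sketch (not used by the code below): the modern ("pure
 * virtio-1.0") branch above derives the PCI device ID from the virtio
 * device ID, e.g. 0x1041 for virtio-net (virtio device ID 1).
 */
static inline uint16_t virtio_pci_example_modern_device_id(uint16_t vdev_id)
{
    return 0x1040 + vdev_id;
}
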
static void virtio_pci_device_unplugged(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    bool modern = virtio_pci_modern(proxy);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;

    virtio_pci_stop_ioeventfd(proxy);

    if (modern) {
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
        if (modern_pio) {
            virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
        }
    }
}

static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    if (kvm_enabled() && !kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     *   region 0   --  virtio legacy io bar
     *   region 1   --  msi-x bar
     *   region 2   --  virtio modern io bar (off by default)
     *   region 4+5 --  virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar_idx  = 0;
    proxy->msix_bar_idx       = 1;
    proxy->modern_io_bar_idx  = 2;
    proxy->modern_mem_bar_idx = 4;

    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    proxy->notify.offset = 0x3000;
    proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       /* PCI BAR regions must be powers of 2 */
                       pow2ceil(proxy->notify.offset + proxy->notify.size));

    if ((proxy->disable_legacy == ON_OFF_AUTO_ON) ||
        ((proxy->disable_legacy == ON_OFF_AUTO_AUTO) && pcie_port)) {
        if (proxy->disable_modern) {
            error_setg(errp, "device cannot work as neither modern nor "
                       "legacy mode is enabled");
            error_append_hint(errp, "Set either disable-modern or "
                              "disable-legacy to off\n");
            return;
        }
        proxy->mode = VIRTIO_PCI_MODE_MODERN;
    } else {
        if (proxy->disable_modern) {
            proxy->mode = VIRTIO_PCI_MODE_LEGACY;
        } else {
            proxy->mode = VIRTIO_PCI_MODE_TRANSITIONAL;
        }
    }

    if (pcie_port && pci_is_express(pci_dev)) {
        int pos;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0,
                                 PCI_PM_SIZEOF, errp);
        if (pos < 0) {
            return;
        }

        pci_dev->exp.pm_cap = pos;

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
            /* Init error enabling flags */
            pcie_cap_deverr_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) {
            /* Init Link Control Register */
            pcie_cap_lnkctl_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
            /* Init Power Management Control Register */
            pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL,
                         PCI_PM_CTRL_STATE_MASK);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
            pcie_ats_init(pci_dev, 256);
        }

    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}

static void virtio_pci_exit(PCIDevice *pci_dev)
{
    msix_uninit_exclusive_bar(pci_dev);
}

static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    PCIDevice *dev = PCI_DEVICE(qdev);
    int i;

    virtio_pci_stop_ioeventfd(proxy);
    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
        proxy->vqs[i].num = 0;
        proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
        proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
        proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
    }

    if (pci_is_express(dev)) {
        pcie_cap_deverr_reset(dev);
        pcie_cap_lnkctl_reset(dev);

        pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0);
    }
}

static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy,
                    flags, VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
    DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
                     ignore_backend_features, false),
    DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_BIT, false),
    DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true),
    DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true),
    DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_PM_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
{
    VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    PCIDevice *pci_dev = &proxy->pci_dev;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
        virtio_pci_modern(proxy)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    vpciklass->parent_dc_realize(qdev, errp);
}

static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    dc->props = virtio_pci_properties;
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    device_class_set_parent_realize(dc, virtio_pci_dc_realize,
                                    &vpciklass->parent_dc_realize);
    dc->reset = virtio_pci_reset;
}

static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
    .abstract      = true,
};

static Property virtio_pci_generic_properties[] = {
    DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_base_class_init(ObjectClass *klass, void *data)
{
    const VirtioPCIDeviceTypeInfo *t = data;

    if (t->class_init) {
        t->class_init(klass, NULL);
    }
}

static void virtio_pci_generic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->props = virtio_pci_generic_properties;
}

/* Used when the generic type and the base type are the same */
static void virtio_pci_generic_base_class_init(ObjectClass *klass, void *data)
{
    virtio_pci_base_class_init(klass, data);
    virtio_pci_generic_class_init(klass, NULL);
}

static void virtio_pci_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_OFF;
    proxy->disable_modern = false;
}

static void virtio_pci_non_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_ON;
    proxy->disable_modern = false;
}

void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
{
    TypeInfo base_type_info = {
        .name          = t->base_name,
        .parent        = t->parent ? t->parent : TYPE_VIRTIO_PCI,
        .instance_size = t->instance_size,
        .instance_init = t->instance_init,
        .class_size    = t->class_size,
        .class_init    = virtio_pci_base_class_init,
        .class_data    = (void *)t,
        .abstract      = true,
    };
    TypeInfo generic_type_info = {
        .name = t->generic_name,
        .parent = base_type_info.name,
        .class_init = virtio_pci_generic_class_init,
        .interfaces = (InterfaceInfo[]) {
            { INTERFACE_PCIE_DEVICE },
            { INTERFACE_CONVENTIONAL_PCI_DEVICE },
            { }
        },
    };

    if (!base_type_info.name) {
        /* No base type -> register a single generic device type */
        base_type_info.name = t->generic_name;
        base_type_info.class_init = virtio_pci_generic_base_class_init;
        base_type_info.interfaces = generic_type_info.interfaces;
        base_type_info.abstract = false;
        generic_type_info.name = NULL;
        assert(!t->non_transitional_name);
        assert(!t->transitional_name);
    }

    type_register(&base_type_info);
    if (generic_type_info.name) {
        type_register(&generic_type_info);
    }

    if (t->non_transitional_name) {
        const TypeInfo non_transitional_type_info = {
            .name          = t->non_transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_non_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                { INTERFACE_PCIE_DEVICE },
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&non_transitional_type_info);
    }

    if (t->transitional_name) {
        const TypeInfo transitional_type_info = {
            .name          = t->transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                /*
                 * Transitional virtio devices work only as Conventional PCI
                 * devices because they require PIO ports.
                 */
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&transitional_type_info);
    }
}

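/*
 * Illustrative usage sketch (not part of this file; the names and sizes
 * are hypothetical): a virtio device front-end typically registers its
 * PCI flavours like so, getting generic, transitional and
 * non-transitional QOM types from one description:
 *
 *     static const VirtioPCIDeviceTypeInfo virtio_foo_pci_info = {
 *         .base_name             = "virtio-foo-pci-base",
 *         .generic_name          = "virtio-foo-pci",
 *         .transitional_name     = "virtio-foo-pci-transitional",
 *         .non_transitional_name = "virtio-foo-pci-non-transitional",
 *         .instance_size = sizeof(VirtIOFooPCI),
 *         .instance_init = virtio_foo_pci_instance_init,
 *         .class_init    = virtio_foo_pci_class_init,
 *     };
 *     virtio_pci_types_register(&virtio_foo_pci_info);
 */
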
/* virtio-pci-bus */

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev,
                        virtio_bus_name);
}

static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->pre_plugged = virtio_pci_pre_plugged;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
    k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
    k->get_dma_as = virtio_pci_get_dma_as;
}

static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_init    = virtio_pci_bus_class_init,
};

static void virtio_pci_register_types(void)
{
    /* Base types: */
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
}

type_init(virtio_pci_register_types)