1 /* 2 * Virtio PCI Bindings 3 * 4 * Copyright IBM, Corp. 2007 5 * Copyright (c) 2009 CodeSourcery 6 * 7 * Authors: 8 * Anthony Liguori <aliguori@us.ibm.com> 9 * Paul Brook <paul@codesourcery.com> 10 * 11 * This work is licensed under the terms of the GNU GPL, version 2. See 12 * the COPYING file in the top-level directory. 13 * 14 * Contributions after 2012-01-13 are licensed under the terms of the 15 * GNU GPL, version 2 or (at your option) any later version. 16 */ 17 18 #include "qemu/osdep.h" 19 20 #include "standard-headers/linux/virtio_pci.h" 21 #include "hw/virtio/virtio.h" 22 #include "migration/qemu-file-types.h" 23 #include "hw/pci/pci.h" 24 #include "hw/pci/pci_bus.h" 25 #include "hw/qdev-properties.h" 26 #include "qapi/error.h" 27 #include "qemu/error-report.h" 28 #include "qemu/module.h" 29 #include "hw/pci/msi.h" 30 #include "hw/pci/msix.h" 31 #include "hw/loader.h" 32 #include "sysemu/kvm.h" 33 #include "virtio-pci.h" 34 #include "qemu/range.h" 35 #include "hw/virtio/virtio-bus.h" 36 #include "qapi/visitor.h" 37 38 #define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev)) 39 40 #undef VIRTIO_PCI_CONFIG 41 42 /* The remaining space is defined by each driver as the per-driver 43 * configuration space */ 44 #define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev)) 45 46 static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size, 47 VirtIOPCIProxy *dev); 48 static void virtio_pci_reset(DeviceState *qdev); 49 50 /* virtio device */ 51 /* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */ 52 static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d) 53 { 54 return container_of(d, VirtIOPCIProxy, pci_dev.qdev); 55 } 56 57 /* DeviceState to VirtIOPCIProxy. Note: used on datapath, 58 * be careful and test performance if you change this. 59 */ 60 static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d) 61 { 62 return container_of(d, VirtIOPCIProxy, pci_dev.qdev); 63 } 64 65 static void virtio_pci_notify(DeviceState *d, uint16_t vector) 66 { 67 VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d); 68 69 if (msix_enabled(&proxy->pci_dev)) 70 msix_notify(&proxy->pci_dev, vector); 71 else { 72 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 73 pci_set_irq(&proxy->pci_dev, atomic_read(&vdev->isr) & 1); 74 } 75 } 76 77 static void virtio_pci_save_config(DeviceState *d, QEMUFile *f) 78 { 79 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 80 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 81 82 pci_device_save(&proxy->pci_dev, f); 83 msix_save(&proxy->pci_dev, f); 84 if (msix_present(&proxy->pci_dev)) 85 qemu_put_be16(f, vdev->config_vector); 86 } 87 88 static const VMStateDescription vmstate_virtio_pci_modern_queue_state = { 89 .name = "virtio_pci/modern_queue_state", 90 .version_id = 1, 91 .minimum_version_id = 1, 92 .fields = (VMStateField[]) { 93 VMSTATE_UINT16(num, VirtIOPCIQueue), 94 VMSTATE_UNUSED(1), /* enabled was stored as be16 */ 95 VMSTATE_BOOL(enabled, VirtIOPCIQueue), 96 VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2), 97 VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2), 98 VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2), 99 VMSTATE_END_OF_LIST() 100 } 101 }; 102 103 static bool virtio_pci_modern_state_needed(void *opaque) 104 { 105 VirtIOPCIProxy *proxy = opaque; 106 107 return virtio_pci_modern(proxy); 108 } 109 110 static const VMStateDescription vmstate_virtio_pci_modern_state_sub = { 111 .name = "virtio_pci/modern_state", 112 .version_id = 1, 113 .minimum_version_id = 1, 114 .needed = &virtio_pci_modern_state_needed, 115 .fields = (VMStateField[]) { 116 VMSTATE_UINT32(dfselect, VirtIOPCIProxy), 117 VMSTATE_UINT32(gfselect, VirtIOPCIProxy), 118 VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2), 119 VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0, 120 vmstate_virtio_pci_modern_queue_state, 121 VirtIOPCIQueue), 122 VMSTATE_END_OF_LIST() 123 } 124 }; 125 126 static const VMStateDescription vmstate_virtio_pci = { 127 .name = "virtio_pci", 128 .version_id = 1, 129 .minimum_version_id = 1, 130 .minimum_version_id_old = 1, 131 .fields = (VMStateField[]) { 132 VMSTATE_END_OF_LIST() 133 }, 134 .subsections = (const VMStateDescription*[]) { 135 &vmstate_virtio_pci_modern_state_sub, 136 NULL 137 } 138 }; 139 140 static bool virtio_pci_has_extra_state(DeviceState *d) 141 { 142 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 143 144 return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA; 145 } 146 147 static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f) 148 { 149 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 150 151 vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL); 152 } 153 154 static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f) 155 { 156 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 157 158 return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1); 159 } 160 161 static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f) 162 { 163 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 164 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 165 166 if (msix_present(&proxy->pci_dev)) 167 qemu_put_be16(f, virtio_queue_vector(vdev, n)); 168 } 169 170 static int virtio_pci_load_config(DeviceState *d, QEMUFile *f) 171 { 172 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 173 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 174 175 int ret; 176 ret = pci_device_load(&proxy->pci_dev, f); 177 if (ret) { 178 return ret; 179 } 180 msix_unuse_all_vectors(&proxy->pci_dev); 181 msix_load(&proxy->pci_dev, f); 182 if (msix_present(&proxy->pci_dev)) { 183 qemu_get_be16s(f, &vdev->config_vector); 184 } else { 185 vdev->config_vector = VIRTIO_NO_VECTOR; 186 } 187 if (vdev->config_vector != VIRTIO_NO_VECTOR) { 188 return msix_vector_use(&proxy->pci_dev, vdev->config_vector); 189 } 190 return 0; 191 } 192 193 static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f) 194 { 195 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 196 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 197 198 uint16_t vector; 199 if (msix_present(&proxy->pci_dev)) { 200 qemu_get_be16s(f, &vector); 201 } else { 202 vector = VIRTIO_NO_VECTOR; 203 } 204 virtio_queue_set_vector(vdev, n, vector); 205 if (vector != VIRTIO_NO_VECTOR) { 206 return msix_vector_use(&proxy->pci_dev, vector); 207 } 208 209 return 0; 210 } 211 212 static bool virtio_pci_ioeventfd_enabled(DeviceState *d) 213 { 214 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 215 216 return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0; 217 } 218 219 #define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000 220 221 static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy) 222 { 223 return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ? 224 QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4; 225 } 226 227 static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier, 228 int n, bool assign) 229 { 230 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 231 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 232 VirtQueue *vq = virtio_get_queue(vdev, n); 233 bool legacy = virtio_pci_legacy(proxy); 234 bool modern = virtio_pci_modern(proxy); 235 bool fast_mmio = kvm_ioeventfd_any_length_enabled(); 236 bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; 237 MemoryRegion *modern_mr = &proxy->notify.mr; 238 MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr; 239 MemoryRegion *legacy_mr = &proxy->bar; 240 hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) * 241 virtio_get_queue_index(vq); 242 hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY; 243 244 if (assign) { 245 if (modern) { 246 if (fast_mmio) { 247 memory_region_add_eventfd(modern_mr, modern_addr, 0, 248 false, n, notifier); 249 } else { 250 memory_region_add_eventfd(modern_mr, modern_addr, 2, 251 false, n, notifier); 252 } 253 if (modern_pio) { 254 memory_region_add_eventfd(modern_notify_mr, 0, 2, 255 true, n, notifier); 256 } 257 } 258 if (legacy) { 259 memory_region_add_eventfd(legacy_mr, legacy_addr, 2, 260 true, n, notifier); 261 } 262 } else { 263 if (modern) { 264 if (fast_mmio) { 265 memory_region_del_eventfd(modern_mr, modern_addr, 0, 266 false, n, notifier); 267 } else { 268 memory_region_del_eventfd(modern_mr, modern_addr, 2, 269 false, n, notifier); 270 } 271 if (modern_pio) { 272 memory_region_del_eventfd(modern_notify_mr, 0, 2, 273 true, n, notifier); 274 } 275 } 276 if (legacy) { 277 memory_region_del_eventfd(legacy_mr, legacy_addr, 2, 278 true, n, notifier); 279 } 280 } 281 return 0; 282 } 283 284 static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy) 285 { 286 virtio_bus_start_ioeventfd(&proxy->bus); 287 } 288 289 static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy) 290 { 291 virtio_bus_stop_ioeventfd(&proxy->bus); 292 } 293 294 static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val) 295 { 296 VirtIOPCIProxy *proxy = opaque; 297 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 298 hwaddr pa; 299 300 switch (addr) { 301 case VIRTIO_PCI_GUEST_FEATURES: 302 /* Guest does not negotiate properly? We have to assume nothing. */ 303 if (val & (1 << VIRTIO_F_BAD_FEATURE)) { 304 val = virtio_bus_get_vdev_bad_features(&proxy->bus); 305 } 306 virtio_set_features(vdev, val); 307 break; 308 case VIRTIO_PCI_QUEUE_PFN: 309 pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT; 310 if (pa == 0) { 311 virtio_pci_reset(DEVICE(proxy)); 312 } 313 else 314 virtio_queue_set_addr(vdev, vdev->queue_sel, pa); 315 break; 316 case VIRTIO_PCI_QUEUE_SEL: 317 if (val < VIRTIO_QUEUE_MAX) 318 vdev->queue_sel = val; 319 break; 320 case VIRTIO_PCI_QUEUE_NOTIFY: 321 if (val < VIRTIO_QUEUE_MAX) { 322 virtio_queue_notify(vdev, val); 323 } 324 break; 325 case VIRTIO_PCI_STATUS: 326 if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) { 327 virtio_pci_stop_ioeventfd(proxy); 328 } 329 330 virtio_set_status(vdev, val & 0xFF); 331 332 if (val & VIRTIO_CONFIG_S_DRIVER_OK) { 333 virtio_pci_start_ioeventfd(proxy); 334 } 335 336 if (vdev->status == 0) { 337 virtio_pci_reset(DEVICE(proxy)); 338 } 339 340 /* Linux before 2.6.34 drives the device without enabling 341 the PCI device bus master bit. Enable it automatically 342 for the guest. This is a PCI spec violation but so is 343 initiating DMA with bus master bit clear. */ 344 if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) { 345 pci_default_write_config(&proxy->pci_dev, PCI_COMMAND, 346 proxy->pci_dev.config[PCI_COMMAND] | 347 PCI_COMMAND_MASTER, 1); 348 } 349 break; 350 case VIRTIO_MSI_CONFIG_VECTOR: 351 msix_vector_unuse(&proxy->pci_dev, vdev->config_vector); 352 /* Make it possible for guest to discover an error took place. */ 353 if (msix_vector_use(&proxy->pci_dev, val) < 0) 354 val = VIRTIO_NO_VECTOR; 355 vdev->config_vector = val; 356 break; 357 case VIRTIO_MSI_QUEUE_VECTOR: 358 msix_vector_unuse(&proxy->pci_dev, 359 virtio_queue_vector(vdev, vdev->queue_sel)); 360 /* Make it possible for guest to discover an error took place. */ 361 if (msix_vector_use(&proxy->pci_dev, val) < 0) 362 val = VIRTIO_NO_VECTOR; 363 virtio_queue_set_vector(vdev, vdev->queue_sel, val); 364 break; 365 default: 366 error_report("%s: unexpected address 0x%x value 0x%x", 367 __func__, addr, val); 368 break; 369 } 370 } 371 372 static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr) 373 { 374 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 375 uint32_t ret = 0xFFFFFFFF; 376 377 switch (addr) { 378 case VIRTIO_PCI_HOST_FEATURES: 379 ret = vdev->host_features; 380 break; 381 case VIRTIO_PCI_GUEST_FEATURES: 382 ret = vdev->guest_features; 383 break; 384 case VIRTIO_PCI_QUEUE_PFN: 385 ret = virtio_queue_get_addr(vdev, vdev->queue_sel) 386 >> VIRTIO_PCI_QUEUE_ADDR_SHIFT; 387 break; 388 case VIRTIO_PCI_QUEUE_NUM: 389 ret = virtio_queue_get_num(vdev, vdev->queue_sel); 390 break; 391 case VIRTIO_PCI_QUEUE_SEL: 392 ret = vdev->queue_sel; 393 break; 394 case VIRTIO_PCI_STATUS: 395 ret = vdev->status; 396 break; 397 case VIRTIO_PCI_ISR: 398 /* reading from the ISR also clears it. */ 399 ret = atomic_xchg(&vdev->isr, 0); 400 pci_irq_deassert(&proxy->pci_dev); 401 break; 402 case VIRTIO_MSI_CONFIG_VECTOR: 403 ret = vdev->config_vector; 404 break; 405 case VIRTIO_MSI_QUEUE_VECTOR: 406 ret = virtio_queue_vector(vdev, vdev->queue_sel); 407 break; 408 default: 409 break; 410 } 411 412 return ret; 413 } 414 415 static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr, 416 unsigned size) 417 { 418 VirtIOPCIProxy *proxy = opaque; 419 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 420 uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev); 421 uint64_t val = 0; 422 if (addr < config) { 423 return virtio_ioport_read(proxy, addr); 424 } 425 addr -= config; 426 427 switch (size) { 428 case 1: 429 val = virtio_config_readb(vdev, addr); 430 break; 431 case 2: 432 val = virtio_config_readw(vdev, addr); 433 if (virtio_is_big_endian(vdev)) { 434 val = bswap16(val); 435 } 436 break; 437 case 4: 438 val = virtio_config_readl(vdev, addr); 439 if (virtio_is_big_endian(vdev)) { 440 val = bswap32(val); 441 } 442 break; 443 } 444 return val; 445 } 446 447 static void virtio_pci_config_write(void *opaque, hwaddr addr, 448 uint64_t val, unsigned size) 449 { 450 VirtIOPCIProxy *proxy = opaque; 451 uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev); 452 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 453 if (addr < config) { 454 virtio_ioport_write(proxy, addr, val); 455 return; 456 } 457 addr -= config; 458 /* 459 * Virtio-PCI is odd. Ioports are LE but config space is target native 460 * endian. 461 */ 462 switch (size) { 463 case 1: 464 virtio_config_writeb(vdev, addr, val); 465 break; 466 case 2: 467 if (virtio_is_big_endian(vdev)) { 468 val = bswap16(val); 469 } 470 virtio_config_writew(vdev, addr, val); 471 break; 472 case 4: 473 if (virtio_is_big_endian(vdev)) { 474 val = bswap32(val); 475 } 476 virtio_config_writel(vdev, addr, val); 477 break; 478 } 479 } 480 481 static const MemoryRegionOps virtio_pci_config_ops = { 482 .read = virtio_pci_config_read, 483 .write = virtio_pci_config_write, 484 .impl = { 485 .min_access_size = 1, 486 .max_access_size = 4, 487 }, 488 .endianness = DEVICE_LITTLE_ENDIAN, 489 }; 490 491 static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy, 492 hwaddr *off, int len) 493 { 494 int i; 495 VirtIOPCIRegion *reg; 496 497 for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) { 498 reg = &proxy->regs[i]; 499 if (*off >= reg->offset && 500 *off + len <= reg->offset + reg->size) { 501 *off -= reg->offset; 502 return ®->mr; 503 } 504 } 505 506 return NULL; 507 } 508 509 /* Below are generic functions to do memcpy from/to an address space, 510 * without byteswaps, with input validation. 511 * 512 * As regular address_space_* APIs all do some kind of byteswap at least for 513 * some host/target combinations, we are forced to explicitly convert to a 514 * known-endianness integer value. 515 * It doesn't really matter which endian format to go through, so the code 516 * below selects the endian that causes the least amount of work on the given 517 * host. 518 * 519 * Note: host pointer must be aligned. 520 */ 521 static 522 void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr, 523 const uint8_t *buf, int len) 524 { 525 uint64_t val; 526 MemoryRegion *mr; 527 528 /* address_space_* APIs assume an aligned address. 529 * As address is under guest control, handle illegal values. 530 */ 531 addr &= ~(len - 1); 532 533 mr = virtio_address_space_lookup(proxy, &addr, len); 534 if (!mr) { 535 return; 536 } 537 538 /* Make sure caller aligned buf properly */ 539 assert(!(((uintptr_t)buf) & (len - 1))); 540 541 switch (len) { 542 case 1: 543 val = pci_get_byte(buf); 544 break; 545 case 2: 546 val = cpu_to_le16(pci_get_word(buf)); 547 break; 548 case 4: 549 val = cpu_to_le32(pci_get_long(buf)); 550 break; 551 default: 552 /* As length is under guest control, handle illegal values. */ 553 return; 554 } 555 memory_region_dispatch_write(mr, addr, val, len, MEMTXATTRS_UNSPECIFIED); 556 } 557 558 static void 559 virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr, 560 uint8_t *buf, int len) 561 { 562 uint64_t val; 563 MemoryRegion *mr; 564 565 /* address_space_* APIs assume an aligned address. 566 * As address is under guest control, handle illegal values. 567 */ 568 addr &= ~(len - 1); 569 570 mr = virtio_address_space_lookup(proxy, &addr, len); 571 if (!mr) { 572 return; 573 } 574 575 /* Make sure caller aligned buf properly */ 576 assert(!(((uintptr_t)buf) & (len - 1))); 577 578 memory_region_dispatch_read(mr, addr, &val, len, MEMTXATTRS_UNSPECIFIED); 579 switch (len) { 580 case 1: 581 pci_set_byte(buf, val); 582 break; 583 case 2: 584 pci_set_word(buf, le16_to_cpu(val)); 585 break; 586 case 4: 587 pci_set_long(buf, le32_to_cpu(val)); 588 break; 589 default: 590 /* As length is under guest control, handle illegal values. */ 591 break; 592 } 593 } 594 595 static void virtio_write_config(PCIDevice *pci_dev, uint32_t address, 596 uint32_t val, int len) 597 { 598 VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); 599 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 600 struct virtio_pci_cfg_cap *cfg; 601 602 pci_default_write_config(pci_dev, address, val, len); 603 604 if (range_covers_byte(address, len, PCI_COMMAND) && 605 !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) { 606 virtio_pci_stop_ioeventfd(proxy); 607 virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK); 608 } 609 610 if (proxy->config_cap && 611 ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap, 612 pci_cfg_data), 613 sizeof cfg->pci_cfg_data)) { 614 uint32_t off; 615 uint32_t len; 616 617 cfg = (void *)(proxy->pci_dev.config + proxy->config_cap); 618 off = le32_to_cpu(cfg->cap.offset); 619 len = le32_to_cpu(cfg->cap.length); 620 621 if (len == 1 || len == 2 || len == 4) { 622 assert(len <= sizeof cfg->pci_cfg_data); 623 virtio_address_space_write(proxy, off, cfg->pci_cfg_data, len); 624 } 625 } 626 } 627 628 static uint32_t virtio_read_config(PCIDevice *pci_dev, 629 uint32_t address, int len) 630 { 631 VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); 632 struct virtio_pci_cfg_cap *cfg; 633 634 if (proxy->config_cap && 635 ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap, 636 pci_cfg_data), 637 sizeof cfg->pci_cfg_data)) { 638 uint32_t off; 639 uint32_t len; 640 641 cfg = (void *)(proxy->pci_dev.config + proxy->config_cap); 642 off = le32_to_cpu(cfg->cap.offset); 643 len = le32_to_cpu(cfg->cap.length); 644 645 if (len == 1 || len == 2 || len == 4) { 646 assert(len <= sizeof cfg->pci_cfg_data); 647 virtio_address_space_read(proxy, off, cfg->pci_cfg_data, len); 648 } 649 } 650 651 return pci_default_read_config(pci_dev, address, len); 652 } 653 654 static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy, 655 unsigned int queue_no, 656 unsigned int vector) 657 { 658 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; 659 int ret; 660 661 if (irqfd->users == 0) { 662 ret = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev); 663 if (ret < 0) { 664 return ret; 665 } 666 irqfd->virq = ret; 667 } 668 irqfd->users++; 669 return 0; 670 } 671 672 static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy, 673 unsigned int vector) 674 { 675 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; 676 if (--irqfd->users == 0) { 677 kvm_irqchip_release_virq(kvm_state, irqfd->virq); 678 } 679 } 680 681 static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy, 682 unsigned int queue_no, 683 unsigned int vector) 684 { 685 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; 686 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 687 VirtQueue *vq = virtio_get_queue(vdev, queue_no); 688 EventNotifier *n = virtio_queue_get_guest_notifier(vq); 689 return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq); 690 } 691 692 static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy, 693 unsigned int queue_no, 694 unsigned int vector) 695 { 696 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 697 VirtQueue *vq = virtio_get_queue(vdev, queue_no); 698 EventNotifier *n = virtio_queue_get_guest_notifier(vq); 699 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; 700 int ret; 701 702 ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq); 703 assert(ret == 0); 704 } 705 706 static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs) 707 { 708 PCIDevice *dev = &proxy->pci_dev; 709 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 710 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 711 unsigned int vector; 712 int ret, queue_no; 713 714 for (queue_no = 0; queue_no < nvqs; queue_no++) { 715 if (!virtio_queue_get_num(vdev, queue_no)) { 716 break; 717 } 718 vector = virtio_queue_vector(vdev, queue_no); 719 if (vector >= msix_nr_vectors_allocated(dev)) { 720 continue; 721 } 722 ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector); 723 if (ret < 0) { 724 goto undo; 725 } 726 /* If guest supports masking, set up irqfd now. 727 * Otherwise, delay until unmasked in the frontend. 728 */ 729 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { 730 ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector); 731 if (ret < 0) { 732 kvm_virtio_pci_vq_vector_release(proxy, vector); 733 goto undo; 734 } 735 } 736 } 737 return 0; 738 739 undo: 740 while (--queue_no >= 0) { 741 vector = virtio_queue_vector(vdev, queue_no); 742 if (vector >= msix_nr_vectors_allocated(dev)) { 743 continue; 744 } 745 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { 746 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector); 747 } 748 kvm_virtio_pci_vq_vector_release(proxy, vector); 749 } 750 return ret; 751 } 752 753 static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs) 754 { 755 PCIDevice *dev = &proxy->pci_dev; 756 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 757 unsigned int vector; 758 int queue_no; 759 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 760 761 for (queue_no = 0; queue_no < nvqs; queue_no++) { 762 if (!virtio_queue_get_num(vdev, queue_no)) { 763 break; 764 } 765 vector = virtio_queue_vector(vdev, queue_no); 766 if (vector >= msix_nr_vectors_allocated(dev)) { 767 continue; 768 } 769 /* If guest supports masking, clean up irqfd now. 770 * Otherwise, it was cleaned when masked in the frontend. 771 */ 772 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { 773 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector); 774 } 775 kvm_virtio_pci_vq_vector_release(proxy, vector); 776 } 777 } 778 779 static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy, 780 unsigned int queue_no, 781 unsigned int vector, 782 MSIMessage msg) 783 { 784 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 785 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 786 VirtQueue *vq = virtio_get_queue(vdev, queue_no); 787 EventNotifier *n = virtio_queue_get_guest_notifier(vq); 788 VirtIOIRQFD *irqfd; 789 int ret = 0; 790 791 if (proxy->vector_irqfd) { 792 irqfd = &proxy->vector_irqfd[vector]; 793 if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) { 794 ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg, 795 &proxy->pci_dev); 796 if (ret < 0) { 797 return ret; 798 } 799 kvm_irqchip_commit_routes(kvm_state); 800 } 801 } 802 803 /* If guest supports masking, irqfd is already setup, unmask it. 804 * Otherwise, set it up now. 805 */ 806 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { 807 k->guest_notifier_mask(vdev, queue_no, false); 808 /* Test after unmasking to avoid losing events. */ 809 if (k->guest_notifier_pending && 810 k->guest_notifier_pending(vdev, queue_no)) { 811 event_notifier_set(n); 812 } 813 } else { 814 ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector); 815 } 816 return ret; 817 } 818 819 static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy, 820 unsigned int queue_no, 821 unsigned int vector) 822 { 823 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 824 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 825 826 /* If guest supports masking, keep irqfd but mask it. 827 * Otherwise, clean it up now. 828 */ 829 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { 830 k->guest_notifier_mask(vdev, queue_no, true); 831 } else { 832 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector); 833 } 834 } 835 836 static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector, 837 MSIMessage msg) 838 { 839 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev); 840 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 841 VirtQueue *vq = virtio_vector_first_queue(vdev, vector); 842 int ret, index, unmasked = 0; 843 844 while (vq) { 845 index = virtio_get_queue_index(vq); 846 if (!virtio_queue_get_num(vdev, index)) { 847 break; 848 } 849 if (index < proxy->nvqs_with_notifiers) { 850 ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg); 851 if (ret < 0) { 852 goto undo; 853 } 854 ++unmasked; 855 } 856 vq = virtio_vector_next_queue(vq); 857 } 858 859 return 0; 860 861 undo: 862 vq = virtio_vector_first_queue(vdev, vector); 863 while (vq && unmasked >= 0) { 864 index = virtio_get_queue_index(vq); 865 if (index < proxy->nvqs_with_notifiers) { 866 virtio_pci_vq_vector_mask(proxy, index, vector); 867 --unmasked; 868 } 869 vq = virtio_vector_next_queue(vq); 870 } 871 return ret; 872 } 873 874 static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector) 875 { 876 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev); 877 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 878 VirtQueue *vq = virtio_vector_first_queue(vdev, vector); 879 int index; 880 881 while (vq) { 882 index = virtio_get_queue_index(vq); 883 if (!virtio_queue_get_num(vdev, index)) { 884 break; 885 } 886 if (index < proxy->nvqs_with_notifiers) { 887 virtio_pci_vq_vector_mask(proxy, index, vector); 888 } 889 vq = virtio_vector_next_queue(vq); 890 } 891 } 892 893 static void virtio_pci_vector_poll(PCIDevice *dev, 894 unsigned int vector_start, 895 unsigned int vector_end) 896 { 897 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev); 898 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 899 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 900 int queue_no; 901 unsigned int vector; 902 EventNotifier *notifier; 903 VirtQueue *vq; 904 905 for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) { 906 if (!virtio_queue_get_num(vdev, queue_no)) { 907 break; 908 } 909 vector = virtio_queue_vector(vdev, queue_no); 910 if (vector < vector_start || vector >= vector_end || 911 !msix_is_masked(dev, vector)) { 912 continue; 913 } 914 vq = virtio_get_queue(vdev, queue_no); 915 notifier = virtio_queue_get_guest_notifier(vq); 916 if (k->guest_notifier_pending) { 917 if (k->guest_notifier_pending(vdev, queue_no)) { 918 msix_set_pending(dev, vector); 919 } 920 } else if (event_notifier_test_and_clear(notifier)) { 921 msix_set_pending(dev, vector); 922 } 923 } 924 } 925 926 static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign, 927 bool with_irqfd) 928 { 929 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 930 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 931 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); 932 VirtQueue *vq = virtio_get_queue(vdev, n); 933 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq); 934 935 if (assign) { 936 int r = event_notifier_init(notifier, 0); 937 if (r < 0) { 938 return r; 939 } 940 virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd); 941 } else { 942 virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd); 943 event_notifier_cleanup(notifier); 944 } 945 946 if (!msix_enabled(&proxy->pci_dev) && 947 vdev->use_guest_notifier_mask && 948 vdc->guest_notifier_mask) { 949 vdc->guest_notifier_mask(vdev, n, !assign); 950 } 951 952 return 0; 953 } 954 955 static bool virtio_pci_query_guest_notifiers(DeviceState *d) 956 { 957 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 958 return msix_enabled(&proxy->pci_dev); 959 } 960 961 static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign) 962 { 963 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 964 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 965 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 966 int r, n; 967 bool with_irqfd = msix_enabled(&proxy->pci_dev) && 968 kvm_msi_via_irqfd_enabled(); 969 970 nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX); 971 972 /* When deassigning, pass a consistent nvqs value 973 * to avoid leaking notifiers. 974 */ 975 assert(assign || nvqs == proxy->nvqs_with_notifiers); 976 977 proxy->nvqs_with_notifiers = nvqs; 978 979 /* Must unset vector notifier while guest notifier is still assigned */ 980 if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) { 981 msix_unset_vector_notifiers(&proxy->pci_dev); 982 if (proxy->vector_irqfd) { 983 kvm_virtio_pci_vector_release(proxy, nvqs); 984 g_free(proxy->vector_irqfd); 985 proxy->vector_irqfd = NULL; 986 } 987 } 988 989 for (n = 0; n < nvqs; n++) { 990 if (!virtio_queue_get_num(vdev, n)) { 991 break; 992 } 993 994 r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd); 995 if (r < 0) { 996 goto assign_error; 997 } 998 } 999 1000 /* Must set vector notifier after guest notifier has been assigned */ 1001 if ((with_irqfd || k->guest_notifier_mask) && assign) { 1002 if (with_irqfd) { 1003 proxy->vector_irqfd = 1004 g_malloc0(sizeof(*proxy->vector_irqfd) * 1005 msix_nr_vectors_allocated(&proxy->pci_dev)); 1006 r = kvm_virtio_pci_vector_use(proxy, nvqs); 1007 if (r < 0) { 1008 goto assign_error; 1009 } 1010 } 1011 r = msix_set_vector_notifiers(&proxy->pci_dev, 1012 virtio_pci_vector_unmask, 1013 virtio_pci_vector_mask, 1014 virtio_pci_vector_poll); 1015 if (r < 0) { 1016 goto notifiers_error; 1017 } 1018 } 1019 1020 return 0; 1021 1022 notifiers_error: 1023 if (with_irqfd) { 1024 assert(assign); 1025 kvm_virtio_pci_vector_release(proxy, nvqs); 1026 } 1027 1028 assign_error: 1029 /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */ 1030 assert(assign); 1031 while (--n >= 0) { 1032 virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd); 1033 } 1034 return r; 1035 } 1036 1037 static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n, 1038 MemoryRegion *mr, bool assign) 1039 { 1040 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 1041 int offset; 1042 1043 if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) || 1044 virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) { 1045 return -1; 1046 } 1047 1048 if (assign) { 1049 offset = virtio_pci_queue_mem_mult(proxy) * n; 1050 memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1); 1051 } else { 1052 memory_region_del_subregion(&proxy->notify.mr, mr); 1053 } 1054 1055 return 0; 1056 } 1057 1058 static void virtio_pci_vmstate_change(DeviceState *d, bool running) 1059 { 1060 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 1061 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1062 1063 if (running) { 1064 /* Old QEMU versions did not set bus master enable on status write. 1065 * Detect DRIVER set and enable it. 1066 */ 1067 if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) && 1068 (vdev->status & VIRTIO_CONFIG_S_DRIVER) && 1069 !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) { 1070 pci_default_write_config(&proxy->pci_dev, PCI_COMMAND, 1071 proxy->pci_dev.config[PCI_COMMAND] | 1072 PCI_COMMAND_MASTER, 1); 1073 } 1074 virtio_pci_start_ioeventfd(proxy); 1075 } else { 1076 virtio_pci_stop_ioeventfd(proxy); 1077 } 1078 } 1079 1080 /* 1081 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus. 1082 */ 1083 1084 static int virtio_pci_query_nvectors(DeviceState *d) 1085 { 1086 VirtIOPCIProxy *proxy = VIRTIO_PCI(d); 1087 1088 return proxy->nvectors; 1089 } 1090 1091 static AddressSpace *virtio_pci_get_dma_as(DeviceState *d) 1092 { 1093 VirtIOPCIProxy *proxy = VIRTIO_PCI(d); 1094 PCIDevice *dev = &proxy->pci_dev; 1095 1096 return pci_get_address_space(dev); 1097 } 1098 1099 static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy, 1100 struct virtio_pci_cap *cap) 1101 { 1102 PCIDevice *dev = &proxy->pci_dev; 1103 int offset; 1104 1105 offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0, 1106 cap->cap_len, &error_abort); 1107 1108 assert(cap->cap_len >= sizeof *cap); 1109 memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len, 1110 cap->cap_len - PCI_CAP_FLAGS); 1111 1112 return offset; 1113 } 1114 1115 static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr, 1116 unsigned size) 1117 { 1118 VirtIOPCIProxy *proxy = opaque; 1119 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1120 uint32_t val = 0; 1121 int i; 1122 1123 switch (addr) { 1124 case VIRTIO_PCI_COMMON_DFSELECT: 1125 val = proxy->dfselect; 1126 break; 1127 case VIRTIO_PCI_COMMON_DF: 1128 if (proxy->dfselect <= 1) { 1129 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); 1130 1131 val = (vdev->host_features & ~vdc->legacy_features) >> 1132 (32 * proxy->dfselect); 1133 } 1134 break; 1135 case VIRTIO_PCI_COMMON_GFSELECT: 1136 val = proxy->gfselect; 1137 break; 1138 case VIRTIO_PCI_COMMON_GF: 1139 if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) { 1140 val = proxy->guest_features[proxy->gfselect]; 1141 } 1142 break; 1143 case VIRTIO_PCI_COMMON_MSIX: 1144 val = vdev->config_vector; 1145 break; 1146 case VIRTIO_PCI_COMMON_NUMQ: 1147 for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) { 1148 if (virtio_queue_get_num(vdev, i)) { 1149 val = i + 1; 1150 } 1151 } 1152 break; 1153 case VIRTIO_PCI_COMMON_STATUS: 1154 val = vdev->status; 1155 break; 1156 case VIRTIO_PCI_COMMON_CFGGENERATION: 1157 val = vdev->generation; 1158 break; 1159 case VIRTIO_PCI_COMMON_Q_SELECT: 1160 val = vdev->queue_sel; 1161 break; 1162 case VIRTIO_PCI_COMMON_Q_SIZE: 1163 val = virtio_queue_get_num(vdev, vdev->queue_sel); 1164 break; 1165 case VIRTIO_PCI_COMMON_Q_MSIX: 1166 val = virtio_queue_vector(vdev, vdev->queue_sel); 1167 break; 1168 case VIRTIO_PCI_COMMON_Q_ENABLE: 1169 val = proxy->vqs[vdev->queue_sel].enabled; 1170 break; 1171 case VIRTIO_PCI_COMMON_Q_NOFF: 1172 /* Simply map queues in order */ 1173 val = vdev->queue_sel; 1174 break; 1175 case VIRTIO_PCI_COMMON_Q_DESCLO: 1176 val = proxy->vqs[vdev->queue_sel].desc[0]; 1177 break; 1178 case VIRTIO_PCI_COMMON_Q_DESCHI: 1179 val = proxy->vqs[vdev->queue_sel].desc[1]; 1180 break; 1181 case VIRTIO_PCI_COMMON_Q_AVAILLO: 1182 val = proxy->vqs[vdev->queue_sel].avail[0]; 1183 break; 1184 case VIRTIO_PCI_COMMON_Q_AVAILHI: 1185 val = proxy->vqs[vdev->queue_sel].avail[1]; 1186 break; 1187 case VIRTIO_PCI_COMMON_Q_USEDLO: 1188 val = proxy->vqs[vdev->queue_sel].used[0]; 1189 break; 1190 case VIRTIO_PCI_COMMON_Q_USEDHI: 1191 val = proxy->vqs[vdev->queue_sel].used[1]; 1192 break; 1193 default: 1194 val = 0; 1195 } 1196 1197 return val; 1198 } 1199 1200 static void virtio_pci_common_write(void *opaque, hwaddr addr, 1201 uint64_t val, unsigned size) 1202 { 1203 VirtIOPCIProxy *proxy = opaque; 1204 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1205 1206 switch (addr) { 1207 case VIRTIO_PCI_COMMON_DFSELECT: 1208 proxy->dfselect = val; 1209 break; 1210 case VIRTIO_PCI_COMMON_GFSELECT: 1211 proxy->gfselect = val; 1212 break; 1213 case VIRTIO_PCI_COMMON_GF: 1214 if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) { 1215 proxy->guest_features[proxy->gfselect] = val; 1216 virtio_set_features(vdev, 1217 (((uint64_t)proxy->guest_features[1]) << 32) | 1218 proxy->guest_features[0]); 1219 } 1220 break; 1221 case VIRTIO_PCI_COMMON_MSIX: 1222 msix_vector_unuse(&proxy->pci_dev, vdev->config_vector); 1223 /* Make it possible for guest to discover an error took place. */ 1224 if (msix_vector_use(&proxy->pci_dev, val) < 0) { 1225 val = VIRTIO_NO_VECTOR; 1226 } 1227 vdev->config_vector = val; 1228 break; 1229 case VIRTIO_PCI_COMMON_STATUS: 1230 if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) { 1231 virtio_pci_stop_ioeventfd(proxy); 1232 } 1233 1234 virtio_set_status(vdev, val & 0xFF); 1235 1236 if (val & VIRTIO_CONFIG_S_DRIVER_OK) { 1237 virtio_pci_start_ioeventfd(proxy); 1238 } 1239 1240 if (vdev->status == 0) { 1241 virtio_pci_reset(DEVICE(proxy)); 1242 } 1243 1244 break; 1245 case VIRTIO_PCI_COMMON_Q_SELECT: 1246 if (val < VIRTIO_QUEUE_MAX) { 1247 vdev->queue_sel = val; 1248 } 1249 break; 1250 case VIRTIO_PCI_COMMON_Q_SIZE: 1251 proxy->vqs[vdev->queue_sel].num = val; 1252 break; 1253 case VIRTIO_PCI_COMMON_Q_MSIX: 1254 msix_vector_unuse(&proxy->pci_dev, 1255 virtio_queue_vector(vdev, vdev->queue_sel)); 1256 /* Make it possible for guest to discover an error took place. */ 1257 if (msix_vector_use(&proxy->pci_dev, val) < 0) { 1258 val = VIRTIO_NO_VECTOR; 1259 } 1260 virtio_queue_set_vector(vdev, vdev->queue_sel, val); 1261 break; 1262 case VIRTIO_PCI_COMMON_Q_ENABLE: 1263 virtio_queue_set_num(vdev, vdev->queue_sel, 1264 proxy->vqs[vdev->queue_sel].num); 1265 virtio_queue_set_rings(vdev, vdev->queue_sel, 1266 ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 | 1267 proxy->vqs[vdev->queue_sel].desc[0], 1268 ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 | 1269 proxy->vqs[vdev->queue_sel].avail[0], 1270 ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 | 1271 proxy->vqs[vdev->queue_sel].used[0]); 1272 proxy->vqs[vdev->queue_sel].enabled = 1; 1273 break; 1274 case VIRTIO_PCI_COMMON_Q_DESCLO: 1275 proxy->vqs[vdev->queue_sel].desc[0] = val; 1276 break; 1277 case VIRTIO_PCI_COMMON_Q_DESCHI: 1278 proxy->vqs[vdev->queue_sel].desc[1] = val; 1279 break; 1280 case VIRTIO_PCI_COMMON_Q_AVAILLO: 1281 proxy->vqs[vdev->queue_sel].avail[0] = val; 1282 break; 1283 case VIRTIO_PCI_COMMON_Q_AVAILHI: 1284 proxy->vqs[vdev->queue_sel].avail[1] = val; 1285 break; 1286 case VIRTIO_PCI_COMMON_Q_USEDLO: 1287 proxy->vqs[vdev->queue_sel].used[0] = val; 1288 break; 1289 case VIRTIO_PCI_COMMON_Q_USEDHI: 1290 proxy->vqs[vdev->queue_sel].used[1] = val; 1291 break; 1292 default: 1293 break; 1294 } 1295 } 1296 1297 1298 static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr, 1299 unsigned size) 1300 { 1301 return 0; 1302 } 1303 1304 static void virtio_pci_notify_write(void *opaque, hwaddr addr, 1305 uint64_t val, unsigned size) 1306 { 1307 VirtIODevice *vdev = opaque; 1308 VirtIOPCIProxy *proxy = VIRTIO_PCI(DEVICE(vdev)->parent_bus->parent); 1309 unsigned queue = addr / virtio_pci_queue_mem_mult(proxy); 1310 1311 if (queue < VIRTIO_QUEUE_MAX) { 1312 virtio_queue_notify(vdev, queue); 1313 } 1314 } 1315 1316 static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr, 1317 uint64_t val, unsigned size) 1318 { 1319 VirtIODevice *vdev = opaque; 1320 unsigned queue = val; 1321 1322 if (queue < VIRTIO_QUEUE_MAX) { 1323 virtio_queue_notify(vdev, queue); 1324 } 1325 } 1326 1327 static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr, 1328 unsigned size) 1329 { 1330 VirtIOPCIProxy *proxy = opaque; 1331 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1332 uint64_t val = atomic_xchg(&vdev->isr, 0); 1333 pci_irq_deassert(&proxy->pci_dev); 1334 1335 return val; 1336 } 1337 1338 static void virtio_pci_isr_write(void *opaque, hwaddr addr, 1339 uint64_t val, unsigned size) 1340 { 1341 } 1342 1343 static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr, 1344 unsigned size) 1345 { 1346 VirtIODevice *vdev = opaque; 1347 uint64_t val = 0; 1348 1349 switch (size) { 1350 case 1: 1351 val = virtio_config_modern_readb(vdev, addr); 1352 break; 1353 case 2: 1354 val = virtio_config_modern_readw(vdev, addr); 1355 break; 1356 case 4: 1357 val = virtio_config_modern_readl(vdev, addr); 1358 break; 1359 } 1360 return val; 1361 } 1362 1363 static void virtio_pci_device_write(void *opaque, hwaddr addr, 1364 uint64_t val, unsigned size) 1365 { 1366 VirtIODevice *vdev = opaque; 1367 switch (size) { 1368 case 1: 1369 virtio_config_modern_writeb(vdev, addr, val); 1370 break; 1371 case 2: 1372 virtio_config_modern_writew(vdev, addr, val); 1373 break; 1374 case 4: 1375 virtio_config_modern_writel(vdev, addr, val); 1376 break; 1377 } 1378 } 1379 1380 static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy) 1381 { 1382 static const MemoryRegionOps common_ops = { 1383 .read = virtio_pci_common_read, 1384 .write = virtio_pci_common_write, 1385 .impl = { 1386 .min_access_size = 1, 1387 .max_access_size = 4, 1388 }, 1389 .endianness = DEVICE_LITTLE_ENDIAN, 1390 }; 1391 static const MemoryRegionOps isr_ops = { 1392 .read = virtio_pci_isr_read, 1393 .write = virtio_pci_isr_write, 1394 .impl = { 1395 .min_access_size = 1, 1396 .max_access_size = 4, 1397 }, 1398 .endianness = DEVICE_LITTLE_ENDIAN, 1399 }; 1400 static const MemoryRegionOps device_ops = { 1401 .read = virtio_pci_device_read, 1402 .write = virtio_pci_device_write, 1403 .impl = { 1404 .min_access_size = 1, 1405 .max_access_size = 4, 1406 }, 1407 .endianness = DEVICE_LITTLE_ENDIAN, 1408 }; 1409 static const MemoryRegionOps notify_ops = { 1410 .read = virtio_pci_notify_read, 1411 .write = virtio_pci_notify_write, 1412 .impl = { 1413 .min_access_size = 1, 1414 .max_access_size = 4, 1415 }, 1416 .endianness = DEVICE_LITTLE_ENDIAN, 1417 }; 1418 static const MemoryRegionOps notify_pio_ops = { 1419 .read = virtio_pci_notify_read, 1420 .write = virtio_pci_notify_write_pio, 1421 .impl = { 1422 .min_access_size = 1, 1423 .max_access_size = 4, 1424 }, 1425 .endianness = DEVICE_LITTLE_ENDIAN, 1426 }; 1427 1428 1429 memory_region_init_io(&proxy->common.mr, OBJECT(proxy), 1430 &common_ops, 1431 proxy, 1432 "virtio-pci-common", 1433 proxy->common.size); 1434 1435 memory_region_init_io(&proxy->isr.mr, OBJECT(proxy), 1436 &isr_ops, 1437 proxy, 1438 "virtio-pci-isr", 1439 proxy->isr.size); 1440 1441 memory_region_init_io(&proxy->device.mr, OBJECT(proxy), 1442 &device_ops, 1443 virtio_bus_get_device(&proxy->bus), 1444 "virtio-pci-device", 1445 proxy->device.size); 1446 1447 memory_region_init_io(&proxy->notify.mr, OBJECT(proxy), 1448 ¬ify_ops, 1449 virtio_bus_get_device(&proxy->bus), 1450 "virtio-pci-notify", 1451 proxy->notify.size); 1452 1453 memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy), 1454 ¬ify_pio_ops, 1455 virtio_bus_get_device(&proxy->bus), 1456 "virtio-pci-notify-pio", 1457 proxy->notify_pio.size); 1458 } 1459 1460 static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy, 1461 VirtIOPCIRegion *region, 1462 struct virtio_pci_cap *cap, 1463 MemoryRegion *mr, 1464 uint8_t bar) 1465 { 1466 memory_region_add_subregion(mr, region->offset, ®ion->mr); 1467 1468 cap->cfg_type = region->type; 1469 cap->bar = bar; 1470 cap->offset = cpu_to_le32(region->offset); 1471 cap->length = cpu_to_le32(region->size); 1472 virtio_pci_add_mem_cap(proxy, cap); 1473 1474 } 1475 1476 static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy, 1477 VirtIOPCIRegion *region, 1478 struct virtio_pci_cap *cap) 1479 { 1480 virtio_pci_modern_region_map(proxy, region, cap, 1481 &proxy->modern_bar, proxy->modern_mem_bar_idx); 1482 } 1483 1484 static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy, 1485 VirtIOPCIRegion *region, 1486 struct virtio_pci_cap *cap) 1487 { 1488 virtio_pci_modern_region_map(proxy, region, cap, 1489 &proxy->io_bar, proxy->modern_io_bar_idx); 1490 } 1491 1492 static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy, 1493 VirtIOPCIRegion *region) 1494 { 1495 memory_region_del_subregion(&proxy->modern_bar, 1496 ®ion->mr); 1497 } 1498 1499 static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy, 1500 VirtIOPCIRegion *region) 1501 { 1502 memory_region_del_subregion(&proxy->io_bar, 1503 ®ion->mr); 1504 } 1505 1506 static void virtio_pci_pre_plugged(DeviceState *d, Error **errp) 1507 { 1508 VirtIOPCIProxy *proxy = VIRTIO_PCI(d); 1509 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1510 1511 if (virtio_pci_modern(proxy)) { 1512 virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1); 1513 } 1514 1515 virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE); 1516 } 1517 1518 /* This is called by virtio-bus just after the device is plugged. */ 1519 static void virtio_pci_device_plugged(DeviceState *d, Error **errp) 1520 { 1521 VirtIOPCIProxy *proxy = VIRTIO_PCI(d); 1522 VirtioBusState *bus = &proxy->bus; 1523 bool legacy = virtio_pci_legacy(proxy); 1524 bool modern; 1525 bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; 1526 uint8_t *config; 1527 uint32_t size; 1528 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1529 1530 /* 1531 * Virtio capabilities present without 1532 * VIRTIO_F_VERSION_1 confuses guests 1533 */ 1534 if (!proxy->ignore_backend_features && 1535 !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) { 1536 virtio_pci_disable_modern(proxy); 1537 1538 if (!legacy) { 1539 error_setg(errp, "Device doesn't support modern mode, and legacy" 1540 " mode is disabled"); 1541 error_append_hint(errp, "Set disable-legacy to off\n"); 1542 1543 return; 1544 } 1545 } 1546 1547 modern = virtio_pci_modern(proxy); 1548 1549 config = proxy->pci_dev.config; 1550 if (proxy->class_code) { 1551 pci_config_set_class(config, proxy->class_code); 1552 } 1553 1554 if (legacy) { 1555 if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) { 1556 error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by" 1557 " neither legacy nor transitional device"); 1558 return ; 1559 } 1560 /* 1561 * Legacy and transitional devices use specific subsystem IDs. 1562 * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID) 1563 * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default. 1564 */ 1565 pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus)); 1566 } else { 1567 /* pure virtio-1.0 */ 1568 pci_set_word(config + PCI_VENDOR_ID, 1569 PCI_VENDOR_ID_REDHAT_QUMRANET); 1570 pci_set_word(config + PCI_DEVICE_ID, 1571 0x1040 + virtio_bus_get_vdev_id(bus)); 1572 pci_config_set_revision(config, 1); 1573 } 1574 config[PCI_INTERRUPT_PIN] = 1; 1575 1576 1577 if (modern) { 1578 struct virtio_pci_cap cap = { 1579 .cap_len = sizeof cap, 1580 }; 1581 struct virtio_pci_notify_cap notify = { 1582 .cap.cap_len = sizeof notify, 1583 .notify_off_multiplier = 1584 cpu_to_le32(virtio_pci_queue_mem_mult(proxy)), 1585 }; 1586 struct virtio_pci_cfg_cap cfg = { 1587 .cap.cap_len = sizeof cfg, 1588 .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG, 1589 }; 1590 struct virtio_pci_notify_cap notify_pio = { 1591 .cap.cap_len = sizeof notify, 1592 .notify_off_multiplier = cpu_to_le32(0x0), 1593 }; 1594 1595 struct virtio_pci_cfg_cap *cfg_mask; 1596 1597 virtio_pci_modern_regions_init(proxy); 1598 1599 virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap); 1600 virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap); 1601 virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap); 1602 virtio_pci_modern_mem_region_map(proxy, &proxy->notify, ¬ify.cap); 1603 1604 if (modern_pio) { 1605 memory_region_init(&proxy->io_bar, OBJECT(proxy), 1606 "virtio-pci-io", 0x4); 1607 1608 pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx, 1609 PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar); 1610 1611 virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio, 1612 ¬ify_pio.cap); 1613 } 1614 1615 pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx, 1616 PCI_BASE_ADDRESS_SPACE_MEMORY | 1617 PCI_BASE_ADDRESS_MEM_PREFETCH | 1618 PCI_BASE_ADDRESS_MEM_TYPE_64, 1619 &proxy->modern_bar); 1620 1621 proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap); 1622 cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap); 1623 pci_set_byte(&cfg_mask->cap.bar, ~0x0); 1624 pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0); 1625 pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0); 1626 pci_set_long(cfg_mask->pci_cfg_data, ~0x0); 1627 } 1628 1629 if (proxy->nvectors) { 1630 int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1631 proxy->msix_bar_idx, NULL); 1632 if (err) { 1633 /* Notice when a system that supports MSIx can't initialize it */ 1634 if (err != -ENOTSUP) { 1635 warn_report("unable to init msix vectors to %" PRIu32, 1636 proxy->nvectors); 1637 } 1638 proxy->nvectors = 0; 1639 } 1640 } 1641 1642 proxy->pci_dev.config_write = virtio_write_config; 1643 proxy->pci_dev.config_read = virtio_read_config; 1644 1645 if (legacy) { 1646 size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev) 1647 + virtio_bus_get_vdev_config_len(bus); 1648 size = pow2ceil(size); 1649 1650 memory_region_init_io(&proxy->bar, OBJECT(proxy), 1651 &virtio_pci_config_ops, 1652 proxy, "virtio-pci", size); 1653 1654 pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx, 1655 PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar); 1656 } 1657 } 1658 1659 static void virtio_pci_device_unplugged(DeviceState *d) 1660 { 1661 VirtIOPCIProxy *proxy = VIRTIO_PCI(d); 1662 bool modern = virtio_pci_modern(proxy); 1663 bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; 1664 1665 virtio_pci_stop_ioeventfd(proxy); 1666 1667 if (modern) { 1668 virtio_pci_modern_mem_region_unmap(proxy, &proxy->common); 1669 virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr); 1670 virtio_pci_modern_mem_region_unmap(proxy, &proxy->device); 1671 virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify); 1672 if (modern_pio) { 1673 virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio); 1674 } 1675 } 1676 } 1677 1678 static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp) 1679 { 1680 VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); 1681 VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev); 1682 bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) && 1683 !pci_bus_is_root(pci_get_bus(pci_dev)); 1684 1685 if (kvm_enabled() && !kvm_has_many_ioeventfds()) { 1686 proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD; 1687 } 1688 1689 /* 1690 * virtio pci bar layout used by default. 1691 * subclasses can re-arrange things if needed. 1692 * 1693 * region 0 -- virtio legacy io bar 1694 * region 1 -- msi-x bar 1695 * region 4+5 -- virtio modern memory (64bit) bar 1696 * 1697 */ 1698 proxy->legacy_io_bar_idx = 0; 1699 proxy->msix_bar_idx = 1; 1700 proxy->modern_io_bar_idx = 2; 1701 proxy->modern_mem_bar_idx = 4; 1702 1703 proxy->common.offset = 0x0; 1704 proxy->common.size = 0x1000; 1705 proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG; 1706 1707 proxy->isr.offset = 0x1000; 1708 proxy->isr.size = 0x1000; 1709 proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG; 1710 1711 proxy->device.offset = 0x2000; 1712 proxy->device.size = 0x1000; 1713 proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG; 1714 1715 proxy->notify.offset = 0x3000; 1716 proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX; 1717 proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG; 1718 1719 proxy->notify_pio.offset = 0x0; 1720 proxy->notify_pio.size = 0x4; 1721 proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG; 1722 1723 /* subclasses can enforce modern, so do this unconditionally */ 1724 memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci", 1725 /* PCI BAR regions must be powers of 2 */ 1726 pow2ceil(proxy->notify.offset + proxy->notify.size)); 1727 1728 if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) { 1729 proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; 1730 } 1731 1732 if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) { 1733 error_setg(errp, "device cannot work as neither modern nor legacy mode" 1734 " is enabled"); 1735 error_append_hint(errp, "Set either disable-modern or disable-legacy" 1736 " to off\n"); 1737 return; 1738 } 1739 1740 if (pcie_port && pci_is_express(pci_dev)) { 1741 int pos; 1742 1743 pos = pcie_endpoint_cap_init(pci_dev, 0); 1744 assert(pos > 0); 1745 1746 pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0, 1747 PCI_PM_SIZEOF, errp); 1748 if (pos < 0) { 1749 return; 1750 } 1751 1752 pci_dev->exp.pm_cap = pos; 1753 1754 /* 1755 * Indicates that this function complies with revision 1.2 of the 1756 * PCI Power Management Interface Specification. 1757 */ 1758 pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3); 1759 1760 if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) { 1761 /* Init error enabling flags */ 1762 pcie_cap_deverr_init(pci_dev); 1763 } 1764 1765 if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) { 1766 /* Init Link Control Register */ 1767 pcie_cap_lnkctl_init(pci_dev); 1768 } 1769 1770 if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) { 1771 /* Init Power Management Control Register */ 1772 pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL, 1773 PCI_PM_CTRL_STATE_MASK); 1774 } 1775 1776 if (proxy->flags & VIRTIO_PCI_FLAG_ATS) { 1777 pcie_ats_init(pci_dev, 256); 1778 } 1779 1780 } else { 1781 /* 1782 * make future invocations of pci_is_express() return false 1783 * and pci_config_size() return PCI_CONFIG_SPACE_SIZE. 1784 */ 1785 pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS; 1786 } 1787 1788 virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy); 1789 if (k->realize) { 1790 k->realize(proxy, errp); 1791 } 1792 } 1793 1794 static void virtio_pci_exit(PCIDevice *pci_dev) 1795 { 1796 msix_uninit_exclusive_bar(pci_dev); 1797 } 1798 1799 static void virtio_pci_reset(DeviceState *qdev) 1800 { 1801 VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev); 1802 VirtioBusState *bus = VIRTIO_BUS(&proxy->bus); 1803 PCIDevice *dev = PCI_DEVICE(qdev); 1804 int i; 1805 1806 virtio_pci_stop_ioeventfd(proxy); 1807 virtio_bus_reset(bus); 1808 msix_unuse_all_vectors(&proxy->pci_dev); 1809 1810 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 1811 proxy->vqs[i].enabled = 0; 1812 proxy->vqs[i].num = 0; 1813 proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0; 1814 proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0; 1815 proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0; 1816 } 1817 1818 if (pci_is_express(dev)) { 1819 pcie_cap_deverr_reset(dev); 1820 pcie_cap_lnkctl_reset(dev); 1821 1822 pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0); 1823 } 1824 } 1825 1826 static Property virtio_pci_properties[] = { 1827 DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags, 1828 VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false), 1829 DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags, 1830 VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true), 1831 DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags, 1832 VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false), 1833 DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags, 1834 VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false), 1835 DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags, 1836 VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false), 1837 DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy, 1838 ignore_backend_features, false), 1839 DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags, 1840 VIRTIO_PCI_FLAG_ATS_BIT, false), 1841 DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags, 1842 VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true), 1843 DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags, 1844 VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true), 1845 DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags, 1846 VIRTIO_PCI_FLAG_INIT_PM_BIT, true), 1847 DEFINE_PROP_END_OF_LIST(), 1848 }; 1849 1850 static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp) 1851 { 1852 VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev); 1853 VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev); 1854 PCIDevice *pci_dev = &proxy->pci_dev; 1855 1856 if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) && 1857 virtio_pci_modern(proxy)) { 1858 pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; 1859 } 1860 1861 vpciklass->parent_dc_realize(qdev, errp); 1862 } 1863 1864 static void virtio_pci_class_init(ObjectClass *klass, void *data) 1865 { 1866 DeviceClass *dc = DEVICE_CLASS(klass); 1867 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); 1868 VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass); 1869 1870 dc->props = virtio_pci_properties; 1871 k->realize = virtio_pci_realize; 1872 k->exit = virtio_pci_exit; 1873 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; 1874 k->revision = VIRTIO_PCI_ABI_VERSION; 1875 k->class_id = PCI_CLASS_OTHERS; 1876 device_class_set_parent_realize(dc, virtio_pci_dc_realize, 1877 &vpciklass->parent_dc_realize); 1878 dc->reset = virtio_pci_reset; 1879 } 1880 1881 static const TypeInfo virtio_pci_info = { 1882 .name = TYPE_VIRTIO_PCI, 1883 .parent = TYPE_PCI_DEVICE, 1884 .instance_size = sizeof(VirtIOPCIProxy), 1885 .class_init = virtio_pci_class_init, 1886 .class_size = sizeof(VirtioPCIClass), 1887 .abstract = true, 1888 }; 1889 1890 static Property virtio_pci_generic_properties[] = { 1891 DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy, 1892 ON_OFF_AUTO_AUTO), 1893 DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false), 1894 DEFINE_PROP_END_OF_LIST(), 1895 }; 1896 1897 static void virtio_pci_base_class_init(ObjectClass *klass, void *data) 1898 { 1899 const VirtioPCIDeviceTypeInfo *t = data; 1900 if (t->class_init) { 1901 t->class_init(klass, NULL); 1902 } 1903 } 1904 1905 static void virtio_pci_generic_class_init(ObjectClass *klass, void *data) 1906 { 1907 DeviceClass *dc = DEVICE_CLASS(klass); 1908 1909 dc->props = virtio_pci_generic_properties; 1910 } 1911 1912 static void virtio_pci_transitional_instance_init(Object *obj) 1913 { 1914 VirtIOPCIProxy *proxy = VIRTIO_PCI(obj); 1915 1916 proxy->disable_legacy = ON_OFF_AUTO_OFF; 1917 proxy->disable_modern = false; 1918 } 1919 1920 static void virtio_pci_non_transitional_instance_init(Object *obj) 1921 { 1922 VirtIOPCIProxy *proxy = VIRTIO_PCI(obj); 1923 1924 proxy->disable_legacy = ON_OFF_AUTO_ON; 1925 proxy->disable_modern = false; 1926 } 1927 1928 void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t) 1929 { 1930 char *base_name = NULL; 1931 TypeInfo base_type_info = { 1932 .name = t->base_name, 1933 .parent = t->parent ? t->parent : TYPE_VIRTIO_PCI, 1934 .instance_size = t->instance_size, 1935 .instance_init = t->instance_init, 1936 .class_size = t->class_size, 1937 .abstract = true, 1938 .interfaces = t->interfaces, 1939 }; 1940 TypeInfo generic_type_info = { 1941 .name = t->generic_name, 1942 .parent = base_type_info.name, 1943 .class_init = virtio_pci_generic_class_init, 1944 .interfaces = (InterfaceInfo[]) { 1945 { INTERFACE_PCIE_DEVICE }, 1946 { INTERFACE_CONVENTIONAL_PCI_DEVICE }, 1947 { } 1948 }, 1949 }; 1950 1951 if (!base_type_info.name) { 1952 /* No base type -> register a single generic device type */ 1953 /* use intermediate %s-base-type to add generic device props */ 1954 base_name = g_strdup_printf("%s-base-type", t->generic_name); 1955 base_type_info.name = base_name; 1956 base_type_info.class_init = virtio_pci_generic_class_init; 1957 1958 generic_type_info.parent = base_name; 1959 generic_type_info.class_init = virtio_pci_base_class_init; 1960 generic_type_info.class_data = (void *)t; 1961 1962 assert(!t->non_transitional_name); 1963 assert(!t->transitional_name); 1964 } else { 1965 base_type_info.class_init = virtio_pci_base_class_init; 1966 base_type_info.class_data = (void *)t; 1967 } 1968 1969 type_register(&base_type_info); 1970 if (generic_type_info.name) { 1971 type_register(&generic_type_info); 1972 } 1973 1974 if (t->non_transitional_name) { 1975 const TypeInfo non_transitional_type_info = { 1976 .name = t->non_transitional_name, 1977 .parent = base_type_info.name, 1978 .instance_init = virtio_pci_non_transitional_instance_init, 1979 .interfaces = (InterfaceInfo[]) { 1980 { INTERFACE_PCIE_DEVICE }, 1981 { INTERFACE_CONVENTIONAL_PCI_DEVICE }, 1982 { } 1983 }, 1984 }; 1985 type_register(&non_transitional_type_info); 1986 } 1987 1988 if (t->transitional_name) { 1989 const TypeInfo transitional_type_info = { 1990 .name = t->transitional_name, 1991 .parent = base_type_info.name, 1992 .instance_init = virtio_pci_transitional_instance_init, 1993 .interfaces = (InterfaceInfo[]) { 1994 /* 1995 * Transitional virtio devices work only as Conventional PCI 1996 * devices because they require PIO ports. 1997 */ 1998 { INTERFACE_CONVENTIONAL_PCI_DEVICE }, 1999 { } 2000 }, 2001 }; 2002 type_register(&transitional_type_info); 2003 } 2004 g_free(base_name); 2005 } 2006 2007 /* virtio-pci-bus */ 2008 2009 static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size, 2010 VirtIOPCIProxy *dev) 2011 { 2012 DeviceState *qdev = DEVICE(dev); 2013 char virtio_bus_name[] = "virtio-bus"; 2014 2015 qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev, 2016 virtio_bus_name); 2017 } 2018 2019 static void virtio_pci_bus_class_init(ObjectClass *klass, void *data) 2020 { 2021 BusClass *bus_class = BUS_CLASS(klass); 2022 VirtioBusClass *k = VIRTIO_BUS_CLASS(klass); 2023 bus_class->max_dev = 1; 2024 k->notify = virtio_pci_notify; 2025 k->save_config = virtio_pci_save_config; 2026 k->load_config = virtio_pci_load_config; 2027 k->save_queue = virtio_pci_save_queue; 2028 k->load_queue = virtio_pci_load_queue; 2029 k->save_extra_state = virtio_pci_save_extra_state; 2030 k->load_extra_state = virtio_pci_load_extra_state; 2031 k->has_extra_state = virtio_pci_has_extra_state; 2032 k->query_guest_notifiers = virtio_pci_query_guest_notifiers; 2033 k->set_guest_notifiers = virtio_pci_set_guest_notifiers; 2034 k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr; 2035 k->vmstate_change = virtio_pci_vmstate_change; 2036 k->pre_plugged = virtio_pci_pre_plugged; 2037 k->device_plugged = virtio_pci_device_plugged; 2038 k->device_unplugged = virtio_pci_device_unplugged; 2039 k->query_nvectors = virtio_pci_query_nvectors; 2040 k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled; 2041 k->ioeventfd_assign = virtio_pci_ioeventfd_assign; 2042 k->get_dma_as = virtio_pci_get_dma_as; 2043 } 2044 2045 static const TypeInfo virtio_pci_bus_info = { 2046 .name = TYPE_VIRTIO_PCI_BUS, 2047 .parent = TYPE_VIRTIO_BUS, 2048 .instance_size = sizeof(VirtioPCIBusState), 2049 .class_init = virtio_pci_bus_class_init, 2050 }; 2051 2052 static void virtio_pci_register_types(void) 2053 { 2054 /* Base types: */ 2055 type_register_static(&virtio_pci_bus_info); 2056 type_register_static(&virtio_pci_info); 2057 } 2058 2059 type_init(virtio_pci_register_types) 2060 2061