/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include "standard-headers/linux/virtio_pci.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
#include "sysemu/kvm.h"
#include "virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/visitor.h"

#define VIRTIO_PCI_REGION_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_present(dev))

#undef VIRTIO_PCI_CONFIG

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev);
static void virtio_pci_reset(DeviceState *qdev);

/* virtio device */
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}
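
/* Interrupt the guest for @vector: through MSI-X when it is enabled,
 * otherwise by driving the legacy INTx line from bit 0 of the ISR. */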
static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);

    if (msix_enabled(&proxy->pci_dev)) {
        msix_notify(&proxy->pci_dev, vector);
    } else {
        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
        pci_set_irq(&proxy->pci_dev, atomic_read(&vdev->isr) & 1);
    }
}

static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, vdev->config_vector);
    }
}

static const VMStateDescription vmstate_virtio_pci_modern_queue_state = {
    .name = "virtio_pci/modern_queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOPCIQueue),
        VMSTATE_UNUSED(1), /* enabled was stored as be16 */
        VMSTATE_BOOL(enabled, VirtIOPCIQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

static bool virtio_pci_modern_state_needed(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;

    return virtio_pci_modern(proxy);
}

static const VMStateDescription vmstate_virtio_pci_modern_state_sub = {
    .name = "virtio_pci/modern_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_pci_modern_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(dfselect, VirtIOPCIProxy),
        VMSTATE_UINT32(gfselect, VirtIOPCIProxy),
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_pci_modern_queue_state,
                             VirtIOPCIQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_pci = {
    .name = "virtio_pci",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_pci_modern_state_sub,
        NULL
    }
};

static bool virtio_pci_has_extra_state(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
}

static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
}

static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
}

static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, virtio_queue_vector(vdev, n));
    }
}
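
/* Restore the PCI and MSI-X state saved by virtio_pci_save_config() and
 * mark the loaded config vector as in use again. */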
static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    int ret;
    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vdev->config_vector);
    } else {
        vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
    }
    return 0;
}

static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    uint16_t vector;
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }

    return 0;
}

static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
}

#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000

static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy)
{
    return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ?
        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4;
}

static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    bool legacy = virtio_pci_legacy(proxy);
    bool modern = virtio_pci_modern(proxy);
    bool fast_mmio = kvm_ioeventfd_any_length_enabled();
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    MemoryRegion *modern_mr = &proxy->notify.mr;
    MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
    MemoryRegion *legacy_mr = &proxy->bar;
    hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) *
                         virtio_get_queue_index(vq);
    hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;

    if (assign) {
        if (modern) {
            if (fast_mmio) {
                memory_region_add_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_add_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_add_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    } else {
        if (modern) {
            if (fast_mmio) {
                memory_region_del_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_del_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_del_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    }
    return 0;
}

static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}
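
/* Write handler for the legacy (virtio 0.9.5) I/O port register block:
 * one case per register; the queue registers act on the queue currently
 * selected by VIRTIO_PCI_QUEUE_SEL. */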
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly? We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = virtio_bus_get_vdev_bad_features(&proxy->bus);
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_reset(DEVICE(proxy));
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        }
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        /* Linux before 2.6.34 drives the device without enabling
           the PCI device bus master bit. Enable it automatically
           for the guest. This is a PCI spec violation but so is
           initiating DMA with bus master bit clear. */
        if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for the guest to discover that an error
         * took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for the guest to discover that an error
         * took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        error_report("%s: unexpected address 0x%x value 0x%x",
                     __func__, addr, val);
        break;
    }
}

static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = atomic_xchg(&vdev->isr, 0);
        pci_irq_deassert(&proxy->pci_dev);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}
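
/* Accessors for the legacy BAR: offsets below VIRTIO_PCI_CONFIG_SIZE() go
 * to the common registers above; the remainder is the device-specific
 * config space, hence the byte swaps below for big-endian guests. */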
static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    uint64_t val = 0;

    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}

static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        virtio_config_writew(vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        virtio_config_writel(vdev, addr, val);
        break;
    }
}

static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy,
                                                 hwaddr *off, int len)
{
    int i;
    VirtIOPCIRegion *reg;

    for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) {
        reg = &proxy->regs[i];
        if (*off >= reg->offset &&
            *off + len <= reg->offset + reg->size) {
            *off -= reg->offset;
            return &reg->mr;
        }
    }

    return NULL;
}

/* Below are generic functions to do memcpy from/to an address space,
 * without byteswaps, with input validation.
 *
 * As regular address_space_* APIs all do some kind of byteswap at least for
 * some host/target combinations, we are forced to explicitly convert to a
 * known-endianness integer value.
 * It doesn't really matter which endian format to go through, so the code
 * below selects the endian that causes the least amount of work on the given
 * host.
 *
 * Note: host pointer must be aligned.
 */
static
void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
                                const uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = pci_get_byte(buf);
        break;
    case 2:
        val = cpu_to_le16(pci_get_word(buf));
        break;
    case 4:
        val = cpu_to_le32(pci_get_long(buf));
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        return;
    }
    memory_region_dispatch_write(mr, addr, val, len, MEMTXATTRS_UNSPECIFIED);
}

static void
virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
                          uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    memory_region_dispatch_read(mr, addr, &val, len, MEMTXATTRS_UNSPECIFIED);
    switch (len) {
    case 1:
        pci_set_byte(buf, val);
        break;
    case 2:
        pci_set_word(buf, le16_to_cpu(val));
        break;
    case 4:
        pci_set_long(buf, le32_to_cpu(val));
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}
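
/* PCI config space write handler: track bus master toggles (a guest that
 * clears PCI_COMMAND_MASTER must no longer see DRIVER_OK) and forward
 * writes landing in the VIRTIO_PCI_CAP_PCI_CFG window into the BARs. */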
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    struct virtio_pci_cfg_cap *cfg;

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
        virtio_pci_stop_ioeventfd(proxy);
        virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
    }

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap +
                       offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_write(proxy, off, cfg->pci_cfg_data, len);
        }
    }
}

static uint32_t virtio_read_config(PCIDevice *pci_dev,
                                   uint32_t address, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    struct virtio_pci_cfg_cap *cfg;

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap +
                       offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_read(proxy, off, cfg->pci_cfg_data, len);
        }
    }

    return pci_default_read_config(pci_dev, address, len);
}
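
/* MSI-X vector <-> KVM irqfd plumbing: a routing table entry is allocated
 * per vector on first use and reference-counted across the queues that
 * share the vector. */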
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        ret = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev);
        if (ret < 0) {
            return ret;
        }
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}

static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}

static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                    unsigned int queue_no,
                                    unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
}

static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                         unsigned int queue_no,
                                         unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}

static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    unsigned int vector;
    int ret, queue_no;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
        if (ret < 0) {
            goto undo;
        }
        /* If guest supports masking, set up irqfd now.
         * Otherwise, delay until unmasked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
            if (ret < 0) {
                kvm_virtio_pci_vq_vector_release(proxy, vector);
                goto undo;
            }
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
    return ret;
}

static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    unsigned int vector;
    int queue_no;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        /* If guest supports masking, clean up irqfd now.
         * Otherwise, it was cleaned when masked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
}
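
/* Per-queue unmask: refresh the KVM MSI route if the message changed while
 * the vector was masked, then either unmask in the frontend or attach the
 * irqfd now. */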
static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       MSIMessage msg)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
            kvm_irqchip_commit_routes(kvm_state);
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
    }
    return ret;
}

static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                      unsigned int queue_no,
                                      unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
    }
}
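
/* msix_set_vector_notifiers() callbacks: apply unmask/mask/poll to every
 * queue that uses the vector, rolling back on failure. */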
static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }

    return 0;

undo:
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}

static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int index;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
        }
        vq = virtio_vector_next_queue(vq);
    }
}

static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}

static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (!msix_enabled(&proxy->pci_dev) &&
        vdev->use_guest_notifier_mask &&
        vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return msix_enabled(&proxy->pci_dev);
}
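
/* Assign or release guest notifiers for the first @nvqs queues, routing
 * them through KVM irqfds when MSI-X and an in-kernel irqchip are
 * available. */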
static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /* When deassigning, pass a consistent nvqs value
     * to avoid leaking notifiers.
     */
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_release(proxy, nvqs);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd || k->guest_notifier_mask) && assign) {
        if (with_irqfd) {
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_use(proxy, nvqs);
            if (r < 0) {
                goto assign_error;
            }
        }
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_release(proxy, nvqs);
    }

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}

static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n,
                                           MemoryRegion *mr, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    int offset;

    if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) ||
        virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) {
        return -1;
    }

    if (assign) {
        offset = virtio_pci_queue_mem_mult(proxy) * n;
        memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1);
    } else {
        memory_region_del_subregion(&proxy->notify.mr, mr);
    }

    return 0;
}

static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (running) {
        /* Old QEMU versions did not set bus master enable on status write.
         * Detect DRIVER set and enable it.
         */
        if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
            (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}

/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
 */

static int virtio_pci_query_nvectors(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);

    return proxy->nvectors;
}

static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    PCIDevice *dev = &proxy->pci_dev;

    return pci_get_address_space(dev);
}

static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
                                  struct virtio_pci_cap *cap)
{
    PCIDevice *dev = &proxy->pci_dev;
    int offset;

    offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0,
                                cap->cap_len, &error_abort);

    assert(cap->cap_len >= sizeof *cap);
    memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
           cap->cap_len - PCI_CAP_FLAGS);

    return offset;
}
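
/* Read handler for the modern (virtio 1.0) common configuration structure;
 * the per-queue registers act on the queue selected by Q_SELECT. */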
static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        if (proxy->dfselect <= 1) {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            val = (vdev->host_features & ~vdc->legacy_features) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    default:
        val = 0;
    }

    return val;
}
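
/* Write handler for the modern common configuration structure: ring
 * addresses are latched in proxy->vqs and only committed to the core
 * device once the driver writes Q_ENABLE. */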
static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for the guest to discover that an error
         * took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for the guest to discover that an error
         * took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        virtio_queue_set_rings(vdev, vdev->queue_sel,
                       ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].desc[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].avail[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].used[0]);
        proxy->vqs[vdev->queue_sel].enabled = 1;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    default:
        break;
    }
}
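
/* Notify region handlers: for MMIO the queue index is derived from the
 * offset (one slot per queue), for PIO it is taken from the written
 * value. */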
static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    return 0;
}

static void virtio_pci_notify_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    VirtIOPCIProxy *proxy = VIRTIO_PCI(DEVICE(vdev)->parent_bus->parent);
    unsigned queue = addr / virtio_pci_queue_mem_mult(proxy);

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    unsigned queue = val;

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val = atomic_xchg(&vdev->isr, 0);
    pci_irq_deassert(&proxy->pci_dev);

    return val;
}

static void virtio_pci_isr_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}

static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIODevice *vdev = opaque;
    uint64_t val = 0;

    switch (size) {
    case 1:
        val = virtio_config_modern_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_modern_readw(vdev, addr);
        break;
    case 4:
        val = virtio_config_modern_readl(vdev, addr);
        break;
    }
    return val;
}

static void virtio_pci_device_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;

    switch (size) {
    case 1:
        virtio_config_modern_writeb(vdev, addr, val);
        break;
    case 2:
        virtio_config_modern_writew(vdev, addr, val);
        break;
    case 4:
        virtio_config_modern_writel(vdev, addr, val);
        break;
    }
}
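
/* Create the MemoryRegions for the modern capability structures: common,
 * isr, device and notify, plus the optional PIO notify region. */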
static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
{
    static const MemoryRegionOps common_ops = {
        .read = virtio_pci_common_read,
        .write = virtio_pci_common_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps isr_ops = {
        .read = virtio_pci_isr_read,
        .write = virtio_pci_isr_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps device_ops = {
        .read = virtio_pci_device_read,
        .write = virtio_pci_device_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_pio_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write_pio,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };

    memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                          &common_ops,
                          proxy,
                          "virtio-pci-common",
                          proxy->common.size);

    memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
                          &isr_ops,
                          proxy,
                          "virtio-pci-isr",
                          proxy->isr.size);

    memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
                          &device_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-device",
                          proxy->device.size);

    memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
                          &notify_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify",
                          proxy->notify.size);

    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
                          &notify_pio_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify-pio",
                          proxy->notify_pio.size);
}

static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length = cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);
}

static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
                                             VirtIOPCIRegion *region,
                                             struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->modern_bar, proxy->modern_mem_bar_idx);
}

static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
                                            VirtIOPCIRegion *region,
                                            struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->io_bar, proxy->modern_io_bar_idx);
}

static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
                                               VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->modern_bar,
                                &region->mr);
}

static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
                                              VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->io_bar,
                                &region->mr);
}
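
/* Let the transport add the feature bits it implements itself (VERSION_1
 * when modern mode is on, plus BAD_FEATURE) before they are offered to
 * the guest. */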
virtio_bus_get_device(&proxy->bus); 1509 1510 if (virtio_pci_modern(proxy)) { 1511 virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1); 1512 } 1513 1514 virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE); 1515 } 1516 1517 /* This is called by virtio-bus just after the device is plugged. */ 1518 static void virtio_pci_device_plugged(DeviceState *d, Error **errp) 1519 { 1520 VirtIOPCIProxy *proxy = VIRTIO_PCI(d); 1521 VirtioBusState *bus = &proxy->bus; 1522 bool legacy = virtio_pci_legacy(proxy); 1523 bool modern; 1524 bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; 1525 uint8_t *config; 1526 uint32_t size; 1527 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 1528 1529 /* 1530 * Virtio capabilities present without 1531 * VIRTIO_F_VERSION_1 confuses guests 1532 */ 1533 if (!proxy->ignore_backend_features && 1534 !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) { 1535 virtio_pci_disable_modern(proxy); 1536 1537 if (!legacy) { 1538 error_setg(errp, "Device doesn't support modern mode, and legacy" 1539 " mode is disabled"); 1540 error_append_hint(errp, "Set disable-legacy to off\n"); 1541 1542 return; 1543 } 1544 } 1545 1546 modern = virtio_pci_modern(proxy); 1547 1548 config = proxy->pci_dev.config; 1549 if (proxy->class_code) { 1550 pci_config_set_class(config, proxy->class_code); 1551 } 1552 1553 if (legacy) { 1554 if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) { 1555 error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by" 1556 " neither legacy nor transitional device"); 1557 return ; 1558 } 1559 /* 1560 * Legacy and transitional devices use specific subsystem IDs. 1561 * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID) 1562 * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default. 
        /*
         * Legacy and transitional devices use specific subsystem IDs.
         * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID)
         * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default.
         */
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    } else {
        /* pure virtio-1.0 */
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        pci_set_word(config + PCI_DEVICE_ID,
                     0x1040 + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }
    config[PCI_INTERRUPT_PIN] = 1;

    if (modern) {
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(virtio_pci_queue_mem_mult(proxy)),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        struct virtio_pci_notify_cap notify_pio = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_pci_modern_regions_init(proxy);

        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar_idx, NULL);
        if (err) {
            /* Notice when a system that supports MSIx can't initialize it */
            if (err != -ENOTSUP) {
                warn_report("unable to init msix vectors to %" PRIu32,
                            proxy->nvectors);
            }
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

    if (legacy) {
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
            + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }
}
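
/* Called by virtio-bus when the backend is unplugged: undo the region
 * mappings set up in virtio_pci_device_plugged(). */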
static void virtio_pci_device_unplugged(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    bool modern = virtio_pci_modern(proxy);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;

    virtio_pci_stop_ioeventfd(proxy);

    if (modern) {
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
        if (modern_pio) {
            virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
        }
    }
}

static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    if (kvm_enabled() && !kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     *   region 0   --  virtio legacy io bar
     *   region 1   --  msi-x bar
     *   region 2   --  virtio modern io bar (off by default)
     *   region 4+5 --  virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar_idx  = 0;
    proxy->msix_bar_idx       = 1;
    proxy->modern_io_bar_idx  = 2;
    proxy->modern_mem_bar_idx = 4;

    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    proxy->notify.offset = 0x3000;
    proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       /* PCI BAR regions must be powers of 2 */
                       pow2ceil(proxy->notify.offset + proxy->notify.size));

    if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
        proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) {
        error_setg(errp, "device cannot work as neither modern nor legacy mode"
                   " is enabled");
        error_append_hint(errp, "Set either disable-modern or disable-legacy"
                          " to off\n");
        return;
    }

    if (pcie_port && pci_is_express(pci_dev)) {
        int pos;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0,
                                 PCI_PM_SIZEOF, errp);
        if (pos < 0) {
            return;
        }

        pci_dev->exp.pm_cap = pos;

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
            /* Init error enabling flags */
            pcie_cap_deverr_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) {
            /* Init Link Control Register */
            pcie_cap_lnkctl_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
            /* Init Power Management Control Register */
            pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL,
                         PCI_PM_CTRL_STATE_MASK);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
            pcie_ats_init(pci_dev, 256);
        }

    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}

static void virtio_pci_exit(PCIDevice *pci_dev)
{
    msix_uninit_exclusive_bar(pci_dev);
}
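
/* Device reset: reset the virtio bus, release all MSI-X vectors and clear
 * the modern per-queue state latched in the proxy. */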
static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    PCIDevice *dev = PCI_DEVICE(qdev);
    int i;

    virtio_pci_stop_ioeventfd(proxy);
    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
        proxy->vqs[i].num = 0;
        proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
        proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
        proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
    }

    if (pci_is_express(dev)) {
        pcie_cap_deverr_reset(dev);
        pcie_cap_lnkctl_reset(dev);

        pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0);
    }
}

static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
    DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
                     ignore_backend_features, false),
    DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_BIT, false),
    DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true),
    DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true),
    DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_PM_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
{
    VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    PCIDevice *pci_dev = &proxy->pci_dev;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
        virtio_pci_modern(proxy)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    vpciklass->parent_dc_realize(qdev, errp);
}

static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    dc->props = virtio_pci_properties;
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    device_class_set_parent_realize(dc, virtio_pci_dc_realize,
                                    &vpciklass->parent_dc_realize);
    dc->reset = virtio_pci_reset;
}

static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
    .abstract      = true,
};

static Property virtio_pci_generic_properties[] = {
    DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_base_class_init(ObjectClass *klass, void *data)
{
    const VirtioPCIDeviceTypeInfo *t = data;
    if (t->class_init) {
        t->class_init(klass, NULL);
    }
}

static void virtio_pci_generic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->props = virtio_pci_generic_properties;
}

static void virtio_pci_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_OFF;
    proxy->disable_modern = false;
}

static void virtio_pci_non_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_ON;
    proxy->disable_modern = false;
}
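
/* Register the QOM types described by @t: an abstract base type, plus the
 * generic, transitional and non-transitional variants that were
 * requested. */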
void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
{
    char *base_name = NULL;
    TypeInfo base_type_info = {
        .name          = t->base_name,
        .parent        = t->parent ? t->parent : TYPE_VIRTIO_PCI,
        .instance_size = t->instance_size,
        .instance_init = t->instance_init,
        .class_size    = t->class_size,
        .abstract      = true,
        .interfaces    = t->interfaces,
    };
    TypeInfo generic_type_info = {
        .name = t->generic_name,
        .parent = base_type_info.name,
        .class_init = virtio_pci_generic_class_init,
        .interfaces = (InterfaceInfo[]) {
            { INTERFACE_PCIE_DEVICE },
            { INTERFACE_CONVENTIONAL_PCI_DEVICE },
            { }
        },
    };

    if (!base_type_info.name) {
        /* No base type -> register a single generic device type */
        /* use intermediate %s-base-type to add generic device props */
        base_name = g_strdup_printf("%s-base-type", t->generic_name);
        base_type_info.name = base_name;
        base_type_info.class_init = virtio_pci_generic_class_init;

        generic_type_info.parent = base_name;
        generic_type_info.class_init = virtio_pci_base_class_init;
        generic_type_info.class_data = (void *)t;

        assert(!t->non_transitional_name);
        assert(!t->transitional_name);
    } else {
        base_type_info.class_init = virtio_pci_base_class_init;
        base_type_info.class_data = (void *)t;
    }

    type_register(&base_type_info);
    if (generic_type_info.name) {
        type_register(&generic_type_info);
    }

    if (t->non_transitional_name) {
        const TypeInfo non_transitional_type_info = {
            .name          = t->non_transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_non_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                { INTERFACE_PCIE_DEVICE },
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&non_transitional_type_info);
    }
    if (t->transitional_name) {
        const TypeInfo transitional_type_info = {
            .name          = t->transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                /*
                 * Transitional virtio devices work only as Conventional PCI
                 * devices because they require PIO ports.
                 */
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&transitional_type_info);
    }
    g_free(base_name);
}

/* virtio-pci-bus */

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev,
                        virtio_bus_name);
}

static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->pre_plugged = virtio_pci_pre_plugged;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
    k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
    k->get_dma_as = virtio_pci_get_dma_as;
}

static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_init    = virtio_pci_bus_class_init,
};

static void virtio_pci_register_types(void)
{
    /* Base types: */
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
}

type_init(virtio_pci_register_types)