/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include "exec/memop.h"
#include "standard-headers/linux/virtio_pci.h"
#include "hw/boards.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
#include "sysemu/kvm.h"
#include "virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/visitor.h"
#include "sysemu/replay.h"

#define VIRTIO_PCI_REGION_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_present(dev))

#undef VIRTIO_PCI_CONFIG

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev);
static void virtio_pci_reset(DeviceState *qdev);

/* virtio device */
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);

    if (msix_enabled(&proxy->pci_dev)) {
        msix_notify(&proxy->pci_dev, vector);
    } else {
        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
        pci_set_irq(&proxy->pci_dev, qatomic_read(&vdev->isr) & 1);
    }
}

static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, vdev->config_vector);
    }
}

static const VMStateDescription vmstate_virtio_pci_modern_queue_state = {
    .name = "virtio_pci/modern_queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOPCIQueue),
        VMSTATE_UNUSED(1), /* enabled was stored as be16 */
        VMSTATE_BOOL(enabled, VirtIOPCIQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

static bool virtio_pci_modern_state_needed(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;

    return virtio_pci_modern(proxy);
}

static const VMStateDescription vmstate_virtio_pci_modern_state_sub = {
    .name = "virtio_pci/modern_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_pci_modern_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(dfselect, VirtIOPCIProxy),
        VMSTATE_UINT32(gfselect, VirtIOPCIProxy),
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_pci_modern_queue_state,
                             VirtIOPCIQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_pci = {
    .name = "virtio_pci",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_pci_modern_state_sub,
        NULL
    }
};

static bool virtio_pci_has_extra_state(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
}

static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
}

static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
}

static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, virtio_queue_vector(vdev, n));
    }
}
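/*
 * Counterpart of the save handlers above: restore PCI and MSI-X state,
 * then reclaim the config vector written out by virtio_pci_save_config().
 */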
static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    int ret;
    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vdev->config_vector);
    } else {
        vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
    }
    return 0;
}

static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    uint16_t vector;
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }

    return 0;
}

static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
}

#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000

static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy)
{
    return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ?
        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4;
}

static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    bool legacy = virtio_pci_legacy(proxy);
    bool modern = virtio_pci_modern(proxy);
    bool fast_mmio = kvm_ioeventfd_any_length_enabled();
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    MemoryRegion *modern_mr = &proxy->notify.mr;
    MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
    MemoryRegion *legacy_mr = &proxy->bar;
    hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) *
                         virtio_get_queue_index(vq);
    hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;

    if (assign) {
        if (modern) {
            if (fast_mmio) {
                memory_region_add_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_add_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_add_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    } else {
        if (modern) {
            if (fast_mmio) {
                memory_region_del_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_del_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_del_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    }
    return 0;
}

static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}
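/*
 * Legacy (virtio 0.9) register block, backed by an I/O port BAR.  Accesses
 * below VIRTIO_PCI_CONFIG_SIZE() land here; the rest of the BAR is the
 * per-driver configuration space handled by virtio_pci_config_read/write().
 */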
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly? We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = virtio_bus_get_vdev_bad_features(&proxy->bus);
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_reset(DEVICE(proxy));
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        }
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        /* Linux before 2.6.34 drives the device without enabling
           the PCI device bus master bit. Enable it automatically
           for the guest. This is a PCI spec violation but so is
           initiating DMA with bus master bit clear. */
        if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for the guest to discover that an error
         * took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for the guest to discover that an error
         * took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: unexpected address 0x%x value 0x%x\n",
                      __func__, addr, val);
        break;
    }
}
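/* Read side of the legacy register block; unknown registers read as -1. */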
static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = qatomic_xchg(&vdev->isr, 0);
        pci_irq_deassert(&proxy->pci_dev);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}

static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    uint64_t val = 0;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}

static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (vdev == NULL) {
        return;
    }

    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        virtio_config_writew(vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        virtio_config_writel(vdev, addr, val);
        break;
    }
}

static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy,
                                                 hwaddr *off, int len)
{
    int i;
    VirtIOPCIRegion *reg;

    for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) {
        reg = &proxy->regs[i];
        if (*off >= reg->offset &&
            *off + len <= reg->offset + reg->size) {
            *off -= reg->offset;
            return &reg->mr;
        }
    }

    return NULL;
}

/* Below are generic functions to do memcpy from/to an address space,
 * without byteswaps, with input validation.
 *
 * As regular address_space_* APIs all do some kind of byteswap at least for
 * some host/target combinations, we are forced to explicitly convert to a
 * known-endianness integer value.
 * It doesn't really matter which endian format to go through, so the code
 * below selects the endian that causes the least amount of work on the given
 * host.
 *
 * Note: host pointer must be aligned.
 */
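/*
 * For example, a len == 2 access at addr 0x3 is aligned down to 0x2 before
 * the region lookup, and a length other than 1, 2 or 4 bytes is ignored.
 */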
static void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
                                       const uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = pci_get_byte(buf);
        break;
    case 2:
        val = pci_get_word(buf);
        break;
    case 4:
        val = pci_get_long(buf);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        return;
    }
    memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE,
                                 MEMTXATTRS_UNSPECIFIED);
}

static void
virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
                          uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE,
                                MEMTXATTRS_UNSPECIFIED);
    switch (len) {
    case 1:
        pci_set_byte(buf, val);
        break;
    case 2:
        pci_set_word(buf, val);
        break;
    case 4:
        pci_set_long(buf, val);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}

static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    struct virtio_pci_cfg_cap *cfg;

    pci_default_write_config(pci_dev, address, val, len);

    if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
        pcie_cap_flr_write_config(pci_dev, address, val, len);
    }

    if (range_covers_byte(address, len, PCI_COMMAND)) {
        if (!(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            virtio_set_disabled(vdev, true);
            virtio_pci_stop_ioeventfd(proxy);
            virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
        } else {
            virtio_set_disabled(vdev, false);
        }
    }

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_write(proxy, off, cfg->pci_cfg_data, len);
        }
    }
}
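/*
 * Read side of the VIRTIO_PCI_CAP_PCI_CFG window: refresh pci_cfg_data from
 * the selected BAR region before the default config-space read sees it.
 */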
static uint32_t virtio_read_config(PCIDevice *pci_dev,
                                   uint32_t address, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    struct virtio_pci_cfg_cap *cfg;

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_read(proxy, off, cfg->pci_cfg_data, len);
        }
    }

    return pci_default_read_config(pci_dev, address, len);
}

static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        ret = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev);
        if (ret < 0) {
            return ret;
        }
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}

static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}

static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                    unsigned int queue_no,
                                    unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
}

static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                         unsigned int queue_no,
                                         unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}
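/*
 * Reserve an MSI route (and, if the device supports vector masking, an
 * irqfd) for every queue in [0, nvqs).  On failure, roll back whatever was
 * set up for the preceding queues.
 */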
static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    unsigned int vector;
    int ret, queue_no;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
        if (ret < 0) {
            goto undo;
        }
        /* If guest supports masking, set up irqfd now.
         * Otherwise, delay until unmasked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
            if (ret < 0) {
                kvm_virtio_pci_vq_vector_release(proxy, vector);
                goto undo;
            }
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
    return ret;
}

static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    unsigned int vector;
    int queue_no;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        /* If guest supports masking, clean up irqfd now.
         * Otherwise, it was cleaned when masked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
}

static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       MSIMessage msg)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
            kvm_irqchip_commit_routes(kvm_state);
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
    }
    return ret;
}
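/* Mirror image of virtio_pci_vq_vector_unmask() for the mask path. */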
static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                      unsigned int queue_no,
                                      unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
    }
}

static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }

    return 0;

undo:
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}

static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int index;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
        }
        vq = virtio_vector_next_queue(vq);
    }
}

static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}
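/*
 * Wire up (or tear down) the guest notifier eventfd for queue n.  With
 * irqfd, KVM signals the interrupt directly from the eventfd; without it,
 * an fd handler in QEMU injects the interrupt.
 */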
static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (!msix_enabled(&proxy->pci_dev) &&
        vdev->use_guest_notifier_mask &&
        vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return msix_enabled(&proxy->pci_dev);
}

static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /* When deassigning, pass a consistent nvqs value
     * to avoid leaking notifiers.
     */
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_release(proxy, nvqs);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd || k->guest_notifier_mask) && assign) {
        if (with_irqfd) {
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_use(proxy, nvqs);
            if (r < 0) {
                goto assign_error;
            }
        }
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_release(proxy, nvqs);
    }

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}

static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n,
                                           MemoryRegion *mr, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    int offset;

    if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) ||
        virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) {
        return -1;
    }

    if (assign) {
        offset = virtio_pci_queue_mem_mult(proxy) * n;
        memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1);
    } else {
        memory_region_del_subregion(&proxy->notify.mr, mr);
    }

    return 0;
}
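/*
 * Start/stop ioeventfd as the VM resumes or pauses, and re-apply the bus
 * master workaround for streams migrated from old QEMU versions (see the
 * comment below).
 */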
static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (running) {
        /* Old QEMU versions did not set bus master enable on status write.
         * Detect DRIVER set and enable it.
         */
        if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
            (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}

/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
 */

static int virtio_pci_query_nvectors(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);

    return proxy->nvectors;
}

static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    PCIDevice *dev = &proxy->pci_dev;

    return pci_get_address_space(dev);
}

static bool virtio_pci_queue_enabled(DeviceState *d, int n)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return proxy->vqs[n].enabled;
    }

    return virtio_queue_enabled_legacy(vdev, n);
}

static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
                                  struct virtio_pci_cap *cap)
{
    PCIDevice *dev = &proxy->pci_dev;
    int offset;

    offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0,
                                cap->cap_len, &error_abort);

    assert(cap->cap_len >= sizeof *cap);
    memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
           cap->cap_len - PCI_CAP_FLAGS);

    return offset;
}
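/*
 * Modern (virtio 1.0) common configuration structure; the VIRTIO_PCI_COMMON_*
 * offsets correspond to struct virtio_pci_common_cfg in the specification.
 */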
static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        if (proxy->dfselect <= 1) {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            val = (vdev->host_features & ~vdc->legacy_features) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    default:
        val = 0;
    }

    return val;
}

static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (vdev == NULL) {
        return;
    }

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for the guest to discover that an error
         * took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for the guest to discover that an error
         * took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        if (val == 1) {
            virtio_queue_set_num(vdev, vdev->queue_sel,
                                 proxy->vqs[vdev->queue_sel].num);
            virtio_queue_set_rings(vdev, vdev->queue_sel,
                       ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].desc[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].avail[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].used[0]);
            proxy->vqs[vdev->queue_sel].enabled = 1;
        } else {
            virtio_error(vdev, "wrong value for queue_enable %"PRIx64, val);
        }
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    default:
        break;
    }
}

static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    if (virtio_bus_get_device(&proxy->bus) == NULL) {
        return UINT64_MAX;
    }

    return 0;
}

static void virtio_pci_notify_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    unsigned queue = addr / virtio_pci_queue_mem_mult(proxy);

    if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    unsigned queue = val;

    if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    val = qatomic_xchg(&vdev->isr, 0);
    pci_irq_deassert(&proxy->pci_dev);
    return val;
}

static void virtio_pci_isr_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}
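/*
 * Device-specific configuration for modern devices.  Unlike the legacy
 * window, this always uses the little-endian virtio 1.0 layout
 * (virtio_config_modern_*), independent of target endianness.
 */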
static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    switch (size) {
    case 1:
        val = virtio_config_modern_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_modern_readw(vdev, addr);
        break;
    case 4:
        val = virtio_config_modern_readl(vdev, addr);
        break;
    default:
        val = 0;
        break;
    }
    return val;
}

static void virtio_pci_device_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (vdev == NULL) {
        return;
    }

    switch (size) {
    case 1:
        virtio_config_modern_writeb(vdev, addr, val);
        break;
    case 2:
        virtio_config_modern_writew(vdev, addr, val);
        break;
    case 4:
        virtio_config_modern_writel(vdev, addr, val);
        break;
    }
}

static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy,
                                           const char *vdev_name)
{
    static const MemoryRegionOps common_ops = {
        .read = virtio_pci_common_read,
        .write = virtio_pci_common_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps isr_ops = {
        .read = virtio_pci_isr_read,
        .write = virtio_pci_isr_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps device_ops = {
        .read = virtio_pci_device_read,
        .write = virtio_pci_device_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_pio_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write_pio,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    g_autoptr(GString) name = g_string_new(NULL);

    g_string_printf(name, "virtio-pci-common-%s", vdev_name);
    memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                          &common_ops,
                          proxy,
                          name->str,
                          proxy->common.size);

    g_string_printf(name, "virtio-pci-isr-%s", vdev_name);
    memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
                          &isr_ops,
                          proxy,
                          name->str,
                          proxy->isr.size);

    g_string_printf(name, "virtio-pci-device-%s", vdev_name);
    memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
                          &device_ops,
                          proxy,
                          name->str,
                          proxy->device.size);

    g_string_printf(name, "virtio-pci-notify-%s", vdev_name);
    memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
                          &notify_ops,
                          proxy,
                          name->str,
                          proxy->notify.size);

    g_string_printf(name, "virtio-pci-notify-pio-%s", vdev_name);
    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
                          &notify_pio_ops,
                          proxy,
                          name->str,
                          proxy->notify_pio.size);
}

static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length = cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);
}
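/* Convenience wrappers that map/unmap a region in the memory or I/O BAR. */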
static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
                                             VirtIOPCIRegion *region,
                                             struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->modern_bar, proxy->modern_mem_bar_idx);
}

static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
                                            VirtIOPCIRegion *region,
                                            struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->io_bar, proxy->modern_io_bar_idx);
}

static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
                                               VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->modern_bar,
                                &region->mr);
}

static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
                                              VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->io_bar,
                                &region->mr);
}

static void virtio_pci_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_pci_modern(proxy)) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }

    virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
}

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    bool legacy = virtio_pci_legacy(proxy);
    bool modern;
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    /*
     * Virtio capabilities present without
     * VIRTIO_F_VERSION_1 confuses guests
     */
    if (!proxy->ignore_backend_features &&
        !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        virtio_pci_disable_modern(proxy);

        if (!legacy) {
            error_setg(errp, "Device doesn't support modern mode, and legacy"
                             " mode is disabled");
            error_append_hint(errp, "Set disable-legacy to off\n");

            return;
        }
    }

    modern = virtio_pci_modern(proxy);

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }

    if (legacy) {
        if (!virtio_legacy_allowed(vdev)) {
            /*
             * To avoid migration issues, we allow legacy mode when legacy
             * check is disabled in the old machine types (< 5.1).
             */
            if (virtio_legacy_check_disabled(vdev)) {
                warn_report("device is modern-only, but for backward "
                            "compatibility legacy is allowed");
            } else {
                error_setg(errp,
                           "device is modern-only, use disable-legacy=on");
                return;
            }
        }
        if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
            error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by"
                             " neither legacy nor transitional device");
            return;
        }
        /*
         * Legacy and transitional devices use specific subsystem IDs.
         * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID)
         * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default.
         */
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    } else {
        /* pure virtio-1.0 */
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        pci_set_word(config + PCI_DEVICE_ID,
                     0x1040 + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }
    config[PCI_INTERRUPT_PIN] = 1;

    if (modern) {
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(virtio_pci_queue_mem_mult(proxy)),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        struct virtio_pci_notify_cap notify_pio = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_pci_modern_regions_init(proxy, vdev->name);

        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar_idx, NULL);
        if (err) {
            /* Notice when a system that supports MSI-X can't initialize it */
            if (err != -ENOTSUP) {
                warn_report("unable to init msix vectors to %" PRIu32,
                            proxy->nvectors);
            }
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

    if (legacy) {
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
            + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }
}
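/* Undo the BAR mappings that virtio_pci_device_plugged() set up. */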
static void virtio_pci_device_unplugged(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    bool modern = virtio_pci_modern(proxy);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;

    virtio_pci_stop_ioeventfd(proxy);

    if (modern) {
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
        if (modern_pio) {
            virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
        }
    }
}

static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    if (kvm_enabled() && !kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /* fd-based ioevents can't be synchronized in record/replay */
    if (replay_mode != REPLAY_MODE_NONE) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     *   region 0   --  virtio legacy io bar
     *   region 1   --  msi-x bar
     *   region 2   --  virtio modern io bar (off by default)
     *   region 4+5 --  virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar_idx  = 0;
    proxy->msix_bar_idx       = 1;
    proxy->modern_io_bar_idx  = 2;
    proxy->modern_mem_bar_idx = 4;

    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    proxy->notify.offset = 0x3000;
    proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       /* PCI BAR regions must be powers of 2 */
                       pow2ceil(proxy->notify.offset + proxy->notify.size));

    if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
        proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) {
        error_setg(errp, "device cannot work as neither modern nor legacy mode"
                   " is enabled");
        error_append_hint(errp, "Set either disable-modern or disable-legacy"
                          " to off\n");
        return;
    }

    if (pcie_port && pci_is_express(pci_dev)) {
        int pos;
        uint16_t last_pcie_cap_offset = PCI_CONFIG_SPACE_SIZE;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0,
                                 PCI_PM_SIZEOF, errp);
        if (pos < 0) {
            return;
        }

        pci_dev->exp.pm_cap = pos;

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);

        if (proxy->flags & VIRTIO_PCI_FLAG_AER) {
            pcie_aer_init(pci_dev, PCI_ERR_VER, last_pcie_cap_offset,
                          PCI_ERR_SIZEOF, NULL);
            last_pcie_cap_offset += PCI_ERR_SIZEOF;
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
            /* Init error enabling flags */
            pcie_cap_deverr_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) {
            /* Init Link Control Register */
            pcie_cap_lnkctl_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
            /* Init Power Management Control Register */
            pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL,
                         PCI_PM_CTRL_STATE_MASK);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
            pcie_ats_init(pci_dev, last_pcie_cap_offset,
                          proxy->flags & VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED);
            last_pcie_cap_offset += PCI_EXT_CAP_ATS_SIZEOF;
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
            /* Set Function Level Reset capability bit */
            pcie_cap_flr_init(pci_dev);
        }
    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}

static void virtio_pci_exit(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    msix_uninit_exclusive_bar(pci_dev);
    if (proxy->flags & VIRTIO_PCI_FLAG_AER && pcie_port &&
        pci_is_express(pci_dev)) {
        pcie_aer_exit(pci_dev);
    }
}

static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    PCIDevice *dev = PCI_DEVICE(qdev);
    int i;

    virtio_pci_stop_ioeventfd(proxy);
    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
        proxy->vqs[i].num = 0;
        proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
        proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
        proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
    }

    if (pci_is_express(dev)) {
        pcie_cap_deverr_reset(dev);
        pcie_cap_lnkctl_reset(dev);

        pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0);
    }
}

static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
    DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
                     ignore_backend_features, false),
    DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_BIT, false),
    DEFINE_PROP_BIT("x-ats-page-aligned", VirtIOPCIProxy, flags,
static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
    DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
                     ignore_backend_features, false),
    DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_BIT, false),
    DEFINE_PROP_BIT("x-ats-page-aligned", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT, true),
    DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true),
    DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true),
    DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_PM_BIT, true),
    DEFINE_PROP_BIT("x-pcie-flr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_FLR_BIT, true),
    DEFINE_PROP_BIT("aer", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_AER_BIT, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
{
    VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    PCIDevice *pci_dev = &proxy->pci_dev;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
        virtio_pci_modern(proxy)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    vpciklass->parent_dc_realize(qdev, errp);
}

static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    device_class_set_props(dc, virtio_pci_properties);
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    device_class_set_parent_realize(dc, virtio_pci_dc_realize,
                                    &vpciklass->parent_dc_realize);
    dc->reset = virtio_pci_reset;
}

static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
    .abstract      = true,
};

static Property virtio_pci_generic_properties[] = {
    DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_base_class_init(ObjectClass *klass, void *data)
{
    const VirtioPCIDeviceTypeInfo *t = data;
    if (t->class_init) {
        t->class_init(klass, NULL);
    }
}

static void virtio_pci_generic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_pci_generic_properties);
}

static void virtio_pci_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_OFF;
    proxy->disable_modern = false;
}

static void virtio_pci_non_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_ON;
    proxy->disable_modern = false;
}
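
/*
 * Register the QOM types for one virtio-pci device family.  Depending on
 * which names the caller fills in, this registers an abstract base type,
 * a generic type whose modern/legacy mode is chosen automatically, and
 * optional explicit transitional and non-transitional variants.  A typical
 * caller, sketched after the way virtio-blk-pci registers itself:
 *
 *     static const VirtioPCIDeviceTypeInfo virtio_blk_pci_info = {
 *         .base_name             = TYPE_VIRTIO_BLK_PCI,
 *         .generic_name          = "virtio-blk-pci",
 *         .transitional_name     = "virtio-blk-pci-transitional",
 *         .non_transitional_name = "virtio-blk-pci-non-transitional",
 *         .instance_size         = sizeof(VirtIOBlkPCI),
 *         .instance_init         = virtio_blk_pci_instance_init,
 *         .class_init            = virtio_blk_pci_class_init,
 *     };
 *
 *     static void virtio_blk_pci_register(void)
 *     {
 *         virtio_pci_types_register(&virtio_blk_pci_info);
 *     }
 *     type_init(virtio_blk_pci_register)
 */
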
void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
{
    char *base_name = NULL;
    TypeInfo base_type_info = {
        .name          = t->base_name,
        .parent        = t->parent ? t->parent : TYPE_VIRTIO_PCI,
        .instance_size = t->instance_size,
        .instance_init = t->instance_init,
        .class_size    = t->class_size,
        .abstract      = true,
        .interfaces    = t->interfaces,
    };
    TypeInfo generic_type_info = {
        .name = t->generic_name,
        .parent = base_type_info.name,
        .class_init = virtio_pci_generic_class_init,
        .interfaces = (InterfaceInfo[]) {
            { INTERFACE_PCIE_DEVICE },
            { INTERFACE_CONVENTIONAL_PCI_DEVICE },
            { }
        },
    };

    if (!base_type_info.name) {
        /* No base type -> register a single generic device type */
        /* use intermediate %s-base-type to add generic device props */
        base_name = g_strdup_printf("%s-base-type", t->generic_name);
        base_type_info.name = base_name;
        base_type_info.class_init = virtio_pci_generic_class_init;

        generic_type_info.parent = base_name;
        generic_type_info.class_init = virtio_pci_base_class_init;
        generic_type_info.class_data = (void *)t;

        assert(!t->non_transitional_name);
        assert(!t->transitional_name);
    } else {
        base_type_info.class_init = virtio_pci_base_class_init;
        base_type_info.class_data = (void *)t;
    }

    type_register(&base_type_info);
    if (generic_type_info.name) {
        type_register(&generic_type_info);
    }

    if (t->non_transitional_name) {
        const TypeInfo non_transitional_type_info = {
            .name          = t->non_transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_non_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                { INTERFACE_PCIE_DEVICE },
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&non_transitional_type_info);
    }

    if (t->transitional_name) {
        const TypeInfo transitional_type_info = {
            .name          = t->transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                /*
                 * Transitional virtio devices work only as Conventional PCI
                 * devices because they require PIO ports.
                 */
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&transitional_type_info);
    }
    g_free(base_name);
}
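
/*
 * Worked example for the sizing below: take an 8-vCPU guest and a device
 * with two fixed (non-I/O) virtqueues, e.g. virtio-scsi's control and
 * event queues, so fixed_queues = 2:
 *
 *     num_queues = smp.cpus = 8
 *     MIN(8, PCI_MSIX_FLAGS_QSIZE - 2) = MIN(8, 2045) = 8
 *     MIN(8, VIRTIO_QUEUE_MAX - 2)     = MIN(8, 1022) = 8
 *
 * i.e. the vector and virtqueue caps only kick in for guests with
 * thousands of vCPUs; smaller guests simply get one I/O queue per vCPU.
 */
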
unsigned virtio_pci_optimal_num_queues(unsigned fixed_queues)
{
    /*
     * 1:1 vq to vCPU mapping is ideal because the same vCPU that submitted
     * virtqueue buffers can handle their completion. When a different vCPU
     * handles completion it may need to IPI the vCPU that submitted the
     * request and this adds overhead.
     *
     * Virtqueues consume guest RAM and MSI-X vectors. This is wasteful in
     * guests with very many vCPUs and a device that is only used by a few
     * vCPUs. Unfortunately optimizing that case requires manual pinning
     * inside the guest, so those users might as well manually set the number
     * of queues. There is no upper limit that can be applied automatically,
     * and doing so arbitrarily would result in a sudden performance drop
     * once the threshold number of vCPUs is exceeded.
     */
    unsigned num_queues = current_machine->smp.cpus;

    /*
     * The maximum number of MSI-X vectors is PCI_MSIX_FLAGS_QSIZE + 1, but
     * the config change interrupt and the fixed virtqueues must be taken
     * into account too.
     */
    num_queues = MIN(num_queues, PCI_MSIX_FLAGS_QSIZE - fixed_queues);

    /*
     * There is a limit to how many virtqueues a device can have.
     */
    return MIN(num_queues, VIRTIO_QUEUE_MAX - fixed_queues);
}

/* virtio-pci-bus */

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev,
                        virtio_bus_name);
}

static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->pre_plugged = virtio_pci_pre_plugged;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
    k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
    k->get_dma_as = virtio_pci_get_dma_as;
    k->queue_enabled = virtio_pci_queue_enabled;
}

static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_size    = sizeof(VirtioPCIBusClass),
    .class_init    = virtio_pci_bus_class_init,
};

static void virtio_pci_register_types(void)
{
    /* Base types: */
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
}

type_init(virtio_pci_register_types)