/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Paul Brook       <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include "standard-headers/linux/virtio_pci.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-blk.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/virtio-serial.h"
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/virtio-balloon.h"
#include "hw/virtio/virtio-input.h"
#include "hw/pci/pci.h"
#include "qemu/error-report.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
#include "sysemu/kvm.h"
#include "sysemu/block-backend.h"
#include "virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/visitor.h"

#define VIRTIO_PCI_REGION_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_present(dev))

#undef VIRTIO_PCI_CONFIG

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev);

/* virtio device */
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
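 * (virtio_pci_notify() below uses this on the interrupt injection path.)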
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);

    if (msix_enabled(&proxy->pci_dev))
        msix_notify(&proxy->pci_dev, vector);
    else {
        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
        pci_set_irq(&proxy->pci_dev, vdev->isr & 1);
    }
}

static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, vdev->config_vector);
}

static void virtio_pci_load_modern_queue_state(VirtIOPCIQueue *vq,
                                               QEMUFile *f)
{
    vq->num = qemu_get_be16(f);
    vq->enabled = qemu_get_be16(f);
    vq->desc[0] = qemu_get_be32(f);
    vq->desc[1] = qemu_get_be32(f);
    vq->avail[0] = qemu_get_be32(f);
    vq->avail[1] = qemu_get_be32(f);
    vq->used[0] = qemu_get_be32(f);
    vq->used[1] = qemu_get_be32(f);
}

static bool virtio_pci_has_extra_state(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
}

static int get_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIOPCIProxy *proxy = pv;
    int i;

    proxy->dfselect = qemu_get_be32(f);
    proxy->gfselect = qemu_get_be32(f);
    proxy->guest_features[0] = qemu_get_be32(f);
    proxy->guest_features[1] = qemu_get_be32(f);
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        virtio_pci_load_modern_queue_state(&proxy->vqs[i], f);
    }

    return 0;
}

static void virtio_pci_save_modern_queue_state(VirtIOPCIQueue *vq,
                                               QEMUFile *f)
{
    qemu_put_be16(f, vq->num);
    qemu_put_be16(f, vq->enabled);
    qemu_put_be32(f, vq->desc[0]);
    qemu_put_be32(f, vq->desc[1]);
    qemu_put_be32(f, vq->avail[0]);
    qemu_put_be32(f, vq->avail[1]);
    qemu_put_be32(f, vq->used[0]);
    qemu_put_be32(f, vq->used[1]);
}

static void put_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIOPCIProxy *proxy = pv;
    int i;

    qemu_put_be32(f, proxy->dfselect);
    qemu_put_be32(f, proxy->gfselect);
    qemu_put_be32(f, proxy->guest_features[0]);
    qemu_put_be32(f, proxy->guest_features[1]);
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        virtio_pci_save_modern_queue_state(&proxy->vqs[i], f);
    }
}

static const VMStateInfo vmstate_info_virtio_pci_modern_state = {
    .name = "virtqueue_state",
    .get = get_virtio_pci_modern_state,
    .put = put_virtio_pci_modern_state,
};

static bool virtio_pci_modern_state_needed(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;

    return !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
}

static const VMStateDescription vmstate_virtio_pci_modern_state = {
    .name = "virtio_pci/modern_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_pci_modern_state_needed,
    .fields = (VMStateField[]) {
        {
            .name = "modern_state",
            .version_id = 0,
            .field_exists = NULL,
            .size = 0,
            .info = &vmstate_info_virtio_pci_modern_state,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription
vmstate_virtio_pci = {
    .name = "virtio_pci",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_pci_modern_state,
        NULL
    }
};

static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
}

static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
}

static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, virtio_queue_vector(vdev, n));
}

static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    int ret;
    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vdev->config_vector);
    } else {
        vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
    }
    return 0;
}

static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    uint16_t vector;
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }

    return 0;
}

#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000

static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
                                                 int n, bool assign,
                                                 bool set_handler)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
    bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
    bool fast_mmio = kvm_ioeventfd_any_length_enabled();
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    MemoryRegion *modern_mr = &proxy->notify.mr;
    MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
    MemoryRegion *legacy_mr = &proxy->bar;
    hwaddr modern_addr = QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
                         virtio_get_queue_index(vq);
    hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;
    int r = 0;

    if (assign) {
        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            error_report("%s: unable to init event notifier: %d",
                         __func__, r);
            return r;
        }
        virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
        if (modern) {
            if (fast_mmio) {
                memory_region_add_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_add_eventfd(modern_mr,
                                          modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_add_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    } else {
        if (modern) {
            if (fast_mmio) {
                memory_region_del_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_del_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_del_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
        event_notifier_cleanup(notifier);
    }
    return r;
}

static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int n, r;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
        proxy->ioeventfd_disabled ||
        proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, true, true);
        if (r < 0) {
            goto assign_error;
        }
    }
    proxy->ioeventfd_started = true;
    return;

assign_error:
    while (--n >= 0) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
    error_report("%s: failed; falling back to userspace (slower)", __func__);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int r;
    int n;

    if (!proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
}

static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly? We have to assume nothing.
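         * The bus's get_vdev_bad_features() callback supplies a safe
         * fallback feature set in that case.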
         */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = virtio_bus_get_vdev_bad_features(&proxy->bus);
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_stop_ioeventfd(proxy);
            virtio_reset(vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        }
        else
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        }

        /* Linux before 2.6.34 drives the device without enabling
           the PCI device bus master bit. Enable it automatically
           for the guest. This is a PCI spec violation but so is
           initiating DMA with bus master bit clear. */
        if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for the guest to discover that an error occurred. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for the guest to discover that an error occurred. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        error_report("%s: unexpected address 0x%x value 0x%x",
                     __func__, addr, val);
        break;
    }
}

static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it.
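         * It also deasserts the legacy INTx line (pci_irq_deassert below).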
         */
        ret = vdev->isr;
        vdev->isr = 0;
        pci_irq_deassert(&proxy->pci_dev);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}

static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    uint64_t val = 0;
    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}

static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        virtio_config_writew(vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        virtio_config_writel(vdev, addr, val);
        break;
    }
}

static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/* Below are generic functions to do memcpy from/to an address space,
 * without byteswaps, with input validation.
 *
 * As regular address_space_* APIs all do some kind of byteswap at least for
 * some host/target combinations, we are forced to explicitly convert to a
 * known-endianness integer value.
 * It doesn't really matter which endian format to go through, so the code
 * below selects the endian that causes the least amount of work on the given
 * host.
 *
 * Note: host pointer must be aligned.
 */
static
void virtio_address_space_write(AddressSpace *as, hwaddr addr,
                                const uint8_t *buf, int len)
{
    uint32_t val;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
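     * Stray low address bits are masked off below rather than rejected.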
     */
    addr &= ~(len - 1);

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = pci_get_byte(buf);
        address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
        break;
    case 2:
        val = pci_get_word(buf);
        address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
        break;
    case 4:
        val = pci_get_long(buf);
        address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}

static void
virtio_address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    uint32_t val;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
        pci_set_byte(buf, val);
        break;
    case 2:
        val = address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
        pci_set_word(buf, val);
        break;
    case 4:
        val = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
        pci_set_long(buf, val);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}

static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    struct virtio_pci_cfg_cap *cfg;

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
        virtio_pci_stop_ioeventfd(proxy);
        virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
    }

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap +
                       offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_write(&proxy->modern_as, off,
                                       cfg->pci_cfg_data, len);
        }
    }
}

static uint32_t virtio_read_config(PCIDevice *pci_dev,
                                   uint32_t address, int len)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    struct virtio_pci_cfg_cap *cfg;

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap +
                       offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_read(&proxy->modern_as, off,
                                      cfg->pci_cfg_data, len);
        }
    }

    return pci_default_read_config(pci_dev, address, len);
}

static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector,
                                        MSIMessage msg)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        ret = kvm_irqchip_add_msi_route(kvm_state, msg, &proxy->pci_dev);
        if (ret < 0) {
            return ret;
        }
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}

static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}

static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                    unsigned int queue_no,
                                    unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    int ret;
    ret = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
    return ret;
}

static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                         unsigned int queue_no,
                                         unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}

static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    unsigned int vector;
    int ret, queue_no;
    MSIMessage msg;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        msg = msix_get_message(dev, vector);
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector, msg);
        if (ret < 0) {
            goto undo;
        }
        /* If guest supports masking, set up irqfd now.
         * Otherwise, delay until unmasked in the frontend.
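         * (the delayed setup happens in virtio_pci_vq_vector_unmask() below)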
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
            if (ret < 0) {
                kvm_virtio_pci_vq_vector_release(proxy, vector);
                goto undo;
            }
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
    return ret;
}

static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    unsigned int vector;
    int queue_no;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        /* If guest supports masking, clean up irqfd now.
         * Otherwise, it was cleaned when masked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
}

static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       MSIMessage msg)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
        }
    }

    /* If guest supports masking, irqfd is already set up, unmask it.
     * Otherwise, set it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
    }
    return ret;
}

static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                      unsigned int queue_no,
                                      unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
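     * (masking is then handled by the device's guest_notifier_mask callback)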
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
    }
}

static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }

    return 0;

undo:
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}

static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int index;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
        }
        vq = virtio_vector_next_queue(vq);
    }
}

static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}

static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
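        /* with the fd handler gone, the notifier itself can be torn down */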
        event_notifier_cleanup(notifier);
    }

    if (!msix_enabled(&proxy->pci_dev) &&
        vdev->use_guest_notifier_mask &&
        vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return msix_enabled(&proxy->pci_dev);
}

static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /* When deassigning, pass a consistent nvqs value
     * to avoid leaking notifiers.
     */
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_release(proxy, nvqs);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd || k->guest_notifier_mask) && assign) {
        if (with_irqfd) {
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_use(proxy, nvqs);
            if (r < 0) {
                goto assign_error;
            }
        }
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_release(proxy, nvqs);
    }

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}

static int virtio_pci_set_host_notifier(DeviceState *d, int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    /* Stop using ioeventfd for virtqueue kick if the device starts using host
     * notifiers. This makes it easy to avoid stepping on each other's toes.
     */
    proxy->ioeventfd_disabled = assign;
    if (assign) {
        virtio_pci_stop_ioeventfd(proxy);
    }
    /* We don't need to start here: it's not needed because backend
     * currently only stops on status change away from ok,
     * reset, vmstop and such. If we do add code to start here,
     * need to check vmstate, device state etc.
     */
    return virtio_pci_set_host_notifier_internal(proxy, n, assign, false);
}

static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (running) {
        /* Old QEMU versions did not set bus master enable on status write.
         * Detect DRIVER set and enable it.
         */
        if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
            (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}

#ifdef CONFIG_VIRTFS
static void virtio_9p_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    V9fsPCIState *dev = VIRTIO_9P_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static Property virtio_9p_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_9p_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);

    k->realize = virtio_9p_pci_realize;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_9P;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = 0x2;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = virtio_9p_pci_properties;
}

static void virtio_9p_pci_instance_init(Object *obj)
{
    V9fsPCIState *dev = VIRTIO_9P_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_9P);
}

static const TypeInfo virtio_9p_pci_info = {
    .name = TYPE_VIRTIO_9P_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(V9fsPCIState),
    .instance_init = virtio_9p_pci_instance_init,
    .class_init = virtio_9p_pci_class_init,
};
#endif /* CONFIG_VIRTFS */

/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
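 * Concrete devices such as virtio-blk-pci and virtio-scsi-pci below
 * subclass it.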
 */

static int virtio_pci_query_nvectors(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);

    return proxy->nvectors;
}

static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
                                  struct virtio_pci_cap *cap)
{
    PCIDevice *dev = &proxy->pci_dev;
    int offset;

    offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0, cap->cap_len);
    assert(offset > 0);

    assert(cap->cap_len >= sizeof *cap);
    memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
           cap->cap_len - PCI_CAP_FLAGS);

    return offset;
}

static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        if (proxy->dfselect <= 1) {
            val = (vdev->host_features & ~VIRTIO_LEGACY_FEATURES) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    default:
        val = 0;
    }

    return val;
}

static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for the guest to discover that an error occurred. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for the guest to discover that an error occurred. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        /* TODO: need a way to put num back on reset. */
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        virtio_queue_set_rings(vdev, vdev->queue_sel,
                       ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].desc[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].avail[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].used[0]);
        proxy->vqs[vdev->queue_sel].enabled = 1;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    default:
        break;
    }
}

static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    return 0;
}

static void virtio_pci_notify_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    unsigned queue = addr / QEMU_VIRTIO_PCI_QUEUE_MEM_MULT;

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    unsigned queue = val;

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val = vdev->isr;

    vdev->isr = 0;
    pci_irq_deassert(&proxy->pci_dev);
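    /* same read-to-clear and INTx-deassert behaviour as the legacy ISR read */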

    return val;
}

static void virtio_pci_isr_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}

static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIODevice *vdev = opaque;
    uint64_t val = 0;

    switch (size) {
    case 1:
        val = virtio_config_modern_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_modern_readw(vdev, addr);
        break;
    case 4:
        val = virtio_config_modern_readl(vdev, addr);
        break;
    }
    return val;
}

static void virtio_pci_device_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    switch (size) {
    case 1:
        virtio_config_modern_writeb(vdev, addr, val);
        break;
    case 2:
        virtio_config_modern_writew(vdev, addr, val);
        break;
    case 4:
        virtio_config_modern_writel(vdev, addr, val);
        break;
    }
}

static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
{
    static const MemoryRegionOps common_ops = {
        .read = virtio_pci_common_read,
        .write = virtio_pci_common_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps isr_ops = {
        .read = virtio_pci_isr_read,
        .write = virtio_pci_isr_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps device_ops = {
        .read = virtio_pci_device_read,
        .write = virtio_pci_device_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_pio_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write_pio,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };

    memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                          &common_ops,
                          proxy,
                          "virtio-pci-common",
                          proxy->common.size);

    memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
                          &isr_ops,
                          proxy,
                          "virtio-pci-isr",
                          proxy->isr.size);

    memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
                          &device_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-device",
                          proxy->device.size);

    memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
                          &notify_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify",
                          proxy->notify.size);

    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
                          &notify_pio_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify-pio",
                          proxy->notify.size);
}

static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length =
        cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);
}

static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
                                             VirtIOPCIRegion *region,
                                             struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->modern_bar, proxy->modern_mem_bar);
}

static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
                                            VirtIOPCIRegion *region,
                                            struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->io_bar, proxy->modern_io_bar);
}

static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
                                               VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->modern_bar,
                                &region->mr);
}

static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
                                              VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->io_bar,
                                &region->mr);
}

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
    bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }

    if (legacy) {
        /* legacy and transitional */
        pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
                     pci_get_word(config + PCI_VENDOR_ID));
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    } else {
        /* pure virtio-1.0 */
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        pci_set_word(config + PCI_DEVICE_ID,
                     0x1040 + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }
    config[PCI_INTERRUPT_PIN] = 1;

    if (modern) {
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(QEMU_VIRTIO_PCI_QUEUE_MEM_MULT),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        struct virtio_pci_notify_cap notify_pio = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
        virtio_pci_modern_regions_init(proxy);

        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar);
        if (err) {
            /* Notice when a system that supports MSI-X can't initialize it. */
            if (err != -ENOTSUP) {
                error_report("unable to init msix vectors to %" PRIu32,
                             proxy->nvectors);
            }
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

    if (legacy) {
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
               + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }

    if (!kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
}

static void virtio_pci_device_unplugged(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;

    virtio_pci_stop_ioeventfd(proxy);

    if (modern) {
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
        if (modern_pio) {
            virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
        }
    }
}

static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
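     * (when modern-pio notify is enabled, region 2 is additionally used as
     *  a small I/O bar for notifications)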
     *
     *   region 0   --  virtio legacy io bar
     *   region 1   --  msi-x bar
     *   region 4+5 --  virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar = 0;
    proxy->msix_bar = 1;
    proxy->modern_io_bar = 2;
    proxy->modern_mem_bar = 4;

    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    proxy->notify.offset = 0x3000;
    proxy->notify.size =
        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       2 * QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
                       VIRTIO_QUEUE_MAX);

    memory_region_init_alias(&proxy->modern_cfg,
                             OBJECT(proxy),
                             "virtio-pci-cfg",
                             &proxy->modern_bar,
                             0,
                             memory_region_size(&proxy->modern_bar));

    address_space_init(&proxy->modern_as, &proxy->modern_cfg,
                       "virtio-pci-cfg-as");

    if (pci_is_express(pci_dev) && pci_bus_is_express(pci_dev->bus) &&
        !pci_bus_is_root(pci_dev->bus)) {
        int pos;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0, PCI_PM_SIZEOF);
        assert(pos > 0);

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);
    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
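         * i.e. expose a conventional PCI config space when not plugged
         * into a PCIe bus.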
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}

static void virtio_pci_exit(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);

    msix_uninit_exclusive_bar(pci_dev);
    address_space_destroy(&proxy->modern_as);
}

static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    int i;

    virtio_pci_stop_ioeventfd(proxy);
    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
    }
}

static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy,
                    flags, VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("disable-legacy", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT, false),
    DEFINE_PROP_BIT("disable-modern", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT, true),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
{
    VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    PCIDevice *pci_dev = &proxy->pci_dev;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
        !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    vpciklass->parent_dc_realize(qdev, errp);
}

static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    dc->props = virtio_pci_properties;
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    vpciklass->parent_dc_realize = dc->realize;
    dc->realize = virtio_pci_dc_realize;
    dc->reset = virtio_pci_reset;
}

static const TypeInfo virtio_pci_info = {
    .name = TYPE_VIRTIO_PCI,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init = virtio_pci_class_init,
    .class_size = sizeof(VirtioPCIClass),
    .abstract = true,
};

/* virtio-blk-pci */

static Property virtio_blk_pci_properties[] = {
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    qdev_set_parent_bus(vdev,
/* virtio-blk-pci */

static Property virtio_blk_pci_properties[] = {
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_blk_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = virtio_blk_pci_properties;
    k->realize = virtio_blk_pci_realize;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void virtio_blk_pci_instance_init(Object *obj)
{
    VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_BLK);
    object_property_add_alias(obj, "iothread", OBJECT(&dev->vdev), "iothread",
                              &error_abort);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const TypeInfo virtio_blk_pci_info = {
    .name          = TYPE_VIRTIO_BLK_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOBlkPCI),
    .instance_init = virtio_blk_pci_instance_init,
    .class_init    = virtio_blk_pci_class_init,
};

/* virtio-scsi-pci */

static Property virtio_scsi_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                       DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
    DeviceState *proxy = DEVICE(vpci_dev);
    char *bus_name;

    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = vs->conf.num_queues + 3;
    }
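    /*
     * Editor's reading of the "+ 3": one vector per request queue
     * (num_queues), plus the control and event virtqueues, plus one vector
     * for configuration changes.  The code itself only guarantees that an
     * unspecified "vectors" property gets this default.
     */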
    /*
     * For command line compatibility, this sets the virtio-scsi-device bus
     * name as before.
     */
    if (proxy->id) {
        bus_name = g_strdup_printf("%s.0", proxy->id);
        virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
        g_free(bus_name);
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_scsi_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = virtio_scsi_pci_realize;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = virtio_scsi_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void virtio_scsi_pci_instance_init(Object *obj)
{
    VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_SCSI);
    object_property_add_alias(obj, "iothread", OBJECT(&dev->vdev), "iothread",
                              &error_abort);
}

static const TypeInfo virtio_scsi_pci_info = {
    .name          = TYPE_VIRTIO_SCSI_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOSCSIPCI),
    .instance_init = virtio_scsi_pci_instance_init,
    .class_init    = virtio_scsi_pci_class_init,
};

/* vhost-scsi-pci */

#ifdef CONFIG_VHOST_SCSI
static Property vhost_scsi_pci_properties[] = {
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                       DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_END_OF_LIST(),
};

static void vhost_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VHostSCSIPCI *dev = VHOST_SCSI_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = vs->conf.num_queues + 3;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void vhost_scsi_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = vhost_scsi_pci_realize;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = vhost_scsi_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void vhost_scsi_pci_instance_init(Object *obj)
{
    VHostSCSIPCI *dev = VHOST_SCSI_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VHOST_SCSI);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const TypeInfo vhost_scsi_pci_info = {
    .name          = TYPE_VHOST_SCSI_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VHostSCSIPCI),
    .instance_init = vhost_scsi_pci_instance_init,
    .class_init    = vhost_scsi_pci_class_init,
};
#endif

/* virtio-balloon-pci */
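/*
 * Editor's note: the "class" property below exists for backward
 * compatibility.  QEMU before 1.1 exposed the balloon with a
 * PCI_CLASS_MEMORY_RAM class code, so virtio_balloon_pci_realize() accepts
 * that value (or PCI_CLASS_OTHERS) and silently rewrites anything else to
 * PCI_CLASS_OTHERS.
 */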
static Property virtio_balloon_pci_properties[] = {
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_balloon_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    if (vpci_dev->class_code != PCI_CLASS_OTHERS &&
        vpci_dev->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
        vpci_dev->class_code = PCI_CLASS_OTHERS;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_balloon_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = virtio_balloon_pci_realize;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->props = virtio_balloon_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_OTHERS;
}

static void virtio_balloon_pci_instance_init(Object *obj)
{
    VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_BALLOON);
    object_property_add_alias(obj, "guest-stats", OBJECT(&dev->vdev),
                              "guest-stats", &error_abort);
    object_property_add_alias(obj, "guest-stats-polling-interval",
                              OBJECT(&dev->vdev),
                              "guest-stats-polling-interval", &error_abort);
}

static const TypeInfo virtio_balloon_pci_info = {
    .name          = TYPE_VIRTIO_BALLOON_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOBalloonPCI),
    .instance_init = virtio_balloon_pci_instance_init,
    .class_init    = virtio_balloon_pci_class_init,
};

/* virtio-serial-pci */

static void virtio_serial_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    DeviceState *proxy = DEVICE(vpci_dev);
    char *bus_name;

    if (vpci_dev->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
        vpci_dev->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */
        vpci_dev->class_code != PCI_CLASS_OTHERS) {        /* qemu-kvm */
        vpci_dev->class_code = PCI_CLASS_COMMUNICATION_OTHER;
    }

    /* backwards-compatibility with machines that were created with
       DEV_NVECTORS_UNSPECIFIED */
    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = dev->vdev.serial.max_virtserial_ports + 1;
    }
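    /*
     * Editor's illustration (hypothetical command line): with
     *   -device virtio-serial-pci,id=ser0 -device virtconsole,bus=ser0.0
     * the child bus should end up named "ser0.0" because of the
     * compatibility naming applied just below.
     */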
    /*
     * For command line compatibility, this sets the virtio-serial-device bus
     * name as before.
     */
    if (proxy->id) {
        bus_name = g_strdup_printf("%s.0", proxy->id);
        virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
        g_free(bus_name);
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static Property virtio_serial_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_serial_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = virtio_serial_pci_realize;
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
    dc->props = virtio_serial_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
}

static void virtio_serial_pci_instance_init(Object *obj)
{
    VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_SERIAL);
}

static const TypeInfo virtio_serial_pci_info = {
    .name          = TYPE_VIRTIO_SERIAL_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOSerialPCI),
    .instance_init = virtio_serial_pci_instance_init,
    .class_init    = virtio_serial_pci_class_init,
};

/* virtio-net-pci */

static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
    DEFINE_PROP_END_OF_LIST(),
};
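/*
 * Editor's reading of the defaults above: three MSI-X vectors cover the RX
 * and TX queues of the default single queue pair plus the configuration
 * interrupt, and ioeventfd is left disabled for virtio-net at this point in
 * the code's history.
 */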
static void virtio_net_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    DeviceState *qdev = DEVICE(vpci_dev);
    VirtIONetPCI *dev = VIRTIO_NET_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    virtio_net_set_netclient_name(&dev->vdev, qdev->id,
                                  object_get_typename(OBJECT(qdev)));
    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_net_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    k->romfile = "efi-virtio.rom";
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->props = virtio_net_properties;
    vpciklass->realize = virtio_net_pci_realize;
}

static void virtio_net_pci_instance_init(Object *obj)
{
    VirtIONetPCI *dev = VIRTIO_NET_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_NET);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const TypeInfo virtio_net_pci_info = {
    .name          = TYPE_VIRTIO_NET_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIONetPCI),
    .instance_init = virtio_net_pci_instance_init,
    .class_init    = virtio_net_pci_class_init,
};

/* virtio-rng-pci */

static void virtio_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIORngPCI *vrng = VIRTIO_RNG_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&vrng->vdev);
    Error *err = NULL;

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    object_property_set_link(OBJECT(vrng),
                             OBJECT(vrng->vdev.conf.rng), "rng",
                             NULL);
}

static void virtio_rng_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = virtio_rng_pci_realize;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);

    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_RNG;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_OTHERS;
}

static void virtio_rng_initfn(Object *obj)
{
    VirtIORngPCI *dev = VIRTIO_RNG_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_RNG);
    object_property_add_alias(obj, "rng", OBJECT(&dev->vdev), "rng",
                              &error_abort);
}

static const TypeInfo virtio_rng_pci_info = {
    .name          = TYPE_VIRTIO_RNG_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIORngPCI),
    .instance_init = virtio_rng_initfn,
    .class_init    = virtio_rng_pci_class_init,
};

/* virtio-input-pci */

static Property virtio_input_pci_properties[] = {
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_input_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOInputPCI *vinput = VIRTIO_INPUT_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&vinput->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    /* force virtio-1.0 */
    vpci_dev->flags &= ~VIRTIO_PCI_FLAG_DISABLE_MODERN;
    vpci_dev->flags |= VIRTIO_PCI_FLAG_DISABLE_LEGACY;
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_input_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    dc->props = virtio_input_pci_properties;
    k->realize = virtio_input_pci_realize;
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);

    pcidev_k->class_id = PCI_CLASS_INPUT_OTHER;
}

static void virtio_input_hid_kbd_pci_class_init(ObjectClass *klass, void *data)
{
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    pcidev_k->class_id = PCI_CLASS_INPUT_KEYBOARD;
}

static void virtio_input_hid_mouse_pci_class_init(ObjectClass *klass,
                                                  void *data)
{
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    pcidev_k->class_id = PCI_CLASS_INPUT_MOUSE;
}
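/*
 * Editor's note: the keyboard, mouse and tablet variants below all share
 * VirtIOInputHIDPCI; their instance_init functions differ only in which
 * virtio-input child type they embed (TYPE_VIRTIO_KEYBOARD,
 * TYPE_VIRTIO_MOUSE, TYPE_VIRTIO_TABLET).
 */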
static void virtio_keyboard_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_KEYBOARD);
}

static void virtio_mouse_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_MOUSE);
}

static void virtio_tablet_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_TABLET);
}

static const TypeInfo virtio_input_pci_info = {
    .name          = TYPE_VIRTIO_INPUT_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOInputPCI),
    .class_init    = virtio_input_pci_class_init,
    .abstract      = true,
};

static const TypeInfo virtio_input_hid_pci_info = {
    .name          = TYPE_VIRTIO_INPUT_HID_PCI,
    .parent        = TYPE_VIRTIO_INPUT_PCI,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .abstract      = true,
};

static const TypeInfo virtio_keyboard_pci_info = {
    .name          = TYPE_VIRTIO_KEYBOARD_PCI,
    .parent        = TYPE_VIRTIO_INPUT_HID_PCI,
    .class_init    = virtio_input_hid_kbd_pci_class_init,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_keyboard_initfn,
};

static const TypeInfo virtio_mouse_pci_info = {
    .name          = TYPE_VIRTIO_MOUSE_PCI,
    .parent        = TYPE_VIRTIO_INPUT_HID_PCI,
    .class_init    = virtio_input_hid_mouse_pci_class_init,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_mouse_initfn,
};

static const TypeInfo virtio_tablet_pci_info = {
    .name          = TYPE_VIRTIO_TABLET_PCI,
    .parent        = TYPE_VIRTIO_INPUT_HID_PCI,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_tablet_initfn,
};

#ifdef CONFIG_LINUX
static void virtio_host_initfn(Object *obj)
{
    VirtIOInputHostPCI *dev = VIRTIO_INPUT_HOST_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_INPUT_HOST);
}

static const TypeInfo virtio_host_pci_info = {
    .name          = TYPE_VIRTIO_INPUT_HOST_PCI,
    .parent        = TYPE_VIRTIO_INPUT_PCI,
    .instance_size = sizeof(VirtIOInputHostPCI),
    .instance_init = virtio_host_initfn,
};
#endif

/* virtio-pci-bus */

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev,
                        virtio_bus_name);
}
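/*
 * Editor's summary: virtio_pci_bus_class_init() below is where the
 * transport-independent virtio core gets wired to this PCI transport.
 * Notification, config/queue save and load, host/guest notifier setup and
 * plug/unplug are all pointed at the virtio_pci_* implementations defined
 * earlier in this file.
 */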
static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_host_notifier = virtio_pci_set_host_notifier;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
}

static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_init    = virtio_pci_bus_class_init,
};

static void virtio_pci_register_types(void)
{
    type_register_static(&virtio_rng_pci_info);
    type_register_static(&virtio_input_pci_info);
    type_register_static(&virtio_input_hid_pci_info);
    type_register_static(&virtio_keyboard_pci_info);
    type_register_static(&virtio_mouse_pci_info);
    type_register_static(&virtio_tablet_pci_info);
#ifdef CONFIG_LINUX
    type_register_static(&virtio_host_pci_info);
#endif
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
#ifdef CONFIG_VIRTFS
    type_register_static(&virtio_9p_pci_info);
#endif
    type_register_static(&virtio_blk_pci_info);
    type_register_static(&virtio_scsi_pci_info);
    type_register_static(&virtio_balloon_pci_info);
    type_register_static(&virtio_serial_pci_info);
    type_register_static(&virtio_net_pci_info);
#ifdef CONFIG_VHOST_SCSI
    type_register_static(&vhost_scsi_pci_info);
#endif
}

type_init(virtio_pci_register_types)
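/*
 * Usage sketch (editor's illustration, not part of the original code): the
 * transport flags defined in virtio_pci_properties can force a device into
 * a particular virtio mode on the command line, e.g.
 *
 *   -device virtio-net-pci,disable-legacy=on,disable-modern=off
 *
 * for a virtio 1.0-only device, assuming the flags are honoured by
 * virtio_pci_device_plugged() as wired up in this file.
 */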