1 /* 2 * Virtio PCI Bindings 3 * 4 * Copyright IBM, Corp. 2007 5 * Copyright (c) 2009 CodeSourcery 6 * 7 * Authors: 8 * Anthony Liguori <aliguori@us.ibm.com> 9 * Paul Brook <paul@codesourcery.com> 10 * 11 * This work is licensed under the terms of the GNU GPL, version 2. See 12 * the COPYING file in the top-level directory. 13 * 14 * Contributions after 2012-01-13 are licensed under the terms of the 15 * GNU GPL, version 2 or (at your option) any later version. 16 */ 17 18 #include "qemu/osdep.h" 19 20 #include "standard-headers/linux/virtio_pci.h" 21 #include "hw/virtio/virtio.h" 22 #include "hw/virtio/virtio-blk.h" 23 #include "hw/virtio/virtio-net.h" 24 #include "hw/virtio/virtio-serial.h" 25 #include "hw/virtio/virtio-scsi.h" 26 #include "hw/virtio/virtio-balloon.h" 27 #include "hw/virtio/virtio-input.h" 28 #include "hw/pci/pci.h" 29 #include "qapi/error.h" 30 #include "qemu/error-report.h" 31 #include "hw/pci/msi.h" 32 #include "hw/pci/msix.h" 33 #include "hw/loader.h" 34 #include "sysemu/kvm.h" 35 #include "sysemu/block-backend.h" 36 #include "virtio-pci.h" 37 #include "qemu/range.h" 38 #include "hw/virtio/virtio-bus.h" 39 #include "qapi/visitor.h" 40 41 #define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev)) 42 43 #undef VIRTIO_PCI_CONFIG 44 45 /* The remaining space is defined by each driver as the per-driver 46 * configuration space */ 47 #define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev)) 48 49 static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size, 50 VirtIOPCIProxy *dev); 51 static void virtio_pci_reset(DeviceState *qdev); 52 53 /* virtio device */ 54 /* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */ 55 static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d) 56 { 57 return container_of(d, VirtIOPCIProxy, pci_dev.qdev); 58 } 59 60 /* DeviceState to VirtIOPCIProxy. Note: used on datapath, 61 * be careful and test performance if you change this. 
62 */ 63 static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d) 64 { 65 return container_of(d, VirtIOPCIProxy, pci_dev.qdev); 66 } 67 68 static void virtio_pci_notify(DeviceState *d, uint16_t vector) 69 { 70 VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d); 71 72 if (msix_enabled(&proxy->pci_dev)) 73 msix_notify(&proxy->pci_dev, vector); 74 else { 75 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 76 pci_set_irq(&proxy->pci_dev, vdev->isr & 1); 77 } 78 } 79 80 static void virtio_pci_save_config(DeviceState *d, QEMUFile *f) 81 { 82 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 83 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 84 85 pci_device_save(&proxy->pci_dev, f); 86 msix_save(&proxy->pci_dev, f); 87 if (msix_present(&proxy->pci_dev)) 88 qemu_put_be16(f, vdev->config_vector); 89 } 90 91 static void virtio_pci_load_modern_queue_state(VirtIOPCIQueue *vq, 92 QEMUFile *f) 93 { 94 vq->num = qemu_get_be16(f); 95 vq->enabled = qemu_get_be16(f); 96 vq->desc[0] = qemu_get_be32(f); 97 vq->desc[1] = qemu_get_be32(f); 98 vq->avail[0] = qemu_get_be32(f); 99 vq->avail[1] = qemu_get_be32(f); 100 vq->used[0] = qemu_get_be32(f); 101 vq->used[1] = qemu_get_be32(f); 102 } 103 104 static bool virtio_pci_has_extra_state(DeviceState *d) 105 { 106 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 107 108 return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA; 109 } 110 111 static int get_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size) 112 { 113 VirtIOPCIProxy *proxy = pv; 114 int i; 115 116 proxy->dfselect = qemu_get_be32(f); 117 proxy->gfselect = qemu_get_be32(f); 118 proxy->guest_features[0] = qemu_get_be32(f); 119 proxy->guest_features[1] = qemu_get_be32(f); 120 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 121 virtio_pci_load_modern_queue_state(&proxy->vqs[i], f); 122 } 123 124 return 0; 125 } 126 127 static void virtio_pci_save_modern_queue_state(VirtIOPCIQueue *vq, 128 QEMUFile *f) 129 { 130 qemu_put_be16(f, vq->num); 131 
qemu_put_be16(f, vq->enabled); 132 qemu_put_be32(f, vq->desc[0]); 133 qemu_put_be32(f, vq->desc[1]); 134 qemu_put_be32(f, vq->avail[0]); 135 qemu_put_be32(f, vq->avail[1]); 136 qemu_put_be32(f, vq->used[0]); 137 qemu_put_be32(f, vq->used[1]); 138 } 139 140 static void put_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size) 141 { 142 VirtIOPCIProxy *proxy = pv; 143 int i; 144 145 qemu_put_be32(f, proxy->dfselect); 146 qemu_put_be32(f, proxy->gfselect); 147 qemu_put_be32(f, proxy->guest_features[0]); 148 qemu_put_be32(f, proxy->guest_features[1]); 149 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 150 virtio_pci_save_modern_queue_state(&proxy->vqs[i], f); 151 } 152 } 153 154 static const VMStateInfo vmstate_info_virtio_pci_modern_state = { 155 .name = "virtqueue_state", 156 .get = get_virtio_pci_modern_state, 157 .put = put_virtio_pci_modern_state, 158 }; 159 160 static bool virtio_pci_modern_state_needed(void *opaque) 161 { 162 VirtIOPCIProxy *proxy = opaque; 163 164 return !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN); 165 } 166 167 static const VMStateDescription vmstate_virtio_pci_modern_state = { 168 .name = "virtio_pci/modern_state", 169 .version_id = 1, 170 .minimum_version_id = 1, 171 .needed = &virtio_pci_modern_state_needed, 172 .fields = (VMStateField[]) { 173 { 174 .name = "modern_state", 175 .version_id = 0, 176 .field_exists = NULL, 177 .size = 0, 178 .info = &vmstate_info_virtio_pci_modern_state, 179 .flags = VMS_SINGLE, 180 .offset = 0, 181 }, 182 VMSTATE_END_OF_LIST() 183 } 184 }; 185 186 static const VMStateDescription vmstate_virtio_pci = { 187 .name = "virtio_pci", 188 .version_id = 1, 189 .minimum_version_id = 1, 190 .minimum_version_id_old = 1, 191 .fields = (VMStateField[]) { 192 VMSTATE_END_OF_LIST() 193 }, 194 .subsections = (const VMStateDescription*[]) { 195 &vmstate_virtio_pci_modern_state, 196 NULL 197 } 198 }; 199 200 static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f) 201 { 202 VirtIOPCIProxy *proxy = 
to_virtio_pci_proxy(d); 203 204 vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL); 205 } 206 207 static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f) 208 { 209 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 210 211 return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1); 212 } 213 214 static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f) 215 { 216 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 217 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 218 219 if (msix_present(&proxy->pci_dev)) 220 qemu_put_be16(f, virtio_queue_vector(vdev, n)); 221 } 222 223 static int virtio_pci_load_config(DeviceState *d, QEMUFile *f) 224 { 225 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 226 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 227 228 int ret; 229 ret = pci_device_load(&proxy->pci_dev, f); 230 if (ret) { 231 return ret; 232 } 233 msix_unuse_all_vectors(&proxy->pci_dev); 234 msix_load(&proxy->pci_dev, f); 235 if (msix_present(&proxy->pci_dev)) { 236 qemu_get_be16s(f, &vdev->config_vector); 237 } else { 238 vdev->config_vector = VIRTIO_NO_VECTOR; 239 } 240 if (vdev->config_vector != VIRTIO_NO_VECTOR) { 241 return msix_vector_use(&proxy->pci_dev, vdev->config_vector); 242 } 243 return 0; 244 } 245 246 static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f) 247 { 248 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); 249 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 250 251 uint16_t vector; 252 if (msix_present(&proxy->pci_dev)) { 253 qemu_get_be16s(f, &vector); 254 } else { 255 vector = VIRTIO_NO_VECTOR; 256 } 257 virtio_queue_set_vector(vdev, n, vector); 258 if (vector != VIRTIO_NO_VECTOR) { 259 return msix_vector_use(&proxy->pci_dev, vector); 260 } 261 262 return 0; 263 } 264 265 #define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000 266 267 static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy, 268 int n, bool assign, bool set_handler) 269 { 270 VirtIODevice *vdev = 
virtio_bus_get_device(&proxy->bus); 271 VirtQueue *vq = virtio_get_queue(vdev, n); 272 EventNotifier *notifier = virtio_queue_get_host_notifier(vq); 273 bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY); 274 bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN); 275 bool fast_mmio = kvm_ioeventfd_any_length_enabled(); 276 bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; 277 MemoryRegion *modern_mr = &proxy->notify.mr; 278 MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr; 279 MemoryRegion *legacy_mr = &proxy->bar; 280 hwaddr modern_addr = QEMU_VIRTIO_PCI_QUEUE_MEM_MULT * 281 virtio_get_queue_index(vq); 282 hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY; 283 int r = 0; 284 285 if (assign) { 286 r = event_notifier_init(notifier, 1); 287 if (r < 0) { 288 error_report("%s: unable to init event notifier: %d", 289 __func__, r); 290 return r; 291 } 292 virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler); 293 if (modern) { 294 if (fast_mmio) { 295 memory_region_add_eventfd(modern_mr, modern_addr, 0, 296 false, n, notifier); 297 } else { 298 memory_region_add_eventfd(modern_mr, modern_addr, 2, 299 false, n, notifier); 300 } 301 if (modern_pio) { 302 memory_region_add_eventfd(modern_notify_mr, 0, 2, 303 true, n, notifier); 304 } 305 } 306 if (legacy) { 307 memory_region_add_eventfd(legacy_mr, legacy_addr, 2, 308 true, n, notifier); 309 } 310 } else { 311 if (modern) { 312 if (fast_mmio) { 313 memory_region_del_eventfd(modern_mr, modern_addr, 0, 314 false, n, notifier); 315 } else { 316 memory_region_del_eventfd(modern_mr, modern_addr, 2, 317 false, n, notifier); 318 } 319 if (modern_pio) { 320 memory_region_del_eventfd(modern_notify_mr, 0, 2, 321 true, n, notifier); 322 } 323 } 324 if (legacy) { 325 memory_region_del_eventfd(legacy_mr, legacy_addr, 2, 326 true, n, notifier); 327 } 328 virtio_queue_set_host_notifier_fd_handler(vq, false, false); 329 event_notifier_cleanup(notifier); 330 } 331 return r; 332 } 333 334 
static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy) 335 { 336 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 337 int n, r; 338 339 if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) || 340 proxy->ioeventfd_disabled || 341 proxy->ioeventfd_started) { 342 return; 343 } 344 345 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { 346 if (!virtio_queue_get_num(vdev, n)) { 347 continue; 348 } 349 350 r = virtio_pci_set_host_notifier_internal(proxy, n, true, true); 351 if (r < 0) { 352 goto assign_error; 353 } 354 } 355 proxy->ioeventfd_started = true; 356 return; 357 358 assign_error: 359 while (--n >= 0) { 360 if (!virtio_queue_get_num(vdev, n)) { 361 continue; 362 } 363 364 r = virtio_pci_set_host_notifier_internal(proxy, n, false, false); 365 assert(r >= 0); 366 } 367 proxy->ioeventfd_started = false; 368 error_report("%s: failed. Fallback to a userspace (slower).", __func__); 369 } 370 371 static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy) 372 { 373 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 374 int r; 375 int n; 376 377 if (!proxy->ioeventfd_started) { 378 return; 379 } 380 381 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { 382 if (!virtio_queue_get_num(vdev, n)) { 383 continue; 384 } 385 386 r = virtio_pci_set_host_notifier_internal(proxy, n, false, false); 387 assert(r >= 0); 388 } 389 proxy->ioeventfd_started = false; 390 } 391 392 static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val) 393 { 394 VirtIOPCIProxy *proxy = opaque; 395 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 396 hwaddr pa; 397 398 switch (addr) { 399 case VIRTIO_PCI_GUEST_FEATURES: 400 /* Guest does not negotiate properly? We have to assume nothing. 
*/ 401 if (val & (1 << VIRTIO_F_BAD_FEATURE)) { 402 val = virtio_bus_get_vdev_bad_features(&proxy->bus); 403 } 404 virtio_set_features(vdev, val); 405 break; 406 case VIRTIO_PCI_QUEUE_PFN: 407 pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT; 408 if (pa == 0) { 409 virtio_pci_reset(DEVICE(proxy)); 410 } 411 else 412 virtio_queue_set_addr(vdev, vdev->queue_sel, pa); 413 break; 414 case VIRTIO_PCI_QUEUE_SEL: 415 if (val < VIRTIO_QUEUE_MAX) 416 vdev->queue_sel = val; 417 break; 418 case VIRTIO_PCI_QUEUE_NOTIFY: 419 if (val < VIRTIO_QUEUE_MAX) { 420 virtio_queue_notify(vdev, val); 421 } 422 break; 423 case VIRTIO_PCI_STATUS: 424 if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) { 425 virtio_pci_stop_ioeventfd(proxy); 426 } 427 428 virtio_set_status(vdev, val & 0xFF); 429 430 if (val & VIRTIO_CONFIG_S_DRIVER_OK) { 431 virtio_pci_start_ioeventfd(proxy); 432 } 433 434 if (vdev->status == 0) { 435 virtio_pci_reset(DEVICE(proxy)); 436 } 437 438 /* Linux before 2.6.34 drives the device without enabling 439 the PCI device bus master bit. Enable it automatically 440 for the guest. This is a PCI spec violation but so is 441 initiating DMA with bus master bit clear. */ 442 if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) { 443 pci_default_write_config(&proxy->pci_dev, PCI_COMMAND, 444 proxy->pci_dev.config[PCI_COMMAND] | 445 PCI_COMMAND_MASTER, 1); 446 } 447 break; 448 case VIRTIO_MSI_CONFIG_VECTOR: 449 msix_vector_unuse(&proxy->pci_dev, vdev->config_vector); 450 /* Make it possible for guest to discover an error took place. */ 451 if (msix_vector_use(&proxy->pci_dev, val) < 0) 452 val = VIRTIO_NO_VECTOR; 453 vdev->config_vector = val; 454 break; 455 case VIRTIO_MSI_QUEUE_VECTOR: 456 msix_vector_unuse(&proxy->pci_dev, 457 virtio_queue_vector(vdev, vdev->queue_sel)); 458 /* Make it possible for guest to discover an error took place. 
*/ 459 if (msix_vector_use(&proxy->pci_dev, val) < 0) 460 val = VIRTIO_NO_VECTOR; 461 virtio_queue_set_vector(vdev, vdev->queue_sel, val); 462 break; 463 default: 464 error_report("%s: unexpected address 0x%x value 0x%x", 465 __func__, addr, val); 466 break; 467 } 468 } 469 470 static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr) 471 { 472 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 473 uint32_t ret = 0xFFFFFFFF; 474 475 switch (addr) { 476 case VIRTIO_PCI_HOST_FEATURES: 477 ret = vdev->host_features; 478 break; 479 case VIRTIO_PCI_GUEST_FEATURES: 480 ret = vdev->guest_features; 481 break; 482 case VIRTIO_PCI_QUEUE_PFN: 483 ret = virtio_queue_get_addr(vdev, vdev->queue_sel) 484 >> VIRTIO_PCI_QUEUE_ADDR_SHIFT; 485 break; 486 case VIRTIO_PCI_QUEUE_NUM: 487 ret = virtio_queue_get_num(vdev, vdev->queue_sel); 488 break; 489 case VIRTIO_PCI_QUEUE_SEL: 490 ret = vdev->queue_sel; 491 break; 492 case VIRTIO_PCI_STATUS: 493 ret = vdev->status; 494 break; 495 case VIRTIO_PCI_ISR: 496 /* reading from the ISR also clears it. 
*/ 497 ret = vdev->isr; 498 vdev->isr = 0; 499 pci_irq_deassert(&proxy->pci_dev); 500 break; 501 case VIRTIO_MSI_CONFIG_VECTOR: 502 ret = vdev->config_vector; 503 break; 504 case VIRTIO_MSI_QUEUE_VECTOR: 505 ret = virtio_queue_vector(vdev, vdev->queue_sel); 506 break; 507 default: 508 break; 509 } 510 511 return ret; 512 } 513 514 static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr, 515 unsigned size) 516 { 517 VirtIOPCIProxy *proxy = opaque; 518 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 519 uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev); 520 uint64_t val = 0; 521 if (addr < config) { 522 return virtio_ioport_read(proxy, addr); 523 } 524 addr -= config; 525 526 switch (size) { 527 case 1: 528 val = virtio_config_readb(vdev, addr); 529 break; 530 case 2: 531 val = virtio_config_readw(vdev, addr); 532 if (virtio_is_big_endian(vdev)) { 533 val = bswap16(val); 534 } 535 break; 536 case 4: 537 val = virtio_config_readl(vdev, addr); 538 if (virtio_is_big_endian(vdev)) { 539 val = bswap32(val); 540 } 541 break; 542 } 543 return val; 544 } 545 546 static void virtio_pci_config_write(void *opaque, hwaddr addr, 547 uint64_t val, unsigned size) 548 { 549 VirtIOPCIProxy *proxy = opaque; 550 uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev); 551 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 552 if (addr < config) { 553 virtio_ioport_write(proxy, addr, val); 554 return; 555 } 556 addr -= config; 557 /* 558 * Virtio-PCI is odd. Ioports are LE but config space is target native 559 * endian. 
560 */ 561 switch (size) { 562 case 1: 563 virtio_config_writeb(vdev, addr, val); 564 break; 565 case 2: 566 if (virtio_is_big_endian(vdev)) { 567 val = bswap16(val); 568 } 569 virtio_config_writew(vdev, addr, val); 570 break; 571 case 4: 572 if (virtio_is_big_endian(vdev)) { 573 val = bswap32(val); 574 } 575 virtio_config_writel(vdev, addr, val); 576 break; 577 } 578 } 579 580 static const MemoryRegionOps virtio_pci_config_ops = { 581 .read = virtio_pci_config_read, 582 .write = virtio_pci_config_write, 583 .impl = { 584 .min_access_size = 1, 585 .max_access_size = 4, 586 }, 587 .endianness = DEVICE_LITTLE_ENDIAN, 588 }; 589 590 /* Below are generic functions to do memcpy from/to an address space, 591 * without byteswaps, with input validation. 592 * 593 * As regular address_space_* APIs all do some kind of byteswap at least for 594 * some host/target combinations, we are forced to explicitly convert to a 595 * known-endianness integer value. 596 * It doesn't really matter which endian format to go through, so the code 597 * below selects the endian that causes the least amount of work on the given 598 * host. 599 * 600 * Note: host pointer must be aligned. 601 */ 602 static 603 void virtio_address_space_write(AddressSpace *as, hwaddr addr, 604 const uint8_t *buf, int len) 605 { 606 uint32_t val; 607 608 /* address_space_* APIs assume an aligned address. 609 * As address is under guest control, handle illegal values. 
610 */ 611 addr &= ~(len - 1); 612 613 /* Make sure caller aligned buf properly */ 614 assert(!(((uintptr_t)buf) & (len - 1))); 615 616 switch (len) { 617 case 1: 618 val = pci_get_byte(buf); 619 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); 620 break; 621 case 2: 622 val = pci_get_word(buf); 623 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); 624 break; 625 case 4: 626 val = pci_get_long(buf); 627 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); 628 break; 629 default: 630 /* As length is under guest control, handle illegal values. */ 631 break; 632 } 633 } 634 635 static void 636 virtio_address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len) 637 { 638 uint32_t val; 639 640 /* address_space_* APIs assume an aligned address. 641 * As address is under guest control, handle illegal values. 642 */ 643 addr &= ~(len - 1); 644 645 /* Make sure caller aligned buf properly */ 646 assert(!(((uintptr_t)buf) & (len - 1))); 647 648 switch (len) { 649 case 1: 650 val = address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL); 651 pci_set_byte(buf, val); 652 break; 653 case 2: 654 val = address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL); 655 pci_set_word(buf, val); 656 break; 657 case 4: 658 val = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL); 659 pci_set_long(buf, val); 660 break; 661 default: 662 /* As length is under guest control, handle illegal values. 
*/ 663 break; 664 } 665 } 666 667 static void virtio_write_config(PCIDevice *pci_dev, uint32_t address, 668 uint32_t val, int len) 669 { 670 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev); 671 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 672 struct virtio_pci_cfg_cap *cfg; 673 674 pci_default_write_config(pci_dev, address, val, len); 675 676 if (range_covers_byte(address, len, PCI_COMMAND) && 677 !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) { 678 virtio_pci_stop_ioeventfd(proxy); 679 virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK); 680 } 681 682 if (proxy->config_cap && 683 ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap, 684 pci_cfg_data), 685 sizeof cfg->pci_cfg_data)) { 686 uint32_t off; 687 uint32_t len; 688 689 cfg = (void *)(proxy->pci_dev.config + proxy->config_cap); 690 off = le32_to_cpu(cfg->cap.offset); 691 len = le32_to_cpu(cfg->cap.length); 692 693 if (len == 1 || len == 2 || len == 4) { 694 assert(len <= sizeof cfg->pci_cfg_data); 695 virtio_address_space_write(&proxy->modern_as, off, 696 cfg->pci_cfg_data, len); 697 } 698 } 699 } 700 701 static uint32_t virtio_read_config(PCIDevice *pci_dev, 702 uint32_t address, int len) 703 { 704 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev); 705 struct virtio_pci_cfg_cap *cfg; 706 707 if (proxy->config_cap && 708 ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap, 709 pci_cfg_data), 710 sizeof cfg->pci_cfg_data)) { 711 uint32_t off; 712 uint32_t len; 713 714 cfg = (void *)(proxy->pci_dev.config + proxy->config_cap); 715 off = le32_to_cpu(cfg->cap.offset); 716 len = le32_to_cpu(cfg->cap.length); 717 718 if (len == 1 || len == 2 || len == 4) { 719 assert(len <= sizeof cfg->pci_cfg_data); 720 virtio_address_space_read(&proxy->modern_as, off, 721 cfg->pci_cfg_data, len); 722 } 723 } 724 725 return pci_default_read_config(pci_dev, address, len); 726 } 727 728 static int 
kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy, 729 unsigned int queue_no, 730 unsigned int vector, 731 MSIMessage msg) 732 { 733 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; 734 int ret; 735 736 if (irqfd->users == 0) { 737 ret = kvm_irqchip_add_msi_route(kvm_state, msg, &proxy->pci_dev); 738 if (ret < 0) { 739 return ret; 740 } 741 irqfd->virq = ret; 742 } 743 irqfd->users++; 744 return 0; 745 } 746 747 static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy, 748 unsigned int vector) 749 { 750 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; 751 if (--irqfd->users == 0) { 752 kvm_irqchip_release_virq(kvm_state, irqfd->virq); 753 } 754 } 755 756 static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy, 757 unsigned int queue_no, 758 unsigned int vector) 759 { 760 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; 761 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 762 VirtQueue *vq = virtio_get_queue(vdev, queue_no); 763 EventNotifier *n = virtio_queue_get_guest_notifier(vq); 764 return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq); 765 } 766 767 static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy, 768 unsigned int queue_no, 769 unsigned int vector) 770 { 771 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 772 VirtQueue *vq = virtio_get_queue(vdev, queue_no); 773 EventNotifier *n = virtio_queue_get_guest_notifier(vq); 774 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; 775 int ret; 776 777 ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq); 778 assert(ret == 0); 779 } 780 781 static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs) 782 { 783 PCIDevice *dev = &proxy->pci_dev; 784 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 785 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 786 unsigned int vector; 787 int ret, queue_no; 788 MSIMessage msg; 789 790 for (queue_no = 0; queue_no < nvqs; queue_no++) { 791 if (!virtio_queue_get_num(vdev, 
queue_no)) { 792 break; 793 } 794 vector = virtio_queue_vector(vdev, queue_no); 795 if (vector >= msix_nr_vectors_allocated(dev)) { 796 continue; 797 } 798 msg = msix_get_message(dev, vector); 799 ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector, msg); 800 if (ret < 0) { 801 goto undo; 802 } 803 /* If guest supports masking, set up irqfd now. 804 * Otherwise, delay until unmasked in the frontend. 805 */ 806 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { 807 ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector); 808 if (ret < 0) { 809 kvm_virtio_pci_vq_vector_release(proxy, vector); 810 goto undo; 811 } 812 } 813 } 814 return 0; 815 816 undo: 817 while (--queue_no >= 0) { 818 vector = virtio_queue_vector(vdev, queue_no); 819 if (vector >= msix_nr_vectors_allocated(dev)) { 820 continue; 821 } 822 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { 823 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector); 824 } 825 kvm_virtio_pci_vq_vector_release(proxy, vector); 826 } 827 return ret; 828 } 829 830 static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs) 831 { 832 PCIDevice *dev = &proxy->pci_dev; 833 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 834 unsigned int vector; 835 int queue_no; 836 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 837 838 for (queue_no = 0; queue_no < nvqs; queue_no++) { 839 if (!virtio_queue_get_num(vdev, queue_no)) { 840 break; 841 } 842 vector = virtio_queue_vector(vdev, queue_no); 843 if (vector >= msix_nr_vectors_allocated(dev)) { 844 continue; 845 } 846 /* If guest supports masking, clean up irqfd now. 847 * Otherwise, it was cleaned when masked in the frontend. 
848 */ 849 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { 850 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector); 851 } 852 kvm_virtio_pci_vq_vector_release(proxy, vector); 853 } 854 } 855 856 static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy, 857 unsigned int queue_no, 858 unsigned int vector, 859 MSIMessage msg) 860 { 861 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 862 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 863 VirtQueue *vq = virtio_get_queue(vdev, queue_no); 864 EventNotifier *n = virtio_queue_get_guest_notifier(vq); 865 VirtIOIRQFD *irqfd; 866 int ret = 0; 867 868 if (proxy->vector_irqfd) { 869 irqfd = &proxy->vector_irqfd[vector]; 870 if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) { 871 ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg, 872 &proxy->pci_dev); 873 if (ret < 0) { 874 return ret; 875 } 876 } 877 } 878 879 /* If guest supports masking, irqfd is already setup, unmask it. 880 * Otherwise, set it up now. 881 */ 882 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { 883 k->guest_notifier_mask(vdev, queue_no, false); 884 /* Test after unmasking to avoid losing events. */ 885 if (k->guest_notifier_pending && 886 k->guest_notifier_pending(vdev, queue_no)) { 887 event_notifier_set(n); 888 } 889 } else { 890 ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector); 891 } 892 return ret; 893 } 894 895 static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy, 896 unsigned int queue_no, 897 unsigned int vector) 898 { 899 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 900 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 901 902 /* If guest supports masking, keep irqfd but mask it. 903 * Otherwise, clean it up now. 
904 */ 905 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { 906 k->guest_notifier_mask(vdev, queue_no, true); 907 } else { 908 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector); 909 } 910 } 911 912 static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector, 913 MSIMessage msg) 914 { 915 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev); 916 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 917 VirtQueue *vq = virtio_vector_first_queue(vdev, vector); 918 int ret, index, unmasked = 0; 919 920 while (vq) { 921 index = virtio_get_queue_index(vq); 922 if (!virtio_queue_get_num(vdev, index)) { 923 break; 924 } 925 if (index < proxy->nvqs_with_notifiers) { 926 ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg); 927 if (ret < 0) { 928 goto undo; 929 } 930 ++unmasked; 931 } 932 vq = virtio_vector_next_queue(vq); 933 } 934 935 return 0; 936 937 undo: 938 vq = virtio_vector_first_queue(vdev, vector); 939 while (vq && unmasked >= 0) { 940 index = virtio_get_queue_index(vq); 941 if (index < proxy->nvqs_with_notifiers) { 942 virtio_pci_vq_vector_mask(proxy, index, vector); 943 --unmasked; 944 } 945 vq = virtio_vector_next_queue(vq); 946 } 947 return ret; 948 } 949 950 static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector) 951 { 952 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev); 953 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); 954 VirtQueue *vq = virtio_vector_first_queue(vdev, vector); 955 int index; 956 957 while (vq) { 958 index = virtio_get_queue_index(vq); 959 if (!virtio_queue_get_num(vdev, index)) { 960 break; 961 } 962 if (index < proxy->nvqs_with_notifiers) { 963 virtio_pci_vq_vector_mask(proxy, index, vector); 964 } 965 vq = virtio_vector_next_queue(vq); 966 } 967 } 968 969 static void virtio_pci_vector_poll(PCIDevice *dev, 970 unsigned int vector_start, 971 unsigned int vector_end) 972 { 973 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev); 974 
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            /* Queues are allocated contiguously; first zero-sized one ends
             * the scan. */
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        /* Only poll vectors inside the requested range that are currently
         * masked; unmasked vectors are delivered directly. */
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        if (k->guest_notifier_pending) {
            /* Device class can report pending state precisely. */
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            /* Fall back to draining the guest notifier eventfd. */
            msix_set_pending(dev, vector);
        }
    }
}

/* Set up (assign) or tear down the guest notifier eventfd for queue n.
 * with_irqfd selects whether the fd will be consumed by a KVM irqfd
 * instead of a QEMU fd handler.
 */
static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (!msix_enabled(&proxy->pci_dev) &&
        vdev->use_guest_notifier_mask &&
        vdc->guest_notifier_mask) {
        /* Without MSI-X there is no per-vector masking, so mask/unmask at
         * the device level instead. */
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    /* Guest notifiers are only used when MSI-X is enabled. */
    return msix_enabled(&proxy->pci_dev);
}

/* Assign or deassign guest notifiers for the first nvqs queues, routing
 * them through KVM irqfds when MSI-X plus irqfd-capable KVM is available.
 * On partial failure all successfully-assigned notifiers are rolled back.
 */
static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /* When deassigning, pass a consistent nvqs value
     * to avoid leaking notifiers.
     */
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_release(proxy, nvqs);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd || k->guest_notifier_mask) && assign) {
        if (with_irqfd) {
            /* One irqfd slot per allocated MSI-X vector. */
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_use(proxy, nvqs);
            if (r < 0) {
                goto assign_error;
            }
        }
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_release(proxy, nvqs);
    }

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}

/* Hand queue n's kick over to a host notifier (ioeventfd) for a backend
 * such as vhost; QEMU's own ioeventfd handling is disabled first so the
 * two never compete for the same fd.
 */
static int virtio_pci_set_host_notifier(DeviceState *d, int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    /* Stop using ioeventfd for virtqueue kick if the device starts using host
     * notifiers. This makes it easy to avoid stepping on each others' toes.
     */
    proxy->ioeventfd_disabled = assign;
    if (assign) {
        virtio_pci_stop_ioeventfd(proxy);
    }
    /* We don't need to start here: it's not needed because backend
     * currently only stops on status change away from ok,
     * reset, vmstop and such. If we do add code to start here,
     * need to check vmstate, device state etc. */
    return virtio_pci_set_host_notifier_internal(proxy, n, assign, false);
}

/* VM state change handler: start/stop ioeventfd handling along with the VM,
 * and work around guests migrated from old QEMU that never enabled PCI bus
 * mastering.
 */
static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (running) {
        /* Old QEMU versions did not set bus master enable on status write.
         * Detect DRIVER set and enable it.
         */
        if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
            (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}

#ifdef CONFIG_VIRTFS
/* Realize callback for virtio-9p-pci: plug the 9p device onto the proxy's
 * virtio bus. */
static void virtio_9p_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    V9fsPCIState *dev = VIRTIO_9P_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static Property virtio_9p_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_9p_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);

    k->realize = virtio_9p_pci_realize;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_9P;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    /* NOTE(review): raw class code 0x2 rather than a PCI_CLASS_* symbol —
     * confirm intended. */
    pcidev_k->class_id = 0x2;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = virtio_9p_pci_properties;
}

static void virtio_9p_pci_instance_init(Object *obj)
{
    V9fsPCIState *dev = VIRTIO_9P_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_9P);
}

static const TypeInfo virtio_9p_pci_info = {
    .name = TYPE_VIRTIO_9P_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(V9fsPCIState),
    .instance_init = virtio_9p_pci_instance_init,
    .class_init = virtio_9p_pci_class_init,
};
#endif /* CONFIG_VIRTFS */

/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
 */

static int virtio_pci_query_nvectors(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);

    return proxy->nvectors;
}

/* Append a vendor-specific PCI capability describing one modern virtio
 * region. Returns the config-space offset of the new capability.
 */
static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
                                  struct virtio_pci_cap *cap)
{
    PCIDevice *dev = &proxy->pci_dev;
    int offset;

    offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0, cap->cap_len);
    assert(offset > 0);

    assert(cap->cap_len >= sizeof *cap);
    /* Copy the capability body after the generic id/next header bytes,
     * which pci_add_capability already filled in. */
    memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
           cap->cap_len - PCI_CAP_FLAGS);

    return offset;
}

/* MMIO read handler for the modern (virtio-1) common config region. */
static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        if (proxy->dfselect <= 1) {
            /* Feature bits are exposed 32 at a time; legacy-only bits are
             * hidden from the modern interface. */
            val = (vdev->host_features & ~VIRTIO_LEGACY_FEATURES) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        /* Report one past the highest queue index with a non-zero size. */
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    default:
        val = 0;
    }

    return val;
}

/* MMIO write handler for the modern (virtio-1) common config region.
 * Ring addresses are latched in proxy->vqs and only pushed to the core
 * when the guest enables the queue.
 */
static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            /* Combine both 32-bit halves before handing to the core. */
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            /* Writing 0 to status resets the whole device. */
            virtio_pci_reset(DEVICE(proxy));
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        /* TODO: need a way to put num back on reset. */
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        /* Commit the latched 64-bit ring addresses to the core. */
        virtio_queue_set_rings(vdev, vdev->queue_sel,
                       ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].desc[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].avail[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].used[0]);
        proxy->vqs[vdev->queue_sel].enabled = 1;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    default:
        break;
    }
}


/* Reads from the notify region have no defined effect; return 0. */
static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    return 0;
}

/* MMIO notify: the queue index is encoded in the write offset. */
static void virtio_pci_notify_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    unsigned queue = addr / QEMU_VIRTIO_PCI_QUEUE_MEM_MULT;

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

/* PIO notify: the queue index is the value written, not the offset. */
static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    unsigned queue = val;

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

/* ISR register: reading acknowledges the interrupt (read-to-clear). */
static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val = vdev->isr;

    /* Clear ISR and deassert the legacy INTx line on read. */
    vdev->isr = 0;
    pci_irq_deassert(&proxy->pci_dev);

    return val;
}

/* Writes to the ISR region are ignored. */
static void virtio_pci_isr_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}

/* Device-specific config space (modern layout): dispatch on access size. */
static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIODevice *vdev = opaque;
    uint64_t val = 0;

    switch (size) {
    case 1:
        val = virtio_config_modern_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_modern_readw(vdev, addr);
        break;
    case 4:
        val = virtio_config_modern_readl(vdev, addr);
        break;
    }
    return val;
}

static void virtio_pci_device_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    switch (size) {
    case 1:
        virtio_config_modern_writeb(vdev, addr, val);
        break;
    case 2:
        virtio_config_modern_writew(vdev, addr, val);
        break;
    case 4:
        virtio_config_modern_writel(vdev, addr, val);
        break;
    }
}

/* Create the MemoryRegions backing the modern (virtio-1) PCI regions:
 * common config, ISR, device config, and the two notify flavours.
 */
static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
{
    static const MemoryRegionOps common_ops = {
        .read = virtio_pci_common_read,
        .write = virtio_pci_common_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps isr_ops = {
        .read = virtio_pci_isr_read,
        .write = virtio_pci_isr_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps device_ops = {
        .read = virtio_pci_device_read,
        .write = virtio_pci_device_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_pio_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write_pio,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };


    memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                          &common_ops,
                          proxy,
                          "virtio-pci-common",
                          proxy->common.size);

    memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
                          &isr_ops,
                          proxy,
                          "virtio-pci-isr",
                          proxy->isr.size);

    memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
                          &device_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-device",
                          proxy->device.size);

    memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
                          &notify_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify",
                          proxy->notify.size);

    /* NOTE(review): sized with notify.size rather than notify_pio.size —
     * confirm this is intentional. */
    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
                          &notify_pio_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify-pio",
                          proxy->notify.size);
}

/* Map one modern region as a subregion of the given BAR and advertise it
 * through a vendor-specific capability.
 */
static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length = cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);

}

/* Convenience wrapper: map a region into the modern memory BAR. */
static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
                                             VirtIOPCIRegion *region,
                                             struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->modern_bar, proxy->modern_mem_bar);
}

/* Convenience wrapper: map a region into the modern I/O BAR. */
static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
                                            VirtIOPCIRegion *region,
                                            struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->io_bar, proxy->modern_io_bar);
}

static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
                                               VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->modern_bar,
                                &region->mr);
}

static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
                                              VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->io_bar,
                                &region->mr);
}

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
    bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }

    if (legacy) {
        /* legacy and transitional */
        pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
                     pci_get_word(config + PCI_VENDOR_ID));
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    } else {
        /* pure virtio-1.0 */
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        /* Modern device IDs start at 0x1040 + virtio device type. */
        pci_set_word(config + PCI_DEVICE_ID,
                     0x1040 + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }
    config[PCI_INTERRUPT_PIN] = 1;


    if (modern) {
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(QEMU_VIRTIO_PCI_QUEUE_MEM_MULT),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        struct virtio_pci_notify_cap notify_pio = {
            /* NOTE(review): sizeof notify (same struct type as notify_pio)
             * — equivalent, but confirm it is intentional. */
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
        virtio_pci_modern_regions_init(proxy);

        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

        /* The PCI_CFG capability's bar/offset/length/data fields are made
         * guest-writable via the wmask so the guest can drive cfg access. */
        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar);
        if (err) {
            /* Notice when a system that supports MSIx can't initialize it. */
            if (err != -ENOTSUP) {
                error_report("unable to init msix vectors to %" PRIu32,
                             proxy->nvectors);
            }
            /* Fall back to running without MSI-X. */
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

    if (legacy) {
        /* Legacy BAR covers the transport registers plus the device config,
         * rounded up to a power of two. */
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
            + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }

    if (!kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
}

/* This is called by virtio-bus just before the device is unplugged. */
static void virtio_pci_device_unplugged(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;

    virtio_pci_stop_ioeventfd(proxy);

    if (modern) {
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
        if (modern_pio) {
            virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
        }
    }
}

/* PCI realize: lay out BARs and modern regions, then realize the
 * subclass's virtio device.
 */
static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     * region 0 -- virtio legacy io bar
     * region 1 -- msi-x bar
     * region 4+5 -- virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar = 0;
    proxy->msix_bar = 1;
    proxy->modern_io_bar = 2;
    proxy->modern_mem_bar = 4;

    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    /* One notify slot per possible queue, QEMU_VIRTIO_PCI_QUEUE_MEM_MULT
     * bytes apart. */
    proxy->notify.offset = 0x3000;
    proxy->notify.size =
        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       2 * QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
                       VIRTIO_QUEUE_MAX);

    /* Alias of the modern BAR, backing the PCI-cfg access method. */
    memory_region_init_alias(&proxy->modern_cfg,
                             OBJECT(proxy),
                             "virtio-pci-cfg",
                             &proxy->modern_bar,
                             0,
                             memory_region_size(&proxy->modern_bar));

    address_space_init(&proxy->modern_as, &proxy->modern_cfg, "virtio-pci-cfg-as");

    if (pci_is_express(pci_dev) && pci_bus_is_express(pci_dev->bus) &&
        !pci_bus_is_root(pci_dev->bus)) {
        int pos;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0, PCI_PM_SIZEOF);
        assert(pos > 0);

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);
    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}

static void virtio_pci_exit(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);

    msix_uninit_exclusive_bar(pci_dev);
    address_space_destroy(&proxy->modern_as);
}

/* Device reset: reset the virtio core, release all MSI-X vectors and
 * mark every modern queue disabled.
 */
static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    int i;

    virtio_pci_stop_ioeventfd(proxy);
    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
    }
}

static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("disable-legacy", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT, false),
    DEFINE_PROP_BIT("disable-modern", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT, true),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_END_OF_LIST(),
};

/* dc->realize wrapper: decide Express-ness before the parent realize. */
static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
{
    VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
    VirtIOPCIProxy *proxy =
        VIRTIO_PCI(qdev);
    PCIDevice *pci_dev = &proxy->pci_dev;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
        !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN)) {
        /* Modern-capable devices become PCI Express unless disabled. */
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    vpciklass->parent_dc_realize(qdev, errp);
}

static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    dc->props = virtio_pci_properties;
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    /* Chain dc->realize so virtio_pci_dc_realize runs first. */
    vpciklass->parent_dc_realize = dc->realize;
    dc->realize = virtio_pci_dc_realize;
    dc->reset = virtio_pci_reset;
}

static const TypeInfo virtio_pci_info = {
    .name = TYPE_VIRTIO_PCI,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init = virtio_pci_class_init,
    .class_size = sizeof(VirtioPCIClass),
    .abstract = true,
};

/* virtio-blk-pci */

static Property virtio_blk_pci_properties[] = {
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_END_OF_LIST(),
};

/* Realize callback for virtio-blk-pci: plug the blk device onto the
 * proxy's virtio bus. */
static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_blk_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = virtio_blk_pci_properties;
    k->realize = virtio_blk_pci_realize;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void virtio_blk_pci_instance_init(Object *obj)
{
    VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_BLK);
    /* Forward child-device properties to the proxy object. */
    object_property_add_alias(obj, "iothread", OBJECT(&dev->vdev), "iothread",
                              &error_abort);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const TypeInfo virtio_blk_pci_info = {
    .name = TYPE_VIRTIO_BLK_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOBlkPCI),
    .instance_init = virtio_blk_pci_instance_init,
    .class_init = virtio_blk_pci_class_init,
};

/* virtio-scsi-pci */

static Property virtio_scsi_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                       DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_END_OF_LIST(),
};

/* Realize callback for virtio-scsi-pci: size the MSI-X vector pool and
 * plug the scsi device onto the proxy's virtio bus. */
static void virtio_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
    DeviceState *proxy = DEVICE(vpci_dev);
    char *bus_name;

    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        /* One vector per request queue plus three extra queues/config. */
        vpci_dev->nvectors = vs->conf.num_queues + 3;
    }

    /*
     * For command line compatibility,
this sets the virtio-scsi-device bus
     * name as before.
     */
    if (proxy->id) {
        bus_name = g_strdup_printf("%s.0", proxy->id);
        virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
        g_free(bus_name);
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_scsi_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = virtio_scsi_pci_realize;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = virtio_scsi_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void virtio_scsi_pci_instance_init(Object *obj)
{
    VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_SCSI);
    object_property_add_alias(obj, "iothread", OBJECT(&dev->vdev), "iothread",
                              &error_abort);
}

static const TypeInfo virtio_scsi_pci_info = {
    .name = TYPE_VIRTIO_SCSI_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOSCSIPCI),
    .instance_init = virtio_scsi_pci_instance_init,
    .class_init = virtio_scsi_pci_class_init,
};

/* vhost-scsi-pci */

#ifdef CONFIG_VHOST_SCSI
static Property vhost_scsi_pci_properties[] = {
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                       DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_END_OF_LIST(),
};

/* Realize callback for vhost-scsi-pci: same vector sizing as virtio-scsi. */
static void vhost_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VHostSCSIPCI *dev = VHOST_SCSI_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        /* One vector per request queue plus three extra queues/config. */
        vpci_dev->nvectors = vs->conf.num_queues + 3;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void vhost_scsi_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->realize = vhost_scsi_pci_realize;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = vhost_scsi_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void vhost_scsi_pci_instance_init(Object *obj)
{
    VHostSCSIPCI *dev = VHOST_SCSI_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VHOST_SCSI);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const TypeInfo vhost_scsi_pci_info = {
    .name = TYPE_VHOST_SCSI_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VHostSCSIPCI),
    .instance_init = vhost_scsi_pci_instance_init,
    .class_init = vhost_scsi_pci_class_init,
};
#endif

/* virtio-balloon-pci */

static Property virtio_balloon_pci_properties[] = {
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_END_OF_LIST(),
};

/* Realize callback for virtio-balloon-pci: normalize the class code and
 * plug the balloon device onto the proxy's virtio bus. */
static void virtio_balloon_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    if (vpci_dev->class_code != PCI_CLASS_OTHERS &&
        vpci_dev->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
        vpci_dev->class_code = PCI_CLASS_OTHERS;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_balloon_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->realize = virtio_balloon_pci_realize;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->props = virtio_balloon_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_OTHERS;
}

static void virtio_balloon_pci_instance_init(Object *obj)
{
    VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_BALLOON);
    /* Forward the stats properties to the proxy object. */
    object_property_add_alias(obj, "guest-stats", OBJECT(&dev->vdev),
                              "guest-stats", &error_abort);
    object_property_add_alias(obj, "guest-stats-polling-interval",
                              OBJECT(&dev->vdev),
                              "guest-stats-polling-interval", &error_abort);
}

static const TypeInfo virtio_balloon_pci_info = {
    .name = TYPE_VIRTIO_BALLOON_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOBalloonPCI),
    .instance_init = virtio_balloon_pci_instance_init,
    .class_init = virtio_balloon_pci_class_init,
};

/* virtio-serial-pci */

static void virtio_serial_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    DeviceState *proxy = DEVICE(vpci_dev);
    char *bus_name;

    if (vpci_dev->class_code != PCI_CLASS_COMMUNICATION_OTHER
&& 2168 vpci_dev->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */ 2169 vpci_dev->class_code != PCI_CLASS_OTHERS) { /* qemu-kvm */ 2170 vpci_dev->class_code = PCI_CLASS_COMMUNICATION_OTHER; 2171 } 2172 2173 /* backwards-compatibility with machines that were created with 2174 DEV_NVECTORS_UNSPECIFIED */ 2175 if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { 2176 vpci_dev->nvectors = dev->vdev.serial.max_virtserial_ports + 1; 2177 } 2178 2179 /* 2180 * For command line compatibility, this sets the virtio-serial-device bus 2181 * name as before. 2182 */ 2183 if (proxy->id) { 2184 bus_name = g_strdup_printf("%s.0", proxy->id); 2185 virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name); 2186 g_free(bus_name); 2187 } 2188 2189 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus)); 2190 object_property_set_bool(OBJECT(vdev), true, "realized", errp); 2191 } 2192 2193 static Property virtio_serial_pci_properties[] = { 2194 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, 2195 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), 2196 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2), 2197 DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0), 2198 DEFINE_PROP_END_OF_LIST(), 2199 }; 2200 2201 static void virtio_serial_pci_class_init(ObjectClass *klass, void *data) 2202 { 2203 DeviceClass *dc = DEVICE_CLASS(klass); 2204 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); 2205 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); 2206 k->realize = virtio_serial_pci_realize; 2207 set_bit(DEVICE_CATEGORY_INPUT, dc->categories); 2208 dc->props = virtio_serial_pci_properties; 2209 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; 2210 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE; 2211 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; 2212 pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER; 2213 } 2214 2215 static void virtio_serial_pci_instance_init(Object *obj) 2216 { 2217 VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(obj); 2218 2219 virtio_instance_init_common(obj, 
&dev->vdev, sizeof(dev->vdev), 2220 TYPE_VIRTIO_SERIAL); 2221 } 2222 2223 static const TypeInfo virtio_serial_pci_info = { 2224 .name = TYPE_VIRTIO_SERIAL_PCI, 2225 .parent = TYPE_VIRTIO_PCI, 2226 .instance_size = sizeof(VirtIOSerialPCI), 2227 .instance_init = virtio_serial_pci_instance_init, 2228 .class_init = virtio_serial_pci_class_init, 2229 }; 2230 2231 /* virtio-net-pci */ 2232 2233 static Property virtio_net_properties[] = { 2234 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, 2235 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false), 2236 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3), 2237 DEFINE_PROP_END_OF_LIST(), 2238 }; 2239 2240 static void virtio_net_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) 2241 { 2242 DeviceState *qdev = DEVICE(vpci_dev); 2243 VirtIONetPCI *dev = VIRTIO_NET_PCI(vpci_dev); 2244 DeviceState *vdev = DEVICE(&dev->vdev); 2245 2246 virtio_net_set_netclient_name(&dev->vdev, qdev->id, 2247 object_get_typename(OBJECT(qdev))); 2248 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus)); 2249 object_property_set_bool(OBJECT(vdev), true, "realized", errp); 2250 } 2251 2252 static void virtio_net_pci_class_init(ObjectClass *klass, void *data) 2253 { 2254 DeviceClass *dc = DEVICE_CLASS(klass); 2255 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); 2256 VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass); 2257 2258 k->romfile = "efi-virtio.rom"; 2259 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; 2260 k->device_id = PCI_DEVICE_ID_VIRTIO_NET; 2261 k->revision = VIRTIO_PCI_ABI_VERSION; 2262 k->class_id = PCI_CLASS_NETWORK_ETHERNET; 2263 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); 2264 dc->props = virtio_net_properties; 2265 vpciklass->realize = virtio_net_pci_realize; 2266 } 2267 2268 static void virtio_net_pci_instance_init(Object *obj) 2269 { 2270 VirtIONetPCI *dev = VIRTIO_NET_PCI(obj); 2271 2272 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), 2273 TYPE_VIRTIO_NET); 2274 object_property_add_alias(obj, "bootindex", 
OBJECT(&dev->vdev), 2275 "bootindex", &error_abort); 2276 } 2277 2278 static const TypeInfo virtio_net_pci_info = { 2279 .name = TYPE_VIRTIO_NET_PCI, 2280 .parent = TYPE_VIRTIO_PCI, 2281 .instance_size = sizeof(VirtIONetPCI), 2282 .instance_init = virtio_net_pci_instance_init, 2283 .class_init = virtio_net_pci_class_init, 2284 }; 2285 2286 /* virtio-rng-pci */ 2287 2288 static void virtio_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) 2289 { 2290 VirtIORngPCI *vrng = VIRTIO_RNG_PCI(vpci_dev); 2291 DeviceState *vdev = DEVICE(&vrng->vdev); 2292 Error *err = NULL; 2293 2294 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus)); 2295 object_property_set_bool(OBJECT(vdev), true, "realized", &err); 2296 if (err) { 2297 error_propagate(errp, err); 2298 return; 2299 } 2300 2301 object_property_set_link(OBJECT(vrng), 2302 OBJECT(vrng->vdev.conf.rng), "rng", 2303 NULL); 2304 } 2305 2306 static void virtio_rng_pci_class_init(ObjectClass *klass, void *data) 2307 { 2308 DeviceClass *dc = DEVICE_CLASS(klass); 2309 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); 2310 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); 2311 2312 k->realize = virtio_rng_pci_realize; 2313 set_bit(DEVICE_CATEGORY_MISC, dc->categories); 2314 2315 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; 2316 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_RNG; 2317 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; 2318 pcidev_k->class_id = PCI_CLASS_OTHERS; 2319 } 2320 2321 static void virtio_rng_initfn(Object *obj) 2322 { 2323 VirtIORngPCI *dev = VIRTIO_RNG_PCI(obj); 2324 2325 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), 2326 TYPE_VIRTIO_RNG); 2327 object_property_add_alias(obj, "rng", OBJECT(&dev->vdev), "rng", 2328 &error_abort); 2329 } 2330 2331 static const TypeInfo virtio_rng_pci_info = { 2332 .name = TYPE_VIRTIO_RNG_PCI, 2333 .parent = TYPE_VIRTIO_PCI, 2334 .instance_size = sizeof(VirtIORngPCI), 2335 .instance_init = virtio_rng_initfn, 2336 .class_init = virtio_rng_pci_class_init, 2337 }; 

/* virtio-input-pci */

static Property virtio_input_pci_properties[] = {
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_END_OF_LIST(),
};

/* Realize virtio-input-pci: virtio-input exists only in the virtio-1.0
 * transport, so modern mode is forced on and legacy off before the
 * embedded input device is realized. */
static void virtio_input_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOInputPCI *vinput = VIRTIO_INPUT_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&vinput->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    /* force virtio-1.0 */
    vpci_dev->flags &= ~VIRTIO_PCI_FLAG_DISABLE_MODERN;
    vpci_dev->flags |= VIRTIO_PCI_FLAG_DISABLE_LEGACY;
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

/* Base class init for all virtio-input-pci variants; subclasses override
 * only the PCI class id. */
static void virtio_input_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    dc->props = virtio_input_pci_properties;
    k->realize = virtio_input_pci_realize;
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);

    pcidev_k->class_id = PCI_CLASS_INPUT_OTHER;
}

/* Keyboard variant: only the PCI class id differs from the base. */
static void virtio_input_hid_kbd_pci_class_init(ObjectClass *klass, void *data)
{
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    pcidev_k->class_id = PCI_CLASS_INPUT_KEYBOARD;
}

/* Mouse variant: only the PCI class id differs from the base. */
static void virtio_input_hid_mouse_pci_class_init(ObjectClass *klass,
                                                  void *data)
{
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    pcidev_k->class_id = PCI_CLASS_INPUT_MOUSE;
}

/* Instance init: embed a virtio-keyboard backend device. */
static void virtio_keyboard_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_KEYBOARD);
}

/* Instance init: embed a virtio-mouse backend device. */
static void virtio_mouse_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_MOUSE);
}

/* Instance init: embed a virtio-tablet backend device.  Note: the tablet
 * type has no class_init of its own, so it keeps the base class id
 * (PCI_CLASS_INPUT_OTHER). */
static void virtio_tablet_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_TABLET);
}

/* Abstract base for all virtio-input PCI proxies. */
static const TypeInfo virtio_input_pci_info = {
    .name = TYPE_VIRTIO_INPUT_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOInputPCI),
    .class_init = virtio_input_pci_class_init,
    .abstract = true,
};

/* Abstract base for the HID (keyboard/mouse/tablet) variants. */
static const TypeInfo virtio_input_hid_pci_info = {
    .name = TYPE_VIRTIO_INPUT_HID_PCI,
    .parent = TYPE_VIRTIO_INPUT_PCI,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .abstract = true,
};

static const TypeInfo virtio_keyboard_pci_info = {
    .name = TYPE_VIRTIO_KEYBOARD_PCI,
    .parent = TYPE_VIRTIO_INPUT_HID_PCI,
    .class_init = virtio_input_hid_kbd_pci_class_init,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_keyboard_initfn,
};

static const TypeInfo virtio_mouse_pci_info = {
    .name = TYPE_VIRTIO_MOUSE_PCI,
    .parent = TYPE_VIRTIO_INPUT_HID_PCI,
    .class_init = virtio_input_hid_mouse_pci_class_init,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_mouse_initfn,
};

static const TypeInfo virtio_tablet_pci_info = {
    .name = TYPE_VIRTIO_TABLET_PCI,
    .parent = TYPE_VIRTIO_INPUT_HID_PCI,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_tablet_initfn,
};

#ifdef CONFIG_LINUX
/* Instance init: embed a virtio-input-host (evdev passthrough) backend;
 * Linux-only. */
static void virtio_host_initfn(Object *obj)
{
    VirtIOInputHostPCI *dev = VIRTIO_INPUT_HOST_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_INPUT_HOST);
}

static const TypeInfo virtio_host_pci_info = {
    .name = TYPE_VIRTIO_INPUT_HOST_PCI,
    .parent = TYPE_VIRTIO_INPUT_PCI,
    .instance_size = sizeof(VirtIOInputHostPCI),
    .instance_init = virtio_host_initfn,
};
#endif

/* virtio-pci-bus */

/* Create the proxy's child virtio bus in place, always named "virtio-bus". */
static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev,
                        virtio_bus_name);
}

/* Class init for the virtio-pci bus: a virtio bus carries exactly one
 * device, and every VirtioBusClass hook is routed to the PCI transport
 * implementations defined earlier in this file. */
static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_host_notifier = virtio_pci_set_host_notifier;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
}

static const TypeInfo virtio_pci_bus_info = {
    .name = TYPE_VIRTIO_PCI_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_init = virtio_pci_bus_class_init,
};

/* Register every QOM type defined in this file.  Optional types are gated
 * on their build-time config (CONFIG_LINUX, CONFIG_VIRTFS,
 * CONFIG_VHOST_SCSI). */
static void virtio_pci_register_types(void)
{
    type_register_static(&virtio_rng_pci_info);
    type_register_static(&virtio_input_pci_info);
    type_register_static(&virtio_input_hid_pci_info);
    type_register_static(&virtio_keyboard_pci_info);
    type_register_static(&virtio_mouse_pci_info);
    type_register_static(&virtio_tablet_pci_info);
#ifdef CONFIG_LINUX
    type_register_static(&virtio_host_pci_info);
#endif
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
#ifdef CONFIG_VIRTFS
    type_register_static(&virtio_9p_pci_info);
#endif
    type_register_static(&virtio_blk_pci_info);
    type_register_static(&virtio_scsi_pci_info);
    type_register_static(&virtio_balloon_pci_info);
    type_register_static(&virtio_serial_pci_info);
    type_register_static(&virtio_net_pci_info);
#ifdef CONFIG_VHOST_SCSI
    type_register_static(&vhost_scsi_pci_info);
#endif
}

type_init(virtio_pci_register_types)