/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "virtio_pci_common.h"

static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
		 "Force legacy mode for transitional virtio 1 devices");
#endif

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled)
		synchronize_irq(vp_dev->pci_dev->irq);

	for (i = 0; i < vp_dev->msix_vectors; ++i)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}

/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq)
{
	/* we write the queue's selector into the notification register to
	 * signal the other end */
	iowrite16(vq->index, (void __iomem *)vq->priv);
	return true;
}

/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;

	virtio_config_changed(&vp_dev->vdev);
	return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	struct virtio_pci_vq_info *info;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->virtqueues, node) {
		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	u8 isr;

	/* reading the ISR has the effect of also clearing it so it's very
	 * important to save off the value. */
	isr = ioread8(vp_dev->isr);

	/* It's definitely not us if the ISR was not high */
	if (!isr)
		return IRQ_NONE;

	/* Configuration change? Tell driver if it wants to know. */
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		vp_config_changed(irq, opaque);

	return vp_vring_interrupt(irq, opaque);
}
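
/* MSI-X vector layout used below: the first vector carries configuration
 * change notifications.  With per_vq_vectors, each virtqueue that has a
 * callback then gets a dedicated vector; otherwise a single second vector
 * is shared by all virtqueues. */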
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
				   bool per_vq_vectors, struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	const char *name = dev_name(&vp_dev->vdev.dev);
	unsigned flags = PCI_IRQ_MSIX;
	unsigned i, v;
	int err = -ENOMEM;

	vp_dev->msix_vectors = nvectors;

	vp_dev->msix_names = kmalloc_array(nvectors,
					   sizeof(*vp_dev->msix_names),
					   GFP_KERNEL);
	if (!vp_dev->msix_names)
		goto error;
	vp_dev->msix_affinity_masks
		= kcalloc(nvectors, sizeof(*vp_dev->msix_affinity_masks),
			  GFP_KERNEL);
	if (!vp_dev->msix_affinity_masks)
		goto error;
	for (i = 0; i < nvectors; ++i)
		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
				       GFP_KERNEL))
			goto error;

	if (desc) {
		flags |= PCI_IRQ_AFFINITY;
		desc->pre_vectors++; /* virtio config vector */
	}

	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
					     nvectors, flags, desc);
	if (err < 0)
		goto error;
	vp_dev->msix_enabled = 1;

	/* Set the vector used for configuration */
	v = vp_dev->msix_used_vectors;
	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
		 "%s-config", name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
			  vp_config_changed, 0, vp_dev->msix_names[v],
			  vp_dev);
	if (err)
		goto error;
	++vp_dev->msix_used_vectors;

	v = vp_dev->config_vector(vp_dev, v);
	/* Verify we had enough resources to assign the vector */
	if (v == VIRTIO_MSI_NO_VECTOR) {
		err = -EBUSY;
		goto error;
	}

	if (!per_vq_vectors) {
		/* Shared vector for all VQs */
		v = vp_dev->msix_used_vectors;
		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
			 "%s-virtqueues", name);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
				  vp_dev);
		if (err)
			goto error;
		++vp_dev->msix_used_vectors;
	}
	return 0;
error:
	return err;
}
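
/* Queues with a callback are kept on vp_dev->virtqueues so the shared
 * interrupt handlers can scan them; a callback-less queue never raises
 * an interrupt, so it gets a self-contained list node instead (which
 * still keeps the list_del() in vp_del_vq() safe). */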
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name,
				     bool ctx,
				     u16 msix_vec)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
	struct virtqueue *vq;
	unsigned long flags;

	/* fill out our structure that represents an active queue */
	if (!info)
		return ERR_PTR(-ENOMEM);

	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
			      msix_vec);
	if (IS_ERR(vq))
		goto out_info;

	info->vq = vq;
	if (callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

	vp_dev->vqs[index] = info;
	return vq;

out_info:
	kfree(info);
	return vq;
}

static void vp_del_vq(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	vp_dev->del_vq(info);
	kfree(info);
}

/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq, *n;
	int i;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		if (vp_dev->per_vq_vectors) {
			int v = vp_dev->vqs[vq->index]->msix_vector;

			if (v != VIRTIO_MSI_NO_VECTOR) {
				int irq = pci_irq_vector(vp_dev->pci_dev, v);

				irq_set_affinity_hint(irq, NULL);
				free_irq(irq, vq);
			}
		}
		vp_del_vq(vq);
	}
	vp_dev->per_vq_vectors = false;

	if (vp_dev->intx_enabled) {
		free_irq(vp_dev->pci_dev->irq, vp_dev);
		vp_dev->intx_enabled = 0;
	}

	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

	if (vp_dev->msix_affinity_masks) {
		for (i = 0; i < vp_dev->msix_vectors; i++)
			if (vp_dev->msix_affinity_masks[i])
				free_cpumask_var(vp_dev->msix_affinity_masks[i]);
	}

	if (vp_dev->msix_enabled) {
		/* Disable the vector used for configuration */
		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

		pci_free_irq_vectors(vp_dev->pci_dev);
		vp_dev->msix_enabled = 0;
	}

	vp_dev->msix_vectors = 0;
	vp_dev->msix_used_vectors = 0;
	kfree(vp_dev->msix_names);
	vp_dev->msix_names = NULL;
	kfree(vp_dev->msix_affinity_masks);
	vp_dev->msix_affinity_masks = NULL;
	kfree(vp_dev->vqs);
	vp_dev->vqs = NULL;
}

static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], bool per_vq_vectors,
		const bool *ctx,
		struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u16 msix_vec;
	int i, err, nvectors, allocated_vectors, queue_idx = 0;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	if (per_vq_vectors) {
		/* Best option: one for change interrupt, one per vq. */
		nvectors = 1;
		for (i = 0; i < nvqs; ++i)
			if (callbacks[i])
				++nvectors;
	} else {
		/* Second best: one for change, shared for all vqs. */
		nvectors = 2;
	}

	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
				      per_vq_vectors ? desc : NULL);
	if (err)
		goto error_find;

	vp_dev->per_vq_vectors = per_vq_vectors;
	allocated_vectors = vp_dev->msix_used_vectors;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		if (!callbacks[i])
			msix_vec = VIRTIO_MSI_NO_VECTOR;
		else if (vp_dev->per_vq_vectors)
			msix_vec = allocated_vectors++;
		else
			msix_vec = VP_MSIX_VQ_VECTOR;
		vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false,
				     msix_vec);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error_find;
		}

		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
			continue;

		/* allocate per-vq irq if available and necessary */
		snprintf(vp_dev->msix_names[msix_vec],
			 sizeof *vp_dev->msix_names,
			 "%s-%s",
			 dev_name(&vp_dev->vdev.dev), names[i]);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
				  vring_interrupt, 0,
				  vp_dev->msix_names[msix_vec],
				  vqs[i]);
		if (err)
			goto error_find;
	}
	return 0;

error_find:
	vp_del_vqs(vdev);
	return err;
}
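
/* INTx fallback: one shared legacy interrupt line serves both config
 * changes and all virtqueues, dispatched from vp_interrupt() via the
 * read-to-clear ISR status byte. */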
static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], const bool *ctx)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i, err, queue_idx = 0;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
			  dev_name(&vdev->dev), vp_dev);
	if (err)
		goto out_del_vqs;

	vp_dev->intx_enabled = 1;
	vp_dev->per_vq_vectors = false;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false,
				     VIRTIO_MSI_NO_VECTOR);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto out_del_vqs;
		}
	}

	return 0;
out_del_vqs:
	vp_del_vqs(vdev);
	return err;
}
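
/*
 * A sketch of how a virtio driver typically lands here (illustrative
 * only; rx_done/tx_done are hypothetical callbacks):
 *
 *	struct virtqueue *vqs[2];
 *	vq_callback_t *cbs[] = { rx_done, tx_done };
 *	static const char * const names[] = { "rx", "tx" };
 *
 *	err = virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL);
 *
 * virtio_find_vqs() dispatches through config->find_vqs(), which for
 * the PCI transport is the function below.
 */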
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], const bool *ctx,
		struct irq_affinity *desc)
{
	int err;

	/* Try MSI-X with one vector per queue. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc);
	if (!err)
		return 0;
	/* Fallback: MSI-X with one vector for config, one shared for queues. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
	if (!err)
		return 0;
	/* Finally fall back to regular interrupts. */
	return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
}

const char *vp_bus_name(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return pci_name(vp_dev->pci_dev);
}

/* Set up the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	struct cpumask *mask;
	unsigned int irq;

	if (!vq->callback)
		return -EINVAL;

	if (vp_dev->msix_enabled) {
		mask = vp_dev->msix_affinity_masks[info->msix_vector];
		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
		if (!cpu_mask)
			irq_set_affinity_hint(irq, NULL);
		else {
			cpumask_copy(mask, cpu_mask);
			irq_set_affinity_hint(irq, mask);
		}
	}
	return 0;
}

const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!vp_dev->per_vq_vectors ||
	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
		return NULL;

	return pci_irq_get_affinity(vp_dev->pci_dev,
				    vp_dev->vqs[index]->msix_vector);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = virtio_device_freeze(&vp_dev->vdev);

	if (!ret)
		pci_disable_device(pci_dev);
	return ret;
}

static int virtio_pci_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = pci_enable_device(pci_dev);
	if (ret)
		return ret;

	pci_set_master(pci_dev);
	return virtio_device_restore(&vp_dev->vdev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
#endif

/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);

static void virtio_pci_release_dev(struct device *_d)
{
	struct virtio_device *vdev = dev_to_virtio(_d);
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* As struct device is a kobject, it's not safe to
	 * free the memory (including the reference counter itself)
	 * until its release callback. */
	kfree(vp_dev);
}
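
/* Probe order: the modern (virtio 1.0) register layout is tried first,
 * and transitional devices fall back to the legacy layout on -ENODEV,
 * unless force_legacy reverses the preference. */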
static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev, *reg_dev = NULL;
	int rc;

	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (!vp_dev)
		return -ENOMEM;

	pci_set_drvdata(pci_dev, vp_dev);
	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* enable the device */
	rc = pci_enable_device(pci_dev);
	if (rc)
		goto err_enable_device;

	if (force_legacy) {
		rc = virtio_pci_legacy_probe(vp_dev);
		/* Also try modern mode if we can't map BAR0 (no IO space). */
		if (rc == -ENODEV || rc == -ENOMEM)
			rc = virtio_pci_modern_probe(vp_dev);
		if (rc)
			goto err_probe;
	} else {
		rc = virtio_pci_modern_probe(vp_dev);
		if (rc == -ENODEV)
			rc = virtio_pci_legacy_probe(vp_dev);
		if (rc)
			goto err_probe;
	}

	pci_set_master(pci_dev);

	rc = register_virtio_device(&vp_dev->vdev);
	reg_dev = vp_dev;
	if (rc)
		goto err_register;

	return 0;

err_register:
	if (vp_dev->ioaddr)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);
err_probe:
	pci_disable_device(pci_dev);
err_enable_device:
	if (reg_dev)
		put_device(&vp_dev->vdev.dev);
	else
		kfree(vp_dev);
	return rc;
}

static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct device *dev = get_device(&vp_dev->vdev.dev);

	pci_disable_sriov(pci_dev);

	unregister_virtio_device(&vp_dev->vdev);

	if (vp_dev->ioaddr)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);

	pci_disable_device(pci_dev);
	put_device(dev);
}

static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct virtio_device *vdev = &vp_dev->vdev;
	int ret;

	if (!(vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK))
		return -EBUSY;

	if (!__virtio_test_bit(vdev, VIRTIO_F_SR_IOV))
		return -EINVAL;

	if (pci_vfs_assigned(pci_dev))
		return -EPERM;

	if (num_vfs == 0) {
		pci_disable_sriov(pci_dev);
		return 0;
	}

	ret = pci_enable_sriov(pci_dev, num_vfs);
	if (ret < 0)
		return ret;

	return num_vfs;
}

static struct pci_driver virtio_pci_driver = {
	.name		= "virtio-pci",
	.id_table	= virtio_pci_id_table,
	.probe		= virtio_pci_probe,
	.remove		= virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm	= &virtio_pci_pm_ops,
#endif
	.sriov_configure = virtio_pci_sriov_configure,
};

module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");