/*
 * drivers/pci/iov.c
 *
 * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
 *
 * PCI Express I/O Virtualization (IOV) support.
 *   Single Root IOV 1.0
 *   Address Translation Service 1.0
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/pci-ats.h>
#include "pci.h"

#define VIRTFN_ID_LEN	16

static inline u8 virtfn_bus(struct pci_dev *dev, int id)
{
	return dev->bus->number + ((dev->devfn + dev->sriov->offset +
				    dev->sriov->stride * id) >> 8);
}

static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
{
	return (dev->devfn + dev->sriov->offset +
		dev->sriov->stride * id) & 0xff;
}
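
/*
 * Worked example (illustrative numbers only, not taken from any real
 * device): assume the PF sits at 01:00.0 (bus 0x01, devfn 0x00) with
 * First VF Offset 0x01 and VF Stride 0x01.  VF id 3 then has routing ID
 * 0x00 + 0x01 + 0x01 * 3 = 0x04, so virtfn_bus() returns
 * 0x01 + (0x04 >> 8) = 0x01 and virtfn_devfn() returns 0x04 & 0xff = 0x04,
 * i.e. the VF appears as 01:00.4.  A large offset or stride can push the
 * routing ID past 0xff, in which case the VF lands on a higher bus number.
 */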

static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
{
	struct pci_bus *child;

	if (bus->number == busnr)
		return bus;

	child = pci_find_bus(pci_domain_nr(bus), busnr);
	if (child)
		return child;

	child = pci_add_new_bus(bus, NULL, busnr);
	if (!child)
		return NULL;

	pci_bus_insert_busn_res(child, busnr, busnr);

	return child;
}

static void virtfn_remove_bus(struct pci_bus *physbus, struct pci_bus *virtbus)
{
	if (physbus != virtbus && list_empty(&virtbus->devices))
		pci_remove_bus(virtbus);
}

static int virtfn_add(struct pci_dev *dev, int id, int reset)
{
	int i;
	int rc = -ENOMEM;
	u64 size;
	char buf[VIRTFN_ID_LEN];
	struct pci_dev *virtfn;
	struct resource *res;
	struct pci_sriov *iov = dev->sriov;
	struct pci_bus *bus;

	mutex_lock(&iov->dev->sriov->lock);
	bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id));
	if (!bus)
		goto failed;

	virtfn = pci_alloc_dev(bus);
	if (!virtfn)
		goto failed0;

	virtfn->devfn = virtfn_devfn(dev, id);
	virtfn->vendor = dev->vendor;
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
	pci_setup_device(virtfn);
	virtfn->dev.parent = dev->dev.parent;
	virtfn->physfn = pci_dev_get(dev);
	virtfn->is_virtfn = 1;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		if (!res->parent)
			continue;
		virtfn->resource[i].name = pci_name(virtfn);
		virtfn->resource[i].flags = res->flags;
		size = resource_size(res);
		do_div(size, iov->total_VFs);
		virtfn->resource[i].start = res->start + size * id;
		virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
		rc = request_resource(res, &virtfn->resource[i]);
		BUG_ON(rc);
	}

	if (reset)
		__pci_reset_function(virtfn);

	pci_device_add(virtfn, virtfn->bus);
	mutex_unlock(&iov->dev->sriov->lock);

	rc = pci_bus_add_device(virtfn);
	sprintf(buf, "virtfn%u", id);
	rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
	if (rc)
		goto failed1;
	rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
	if (rc)
		goto failed2;

	kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);

	return 0;

failed2:
	sysfs_remove_link(&dev->dev.kobj, buf);
failed1:
	pci_dev_put(dev);
	mutex_lock(&iov->dev->sriov->lock);
	pci_stop_and_remove_bus_device(virtfn);
failed0:
	virtfn_remove_bus(dev->bus, bus);
failed:
	mutex_unlock(&iov->dev->sriov->lock);

	return rc;
}
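
/*
 * VF BAR carve-out, illustrated with made-up numbers: if the PF's IOV BAR 0
 * resource spans 0xf0000000-0xf00fffff (1 MB) and TotalVFs is 8, each VF
 * gets 1 MB / 8 = 128 KB, so VF id 2 is assigned 0xf0040000-0xf005ffff by
 * the loop in virtfn_add() above.  The per-VF size depends only on
 * TotalVFs, not on how many VFs are currently enabled.
 */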

static void virtfn_remove(struct pci_dev *dev, int id, int reset)
{
	char buf[VIRTFN_ID_LEN];
	struct pci_dev *virtfn;
	struct pci_sriov *iov = dev->sriov;

	virtfn = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
					     virtfn_bus(dev, id),
					     virtfn_devfn(dev, id));
	if (!virtfn)
		return;

	if (reset) {
		device_release_driver(&virtfn->dev);
		__pci_reset_function(virtfn);
	}

	sprintf(buf, "virtfn%u", id);
	sysfs_remove_link(&dev->dev.kobj, buf);
	/*
	 * pci_stop_dev() could have been called for this virtfn already,
	 * so the directory for the virtfn may have been removed before.
	 * Double check to avoid spurious sysfs warnings.
	 */
	if (virtfn->dev.kobj.sd)
		sysfs_remove_link(&virtfn->dev.kobj, "physfn");

	mutex_lock(&iov->dev->sriov->lock);
	pci_stop_and_remove_bus_device(virtfn);
	virtfn_remove_bus(dev->bus, virtfn->bus);
	mutex_unlock(&iov->dev->sriov->lock);

	/* balance pci_get_domain_bus_and_slot() */
	pci_dev_put(virtfn);
	pci_dev_put(dev);
}

static int sriov_migration(struct pci_dev *dev)
{
	u16 status;
	struct pci_sriov *iov = dev->sriov;

	if (!iov->num_VFs)
		return 0;

	if (!(iov->cap & PCI_SRIOV_CAP_VFM))
		return 0;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_STATUS, &status);
	if (!(status & PCI_SRIOV_STATUS_VFM))
		return 0;

	schedule_work(&iov->mtask);

	return 1;
}

static void sriov_migration_task(struct work_struct *work)
{
	int i;
	u8 state;
	u16 status;
	struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);

	for (i = iov->initial_VFs; i < iov->num_VFs; i++) {
		state = readb(iov->mstate + i);
		if (state == PCI_SRIOV_VFM_MI) {
			writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
			state = readb(iov->mstate + i);
			if (state == PCI_SRIOV_VFM_AV)
				virtfn_add(iov->self, i, 1);
		} else if (state == PCI_SRIOV_VFM_MO) {
			virtfn_remove(iov->self, i, 1);
			writeb(PCI_SRIOV_VFM_UA, iov->mstate + i);
			state = readb(iov->mstate + i);
			if (state == PCI_SRIOV_VFM_AV)
				virtfn_add(iov->self, i, 0);
		}
	}

	pci_read_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, &status);
	status &= ~PCI_SRIOV_STATUS_VFM;
	pci_write_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, status);
}

static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
{
	int bir;
	u32 table;
	resource_size_t pa;
	struct pci_sriov *iov = dev->sriov;

	if (nr_virtfn <= iov->initial_VFs)
		return 0;

	pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
	bir = PCI_SRIOV_VFM_BIR(table);
	if (bir > PCI_STD_RESOURCE_END)
		return -EIO;

	table = PCI_SRIOV_VFM_OFFSET(table);
	if (table + nr_virtfn > pci_resource_len(dev, bir))
		return -EIO;

	pa = pci_resource_start(dev, bir) + table;
	iov->mstate = ioremap(pa, nr_virtfn);
	if (!iov->mstate)
		return -ENOMEM;

	INIT_WORK(&iov->mtask, sriov_migration_task);

	iov->ctrl |= PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR;
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);

	return 0;
}

static void sriov_disable_migration(struct pci_dev *dev)
{
	struct pci_sriov *iov = dev->sriov;

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);

	cancel_work_sync(&iov->mtask);
	iounmap(iov->mstate);
}

static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
	int rc;
	int i, j;
	int nres;
	u16 offset, stride, initial;
	struct resource *res;
	struct pci_dev *pdev;
	struct pci_sriov *iov = dev->sriov;
	int bars = 0;

	if (!nr_virtfn)
		return 0;

	if (iov->num_VFs)
		return -EINVAL;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
	if (initial > iov->total_VFs ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total_VFs)))
		return -EIO;

	if (nr_virtfn < 0 || nr_virtfn > iov->total_VFs ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
		return -EINVAL;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
	if (!offset || (nr_virtfn > 1 && !stride))
		return -EIO;

	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		bars |= (1 << (i + PCI_IOV_RESOURCES));
		res = dev->resource + PCI_IOV_RESOURCES + i;
		if (res->parent)
			nres++;
	}
	if (nres != iov->nres) {
		dev_err(&dev->dev, "not enough MMIO resources for SR-IOV\n");
		return -ENOMEM;
	}

	iov->offset = offset;
	iov->stride = stride;

	if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->busn_res.end) {
		dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
		return -ENOMEM;
	}

	if (pci_enable_resources(dev, bars)) {
		dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
		return -ENOMEM;
	}

	if (iov->link != dev->devfn) {
		pdev = pci_get_slot(dev->bus, iov->link);
		if (!pdev)
			return -ENODEV;

		if (!pdev->is_physfn) {
			pci_dev_put(pdev);
			return -ENOSYS;
		}

		rc = sysfs_create_link(&dev->dev.kobj,
				       &pdev->dev.kobj, "dep_link");
		pci_dev_put(pdev);
		if (rc)
			return rc;
	}

	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	msleep(100);
	pci_cfg_access_unlock(dev);

	iov->initial_VFs = initial;
	if (nr_virtfn < initial)
		initial = nr_virtfn;

	for (i = 0; i < initial; i++) {
		rc = virtfn_add(dev, i, 0);
		if (rc)
			goto failed;
	}

	if (iov->cap & PCI_SRIOV_CAP_VFM) {
		rc = sriov_enable_migration(dev, nr_virtfn);
		if (rc)
			goto failed;
	}

	kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
	iov->num_VFs = nr_virtfn;

	return 0;

failed:
	for (j = 0; j < i; j++)
		virtfn_remove(dev, j, 0);

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0);
	ssleep(1);
	pci_cfg_access_unlock(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	return rc;
}

static void sriov_disable(struct pci_dev *dev)
{
	int i;
	struct pci_sriov *iov = dev->sriov;

	if (!iov->num_VFs)
		return;

	if (iov->cap & PCI_SRIOV_CAP_VFM)
		sriov_disable_migration(dev);

	for (i = 0; i < iov->num_VFs; i++)
		virtfn_remove(dev, i, 0);

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	ssleep(1);
	pci_cfg_access_unlock(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	iov->num_VFs = 0;
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0);
}

static int sriov_init(struct pci_dev *dev, int pos)
{
	int i;
	int rc;
	int nres;
	u32 pgsz;
	u16 ctrl, total, offset, stride;
	struct pci_sriov *iov;
	struct resource *res;
	struct pci_dev *pdev;

	if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END &&
	    pci_pcie_type(dev) != PCI_EXP_TYPE_ENDPOINT)
		return -ENODEV;

	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE) {
		pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
		ssleep(1);
	}

	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
	if (!total)
		return 0;

	ctrl = 0;
	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev->is_physfn)
			goto found;

	pdev = NULL;
	if (pci_ari_enabled(dev->bus))
		ctrl |= PCI_SRIOV_CTRL_ARI;

found:
	pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
	if (!offset || (total > 1 && !stride))
		return -EIO;

	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
	i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
	pgsz &= ~((1 << i) - 1);
	if (!pgsz)
		return -EIO;

	pgsz &= ~(pgsz - 1);
	pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);

	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		i += __pci_read_base(dev, pci_bar_unknown, res,
				     pos + PCI_SRIOV_BAR + i * 4);
		if (!res->flags)
			continue;
		if (resource_size(res) & (PAGE_SIZE - 1)) {
			rc = -EIO;
			goto failed;
		}
		res->end = res->start + resource_size(res) * total - 1;
		nres++;
	}

	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
	if (!iov) {
		rc = -ENOMEM;
		goto failed;
	}

	iov->pos = pos;
	iov->nres = nres;
	iov->ctrl = ctrl;
	iov->total_VFs = total;
	iov->offset = offset;
	iov->stride = stride;
	iov->pgsz = pgsz;
	iov->self = dev;
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
		iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);

	if (pdev)
		iov->dev = pci_dev_get(pdev);
	else
		iov->dev = dev;

	mutex_init(&iov->lock);

	dev->sriov = iov;
	dev->is_physfn = 1;

	return 0;

failed:
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		res->flags = 0;
	}

	return rc;
}

static void sriov_release(struct pci_dev *dev)
{
	BUG_ON(dev->sriov->num_VFs);

	if (dev != dev->sriov->dev)
		pci_dev_put(dev->sriov->dev);

	mutex_destroy(&dev->sriov->lock);

	kfree(dev->sriov);
	dev->sriov = NULL;
}

static void sriov_restore_state(struct pci_dev *dev)
{
	int i;
	u16 ctrl;
	struct pci_sriov *iov = dev->sriov;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE)
		return;

	for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
		pci_update_resource(dev, i);

	pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->num_VFs);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
		msleep(100);
}

/**
 * pci_iov_init - initialize the IOV capability
 * @dev: the PCI device
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_iov_init(struct pci_dev *dev)
{
	int pos;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (pos)
		return sriov_init(dev, pos);

	return -ENODEV;
}

/**
 * pci_iov_release - release resources used by the IOV capability
 * @dev: the PCI device
 */
void pci_iov_release(struct pci_dev *dev)
{
	if (dev->is_physfn)
		sriov_release(dev);
}

/**
 * pci_iov_resource_bar - get position of the SR-IOV BAR
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns position of the BAR encapsulated in the SR-IOV capability.
 */
int pci_iov_resource_bar(struct pci_dev *dev, int resno,
			 enum pci_bar_type *type)
{
	if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
		return 0;

	BUG_ON(!dev->is_physfn);

	*type = pci_bar_unknown;

	return dev->sriov->pos + PCI_SRIOV_BAR +
		4 * (resno - PCI_IOV_RESOURCES);
}

/**
 * pci_sriov_resource_alignment - get resource alignment for VF BAR
 * @dev: the PCI device
 * @resno: the resource number
 *
 * Returns the alignment of the VF BAR found in the SR-IOV capability.
 * This is not the same as the resource size which is defined as
 * the VF BAR size multiplied by the number of VFs.  The alignment
 * is just the VF BAR size.
 */
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
{
	struct resource tmp;
	enum pci_bar_type type;
	int reg = pci_iov_resource_bar(dev, resno, &type);

	if (!reg)
		return 0;

	__pci_read_base(dev, type, &tmp, reg);
	return resource_alignment(&tmp);
}

/**
 * pci_restore_iov_state - restore the state of the IOV capability
 * @dev: the PCI device
 */
void pci_restore_iov_state(struct pci_dev *dev)
{
	if (dev->is_physfn)
		sriov_restore_state(dev);
}

/**
 * pci_iov_bus_range - find bus range used by Virtual Function
 * @bus: the PCI bus
 *
 * Returns max number of buses (excluding the current one) used by
 * Virtual Functions.
 */
int pci_iov_bus_range(struct pci_bus *bus)
{
	int max = 0;
	u8 busnr;
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (!dev->is_physfn)
			continue;
		busnr = virtfn_bus(dev, dev->sriov->total_VFs - 1);
		if (busnr > max)
			max = busnr;
	}

	return max ? max - bus->number : 0;
}
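
/*
 * Example of the calculation above (hypothetical values): a PF on bus 0x01
 * at devfn 0x00 with First VF Offset 0x80, VF Stride 0x02 and total_VFs 64
 * places its last VF at routing ID 0x00 + 0x80 + 0x02 * 63 = 0xfe, still on
 * bus 0x01, so pci_iov_bus_range() contributes 0 extra buses.  With a
 * stride of 0x04 the same VF would reach 0x17c and land on bus 0x02, and
 * the function would return 1 so the bridge window can cover it.
 */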

/**
 * pci_enable_sriov - enable the SR-IOV capability
 * @dev: the PCI device
 * @nr_virtfn: number of virtual functions to enable
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{
	might_sleep();

	if (!dev->is_physfn)
		return -ENOSYS;

	return sriov_enable(dev, nr_virtfn);
}
EXPORT_SYMBOL_GPL(pci_enable_sriov);

/**
 * pci_disable_sriov - disable the SR-IOV capability
 * @dev: the PCI device
 */
void pci_disable_sriov(struct pci_dev *dev)
{
	might_sleep();

	if (!dev->is_physfn)
		return;

	sriov_disable(dev);
}
EXPORT_SYMBOL_GPL(pci_disable_sriov);

/**
 * pci_sriov_migration - notify SR-IOV core of Virtual Function Migration
 * @dev: the PCI device
 *
 * Returns IRQ_HANDLED if the IRQ is handled, or IRQ_NONE if not.
 *
 * The Physical Function driver is responsible for registering an IRQ
 * handler using the VF Migration Interrupt Message Number and for calling
 * this function when the interrupt is generated by the hardware.
 */
irqreturn_t pci_sriov_migration(struct pci_dev *dev)
{
	if (!dev->is_physfn)
		return IRQ_NONE;

	return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(pci_sriov_migration);
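
/*
 * Usage sketch (illustrative only; the foo_* names are hypothetical and not
 * part of this file).  A PF driver typically enables or disables VFs from
 * its ->sriov_configure() callback, and a driver for a device with VF
 * Migration support would also register an IRQ handler on the vector given
 * by the VF Migration Interrupt Message Number and forward it to
 * pci_sriov_migration():
 *
 *	static int foo_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
 *	{
 *		int rc;
 *
 *		if (num_vfs == 0) {
 *			pci_disable_sriov(pdev);
 *			return 0;
 *		}
 *
 *		rc = pci_enable_sriov(pdev, num_vfs);
 *		return rc ? rc : num_vfs;
 *	}
 *
 *	static irqreturn_t foo_vf_migration_irq(int irq, void *data)
 *	{
 *		struct pci_dev *pdev = data;
 *
 *		return pci_sriov_migration(pdev);
 *	}
 */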

/**
 * pci_num_vf - return number of VFs associated with a PF
 * @dev: the PCI device
 *
 * Returns number of VFs, or 0 if SR-IOV is not enabled.
 */
int pci_num_vf(struct pci_dev *dev)
{
	if (!dev->is_physfn)
		return 0;

	return dev->sriov->num_VFs;
}
EXPORT_SYMBOL_GPL(pci_num_vf);

/**
 * pci_vfs_assigned - returns number of VFs that are assigned to a guest
 * @dev: the PCI device
 *
 * Returns number of VFs belonging to this device that are assigned to a
 * guest.  If the device is not a physical function, returns 0.
 */
int pci_vfs_assigned(struct pci_dev *dev)
{
	struct pci_dev *vfdev;
	unsigned int vfs_assigned = 0;
	unsigned short dev_id;

	/* only search if we are a PF */
	if (!dev->is_physfn)
		return 0;

	/*
	 * determine the device ID for the VFs, the vendor ID will be the
	 * same as the PF so there is no need to check for that one
	 */
	pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_VF_DID, &dev_id);

	/* loop through all the VFs to see if we own any that are assigned */
	vfdev = pci_get_device(dev->vendor, dev_id, NULL);
	while (vfdev) {
		/*
		 * It is considered assigned if it is a virtual function with
		 * our dev as the physical function and the assigned bit is set
		 */
		if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
		    (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
			vfs_assigned++;

		vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
	}

	return vfs_assigned;
}
EXPORT_SYMBOL_GPL(pci_vfs_assigned);

/**
 * pci_sriov_set_totalvfs -- reduce the TotalVFs available
 * @dev: the PCI PF device
 * @numvfs: number that should be used for TotalVFs supported
 *
 * Should be called from PF driver's probe routine with
 * device's mutex held.
 *
 * Returns 0 if PF is an SRIOV-capable device and the value of numvfs is
 * valid.  If not a PF, return -ENOSYS; if numvfs is invalid, return
 * -EINVAL; if VFs are already enabled, return -EBUSY.
 */
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{
	if (!dev->is_physfn)
		return -ENOSYS;
	if (numvfs > dev->sriov->total_VFs)
		return -EINVAL;

	/* Shouldn't change if VFs already enabled */
	if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE)
		return -EBUSY;
	else
		dev->sriov->driver_max_VFs = numvfs;

	return 0;
}
EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);

/**
 * pci_sriov_get_totalvfs -- get total VFs supported on this device
 * @dev: the PCI PF device
 *
 * For a PCIe device with SRIOV support, return the PCIe
 * SRIOV capability value of TotalVFs or the value of driver_max_VFs
 * if the driver reduced it.  Otherwise 0.
 */
int pci_sriov_get_totalvfs(struct pci_dev *dev)
{
	if (!dev->is_physfn)
		return 0;

	if (dev->sriov->driver_max_VFs)
		return dev->sriov->driver_max_VFs;

	return dev->sriov->total_VFs;
}
EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs);
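
/*
 * Probe-time sketch (illustrative; the foo_probe name and the limit of 16
 * VFs are hypothetical).  A PF driver that cannot support the full TotalVFs
 * advertised by the hardware can cap the value reported to user space:
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc = pci_sriov_set_totalvfs(pdev, 16);
 *
 *		if (rc)
 *			dev_warn(&pdev->dev, "could not limit TotalVFs\n");
 *
 *		dev_info(&pdev->dev, "up to %d VFs can be enabled\n",
 *			 pci_sriov_get_totalvfs(pdev));
 *		return 0;
 *	}
 */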