/*
 * PCI Stub Driver - Grabs devices in backend to be exported later
 *
 * Ryan Wilson <hap9@epoch.ncsc.mil>
 * Chris Bookholt <hap10@epoch.ncsc.mil>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <xen/events.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypervisor.h>
#include <xen/interface/physdev.h>
#include "pciback.h"
#include "conf_space.h"
#include "conf_space_quirks.h"

#define PCISTUB_DRIVER_NAME "pciback"

static char *pci_devs_to_hide;
wait_queue_head_t xen_pcibk_aer_wait_queue;
/*
 * Semaphore to synchronize AER handling with xen_pcibk remove/reconfigure
 * operations: a xen_pcibk device must not be removed while an AER operation
 * is in progress.
 */
static DECLARE_RWSEM(pcistub_sem);
module_param_named(hide, pci_devs_to_hide, charp, 0444);

struct pcistub_device_id {
	struct list_head slot_list;
	int domain;
	unsigned char bus;
	unsigned int devfn;
};
static LIST_HEAD(pcistub_device_ids);
static DEFINE_SPINLOCK(device_ids_lock);

struct pcistub_device {
	struct kref kref;
	struct list_head dev_list;
	spinlock_t lock;

	struct pci_dev *dev;
	struct xen_pcibk_device *pdev; /* non-NULL if struct pci_dev is in use */
};

/* Access to pcistub_devices & seized_devices lists and the initialize_devices
 * flag must be locked with pcistub_devices_lock
 */
static DEFINE_SPINLOCK(pcistub_devices_lock);
static LIST_HEAD(pcistub_devices);

/* wait for device_initcall before initializing our devices
 * (see pcistub_init_devices_late)
 */
static int initialize_devices;
static LIST_HEAD(seized_devices);

static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
{
	struct pcistub_device *psdev;

	dev_dbg(&dev->dev, "pcistub_device_alloc\n");

	psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC);
	if (!psdev)
		return NULL;

	psdev->dev = pci_dev_get(dev);
	if (!psdev->dev) {
		kfree(psdev);
		return NULL;
	}

	kref_init(&psdev->kref);
	spin_lock_init(&psdev->lock);

	return psdev;
}

/* Don't call this directly as it's called by pcistub_device_put */
static void pcistub_device_release(struct kref *kref)
{
	struct pcistub_device *psdev;
	struct pci_dev *dev;
	struct xen_pcibk_dev_data *dev_data;

	psdev = container_of(kref, struct pcistub_device, kref);
	dev = psdev->dev;
	dev_data = pci_get_drvdata(dev);

	dev_dbg(&dev->dev, "pcistub_device_release\n");

	xen_unregister_device_domain_owner(dev);

	/* Call the reset variant that does not take the device lock, as this
	 * is called from "unbind", which already holds the device_lock mutex.
	 */
	__pci_reset_function_locked(dev);
	if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
		dev_info(&dev->dev, "Could not reload PCI state\n");
	else
		pci_restore_state(dev);

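	/*
	 * If the device was prepared for MSI-X pass-through in
	 * pcistub_init_device() (PHYSDEVOP_prepare_msix), release the
	 * hypervisor-side MSI-X state again before giving the device back.
	 */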
	if (dev->msix_cap) {
		struct physdev_pci_device ppdev = {
			.seg = pci_domain_nr(dev->bus),
			.bus = dev->bus->number,
			.devfn = dev->devfn
		};
		int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix,
						&ppdev);

		if (err && err != -ENOSYS)
			dev_warn(&dev->dev, "MSI-X release failed (%d)\n",
				 err);
	}

	/* Disable the device */
	xen_pcibk_reset_device(dev);

	kfree(dev_data);
	pci_set_drvdata(dev, NULL);

	/* Clean-up the device */
	xen_pcibk_config_free_dyn_fields(dev);
	xen_pcibk_config_free_dev(dev);

	pci_clear_dev_assigned(dev);
	pci_dev_put(dev);

	kfree(psdev);
}

static inline void pcistub_device_get(struct pcistub_device *psdev)
{
	kref_get(&psdev->kref);
}

static inline void pcistub_device_put(struct pcistub_device *psdev)
{
	kref_put(&psdev->kref, pcistub_device_release);
}

static struct pcistub_device *pcistub_device_find_locked(int domain, int bus,
							 int slot, int func)
{
	struct pcistub_device *psdev;

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev != NULL
		    && domain == pci_domain_nr(psdev->dev->bus)
		    && bus == psdev->dev->bus->number
		    && slot == PCI_SLOT(psdev->dev->devfn)
		    && func == PCI_FUNC(psdev->dev->devfn)) {
			return psdev;
		}
	}

	return NULL;
}

static struct pcistub_device *pcistub_device_find(int domain, int bus,
						  int slot, int func)
{
	struct pcistub_device *psdev;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	psdev = pcistub_device_find_locked(domain, bus, slot, func);
	if (psdev)
		pcistub_device_get(psdev);

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return psdev;
}

static struct pci_dev *pcistub_device_get_pci_dev(struct xen_pcibk_device *pdev,
						  struct pcistub_device *psdev)
{
	struct pci_dev *pci_dev = NULL;
	unsigned long flags;

	pcistub_device_get(psdev);

	spin_lock_irqsave(&psdev->lock, flags);
	if (!psdev->pdev) {
		psdev->pdev = pdev;
		pci_dev = psdev->dev;
	}
	spin_unlock_irqrestore(&psdev->lock, flags);

	if (!pci_dev)
		pcistub_device_put(psdev);

	return pci_dev;
}

struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
					    int domain, int bus,
					    int slot, int func)
{
	struct pcistub_device *psdev;
	struct pci_dev *found_dev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	psdev = pcistub_device_find_locked(domain, bus, slot, func);
	if (psdev)
		found_dev = pcistub_device_get_pci_dev(pdev, psdev);

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return found_dev;
}

struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
				    struct pci_dev *dev)
{
	struct pcistub_device *psdev;
	struct pci_dev *found_dev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev == dev) {
			found_dev = pcistub_device_get_pci_dev(pdev, psdev);
			break;
		}
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return found_dev;
}

/*
 * Called when:
 * - XenBus state has been reconfigured (PCI unplug). See xen_pcibk_remove_device
 * - XenBus state has been disconnected (guest shutdown). See xen_pcibk_xenbus_remove
 * - 'echo BDF > unbind' on pciback module with no guest attached. See pcistub_remove
 * - 'echo BDF > unbind' with a guest still using it. See pcistub_remove
 *
 * As such we have to be careful.
 *
 * To make this easier, the caller has to hold the device lock.
 */
void pcistub_put_pci_dev(struct pci_dev *dev)
{
	struct pcistub_device *psdev, *found_psdev = NULL;
	unsigned long flags;
	struct xen_pcibk_dev_data *dev_data;
	int ret;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev == dev) {
			found_psdev = psdev;
			break;
		}
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	if (WARN_ON(!found_psdev))
		return;

	/* Hold this lock to avoid breaking the link between pcistub and
	 * xen_pcibk while AER processing is in progress.
	 */
	down_write(&pcistub_sem);
	/* Clean up our device
	 * (so it's ready for the next domain)
	 */
	device_lock_assert(&dev->dev);
	__pci_reset_function_locked(dev);

	dev_data = pci_get_drvdata(dev);
	ret = pci_load_saved_state(dev, dev_data->pci_saved_state);
	if (!ret) {
		/*
		 * The usual sequence is pci_save_state & pci_restore_state,
		 * but the guest might have messed up the configuration space.
		 * Use the initial version (from when the device was bound to us).
		 */
		pci_restore_state(dev);
	} else
		dev_info(&dev->dev, "Could not reload PCI state\n");
	/* This disables the device. */
	xen_pcibk_reset_device(dev);

	/* And clean up our emulated fields. */
	xen_pcibk_config_reset_dev(dev);
	xen_pcibk_config_free_dyn_fields(dev);

	xen_unregister_device_domain_owner(dev);

	spin_lock_irqsave(&found_psdev->lock, flags);
	found_psdev->pdev = NULL;
	spin_unlock_irqrestore(&found_psdev->lock, flags);

	pcistub_device_put(found_psdev);
	up_write(&pcistub_sem);
}

static int pcistub_match_one(struct pci_dev *dev,
			     struct pcistub_device_id *pdev_id)
{
	/* Match the specified device by domain, bus, slot, func and also if
	 * any of the device's parent bridges match.
	 */
	for (; dev != NULL; dev = dev->bus->self) {
		if (pci_domain_nr(dev->bus) == pdev_id->domain
		    && dev->bus->number == pdev_id->bus
		    && dev->devfn == pdev_id->devfn)
			return 1;

		/* Sometimes topmost bridge links to itself. */
		if (dev == dev->bus->self)
			break;
	}

	return 0;
}

static int pcistub_match(struct pci_dev *dev)
{
	struct pcistub_device_id *pdev_id;
	unsigned long flags;
	int found = 0;

	spin_lock_irqsave(&device_ids_lock, flags);
	list_for_each_entry(pdev_id, &pcistub_device_ids, slot_list) {
		if (pcistub_match_one(dev, pdev_id)) {
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&device_ids_lock, flags);

	return found;
}

static int pcistub_init_device(struct pci_dev *dev)
{
	struct xen_pcibk_dev_data *dev_data;
	int err = 0;

	dev_dbg(&dev->dev, "initializing...\n");

	/* The PCI backend is not intended to be a module (or to work with
	 * removable PCI devices) yet. If it were, xen_pcibk_config_free()
	 * would need to be called somewhere to free the memory allocated
	 * here and then to call kfree(pci_get_drvdata(psdev->dev)).
	 */
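	/*
	 * Note: the allocation is over-sized so that the trailing bytes can
	 * hold dev_data->irq_name (DRV_NAME "[<pci_name>]"), which is
	 * formatted by the sprintf() below.
	 */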
	dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]")
			   + strlen(pci_name(dev)) + 1, GFP_ATOMIC);
	if (!dev_data) {
		err = -ENOMEM;
		goto out;
	}
	pci_set_drvdata(dev, dev_data);

	/*
	 * Set up the name for the fake IRQ handler. It will only be enabled
	 * once the device is turned on by the guest.
	 */
	sprintf(dev_data->irq_name, DRV_NAME "[%s]", pci_name(dev));

	dev_dbg(&dev->dev, "initializing config\n");

	init_waitqueue_head(&xen_pcibk_aer_wait_queue);
	err = xen_pcibk_config_init_dev(dev);
	if (err)
		goto out;

	/* HACK: Force the device (& ACPI) to determine what IRQ it's on - we
	 * must do this here because pcibios_enable_device may specify
	 * the PCI device's true IRQ (and possibly its other resources)
	 * if they differ from what's in the configuration space.
	 * This makes the assumption that the device's resources won't
	 * change after this point (otherwise this code may break!)
	 */
	dev_dbg(&dev->dev, "enabling device\n");
	err = pci_enable_device(dev);
	if (err)
		goto config_release;

	if (dev->msix_cap) {
		struct physdev_pci_device ppdev = {
			.seg = pci_domain_nr(dev->bus),
			.bus = dev->bus->number,
			.devfn = dev->devfn
		};

		err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev);
		if (err && err != -ENOSYS)
			dev_err(&dev->dev, "MSI-X preparation failed (%d)\n",
				err);
	}

	/* We need the device active to save the state. */
	dev_dbg(&dev->dev, "save state of device\n");
	pci_save_state(dev);
	dev_data->pci_saved_state = pci_store_saved_state(dev);
	if (!dev_data->pci_saved_state)
		dev_err(&dev->dev, "Could not store PCI conf saved state!\n");
	else {
		dev_dbg(&dev->dev, "resetting (FLR, D3, etc) the device\n");
		__pci_reset_function_locked(dev);
		pci_restore_state(dev);
	}
	/* Now disable the device (this also ensures some private device
	 * data is set up before we export)
	 */
	dev_dbg(&dev->dev, "reset device\n");
	xen_pcibk_reset_device(dev);

	pci_set_dev_assigned(dev);
	return 0;

config_release:
	xen_pcibk_config_free_dev(dev);

out:
	pci_set_drvdata(dev, NULL);
	kfree(dev_data);
	return err;
}

/*
 * Because some initialization still happens on
 * devices during fs_initcall, we need to defer
 * full initialization of our devices until
 * device_initcall.
 */
static int __init pcistub_init_devices_late(void)
{
	struct pcistub_device *psdev;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	while (!list_empty(&seized_devices)) {
		psdev = container_of(seized_devices.next,
				     struct pcistub_device, dev_list);
		list_del(&psdev->dev_list);

		spin_unlock_irqrestore(&pcistub_devices_lock, flags);

		err = pcistub_init_device(psdev->dev);
		if (err) {
			dev_err(&psdev->dev->dev,
				"error %d initializing device\n", err);
			kfree(psdev);
			psdev = NULL;
		}

		spin_lock_irqsave(&pcistub_devices_lock, flags);

		if (psdev)
			list_add_tail(&psdev->dev_list, &pcistub_devices);
	}

	initialize_devices = 1;

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

	return 0;
}

static void pcistub_device_id_add_list(struct pcistub_device_id *new,
				       int domain, int bus, unsigned int devfn)
{
	struct pcistub_device_id *pci_dev_id;
	unsigned long flags;
	int found = 0;

	spin_lock_irqsave(&device_ids_lock, flags);

	list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
		if (pci_dev_id->domain == domain && pci_dev_id->bus == bus &&
		    pci_dev_id->devfn == devfn) {
			found = 1;
			break;
		}
	}

	if (!found) {
		new->domain = domain;
		new->bus = bus;
		new->devfn = devfn;
		list_add_tail(&new->slot_list, &pcistub_device_ids);
	}

	spin_unlock_irqrestore(&device_ids_lock, flags);

	if (found)
		kfree(new);
}

static int pcistub_seize(struct pci_dev *dev,
			 struct pcistub_device_id *pci_dev_id)
{
	struct pcistub_device *psdev;
	unsigned long flags;
	int err = 0;

	psdev = pcistub_device_alloc(dev);
	if (!psdev) {
		kfree(pci_dev_id);
		return -ENOMEM;
	}

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	if (initialize_devices) {
		spin_unlock_irqrestore(&pcistub_devices_lock, flags);

		/* don't want irqs disabled when calling pcistub_init_device */
		err = pcistub_init_device(psdev->dev);

		spin_lock_irqsave(&pcistub_devices_lock, flags);

		if (!err)
			list_add(&psdev->dev_list, &pcistub_devices);
	} else {
		dev_dbg(&dev->dev, "deferring initialization\n");
		list_add(&psdev->dev_list, &seized_devices);
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

	if (err) {
		kfree(pci_dev_id);
		pcistub_device_put(psdev);
	} else if (pci_dev_id)
		pcistub_device_id_add_list(pci_dev_id, pci_domain_nr(dev->bus),
					   dev->bus->number, dev->devfn);

	return err;
}

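/*
 * Besides the "hide" module parameter, a device can also be handed to this
 * driver at run time via the generic driver_override mechanism, e.g. (BDF is
 * illustrative):
 *   echo pciback > /sys/bus/pci/devices/0000:03:00.0/driver_override
 *   echo 0000:03:00.0 > /sys/bus/pci/drivers/pciback/bind
 * pcistub_probe() below accepts such devices even when they do not match any
 * entry in the seize list.
 */
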
/* Called when 'bind'. This means we must _NOT_ call pci_reset_function or
 * other functions that take the sysfs lock. */
static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int err = 0, match;
	struct pcistub_device_id *pci_dev_id = NULL;

	dev_dbg(&dev->dev, "probing...\n");

	match = pcistub_match(dev);

	if ((dev->driver_override &&
	     !strcmp(dev->driver_override, PCISTUB_DRIVER_NAME)) ||
	    match) {

		if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
		    && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
			dev_err(&dev->dev, "can't export pci devices that "
				"don't have a normal (0) or bridge (1) "
				"header type!\n");
			err = -ENODEV;
			goto out;
		}

		if (!match) {
			pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_ATOMIC);
			if (!pci_dev_id) {
				err = -ENOMEM;
				goto out;
			}
		}

		dev_info(&dev->dev, "seizing device\n");
		err = pcistub_seize(dev, pci_dev_id);
	} else
		/* Didn't find the device */
		err = -ENODEV;

out:
	return err;
}

/* Called when 'unbind'. This means we must _NOT_ call pci_reset_function or
 * other functions that take the sysfs lock. */
static void pcistub_remove(struct pci_dev *dev)
{
	struct pcistub_device *psdev, *found_psdev = NULL;
	unsigned long flags;

	dev_dbg(&dev->dev, "removing\n");

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	xen_pcibk_config_quirk_release(dev);

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev == dev) {
			found_psdev = psdev;
			break;
		}
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

	if (found_psdev) {
		dev_dbg(&dev->dev, "found device to remove %s\n",
			found_psdev->pdev ? "- in-use" : "");

		if (found_psdev->pdev) {
			int domid = xen_find_device_domain_owner(dev);

			pr_warn("****** removing device %s while still in-use by domain %d! ******\n",
				pci_name(found_psdev->dev), domid);
			pr_warn("****** driver domain may still access this device's i/o resources!\n");
			pr_warn("****** shutdown driver domain before binding device\n");
			pr_warn("****** to other drivers or domains\n");

			/* N.B. This ends up calling pcistub_put_pci_dev which ends up
			 * doing the FLR. */
			xen_pcibk_release_pci_dev(found_psdev->pdev,
						  found_psdev->dev,
						  false /* caller holds the lock. */);
		}

		spin_lock_irqsave(&pcistub_devices_lock, flags);
		list_del(&found_psdev->dev_list);
		spin_unlock_irqrestore(&pcistub_devices_lock, flags);

		/* the final put for releasing from the list */
		pcistub_device_put(found_psdev);
	}
}

static const struct pci_device_id pcistub_ids[] = {
	{
	 .vendor = PCI_ANY_ID,
	 .device = PCI_ANY_ID,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 },
	{0,},
};

#define PCI_NODENAME_MAX 40
static void kill_domain_by_device(struct pcistub_device *psdev)
{
	struct xenbus_transaction xbt;
	int err;
	char nodename[PCI_NODENAME_MAX];

	BUG_ON(!psdev);
	snprintf(nodename, PCI_NODENAME_MAX, "/local/domain/0/backend/pci/%d/0",
		 psdev->pdev->xdev->otherend_id);

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		dev_err(&psdev->dev->dev,
			"error %d when starting xenbus transaction\n", err);
		return;
	}
	/* PV AER handlers will set this flag */
	xenbus_printf(xbt, nodename, "aerState", "aerfail");
	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		dev_err(&psdev->dev->dev,
			"error %d when ending xenbus transaction\n", err);
		return;
	}
}

/* For each AER recovery step (error_detected, mmio_enabled, etc.) the
 * frontend and the backend need to cooperate. In xen_pcibk each step does a
 * similar job: send a service request and wait for the frontend's response.
 */
static pci_ers_result_t common_process(struct pcistub_device *psdev,
				       pci_channel_state_t state, int aer_cmd,
				       pci_ers_result_t result)
{
	pci_ers_result_t res = result;
	struct xen_pcie_aer_op *aer_op;
	struct xen_pcibk_device *pdev = psdev->pdev;
	struct xen_pci_sharedinfo *sh_info = pdev->sh_info;
	int ret;

	/* with PV AER drivers */
	aer_op = &(sh_info->aer_op);
	aer_op->cmd = aer_cmd;
	/* useful for the error_detected callback */
	aer_op->err = state;
	/* pcifront (frontend) BDF */
	ret = xen_pcibk_get_pcifront_dev(psdev->dev, psdev->pdev,
					 &aer_op->domain, &aer_op->bus,
					 &aer_op->devfn);
	if (!ret) {
		dev_err(&psdev->dev->dev,
			DRV_NAME ": failed to get pcifront device\n");
		return PCI_ERS_RESULT_NONE;
	}
	wmb();

	dev_dbg(&psdev->dev->dev,
		DRV_NAME ": aer_op %x dom %x bus %x devfn %x\n",
		aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn);
	/* Local flag marking that an AER request is pending; the xen_pcibk
	 * callback uses it to decide whether it needs to check for the
	 * frontend's AER service acknowledgement.
	 */
	set_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);

	/* It is possible that a pcifront conf_read_write request invokes the
	 * callback, causing a spurious execution of wake_up. Yet this is
	 * harmless and better than a spinlock here.
	 */
	set_bit(_XEN_PCIB_active,
		(unsigned long *)&sh_info->flags);
	wmb();
	notify_remote_via_irq(pdev->evtchn_irq);

	ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
				 !(test_bit(_XEN_PCIB_active, (unsigned long *)
				 &sh_info->flags)), 300*HZ);

	if (!ret) {
		if (test_bit(_XEN_PCIB_active,
			(unsigned long *)&sh_info->flags)) {
			dev_err(&psdev->dev->dev,
				"pcifront aer process not responding!\n");
			clear_bit(_XEN_PCIB_active,
				  (unsigned long *)&sh_info->flags);
			aer_op->err = PCI_ERS_RESULT_NONE;
			return res;
		}
	}
	clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);

	if (test_bit(_XEN_PCIF_active,
		(unsigned long *)&sh_info->flags)) {
		dev_dbg(&psdev->dev->dev,
			"schedule pci_conf service in " DRV_NAME "\n");
		xen_pcibk_test_and_schedule_op(psdev->pdev);
	}

	res = (pci_ers_result_t)aer_op->err;
	return res;
}

/*
 * xen_pcibk_slot_reset: send the slot_reset request to pcifront in case the
 * device driver can provide this service, and then wait for pcifront's ack.
 * @dev: pointer to PCI devices
 * return value is used by aer_core do_recovery policy
 */
static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
{
	struct pcistub_device *psdev;
	pci_ers_result_t result;

	result = PCI_ERS_RESULT_RECOVERED;
	dev_dbg(&dev->dev, "xen_pcibk_slot_reset(bus:%x,devfn:%x)\n",
		dev->bus->number, dev->devfn);

	down_write(&pcistub_sem);
	psdev = pcistub_device_find(pci_domain_nr(dev->bus),
				    dev->bus->number,
				    PCI_SLOT(dev->devfn),
				    PCI_FUNC(dev->devfn));

	if (!psdev || !psdev->pdev) {
		dev_err(&dev->dev,
			DRV_NAME " device is not found/assigned\n");
		goto end;
	}

	if (!psdev->pdev->sh_info) {
		dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
			" by HVM, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}

	if (!test_bit(_XEN_PCIB_AERHANDLER,
		(unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_err(&dev->dev,
			"guest with no AER driver should have been killed\n");
		goto end;
	}
	result = common_process(psdev, 1, XEN_PCI_OP_aer_slotreset, result);

	if (result == PCI_ERS_RESULT_NONE ||
	    result == PCI_ERS_RESULT_DISCONNECT) {
		dev_dbg(&dev->dev,
			"No AER slot_reset service or disconnected!\n");
		kill_domain_by_device(psdev);
	}
end:
	if (psdev)
		pcistub_device_put(psdev);
	up_write(&pcistub_sem);
	return result;
}

/*
 * xen_pcibk_mmio_enabled: send the mmio_enabled request to pcifront in case
 * the device driver can provide this service, and then wait for pcifront's
 * ack.
 * @dev: pointer to PCI devices
 * return value is used by aer_core do_recovery policy
 */
static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
{
	struct pcistub_device *psdev;
	pci_ers_result_t result;

	result = PCI_ERS_RESULT_RECOVERED;
	dev_dbg(&dev->dev, "xen_pcibk_mmio_enabled(bus:%x,devfn:%x)\n",
		dev->bus->number, dev->devfn);

	down_write(&pcistub_sem);
	psdev = pcistub_device_find(pci_domain_nr(dev->bus),
				    dev->bus->number,
				    PCI_SLOT(dev->devfn),
				    PCI_FUNC(dev->devfn));

	if (!psdev || !psdev->pdev) {
		dev_err(&dev->dev,
			DRV_NAME " device is not found/assigned\n");
		goto end;
	}

	if (!psdev->pdev->sh_info) {
		dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
			" by HVM, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}

	if (!test_bit(_XEN_PCIB_AERHANDLER,
		(unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_err(&dev->dev,
			"guest with no AER driver should have been killed\n");
		goto end;
	}
	result = common_process(psdev, 1, XEN_PCI_OP_aer_mmio, result);

	if (result == PCI_ERS_RESULT_NONE ||
	    result == PCI_ERS_RESULT_DISCONNECT) {
		dev_dbg(&dev->dev,
			"No AER mmio_enabled service or disconnected!\n");
		kill_domain_by_device(psdev);
	}
end:
	if (psdev)
		pcistub_device_put(psdev);
	up_write(&pcistub_sem);
	return result;
}

/*
 * xen_pcibk_error_detected: send the error_detected request to pcifront in
 * case the device driver can provide this service, and then wait for
 * pcifront's ack.
 * @dev: pointer to PCI devices
 * @error: the current PCI connection state
 * return value is used by aer_core do_recovery policy
 */
static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
						 pci_channel_state_t error)
{
	struct pcistub_device *psdev;
	pci_ers_result_t result;

	result = PCI_ERS_RESULT_CAN_RECOVER;
	dev_dbg(&dev->dev, "xen_pcibk_error_detected(bus:%x,devfn:%x)\n",
		dev->bus->number, dev->devfn);

	down_write(&pcistub_sem);
	psdev = pcistub_device_find(pci_domain_nr(dev->bus),
				    dev->bus->number,
				    PCI_SLOT(dev->devfn),
				    PCI_FUNC(dev->devfn));

	if (!psdev || !psdev->pdev) {
		dev_err(&dev->dev,
			DRV_NAME " device is not found/assigned\n");
		goto end;
	}

	if (!psdev->pdev->sh_info) {
		dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
			" by HVM, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}

	/* Guest owns the device yet no AER handler is registered; kill the guest */
	if (!test_bit(_XEN_PCIB_AERHANDLER,
		(unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_dbg(&dev->dev, "guest may have no aer driver, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}
	result = common_process(psdev, error, XEN_PCI_OP_aer_detected, result);

	if (result == PCI_ERS_RESULT_NONE ||
	    result == PCI_ERS_RESULT_DISCONNECT) {
		dev_dbg(&dev->dev,
			"No AER error_detected service or disconnected!\n");
		kill_domain_by_device(psdev);
	}
end:
	if (psdev)
		pcistub_device_put(psdev);
	up_write(&pcistub_sem);
	return result;
}

/*
 * xen_pcibk_error_resume: send the error_resume request to pcifront in case
 * the device driver can provide this service, and then wait for pcifront's
 * ack.
 * @dev: pointer to PCI devices
 */
static void xen_pcibk_error_resume(struct pci_dev *dev)
{
	struct pcistub_device *psdev;

	dev_dbg(&dev->dev, "xen_pcibk_error_resume(bus:%x,devfn:%x)\n",
		dev->bus->number, dev->devfn);

	down_write(&pcistub_sem);
	psdev = pcistub_device_find(pci_domain_nr(dev->bus),
				    dev->bus->number,
				    PCI_SLOT(dev->devfn),
				    PCI_FUNC(dev->devfn));

	if (!psdev || !psdev->pdev) {
		dev_err(&dev->dev,
			DRV_NAME " device is not found/assigned\n");
		goto end;
	}

	if (!psdev->pdev->sh_info) {
		dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
			" by HVM, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}

	if (!test_bit(_XEN_PCIB_AERHANDLER,
		(unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_err(&dev->dev,
			"guest with no AER driver should have been killed\n");
		kill_domain_by_device(psdev);
		goto end;
	}
	common_process(psdev, 1, XEN_PCI_OP_aer_resume,
		       PCI_ERS_RESULT_RECOVERED);
end:
	if (psdev)
		pcistub_device_put(psdev);
	up_write(&pcistub_sem);
	return;
}

/* xen_pcibk AER handling */
static const struct pci_error_handlers xen_pcibk_error_handler = {
	.error_detected = xen_pcibk_error_detected,
	.mmio_enabled = xen_pcibk_mmio_enabled,
	.slot_reset = xen_pcibk_slot_reset,
	.resume = xen_pcibk_error_resume,
};

/*
 * Note: There is no MODULE_DEVICE_TABLE entry here because this isn't
 * for a normal device. I don't want it to be loaded automatically.
 */

static struct pci_driver xen_pcibk_pci_driver = {
	/* The name should be xen_pciback, but until the tools are updated
	 * we will keep it as pciback. */
	.name = PCISTUB_DRIVER_NAME,
	.id_table = pcistub_ids,
	.probe = pcistub_probe,
	.remove = pcistub_remove,
	.err_handler = &xen_pcibk_error_handler,
};

static inline int str_to_slot(const char *buf, int *domain, int *bus,
			      int *slot, int *func)
{
	int parsed = 0;

	switch (sscanf(buf, " %x:%x:%x.%x %n", domain, bus, slot, func,
		       &parsed)) {
	case 3:
		*func = -1;
		sscanf(buf, " %x:%x:%x.* %n", domain, bus, slot, &parsed);
		break;
	case 2:
		*slot = *func = -1;
		sscanf(buf, " %x:%x:*.* %n", domain, bus, &parsed);
		break;
	}
	if (parsed && !buf[parsed])
		return 0;

	/* try again without domain */
	*domain = 0;
	switch (sscanf(buf, " %x:%x.%x %n", bus, slot, func, &parsed)) {
	case 2:
		*func = -1;
		sscanf(buf, " %x:%x.* %n", bus, slot, &parsed);
		break;
	case 1:
		*slot = *func = -1;
		sscanf(buf, " %x:*.* %n", bus, &parsed);
		break;
	}
	if (parsed && !buf[parsed])
		return 0;

	return -EINVAL;
}

static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
			       *slot, int *func, int *reg, int *size, int *mask)
{
	int parsed = 0;

	sscanf(buf, " %x:%x:%x.%x-%x:%x:%x %n", domain, bus, slot, func,
	       reg, size, mask, &parsed);
	if (parsed && !buf[parsed])
		return 0;

	/* try again without domain */
	*domain = 0;
	sscanf(buf, " %x:%x.%x-%x:%x:%x %n", bus, slot, func, reg, size,
	       mask, &parsed);
	if (parsed && !buf[parsed])
		return 0;

	return -EINVAL;
}

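/*
 * A negative slot or func is treated as a wildcard: the call below expands
 * recursively over all 32 slots and/or all 8 functions on the bus.
 */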
static int pcistub_device_id_add(int domain, int bus, int slot, int func)
{
	struct pcistub_device_id *pci_dev_id;
	int rc = 0, devfn = PCI_DEVFN(slot, func);

	if (slot < 0) {
		for (slot = 0; !rc && slot < 32; ++slot)
			rc = pcistub_device_id_add(domain, bus, slot, func);
		return rc;
	}

	if (func < 0) {
		for (func = 0; !rc && func < 8; ++func)
			rc = pcistub_device_id_add(domain, bus, slot, func);
		return rc;
	}

	if ((
#if !defined(MODULE) /* pci_domains_supported is not being exported */ \
    || !defined(CONFIG_PCI_DOMAINS)
	     !pci_domains_supported ? domain :
#endif
	     domain < 0 || domain > 0xffff)
	    || bus < 0 || bus > 0xff
	    || PCI_SLOT(devfn) != slot
	    || PCI_FUNC(devfn) != func)
		return -EINVAL;

	pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
	if (!pci_dev_id)
		return -ENOMEM;

	pr_debug("wants to seize %04x:%02x:%02x.%d\n",
		 domain, bus, slot, func);

	pcistub_device_id_add_list(pci_dev_id, domain, bus, devfn);

	return 0;
}

static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
{
	struct pcistub_device_id *pci_dev_id, *t;
	int err = -ENOENT;
	unsigned long flags;

	spin_lock_irqsave(&device_ids_lock, flags);
	list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids,
				 slot_list) {
		if (pci_dev_id->domain == domain && pci_dev_id->bus == bus
		    && (slot < 0 || PCI_SLOT(pci_dev_id->devfn) == slot)
		    && (func < 0 || PCI_FUNC(pci_dev_id->devfn) == func)) {
			/* Don't break; here because it's possible the same
			 * slot could be in the list more than once
			 */
			list_del(&pci_dev_id->slot_list);
			kfree(pci_dev_id);

			err = 0;

			pr_debug("removed %04x:%02x:%02x.%d from seize list\n",
				 domain, bus, slot, func);
		}
	}
	spin_unlock_irqrestore(&device_ids_lock, flags);

	return err;
}

static int pcistub_reg_add(int domain, int bus, int slot, int func,
			   unsigned int reg, unsigned int size,
			   unsigned int mask)
{
	int err = 0;
	struct pcistub_device *psdev;
	struct pci_dev *dev;
	struct config_field *field;

	if (reg > 0xfff || (size < 4 && (mask >> (size * 8))))
		return -EINVAL;

	psdev = pcistub_device_find(domain, bus, slot, func);
	if (!psdev) {
		err = -ENODEV;
		goto out;
	}
	dev = psdev->dev;

	field = kzalloc(sizeof(*field), GFP_ATOMIC);
	if (!field) {
		err = -ENOMEM;
		goto out;
	}

	field->offset = reg;
	field->size = size;
	field->mask = mask;
	field->init = NULL;
	field->reset = NULL;
	field->release = NULL;
	field->clean = xen_pcibk_config_field_free;

	err = xen_pcibk_config_quirks_add_field(dev, field);
	if (err)
		kfree(field);
out:
	if (psdev)
		pcistub_device_put(psdev);
	return err;
}

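/*
 * sysfs interface (attributes of this driver, typically under
 * /sys/bus/pci/drivers/pciback/), e.g.:
 *   echo 0000:03:00.0 > /sys/bus/pci/drivers/pciback/new_slot
 * adds a BDF to the seize list; "remove_slot" drops it again and "slots"
 * lists the current entries.
 */
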
static ssize_t pcistub_slot_add(struct device_driver *drv, const char *buf,
				size_t count)
{
	int domain, bus, slot, func;
	int err;

	err = str_to_slot(buf, &domain, &bus, &slot, &func);
	if (err)
		goto out;

	err = pcistub_device_id_add(domain, bus, slot, func);

out:
	if (!err)
		err = count;
	return err;
}
static DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);

static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
				   size_t count)
{
	int domain, bus, slot, func;
	int err;

	err = str_to_slot(buf, &domain, &bus, &slot, &func);
	if (err)
		goto out;

	err = pcistub_device_id_remove(domain, bus, slot, func);

out:
	if (!err)
		err = count;
	return err;
}
static DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);

static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
{
	struct pcistub_device_id *pci_dev_id;
	size_t count = 0;
	unsigned long flags;

	spin_lock_irqsave(&device_ids_lock, flags);
	list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
		if (count >= PAGE_SIZE)
			break;

		count += scnprintf(buf + count, PAGE_SIZE - count,
				   "%04x:%02x:%02x.%d\n",
				   pci_dev_id->domain, pci_dev_id->bus,
				   PCI_SLOT(pci_dev_id->devfn),
				   PCI_FUNC(pci_dev_id->devfn));
	}
	spin_unlock_irqrestore(&device_ids_lock, flags);

	return count;
}
static DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);

static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)
{
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;
	size_t count = 0;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);
	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (count >= PAGE_SIZE)
			break;
		if (!psdev->dev)
			continue;
		dev_data = pci_get_drvdata(psdev->dev);
		if (!dev_data)
			continue;
		count +=
		    scnprintf(buf + count, PAGE_SIZE - count,
			      "%s:%s:%sing:%ld\n",
			      pci_name(psdev->dev),
			      dev_data->isr_on ? "on" : "off",
			      dev_data->ack_intr ? "ack" : "not ack",
			      dev_data->handled);
	}
	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return count;
}
static DRIVER_ATTR(irq_handlers, S_IRUSR, pcistub_irq_handler_show, NULL);

static ssize_t pcistub_irq_handler_switch(struct device_driver *drv,
					  const char *buf,
					  size_t count)
{
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;
	int domain, bus, slot, func;
	int err;

	err = str_to_slot(buf, &domain, &bus, &slot, &func);
	if (err)
		return err;

	psdev = pcistub_device_find(domain, bus, slot, func);
	if (!psdev) {
		err = -ENOENT;
		goto out;
	}

	dev_data = pci_get_drvdata(psdev->dev);
	if (!dev_data) {
		err = -ENOENT;
		goto out;
	}

	dev_dbg(&psdev->dev->dev, "%s fake irq handler: %d->%d\n",
		dev_data->irq_name, dev_data->isr_on,
		!dev_data->isr_on);

	dev_data->isr_on = !(dev_data->isr_on);
	if (dev_data->isr_on)
		dev_data->ack_intr = 1;
out:
	if (psdev)
		pcistub_device_put(psdev);
	if (!err)
		err = count;
	return err;
}
static DRIVER_ATTR(irq_handler_state, S_IWUSR, NULL,
		   pcistub_irq_handler_switch);

static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
				 size_t count)
{
	int domain, bus, slot, func, reg, size, mask;
	int err;

	err = str_to_quirk(buf, &domain, &bus, &slot, &func, &reg, &size,
			   &mask);
	if (err)
		goto out;

	err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask);

out:
	if (!err)
		err = count;
	return err;
}

static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf)
{
	int count = 0;
	unsigned long flags;
	struct xen_pcibk_config_quirk *quirk;
	struct xen_pcibk_dev_data *dev_data;
	const struct config_field *field;
	const struct config_field_entry *cfg_entry;

	spin_lock_irqsave(&device_ids_lock, flags);
	list_for_each_entry(quirk, &xen_pcibk_quirks, quirks_list) {
		if (count >= PAGE_SIZE)
			goto out;

		count += scnprintf(buf + count, PAGE_SIZE - count,
				   "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n",
				   quirk->pdev->bus->number,
				   PCI_SLOT(quirk->pdev->devfn),
				   PCI_FUNC(quirk->pdev->devfn),
				   quirk->devid.vendor, quirk->devid.device,
				   quirk->devid.subvendor,
				   quirk->devid.subdevice);

		dev_data = pci_get_drvdata(quirk->pdev);

		list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
			field = cfg_entry->field;
			if (count >= PAGE_SIZE)
				goto out;

			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "\t\t%08x:%01x:%08x\n",
					   cfg_entry->base_offset +
					   field->offset, field->size,
					   field->mask);
		}
	}

out:
	spin_unlock_irqrestore(&device_ids_lock, flags);

	return count;
}
static DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show,
		   pcistub_quirk_add);

static ssize_t permissive_add(struct device_driver *drv, const char *buf,
			      size_t count)
{
	int domain, bus, slot, func;
	int err;
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;

	err = str_to_slot(buf, &domain, &bus, &slot, &func);
	if (err)
		goto out;

	psdev = pcistub_device_find(domain, bus, slot, func);
	if (!psdev) {
		err = -ENODEV;
		goto out;
	}

	dev_data = pci_get_drvdata(psdev->dev);
	/* the driver data for a device should never be null at this point */
	if (!dev_data) {
		err = -ENXIO;
		goto release;
	}
	if (!dev_data->permissive) {
		dev_data->permissive = 1;
		/* Let user know that what they're doing could be unsafe */
		dev_warn(&psdev->dev->dev, "enabling permissive mode "
			 "configuration space accesses!\n");
		dev_warn(&psdev->dev->dev,
			 "permissive mode is potentially unsafe!\n");
	}
release:
	pcistub_device_put(psdev);
out:
	if (!err)
		err = count;
	return err;
}

static ssize_t permissive_show(struct device_driver *drv, char *buf)
{
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;
	size_t count = 0;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);
	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (count >= PAGE_SIZE)
			break;
		if (!psdev->dev)
			continue;
		dev_data = pci_get_drvdata(psdev->dev);
		if (!dev_data || !dev_data->permissive)
			continue;
		count +=
		    scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
			      pci_name(psdev->dev));
	}
	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return count;
}
static DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show,
		   permissive_add);

static void pcistub_exit(void)
{
	driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_new_slot);
	driver_remove_file(&xen_pcibk_pci_driver.driver,
			   &driver_attr_remove_slot);
	driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_slots);
	driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_quirks);
	driver_remove_file(&xen_pcibk_pci_driver.driver,
			   &driver_attr_permissive);
	driver_remove_file(&xen_pcibk_pci_driver.driver,
			   &driver_attr_irq_handlers);
	driver_remove_file(&xen_pcibk_pci_driver.driver,
			   &driver_attr_irq_handler_state);
	pci_unregister_driver(&xen_pcibk_pci_driver);
}

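/*
 * Parse the "hide" module parameter (pci_devs_to_hide). Each entry is a
 * parenthesised BDF; the domain may be omitted (it then defaults to 0) and
 * the slot and/or function may be wildcarded with '*', e.g. (BDFs are
 * illustrative):
 *   hide=(0000:03:00.0)(04:02.*)(05:*.*)
 */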
static int __init pcistub_init(void)
{
	int pos = 0;
	int err = 0;
	int domain, bus, slot, func;
	int parsed;

	if (pci_devs_to_hide && *pci_devs_to_hide) {
		do {
			parsed = 0;

			err = sscanf(pci_devs_to_hide + pos,
				     " (%x:%x:%x.%x) %n",
				     &domain, &bus, &slot, &func, &parsed);
			switch (err) {
			case 3:
				func = -1;
				sscanf(pci_devs_to_hide + pos,
				       " (%x:%x:%x.*) %n",
				       &domain, &bus, &slot, &parsed);
				break;
			case 2:
				slot = func = -1;
				sscanf(pci_devs_to_hide + pos,
				       " (%x:%x:*.*) %n",
				       &domain, &bus, &parsed);
				break;
			}

			if (!parsed) {
				domain = 0;
				err = sscanf(pci_devs_to_hide + pos,
					     " (%x:%x.%x) %n",
					     &bus, &slot, &func, &parsed);
				switch (err) {
				case 2:
					func = -1;
					sscanf(pci_devs_to_hide + pos,
					       " (%x:%x.*) %n",
					       &bus, &slot, &parsed);
					break;
				case 1:
					slot = func = -1;
					sscanf(pci_devs_to_hide + pos,
					       " (%x:*.*) %n",
					       &bus, &parsed);
					break;
				}
			}

			if (parsed <= 0)
				goto parse_error;

			err = pcistub_device_id_add(domain, bus, slot, func);
			if (err)
				goto out;

			pos += parsed;
		} while (pci_devs_to_hide[pos]);
	}

	/* If we're the first PCI Device Driver to register, we're the
	 * first one to get offered PCI devices as they become
	 * available (and thus we can be the first to grab them)
	 */
	err = pci_register_driver(&xen_pcibk_pci_driver);
	if (err < 0)
		goto out;

	err = driver_create_file(&xen_pcibk_pci_driver.driver,
				 &driver_attr_new_slot);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_remove_slot);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_slots);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_quirks);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_permissive);

	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_irq_handlers);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_irq_handler_state);
	if (err)
		pcistub_exit();

out:
	return err;

parse_error:
	pr_err("Error parsing pci_devs_to_hide at \"%s\"\n",
	       pci_devs_to_hide + pos);
	return -EINVAL;
}

#ifndef MODULE
/*
 * fs_initcall happens before device_initcall
 * so xen_pcibk *should* get called first (b/c we
 * want to suck up any device before other drivers
 * get a chance by being the first pci device
 * driver to register)
 */
fs_initcall(pcistub_init);
#endif

#ifdef CONFIG_PCI_IOV
static struct pcistub_device *find_vfs(const struct pci_dev *pdev)
{
	struct pcistub_device *psdev = NULL;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&pcistub_devices_lock, flags);
	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (!psdev->pdev && psdev->dev != pdev
		    && pci_physfn(psdev->dev) == pdev) {
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	if (found)
		return psdev;
	return NULL;
}

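/*
 * Bus notifier: when a physical function (PF) is being unbound from its
 * driver, force any of its virtual functions (VFs) that are still parked in
 * pciback (and not handed out to a guest) to be released as well, so they do
 * not outlive the PF.
 */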
static int pci_stub_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct device *dev = data;
	const struct pci_dev *pdev = to_pci_dev(dev);

	if (action != BUS_NOTIFY_UNBIND_DRIVER)
		return NOTIFY_DONE;

	if (!pdev->is_physfn)
		return NOTIFY_DONE;

	for (;;) {
		struct pcistub_device *psdev = find_vfs(pdev);
		if (!psdev)
			break;
		device_release_driver(&psdev->dev->dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block pci_stub_nb = {
	.notifier_call = pci_stub_notifier,
};
#endif

static int __init xen_pcibk_init(void)
{
	int err;

	if (!xen_initial_domain())
		return -ENODEV;

	err = xen_pcibk_config_init();
	if (err)
		return err;

#ifdef MODULE
	err = pcistub_init();
	if (err < 0)
		return err;
#endif

	pcistub_init_devices_late();
	err = xen_pcibk_xenbus_register();
	if (err)
		pcistub_exit();
#ifdef CONFIG_PCI_IOV
	else
		bus_register_notifier(&pci_bus_type, &pci_stub_nb);
#endif

	return err;
}

static void __exit xen_pcibk_cleanup(void)
{
#ifdef CONFIG_PCI_IOV
	bus_unregister_notifier(&pci_bus_type, &pci_stub_nb);
#endif
	xen_pcibk_xenbus_unregister();
	pcistub_exit();
}

module_init(xen_pcibk_init);
module_exit(xen_pcibk_cleanup);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:pci");