// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2002-2004, 2007 Greg Kroah-Hartman <greg@kroah.com>
 * (C) Copyright 2007 Novell Inc.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mempolicy.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/kexec.h>
#include <linux/of_device.h>
#include <linux/acpi.h>
#include "pci.h"
#include "pcie/portdrv.h"

struct pci_dynid {
	struct list_head node;
	struct pci_device_id id;
};

/**
 * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices
 * @drv: target pci driver
 * @vendor: PCI vendor ID
 * @device: PCI device ID
 * @subvendor: PCI subvendor ID
 * @subdevice: PCI subdevice ID
 * @class: PCI class
 * @class_mask: PCI class mask
 * @driver_data: private driver data
 *
 * Adds a new dynamic pci device ID to this driver and causes the
 * driver to probe for all devices again. @drv must have been
 * registered prior to calling this function.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int pci_add_dynid(struct pci_driver *drv,
		  unsigned int vendor, unsigned int device,
		  unsigned int subvendor, unsigned int subdevice,
		  unsigned int class, unsigned int class_mask,
		  unsigned long driver_data)
{
	struct pci_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.vendor = vendor;
	dynid->id.device = device;
	dynid->id.subvendor = subvendor;
	dynid->id.subdevice = subdevice;
	dynid->id.class = class;
	dynid->id.class_mask = class_mask;
	dynid->id.driver_data = driver_data;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}
EXPORT_SYMBOL_GPL(pci_add_dynid);

static void pci_free_dynids(struct pci_driver *drv)
{
	struct pci_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}

/**
 * new_id_store - sysfs frontend to pci_add_dynid()
 * @driver: target device driver
 * @buf: buffer for scanning device ID data
 * @count: input size
 *
 * Allow PCI IDs to be added to an existing driver via sysfs.
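 *
 * The expected buffer format is
 * "vendor device [subvendor [subdevice [class [class_mask [driver_data]]]]]"
 * with all values in hexadecimal; at least vendor and device are required.
 * For example (hypothetical driver name and IDs, shown only as an
 * illustration):
 *
 *	# echo "8086 10f5" > /sys/bus/pci/drivers/foo/new_id
 *
 * would add vendor 0x8086, device 0x10f5 to the "foo" driver's dynamic ID
 * list and re-probe currently unbound devices.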
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct pci_driver *pdrv = to_pci_driver(driver);
	const struct pci_device_id *ids = pdrv->id_table;
	u32 vendor, device, subvendor = PCI_ANY_ID,
		subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
	unsigned long driver_data = 0;
	int fields = 0;
	int retval = 0;

	fields = sscanf(buf, "%x %x %x %x %x %x %lx",
			&vendor, &device, &subvendor, &subdevice,
			&class, &class_mask, &driver_data);
	if (fields < 2)
		return -EINVAL;

	if (fields != 7) {
		struct pci_dev *pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
		if (!pdev)
			return -ENOMEM;

		pdev->vendor = vendor;
		pdev->device = device;
		pdev->subsystem_vendor = subvendor;
		pdev->subsystem_device = subdevice;
		pdev->class = class;

		if (pci_match_id(pdrv->id_table, pdev))
			retval = -EEXIST;

		kfree(pdev);

		if (retval)
			return retval;
	}

	/*
	 * Only accept driver_data values that match an existing id_table
	 * entry.
	 */
	if (ids) {
		retval = -EINVAL;
		while (ids->vendor || ids->subvendor || ids->class_mask) {
			if (driver_data == ids->driver_data) {
				retval = 0;
				break;
			}
			ids++;
		}
		if (retval)	/* No match */
			return retval;
	}

	retval = pci_add_dynid(pdrv, vendor, device, subvendor, subdevice,
			       class, class_mask, driver_data);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);

/**
 * remove_id_store - remove a PCI device ID from this driver
 * @driver: target device driver
 * @buf: buffer for scanning device ID data
 * @count: input size
 *
 * Removes a dynamic pci device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{
	struct pci_dynid *dynid, *n;
	struct pci_driver *pdrv = to_pci_driver(driver);
	u32 vendor, device, subvendor = PCI_ANY_ID,
		subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
	int fields = 0;
	size_t retval = -ENODEV;

	fields = sscanf(buf, "%x %x %x %x %x %x",
			&vendor, &device, &subvendor, &subdevice,
			&class, &class_mask);
	if (fields < 2)
		return -EINVAL;

	spin_lock(&pdrv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) {
		struct pci_device_id *id = &dynid->id;
		if ((id->vendor == vendor) &&
		    (id->device == device) &&
		    (subvendor == PCI_ANY_ID || id->subvendor == subvendor) &&
		    (subdevice == PCI_ANY_ID || id->subdevice == subdevice) &&
		    !((id->class ^ class) & class_mask)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = count;
			break;
		}
	}
	spin_unlock(&pdrv->dynids.lock);

	return retval;
}
static DRIVER_ATTR_WO(remove_id);

static struct attribute *pci_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(pci_drv);

/**
 * pci_match_id - See if a pci device matches a given pci_id table
 * @ids: array of PCI device id structures to search in
 * @dev: the PCI device structure to match against.
 *
 * Used by a driver to check whether a PCI device present in the
 * system is in its list of supported devices. Returns the matching
 * pci_device_id structure or %NULL if there is no match.
 *
 * Deprecated, don't use this as it will not catch any dynamic ids
 * that a driver might want to check for.
 */
const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
					 struct pci_dev *dev)
{
	if (ids) {
		while (ids->vendor || ids->subvendor || ids->class_mask) {
			if (pci_match_one_device(ids, dev))
				return ids;
			ids++;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_match_id);

static const struct pci_device_id pci_device_id_any = {
	.vendor = PCI_ANY_ID,
	.device = PCI_ANY_ID,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
};

/**
 * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
 * @drv: the PCI driver to match against
 * @dev: the PCI device structure to match against
 *
 * Used by a driver to check whether a PCI device present in the
 * system is in its list of supported devices. Returns the matching
 * pci_device_id structure or %NULL if there is no match.
 */
static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
						    struct pci_dev *dev)
{
	struct pci_dynid *dynid;
	const struct pci_device_id *found_id = NULL;

	/* When driver_override is set, only bind to the matching driver */
	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
		return NULL;

	/* Look at the dynamic ids first, before the static ones */
	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (pci_match_one_device(&dynid->id, dev)) {
			found_id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	if (!found_id)
		found_id = pci_match_id(drv->id_table, dev);

	/* driver_override will always match, send a dummy id */
	if (!found_id && dev->driver_override)
		found_id = &pci_device_id_any;

	return found_id;
}

struct drv_dev_and_id {
	struct pci_driver *drv;
	struct pci_dev *dev;
	const struct pci_device_id *id;
};

static long local_pci_probe(void *_ddi)
{
	struct drv_dev_and_id *ddi = _ddi;
	struct pci_dev *pci_dev = ddi->dev;
	struct pci_driver *pci_drv = ddi->drv;
	struct device *dev = &pci_dev->dev;
	int rc;

	/*
	 * Unbound PCI devices are always put in D0, regardless of
	 * runtime PM status. During probe, the device is set to
	 * active and the usage count is incremented. If the driver
	 * supports runtime PM, it should call pm_runtime_put_noidle(),
	 * or any other runtime PM helper function decrementing the usage
	 * count, in its probe routine and pm_runtime_get_noresume() in
	 * its remove routine.
	 */
	pm_runtime_get_sync(dev);
	pci_dev->driver = pci_drv;
	rc = pci_drv->probe(pci_dev, ddi->id);
	if (!rc)
		return rc;
	if (rc < 0) {
		pci_dev->driver = NULL;
		pm_runtime_put_sync(dev);
		return rc;
	}
	/*
	 * Probe function should return < 0 for failure, 0 for success.
	 * Treat values > 0 as success, but warn.
	 */
	dev_warn(dev, "Driver probe function unexpectedly returned %d\n", rc);
	return 0;
}

static bool pci_physfn_is_probed(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	return dev->is_virtfn && dev->physfn->is_probed;
#else
	return false;
#endif
}

static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
			  const struct pci_device_id *id)
{
	int error, node, cpu;
	struct drv_dev_and_id ddi = { drv, dev, id };

	/*
	 * Execute driver initialization on node where the device is
	 * attached. This way the driver likely allocates its local memory
	 * on the right node.
	 */
	node = dev_to_node(&dev->dev);
	dev->is_probed = 1;

	cpu_hotplug_disable();

	/*
	 * Prevent nesting work_on_cpu() for the case where a Virtual Function
	 * device is probed from work_on_cpu() of the Physical device.
	 */
	if (node < 0 || node >= MAX_NUMNODES || !node_online(node) ||
	    pci_physfn_is_probed(dev))
		cpu = nr_cpu_ids;
	else
		cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);

	if (cpu < nr_cpu_ids)
		error = work_on_cpu(cpu, local_pci_probe, &ddi);
	else
		error = local_pci_probe(&ddi);

	dev->is_probed = 0;
	cpu_hotplug_enable();
	return error;
}

/**
 * __pci_device_probe - check if a driver wants to claim a specific PCI device
 * @drv: driver to call to check if it wants the PCI device
 * @pci_dev: PCI device being probed
 *
 * returns 0 on success, else error.
 * side-effect: pci_dev->driver is set to drv when drv claims pci_dev.
 */
static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
{
	const struct pci_device_id *id;
	int error = 0;

	if (!pci_dev->driver && drv->probe) {
		error = -ENODEV;

		id = pci_match_device(drv, pci_dev);
		if (id)
			error = pci_call_probe(drv, pci_dev, id);
	}
	return error;
}

int __weak pcibios_alloc_irq(struct pci_dev *dev)
{
	return 0;
}

void __weak pcibios_free_irq(struct pci_dev *dev)
{
}

#ifdef CONFIG_PCI_IOV
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
	return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe ||
		pdev->driver_override);
}
#else
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
	return true;
}
#endif

static int pci_device_probe(struct device *dev)
{
	int error;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = to_pci_driver(dev->driver);

	if (!pci_device_can_probe(pci_dev))
		return -ENODEV;

	pci_assign_irq(pci_dev);

	error = pcibios_alloc_irq(pci_dev);
	if (error < 0)
		return error;

	pci_dev_get(pci_dev);
	error = __pci_device_probe(drv, pci_dev);
	if (error) {
		pcibios_free_irq(pci_dev);
		pci_dev_put(pci_dev);
	}

	return error;
}

static int pci_device_remove(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv) {
		if (drv->remove) {
			pm_runtime_get_sync(dev);
			drv->remove(pci_dev);
			pm_runtime_put_noidle(dev);
		}
		pcibios_free_irq(pci_dev);
		pci_dev->driver = NULL;
		pci_iov_remove(pci_dev);
	}

	/* Undo the runtime PM settings in local_pci_probe() */
	pm_runtime_put_sync(dev);

	/*
	 * If the device is still on, set the power state as "unknown",
	 * since it might change by the next time we load the driver.
	 */
	if (pci_dev->current_state == PCI_D0)
		pci_dev->current_state = PCI_UNKNOWN;

	/*
	 * We would love to complain here if pci_dev->is_enabled is set, that
	 * the driver should have called pci_disable_device(), but the
	 * unfortunate fact is there are too many odd BIOS and bridge setups
	 * that don't like drivers doing that all of the time.
	 * Oh well, we can dream of sane hardware when we sleep, no matter how
	 * horrible the crap we have to deal with is when we are awake...
	 */

	pci_dev_put(pci_dev);
	return 0;
}

static void pci_device_shutdown(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	pm_runtime_resume(dev);

	if (drv && drv->shutdown)
		drv->shutdown(pci_dev);

	/*
	 * If this is a kexec reboot, turn off Bus Master bit on the
	 * device to tell it to not continue to do DMA. Don't touch
	 * devices in D3cold or unknown states.
	 * If it is not a kexec reboot, firmware will hit the PCI
	 * devices with big hammer and stop their DMA any way.
	 */
	if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
		pci_clear_master(pci_dev);
}

#ifdef CONFIG_PM

/* Auxiliary functions used for system resume and run-time resume. */

/**
 * pci_restore_standard_config - restore standard config registers of PCI device
 * @pci_dev: PCI device to handle
 */
static int pci_restore_standard_config(struct pci_dev *pci_dev)
{
	pci_update_current_state(pci_dev, PCI_UNKNOWN);

	if (pci_dev->current_state != PCI_D0) {
		int error = pci_set_power_state(pci_dev, PCI_D0);
		if (error)
			return error;
	}

	pci_restore_state(pci_dev);
	pci_pme_restore(pci_dev);
	return 0;
}

#endif

#ifdef CONFIG_PM_SLEEP

static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
{
	pci_power_up(pci_dev);
	pci_restore_state(pci_dev);
	pci_pme_restore(pci_dev);
}

/*
 * Default "suspend" method for devices that have no driver provided suspend,
 * or not even a driver at all (second part).
 */
static void pci_pm_set_unknown_state(struct pci_dev *pci_dev)
{
	/*
	 * mark its power state as "unknown", since we don't know if
	 * e.g. the BIOS will change its device state when we suspend.
	 */
	if (pci_dev->current_state == PCI_D0)
		pci_dev->current_state = PCI_UNKNOWN;
}

/*
 * Default "resume" method for devices that have no driver provided resume,
 * or not even a driver at all (second part).
 */
static int pci_pm_reenable_device(struct pci_dev *pci_dev)
{
	int retval;

	/* if the device was enabled before suspend, reenable */
	retval = pci_reenable_device(pci_dev);
	/*
	 * if the device was busmaster before the suspend, make it busmaster
	 * again
	 */
	if (pci_dev->is_busmaster)
		pci_set_master(pci_dev);

	return retval;
}

static int pci_legacy_suspend(struct device *dev, pm_message_t state)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv && drv->suspend) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = drv->suspend(pci_dev, state);
		suspend_report_result(drv->suspend, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				  "PCI PM: Device state not saved by %pS\n",
				  drv->suspend);
		}
	}

	pci_fixup_device(pci_fixup_suspend, pci_dev);

	return 0;
}

static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv && drv->suspend_late) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = drv->suspend_late(pci_dev, state);
		suspend_report_result(drv->suspend_late, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				  "PCI PM: Device state not saved by %pS\n",
				  drv->suspend_late);
			goto Fixup;
		}
	}

	if (!pci_dev->state_saved)
		pci_save_state(pci_dev);

	pci_pm_set_unknown_state(pci_dev);

Fixup:
	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	return 0;
}

static int pci_legacy_resume_early(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	return drv && drv->resume_early ?
			drv->resume_early(pci_dev) : 0;
}

static int pci_legacy_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	pci_fixup_device(pci_fixup_resume, pci_dev);

	return drv && drv->resume ?
			drv->resume(pci_dev) : pci_pm_reenable_device(pci_dev);
}

/* Auxiliary functions used by the new power management framework */

static void pci_pm_default_resume(struct pci_dev *pci_dev)
{
	pci_fixup_device(pci_fixup_resume, pci_dev);
	pci_enable_wake(pci_dev, PCI_D0, false);
}

static void pci_pm_default_suspend(struct pci_dev *pci_dev)
{
	/* Disable non-bridge devices without PM support */
	if (!pci_has_subordinate(pci_dev))
		pci_disable_enabled_device(pci_dev);
}

static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
{
	struct pci_driver *drv = pci_dev->driver;
	bool ret = drv && (drv->suspend || drv->suspend_late || drv->resume
		|| drv->resume_early);

	/*
	 * Legacy PM support is used by default, so warn if the new framework is
	 * supported as well. Drivers are supposed to support either the
	 * former, or the latter, but not both at the same time.
	 */
	WARN(ret && drv->driver.pm, "driver %s device %04x:%04x\n",
	     drv->name, pci_dev->vendor, pci_dev->device);

	return ret;
}

/* New power management framework */

static int pci_pm_prepare(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	struct pci_dev *pci_dev = to_pci_dev(dev);

	if (drv && drv->pm && drv->pm->prepare) {
		int error = drv->pm->prepare(dev);
		if (error < 0)
			return error;

		if (!error && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
			return 0;
	}
	if (pci_dev_need_resume(pci_dev))
		return 0;

	/*
	 * The PME setting needs to be adjusted here in case the direct-complete
	 * optimization is used with respect to this device.
	 */
	pci_dev_adjust_pme(pci_dev);
	return 1;
}

static void pci_pm_complete(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	pci_dev_complete_resume(pci_dev);
	pm_generic_complete(dev);

	/* Resume device if platform firmware has put it in reset-power-on */
	if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) {
		pci_power_t pre_sleep_state = pci_dev->current_state;

		pci_refresh_power_state(pci_dev);
		/*
		 * On platforms with ACPI this check may also trigger for
		 * devices sharing power resources if one of those power
		 * resources has been activated as a result of a change of the
		 * power state of another device sharing it. However, in that
		 * case it is also better to resume the device, in general.
		 */
		if (pci_dev->current_state < pre_sleep_state)
			pm_request_resume(dev);
	}
}

#else /* !CONFIG_PM_SLEEP */

#define pci_pm_prepare	NULL
#define pci_pm_complete	NULL

#endif /* !CONFIG_PM_SLEEP */

#ifdef CONFIG_SUSPEND
static void pcie_pme_root_status_cleanup(struct pci_dev *pci_dev)
{
	/*
	 * Some BIOSes forget to clear Root PME Status bits after system
	 * wakeup, which breaks ACPI-based runtime wakeup on PCI Express.
	 * Clear those bits now just in case (shouldn't hurt).
	 */
	if (pci_is_pcie(pci_dev) &&
	    (pci_pcie_type(pci_dev) == PCI_EXP_TYPE_ROOT_PORT ||
	     pci_pcie_type(pci_dev) == PCI_EXP_TYPE_RC_EC))
		pcie_clear_root_pme_status(pci_dev);
}

static int pci_pm_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	pci_dev->skip_bus_pm = false;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_SUSPEND);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/*
	 * PCI devices suspended at run time may need to be resumed at this
	 * point, because in general it may be necessary to reconfigure them for
	 * system suspend. Namely, if the device is expected to wake up the
	 * system from the sleep state, it may have to be reconfigured for this
	 * purpose, or if the device is not expected to wake up the system from
	 * the sleep state, it should be prevented from signaling wakeup events
	 * going forward.
	 *
	 * Also if the driver of the device does not indicate that its system
	 * suspend callbacks can cope with runtime-suspended devices, it is
	 * better to resume the device from runtime suspend here.
	 */
	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
	    pci_dev_need_resume(pci_dev)) {
		pm_runtime_resume(dev);
		pci_dev->state_saved = false;
	} else {
		pci_dev_adjust_pme(pci_dev);
	}

	if (pm->suspend) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = pm->suspend(dev);
		suspend_report_result(pm->suspend, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				  "PCI PM: State of device not saved by %pS\n",
				  pm->suspend);
		}
	}

	return 0;
}

static int pci_pm_suspend_late(struct device *dev)
{
	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));

	return pm_generic_suspend_late(dev);
}

static int pci_pm_suspend_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (dev_pm_smart_suspend_and_suspended(dev)) {
		dev->power.may_skip_resume = true;
		return 0;
	}

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_SUSPEND);

	if (!pm) {
		pci_save_state(pci_dev);
		goto Fixup;
	}

	if (pm->suspend_noirq) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = pm->suspend_noirq(dev);
		suspend_report_result(pm->suspend_noirq, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				  "PCI PM: State of device not saved by %pS\n",
				  pm->suspend_noirq);
			goto Fixup;
		}
	}

	if (pci_dev->skip_bus_pm) {
		/*
		 * Either the device is a bridge with a child in D0 below it, or
		 * the function is running for the second time in a row without
		 * going through full resume, which is possible only during
		 * suspend-to-idle in a spurious wakeup case. The device should
		 * be in D0 at this point, but if it is a bridge, it may be
		 * necessary to save its state.
		 */
		if (!pci_dev->state_saved)
			pci_save_state(pci_dev);
	} else if (!pci_dev->state_saved) {
		pci_save_state(pci_dev);
		if (pci_power_manageable(pci_dev))
			pci_prepare_to_sleep(pci_dev);
	}

	dev_dbg(dev, "PCI PM: Suspend power state: %s\n",
		pci_power_name(pci_dev->current_state));

	if (pci_dev->current_state == PCI_D0) {
		pci_dev->skip_bus_pm = true;
		/*
		 * Per PCI PM r1.2, table 6-1, a bridge must be in D0 if any
		 * downstream device is in D0, so avoid changing the power state
		 * of the parent bridge by setting the skip_bus_pm flag for it.
		 */
		if (pci_dev->bus->self)
			pci_dev->bus->self->skip_bus_pm = true;
	}

	if (pci_dev->skip_bus_pm && pm_suspend_no_platform()) {
		dev_dbg(dev, "PCI PM: Skipped\n");
		goto Fixup;
	}

	pci_pm_set_unknown_state(pci_dev);

	/*
	 * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
	 * PCI COMMAND register isn't 0, the BIOS assumes that the controller
	 * hasn't been quiesced and tries to turn it off. If the controller
	 * is already in D3, this can hang or cause memory corruption.
	 *
	 * Since the value of the COMMAND register doesn't matter once the
	 * device has been suspended, we can safely set it to 0 here.
	 */
	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		pci_write_config_word(pci_dev, PCI_COMMAND, 0);

Fixup:
	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	/*
	 * If the target system sleep state is suspend-to-idle, it is sufficient
	 * to check whether or not the device's wakeup settings are good for
	 * runtime PM. Otherwise, the pm_resume_via_firmware() check will cause
	 * pci_pm_complete() to take care of fixing up the device's state
	 * anyway, if need be.
	 */
	dev->power.may_skip_resume = device_may_wakeup(dev) ||
					!device_can_wakeup(dev);

	return 0;
}

static int pci_pm_resume_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	if (dev_pm_may_skip_resume(dev))
		return 0;

	/*
	 * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
	 * during system suspend, so update their runtime PM status to "active"
	 * as they are going to be put into D0 shortly.
	 */
	if (dev_pm_smart_suspend_and_suspended(dev))
		pm_runtime_set_active(dev);

	/*
	 * In the suspend-to-idle case, devices left in D0 during suspend will
	 * stay in D0, so it is not necessary to restore or update their
	 * configuration here and attempting to put them into D0 again is
	 * pointless, so avoid doing that.
	 */
	if (!(pci_dev->skip_bus_pm && pm_suspend_no_platform()))
		pci_pm_default_resume_early(pci_dev);

	pci_fixup_device(pci_fixup_resume_early, pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume_early(dev);

	pcie_pme_root_status_cleanup(pci_dev);

	if (drv && drv->pm && drv->pm->resume_noirq)
		error = drv->pm->resume_noirq(dev);

	return error;
}

static int pci_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	/*
	 * This is necessary for the suspend error path in which resume is
	 * called without restoring the standard config registers of the device.
	 */
	if (pci_dev->state_saved)
		pci_restore_standard_config(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	pci_pm_default_resume(pci_dev);

	if (pm) {
		if (pm->resume)
			error = pm->resume(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	return error;
}

#else /* !CONFIG_SUSPEND */

#define pci_pm_suspend		NULL
#define pci_pm_suspend_late	NULL
#define pci_pm_suspend_noirq	NULL
#define pci_pm_resume		NULL
#define pci_pm_resume_noirq	NULL

#endif /* !CONFIG_SUSPEND */

#ifdef CONFIG_HIBERNATE_CALLBACKS

/*
 * pcibios_pm_ops - provide arch-specific hooks when a PCI device is doing
 * a hibernate transition
 */
struct dev_pm_ops __weak pcibios_pm_ops;

static int pci_pm_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_FREEZE);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/*
	 * Resume all runtime-suspended devices before creating a snapshot
	 * image of system memory, because the restore kernel generally cannot
	 * be expected to always handle them consistently and they need to be
	 * put into the runtime-active metastate during system resume anyway,
	 * so it is better to ensure that the state saved in the image will be
	 * always consistent with that.
	 */
	pm_runtime_resume(dev);
	pci_dev->state_saved = false;

	if (pm->freeze) {
		int error;

		error = pm->freeze(dev);
		suspend_report_result(pm->freeze, error);
		if (error)
			return error;
	}

	return 0;
}

static int pci_pm_freeze_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_FREEZE);

	if (drv && drv->pm && drv->pm->freeze_noirq) {
		int error;

		error = drv->pm->freeze_noirq(dev);
		suspend_report_result(drv->pm->freeze_noirq, error);
		if (error)
			return error;
	}

	if (!pci_dev->state_saved)
		pci_save_state(pci_dev);

	pci_pm_set_unknown_state(pci_dev);

	if (pcibios_pm_ops.freeze_noirq)
		return pcibios_pm_ops.freeze_noirq(dev);

	return 0;
}

static int pci_pm_thaw_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	if (pcibios_pm_ops.thaw_noirq) {
		error = pcibios_pm_ops.thaw_noirq(dev);
		if (error)
			return error;
	}

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume_early(dev);

	/*
	 * pci_restore_state() requires the device to be in D0 (because of MSI
	 * restoration among other things), so force it into D0 in case the
	 * driver's "freeze" callbacks put it into a low-power state directly.
	 */
	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	if (drv && drv->pm && drv->pm->thaw_noirq)
		error = drv->pm->thaw_noirq(dev);

	return error;
}

static int pci_pm_thaw(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	if (pm) {
		if (pm->thaw)
			error = pm->thaw(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	pci_dev->state_saved = false;

	return error;
}

static int pci_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_HIBERNATE);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/* The reason to do that is the same as in pci_pm_suspend(). */
	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
	    pci_dev_need_resume(pci_dev)) {
		pm_runtime_resume(dev);
		pci_dev->state_saved = false;
	} else {
		pci_dev_adjust_pme(pci_dev);
	}

	if (pm->poweroff) {
		int error;

		error = pm->poweroff(dev);
		suspend_report_result(pm->poweroff, error);
		if (error)
			return error;
	}

	return 0;
}

static int pci_pm_poweroff_late(struct device *dev)
{
	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));

	return pm_generic_poweroff_late(dev);
}

static int pci_pm_poweroff_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;

	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	if (pci_has_legacy_pm_support(to_pci_dev(dev)))
		return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);

	if (!drv || !drv->pm) {
		pci_fixup_device(pci_fixup_suspend_late, pci_dev);
		return 0;
	}

	if (drv->pm->poweroff_noirq) {
		int error;

		error = drv->pm->poweroff_noirq(dev);
		suspend_report_result(drv->pm->poweroff_noirq, error);
		if (error)
			return error;
	}

	if (!pci_dev->state_saved && !pci_has_subordinate(pci_dev))
		pci_prepare_to_sleep(pci_dev);

	/*
	 * The reason for doing this here is the same as for the analogous code
	 * in pci_pm_suspend_noirq().
	 */
	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		pci_write_config_word(pci_dev, PCI_COMMAND, 0);

	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	if (pcibios_pm_ops.poweroff_noirq)
		return pcibios_pm_ops.poweroff_noirq(dev);

	return 0;
}

static int pci_pm_restore_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	if (pcibios_pm_ops.restore_noirq) {
		error = pcibios_pm_ops.restore_noirq(dev);
		if (error)
			return error;
	}

	pci_pm_default_resume_early(pci_dev);
	pci_fixup_device(pci_fixup_resume_early, pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume_early(dev);

	if (drv && drv->pm && drv->pm->restore_noirq)
		error = drv->pm->restore_noirq(dev);

	return error;
}

static int pci_pm_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	/*
	 * This is necessary for the hibernation error path in which restore is
	 * called without restoring the standard config registers of the device.
	 */
	if (pci_dev->state_saved)
		pci_restore_standard_config(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	pci_pm_default_resume(pci_dev);

	if (pm) {
		if (pm->restore)
			error = pm->restore(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	return error;
}

#else /* !CONFIG_HIBERNATE_CALLBACKS */

#define pci_pm_freeze		NULL
#define pci_pm_freeze_noirq	NULL
#define pci_pm_thaw		NULL
#define pci_pm_thaw_noirq	NULL
#define pci_pm_poweroff		NULL
#define pci_pm_poweroff_late	NULL
#define pci_pm_poweroff_noirq	NULL
#define pci_pm_restore		NULL
#define pci_pm_restore_noirq	NULL

#endif /* !CONFIG_HIBERNATE_CALLBACKS */

#ifdef CONFIG_PM

static int pci_pm_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	pci_power_t prev = pci_dev->current_state;
	int error;

	/*
	 * If pci_dev->driver is not set (unbound), we leave the device in D0,
	 * but it may go to D3cold when the bridge above it runtime suspends.
	 * Save its config space in case that happens.
	 */
	if (!pci_dev->driver) {
		pci_save_state(pci_dev);
		return 0;
	}

	pci_dev->state_saved = false;
	if (pm && pm->runtime_suspend) {
		error = pm->runtime_suspend(dev);
		/*
		 * -EBUSY and -EAGAIN are used to request the runtime PM core
		 * to schedule a new suspend, so log the event only with debug
		 * log level.
		 */
		if (error == -EBUSY || error == -EAGAIN) {
			dev_dbg(dev, "can't suspend now (%ps returned %d)\n",
				pm->runtime_suspend, error);
			return error;
		} else if (error) {
			dev_err(dev, "can't suspend (%ps returned %d)\n",
				pm->runtime_suspend, error);
			return error;
		}
	}

	pci_fixup_device(pci_fixup_suspend, pci_dev);

	if (pm && pm->runtime_suspend
	    && !pci_dev->state_saved && pci_dev->current_state != PCI_D0
	    && pci_dev->current_state != PCI_UNKNOWN) {
		WARN_ONCE(pci_dev->current_state != prev,
			  "PCI PM: State of device not saved by %pS\n",
			  pm->runtime_suspend);
		return 0;
	}

	if (!pci_dev->state_saved) {
		pci_save_state(pci_dev);
		pci_finish_runtime_suspend(pci_dev);
	}

	return 0;
}

static int pci_pm_runtime_resume(struct device *dev)
{
	int rc = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	/*
	 * Restoring config space is necessary even if the device is not bound
	 * to a driver because although we left it in D0, it may have gone to
	 * D3cold when the bridge above it runtime suspended.
	 */
	pci_restore_standard_config(pci_dev);

	if (!pci_dev->driver)
		return 0;

	pci_fixup_device(pci_fixup_resume_early, pci_dev);
	pci_enable_wake(pci_dev, PCI_D0, false);
	pci_fixup_device(pci_fixup_resume, pci_dev);

	if (pm && pm->runtime_resume)
		rc = pm->runtime_resume(dev);

	pci_dev->runtime_d3cold = false;

	return rc;
}

static int pci_pm_runtime_idle(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret = 0;

	/*
	 * If pci_dev->driver is not set (unbound), the device should
	 * always remain in D0 regardless of the runtime PM status
	 */
	if (!pci_dev->driver)
		return 0;

	if (!pm)
		return -ENOSYS;

	if (pm->runtime_idle)
		ret = pm->runtime_idle(dev);

	return ret;
}

static const struct dev_pm_ops pci_dev_pm_ops = {
	.prepare = pci_pm_prepare,
	.complete = pci_pm_complete,
	.suspend = pci_pm_suspend,
	.suspend_late = pci_pm_suspend_late,
	.resume = pci_pm_resume,
	.freeze = pci_pm_freeze,
	.thaw = pci_pm_thaw,
	.poweroff = pci_pm_poweroff,
	.poweroff_late = pci_pm_poweroff_late,
	.restore = pci_pm_restore,
	.suspend_noirq = pci_pm_suspend_noirq,
	.resume_noirq = pci_pm_resume_noirq,
	.freeze_noirq = pci_pm_freeze_noirq,
	.thaw_noirq = pci_pm_thaw_noirq,
	.poweroff_noirq = pci_pm_poweroff_noirq,
	.restore_noirq = pci_pm_restore_noirq,
	.runtime_suspend = pci_pm_runtime_suspend,
	.runtime_resume = pci_pm_runtime_resume,
	.runtime_idle = pci_pm_runtime_idle,
};

#define PCI_PM_OPS_PTR	(&pci_dev_pm_ops)

#else /* !CONFIG_PM */

#define pci_pm_runtime_suspend	NULL
#define pci_pm_runtime_resume	NULL
#define pci_pm_runtime_idle	NULL

#define PCI_PM_OPS_PTR	NULL

#endif /* !CONFIG_PM */

/**
 * __pci_register_driver - register a new pci driver
 * @drv: the driver structure to register
 * @owner: owner module of drv
 * @mod_name: module name string
 *
 * Adds the driver structure to the list of registered drivers.
 * Returns a negative value on error, otherwise 0.
 * If no error occurred, the driver remains registered even if
 * no device was claimed during registration.
 */
int __pci_register_driver(struct pci_driver *drv, struct module *owner,
			  const char *mod_name)
{
	/* initialize common driver fields */
	drv->driver.name = drv->name;
	drv->driver.bus = &pci_bus_type;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;
	drv->driver.groups = drv->groups;

	spin_lock_init(&drv->dynids.lock);
	INIT_LIST_HEAD(&drv->dynids.list);

	/* register with core */
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL(__pci_register_driver);
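
/*
 * Illustrative sketch of how a driver typically hooks into this interface
 * (the "foo" names and IDs below are hypothetical, not taken from any real
 * driver):
 *
 *	static const struct pci_device_id foo_ids[] = {
 *		{ PCI_DEVICE(0x1234, 0x5678) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, foo_ids);
 *
 *	static struct pci_driver foo_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_pci_driver(foo_driver);
 *
 * module_pci_driver() generates the module init/exit glue that calls
 * pci_register_driver() (a wrapper around __pci_register_driver()) and
 * pci_unregister_driver() below.
 */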

/**
 * pci_unregister_driver - unregister a pci driver
 * @drv: the driver structure to unregister
 *
 * Deletes the driver structure from the list of registered PCI drivers,
 * gives it a chance to clean up by calling its remove() function for
 * each device it was responsible for, and marks those devices as
 * driverless.
 */

void pci_unregister_driver(struct pci_driver *drv)
{
	driver_unregister(&drv->driver);
	pci_free_dynids(drv);
}
EXPORT_SYMBOL(pci_unregister_driver);

static struct pci_driver pci_compat_driver = {
	.name = "compat"
};

/**
 * pci_dev_driver - get the pci_driver of a device
 * @dev: the device to query
 *
 * Returns the appropriate pci_driver structure or %NULL if there is no
 * registered driver for the device.
 */
struct pci_driver *pci_dev_driver(const struct pci_dev *dev)
{
	if (dev->driver)
		return dev->driver;
	else {
		int i;
		for (i = 0; i <= PCI_ROM_RESOURCE; i++)
			if (dev->resource[i].flags & IORESOURCE_BUSY)
				return &pci_compat_driver;
	}
	return NULL;
}
EXPORT_SYMBOL(pci_dev_driver);

/**
 * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure
 * @dev: the PCI device structure to match against
 * @drv: the device driver to search for matching PCI device id structures
 *
 * Used by the driver core to check whether a PCI device present in the
 * system matches one of the IDs supported by @drv. Returns 1 if a matching
 * pci_device_id is found, 0 otherwise.
 */
static int pci_bus_match(struct device *dev, struct device_driver *drv)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *pci_drv;
	const struct pci_device_id *found_id;

	if (!pci_dev->match_driver)
		return 0;

	pci_drv = to_pci_driver(drv);
	found_id = pci_match_device(pci_drv, pci_dev);
	if (found_id)
		return 1;

	return 0;
}

/**
 * pci_dev_get - increments the reference count of the pci device structure
 * @dev: the device being referenced
 *
 * Each live reference to a device should be refcounted.
 *
 * Drivers for PCI devices should normally record such references in
 * their probe() methods, when they bind to a device, and release
 * them by calling pci_dev_put(), in their disconnect() methods.
 *
 * A pointer to the device with the incremented reference counter is returned.
 */
struct pci_dev *pci_dev_get(struct pci_dev *dev)
{
	if (dev)
		get_device(&dev->dev);
	return dev;
}
EXPORT_SYMBOL(pci_dev_get);

/**
 * pci_dev_put - release a use of the pci device structure
 * @dev: device that's been disconnected
 *
 * Must be called when a user of a device is finished with it. When the last
 * user of the device calls this function, the memory of the device is freed.
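 *
 * A purely illustrative lookup/use/release pattern (the IDs are made up):
 *
 *	struct pci_dev *pdev = pci_get_device(0x1234, 0x5678, NULL);
 *
 *	if (pdev) {
 *		... use pdev ...
 *		pci_dev_put(pdev);
 *	}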
 */
void pci_dev_put(struct pci_dev *dev)
{
	if (dev)
		put_device(&dev->dev);
}
EXPORT_SYMBOL(pci_dev_put);

static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct pci_dev *pdev;

	if (!dev)
		return -ENODEV;

	pdev = to_pci_dev(dev);

	if (add_uevent_var(env, "PCI_CLASS=%04X", pdev->class))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor,
			   pdev->subsystem_device))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
		return -ENOMEM;

	if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
			   pdev->vendor, pdev->device,
			   pdev->subsystem_vendor, pdev->subsystem_device,
			   (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
			   (u8)(pdev->class)))
		return -ENOMEM;

	return 0;
}

#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
/**
 * pci_uevent_ers - emit a uevent during recovery path of PCI device
 * @pdev: PCI device undergoing error recovery
 * @err_type: type of error event
 */
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type)
{
	int idx = 0;
	char *envp[3];

	switch (err_type) {
	case PCI_ERS_RESULT_NONE:
	case PCI_ERS_RESULT_CAN_RECOVER:
		envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=0";
		break;
	case PCI_ERS_RESULT_RECOVERED:
		envp[idx++] = "ERROR_EVENT=SUCCESSFUL_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=1";
		break;
	case PCI_ERS_RESULT_DISCONNECT:
		envp[idx++] = "ERROR_EVENT=FAILED_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=0";
		break;
	default:
		break;
	}

	if (idx > 0) {
		envp[idx++] = NULL;
		kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp);
	}
}
#endif

static int pci_bus_num_vf(struct device *dev)
{
	return pci_num_vf(to_pci_dev(dev));
}

/**
 * pci_dma_configure - Setup DMA configuration
 * @dev: ptr to dev structure
 *
 * Function to update PCI device's DMA configuration using the same
 * info from the OF node or ACPI node of host bridge's parent (if any).
 */
static int pci_dma_configure(struct device *dev)
{
	struct device *bridge;
	int ret = 0;

	bridge = pci_get_host_bridge_device(to_pci_dev(dev));

	if (IS_ENABLED(CONFIG_OF) && bridge->parent &&
	    bridge->parent->of_node) {
		ret = of_dma_configure(dev, bridge->parent->of_node, true);
	} else if (has_acpi_companion(bridge)) {
		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);

		ret = acpi_dma_configure(dev, acpi_get_dma_attr(adev));
	}

	pci_put_host_bridge_device(bridge);
	return ret;
}

struct bus_type pci_bus_type = {
	.name		= "pci",
	.match		= pci_bus_match,
	.uevent		= pci_uevent,
	.probe		= pci_device_probe,
	.remove		= pci_device_remove,
	.shutdown	= pci_device_shutdown,
	.dev_groups	= pci_dev_groups,
	.bus_groups	= pci_bus_groups,
	.drv_groups	= pci_drv_groups,
	.pm		= PCI_PM_OPS_PTR,
	.num_vf		= pci_bus_num_vf,
	.dma_configure	= pci_dma_configure,
};
EXPORT_SYMBOL(pci_bus_type);

#ifdef CONFIG_PCIEPORTBUS
static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
{
	struct pcie_device *pciedev;
	struct pcie_port_service_driver *driver;

	if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
		return 0;

	pciedev = to_pcie_device(dev);
	driver = to_service_driver(drv);

	if (driver->service != pciedev->service)
		return 0;

	if (driver->port_type != PCIE_ANY_PORT &&
	    driver->port_type != pci_pcie_type(pciedev->port))
		return 0;

	return 1;
}

struct bus_type pcie_port_bus_type = {
	.name		= "pci_express",
	.match		= pcie_port_bus_match,
};
EXPORT_SYMBOL_GPL(pcie_port_bus_type);
#endif

static int __init pci_driver_init(void)
{
	int ret;

	ret = bus_register(&pci_bus_type);
	if (ret)
		return ret;

#ifdef CONFIG_PCIEPORTBUS
	ret = bus_register(&pcie_port_bus_type);
	if (ret)
		return ret;
#endif
	dma_debug_add_bus(&pci_bus_type);
	return 0;
}
postcore_initcall(pci_driver_init);