// SPDX-License-Identifier: GPL-2.0
/*
 * PCI support in ACPI
 *
 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
 * Copyright (C) 2004 Intel Corp.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/pci_hotplug.h>
#include <linux/module.h>
#include <linux/pci-aspm.h>
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include "pci.h"

/*
 * The GUID is defined in the PCI Firmware Specification available here:
 * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
 */
const guid_t pci_acpi_dsm_guid =
        GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
                  0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);

#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
{
        struct device *dev = &adev->dev;
        struct resource_entry *entry;
        struct list_head list;
        unsigned long flags;
        int ret;

        INIT_LIST_HEAD(&list);
        flags = IORESOURCE_MEM;
        ret = acpi_dev_get_resources(adev, &list,
                                     acpi_dev_filter_resource_type_cb,
                                     (void *) flags);
        if (ret < 0) {
                dev_err(dev, "failed to parse _CRS method, error code %d\n",
                        ret);
                return ret;
        }

        if (ret == 0) {
                dev_err(dev, "no IO and memory resources present in _CRS\n");
                return -EINVAL;
        }

        entry = list_first_entry(&list, struct resource_entry, node);
        *res = *entry->res;
        acpi_dev_free_resource_list(&list);
        return 0;
}

static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
                                 void **retval)
{
        u16 *segment = context;
        unsigned long long uid;
        acpi_status status;

        status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
        if (ACPI_FAILURE(status) || uid != *segment)
                return AE_CTRL_DEPTH;

        *(acpi_handle *)retval = handle;
        return AE_CTRL_TERMINATE;
}

int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
                          struct resource *res)
{
        struct acpi_device *adev;
        acpi_status status;
        acpi_handle handle;
        int ret;

        status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
        if (ACPI_FAILURE(status)) {
                dev_err(dev, "can't find _HID %s device to locate resources\n",
                        hid);
                return -ENODEV;
        }

        ret = acpi_bus_get_device(handle, &adev);
        if (ret)
                return ret;

        ret = acpi_get_rc_addr(adev, res);
        if (ret) {
                dev_err(dev, "can't get resource from %s\n",
                        dev_name(&adev->dev));
                return ret;
        }

        return 0;
}
#endif

phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
{
        acpi_status status = AE_NOT_EXIST;
        unsigned long long mcfg_addr;

        if (handle)
                status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
                                               NULL, &mcfg_addr);
        if (ACPI_FAILURE(status))
                return 0;

        return (phys_addr_t)mcfg_addr;
}
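
/*
 * Per the ACPI spec, an _HPX Type 0 ("PCI Setting Record") is a package
 * of six integers: { type (0), revision, cache-line size, latency timer,
 * enable SERR, enable PERR }.  The decoders below validate the package
 * shape before copying the fields into struct hotplug_params.
 */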

static acpi_status decode_type0_hpx_record(union acpi_object *record,
                                           struct hotplug_params *hpx)
{
        int i;
        union acpi_object *fields = record->package.elements;
        u32 revision = fields[1].integer.value;

        switch (revision) {
        case 1:
                if (record->package.count != 6)
                        return AE_ERROR;
                for (i = 2; i < 6; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;
                hpx->t0 = &hpx->type0_data;
                hpx->t0->revision = revision;
                hpx->t0->cache_line_size = fields[2].integer.value;
                hpx->t0->latency_timer = fields[3].integer.value;
                hpx->t0->enable_serr = fields[4].integer.value;
                hpx->t0->enable_perr = fields[5].integer.value;
                break;
        default:
                printk(KERN_WARNING
                       "%s: Type 0 Revision %d record not supported\n",
                       __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}

static acpi_status decode_type1_hpx_record(union acpi_object *record,
                                           struct hotplug_params *hpx)
{
        int i;
        union acpi_object *fields = record->package.elements;
        u32 revision = fields[1].integer.value;

        switch (revision) {
        case 1:
                if (record->package.count != 5)
                        return AE_ERROR;
                for (i = 2; i < 5; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;
                hpx->t1 = &hpx->type1_data;
                hpx->t1->revision = revision;
                hpx->t1->max_mem_read = fields[2].integer.value;
                hpx->t1->avg_max_split = fields[3].integer.value;
                hpx->t1->tot_max_split = fields[4].integer.value;
                break;
        default:
                printk(KERN_WARNING
                       "%s: Type 1 Revision %d record not supported\n",
                       __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}

static acpi_status decode_type2_hpx_record(union acpi_object *record,
                                           struct hotplug_params *hpx)
{
        int i;
        union acpi_object *fields = record->package.elements;
        u32 revision = fields[1].integer.value;

        switch (revision) {
        case 1:
                if (record->package.count != 18)
                        return AE_ERROR;
                for (i = 2; i < 18; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;
                hpx->t2 = &hpx->type2_data;
                hpx->t2->revision = revision;
                hpx->t2->unc_err_mask_and = fields[2].integer.value;
                hpx->t2->unc_err_mask_or = fields[3].integer.value;
                hpx->t2->unc_err_sever_and = fields[4].integer.value;
                hpx->t2->unc_err_sever_or = fields[5].integer.value;
                hpx->t2->cor_err_mask_and = fields[6].integer.value;
                hpx->t2->cor_err_mask_or = fields[7].integer.value;
                hpx->t2->adv_err_cap_and = fields[8].integer.value;
                hpx->t2->adv_err_cap_or = fields[9].integer.value;
                hpx->t2->pci_exp_devctl_and = fields[10].integer.value;
                hpx->t2->pci_exp_devctl_or = fields[11].integer.value;
                hpx->t2->pci_exp_lnkctl_and = fields[12].integer.value;
                hpx->t2->pci_exp_lnkctl_or = fields[13].integer.value;
                hpx->t2->sec_unc_err_sever_and = fields[14].integer.value;
                hpx->t2->sec_unc_err_sever_or = fields[15].integer.value;
                hpx->t2->sec_unc_err_mask_and = fields[16].integer.value;
                hpx->t2->sec_unc_err_mask_or = fields[17].integer.value;
                break;
        default:
                printk(KERN_WARNING
                       "%s: Type 2 Revision %d record not supported\n",
                       __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}
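
/*
 * A firmware _HPX method returns a package of the records decoded above.
 * Illustrative ASL shape only (the values are made up): a single Type 0
 * record at revision 1 with cache-line size 8, latency timer 0x40, SERR
 * enabled and PERR disabled would look like
 *
 *      Method (_HPX, 0, NotSerialized) {
 *          Return (Package () {
 *              Package () { 0x00, 0x01, 0x08, 0x40, 0x01, 0x00 }
 *          })
 *      }
 *
 * acpi_run_hpx() below walks the outer package and dispatches each record
 * to the decoder matching its leading type field.
 */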

static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
{
        acpi_status status;
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *package, *record, *fields;
        u32 type;
        int i;

        /* Clear the return buffer with zeros */
        memset(hpx, 0, sizeof(struct hotplug_params));

        status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
        if (ACPI_FAILURE(status))
                return status;

        package = (union acpi_object *)buffer.pointer;
        if (package->type != ACPI_TYPE_PACKAGE) {
                status = AE_ERROR;
                goto exit;
        }

        for (i = 0; i < package->package.count; i++) {
                record = &package->package.elements[i];
                if (record->type != ACPI_TYPE_PACKAGE) {
                        status = AE_ERROR;
                        goto exit;
                }

                fields = record->package.elements;
                if (fields[0].type != ACPI_TYPE_INTEGER ||
                    fields[1].type != ACPI_TYPE_INTEGER) {
                        status = AE_ERROR;
                        goto exit;
                }

                type = fields[0].integer.value;
                switch (type) {
                case 0:
                        status = decode_type0_hpx_record(record, hpx);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        break;
                case 1:
                        status = decode_type1_hpx_record(record, hpx);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        break;
                case 2:
                        status = decode_type2_hpx_record(record, hpx);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        break;
                default:
                        printk(KERN_ERR "%s: Type %d record not supported\n",
                               __func__, type);
                        status = AE_ERROR;
                        goto exit;
                }
        }
exit:
        kfree(buffer.pointer);
        return status;
}

static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
{
        acpi_status status;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *package, *fields;
        int i;

        memset(hpp, 0, sizeof(struct hotplug_params));

        status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
        if (ACPI_FAILURE(status))
                return status;

        package = (union acpi_object *) buffer.pointer;
        if (package->type != ACPI_TYPE_PACKAGE ||
            package->package.count != 4) {
                status = AE_ERROR;
                goto exit;
        }

        fields = package->package.elements;
        for (i = 0; i < 4; i++) {
                if (fields[i].type != ACPI_TYPE_INTEGER) {
                        status = AE_ERROR;
                        goto exit;
                }
        }

        hpp->t0 = &hpp->type0_data;
        hpp->t0->revision = 1;
        hpp->t0->cache_line_size = fields[0].integer.value;
        hpp->t0->latency_timer = fields[1].integer.value;
        hpp->t0->enable_serr = fields[2].integer.value;
        hpp->t0->enable_perr = fields[3].integer.value;

exit:
        kfree(buffer.pointer);
        return status;
}

/**
 * pci_get_hp_params - look up hotplug parameters for a PCI device
 * @dev: the pci_dev for which we want parameters
 * @hpp: allocated by the caller
 */
int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
{
        acpi_status status;
        acpi_handle handle, phandle;
        struct pci_bus *pbus;

        if (acpi_pci_disabled)
                return -ENODEV;

        handle = NULL;
        for (pbus = dev->bus; pbus; pbus = pbus->parent) {
                handle = acpi_pci_get_bridge_handle(pbus);
                if (handle)
                        break;
        }

        /*
         * _HPP settings apply to all child buses, until another _HPP is
         * encountered. If we don't find an _HPP for the input pci dev,
         * look for it in the parent device scope since that would apply to
         * this pci dev.
         */
        while (handle) {
                status = acpi_run_hpx(handle, hpp);
                if (ACPI_SUCCESS(status))
                        return 0;
                status = acpi_run_hpp(handle, hpp);
                if (ACPI_SUCCESS(status))
                        return 0;
                if (acpi_is_root_bridge(handle))
                        break;
                status = acpi_get_parent(handle, &phandle);
                if (ACPI_FAILURE(status))
                        break;
                handle = phandle;
        }
        return -ENODEV;
}
EXPORT_SYMBOL_GPL(pci_get_hp_params);
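
/*
 * Illustrative caller sketch: PCI enumeration does something along these
 * lines when configuring a newly added device (program_hpp_type0() is a
 * stand-in for whatever the caller does with the decoded parameters):
 *
 *      struct hotplug_params hpp;
 *
 *      if (!pci_get_hp_params(dev, &hpp) && hpp.t0)
 *              program_hpp_type0(dev, hpp.t0);
 */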

/**
 * pciehp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native PCIe hotplug
 * driver.
 */
bool pciehp_is_native(struct pci_dev *bridge)
{
        const struct pci_host_bridge *host;
        u32 slot_cap;

        if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
                return false;

        pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
        if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
                return false;

        if (pcie_ports_native)
                return true;

        host = pci_find_host_bridge(bridge->bus);
        return host->native_pcie_hotplug;
}

/**
 * shpchp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native SHPC hotplug
 * driver.
 */
bool shpchp_is_native(struct pci_dev *bridge)
{
        return bridge->shpc_managed;
}

/**
 * pci_acpi_wake_bus - Root bus wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
{
        struct acpi_device *adev;
        struct acpi_pci_root *root;

        adev = container_of(context, struct acpi_device, wakeup.context);
        root = acpi_driver_data(adev);
        pci_pme_wakeup_bus(root->bus);
}

/**
 * pci_acpi_wake_dev - PCI device wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
{
        struct pci_dev *pci_dev;

        pci_dev = to_pci_dev(context->dev);

        if (pci_dev->pme_poll)
                pci_dev->pme_poll = false;

        if (pci_dev->current_state == PCI_D3cold) {
                pci_wakeup_event(pci_dev);
                pm_request_resume(&pci_dev->dev);
                return;
        }

        /* Clear PME Status if set. */
        if (pci_dev->pme_support)
                pci_check_pme_status(pci_dev);

        pci_wakeup_event(pci_dev);
        pm_request_resume(&pci_dev->dev);

        pci_pme_wakeup_bus(pci_dev->subordinate);
}

/**
 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
 * @dev: PCI root bridge ACPI device.
 */
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
{
        return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
}

/**
 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
 * @dev: ACPI device to add the notifier for.
 * @pci_dev: PCI device to check for the PME status if an event is signaled.
 */
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
                                     struct pci_dev *pci_dev)
{
        return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}

/*
 * _SxD returns the D-state with the highest power
 * (lowest D-state number) supported in the S-state "x".
 *
 * If the device does not have a _PRW
 * (Power Resources for Wake) supporting system wakeup from "x",
 * then the OS is free to choose a lower power (higher number
 * D-state) than the return value from _SxD.
 *
 * But if _PRW is enabled at S-state "x", the OS
 * must not choose a power lower than _SxD --
 * unless the device has an _SxW method specifying
 * the lowest power (highest D-state number) the device
 * may enter while still able to wake the system.
 *
 * I.e., depending on global OS policy:
 *
 * if (_PRW at S-state x)
 *      choose from highest power _SxD to lowest power _SxW
 * else // no _PRW at S-state x
 *      choose highest power _SxD or any lower power
 */
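
/*
 * Worked example (illustrative values): if for S3 a device reports
 * _S3D = 2 and exposes a wake-capable _PRW together with _S3W = 3, the
 * OS may pick D2 or D3hot while the system is in S3; without the _PRW
 * it could pick anything from D2 down to D3cold.
 */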

static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
        int acpi_state, d_max;

        if (pdev->no_d3cold)
                d_max = ACPI_STATE_D3_HOT;
        else
                d_max = ACPI_STATE_D3_COLD;
        acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
        if (acpi_state < 0)
                return PCI_POWER_ERROR;

        switch (acpi_state) {
        case ACPI_STATE_D0:
                return PCI_D0;
        case ACPI_STATE_D1:
                return PCI_D1;
        case ACPI_STATE_D2:
                return PCI_D2;
        case ACPI_STATE_D3_HOT:
                return PCI_D3hot;
        case ACPI_STATE_D3_COLD:
                return PCI_D3cold;
        }
        return PCI_POWER_ERROR;
}

static struct acpi_device *acpi_pci_find_companion(struct device *dev);

static bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
        const struct fwnode_handle *fwnode;
        struct acpi_device *adev;
        struct pci_dev *root;
        u8 val;

        if (!dev->is_hotplug_bridge)
                return false;

        /*
         * Look for a special _DSD property for the root port and if it
         * is set we know the hierarchy behind it supports D3 just fine.
         */
        root = pci_find_pcie_root_port(dev);
        if (!root)
                return false;

        adev = ACPI_COMPANION(&root->dev);
        if (root == dev) {
                /*
                 * It is possible that the ACPI companion is not yet bound
                 * for the root port so look it up manually here.
                 */
                if (!adev && !pci_dev_is_added(root))
                        adev = acpi_pci_find_companion(&root->dev);
        }

        if (!adev)
                return false;

        fwnode = acpi_fwnode_handle(adev);
        if (fwnode_property_read_u8(fwnode, "HotPlugSupportInD3", &val))
                return false;

        return val == 1;
}
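
/*
 * Firmware declares the property read above along these lines
 * (illustrative ASL under the root port; the GUID is the one Microsoft
 * defined for HotPlugSupportInD3):
 *
 *      Name (_DSD, Package () {
 *          ToUUID ("6211e2c0-58a3-4af3-90e1-927a4e0c55a4"),
 *          Package () {
 *              Package () { "HotPlugSupportInD3", 1 }
 *          }
 *      })
 */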

static bool acpi_pci_power_manageable(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
        return adev ? acpi_device_power_manageable(adev) : false;
}

static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
        static const u8 state_conv[] = {
                [PCI_D0] = ACPI_STATE_D0,
                [PCI_D1] = ACPI_STATE_D1,
                [PCI_D2] = ACPI_STATE_D2,
                [PCI_D3hot] = ACPI_STATE_D3_HOT,
                [PCI_D3cold] = ACPI_STATE_D3_COLD,
        };
        int error = -EINVAL;

        /* If the ACPI device has _EJ0, ignore the device */
        if (!adev || acpi_has_method(adev->handle, "_EJ0"))
                return -ENODEV;

        switch (state) {
        case PCI_D3cold:
                if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
                                PM_QOS_FLAGS_ALL) {
                        error = -EBUSY;
                        break;
                }
                /* Fall through */
        case PCI_D0:
        case PCI_D1:
        case PCI_D2:
        case PCI_D3hot:
                error = acpi_device_set_power(adev, state_conv[state]);
        }

        if (!error)
                pci_dbg(dev, "power state changed by ACPI to %s\n",
                        acpi_power_state_string(state_conv[state]));

        return error;
}

static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
        static const pci_power_t state_conv[] = {
                [ACPI_STATE_D0] = PCI_D0,
                [ACPI_STATE_D1] = PCI_D1,
                [ACPI_STATE_D2] = PCI_D2,
                [ACPI_STATE_D3_HOT] = PCI_D3hot,
                [ACPI_STATE_D3_COLD] = PCI_D3cold,
        };
        int state;

        if (!adev || !acpi_device_power_manageable(adev))
                return PCI_UNKNOWN;

        if (acpi_device_get_power(adev, &state) || state == ACPI_STATE_UNKNOWN)
                return PCI_UNKNOWN;

        return state_conv[state];
}

static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
{
        while (bus->parent) {
                if (acpi_pm_device_can_wakeup(&bus->self->dev))
                        return acpi_pm_set_bridge_wakeup(&bus->self->dev, enable);

                bus = bus->parent;
        }

        /* We have reached the root bus. */
        if (bus->bridge) {
                if (acpi_pm_device_can_wakeup(bus->bridge))
                        return acpi_pm_set_bridge_wakeup(bus->bridge, enable);
        }
        return 0;
}

static int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
{
        if (acpi_pm_device_can_wakeup(&dev->dev))
                return acpi_pm_set_device_wakeup(&dev->dev, enable);

        return acpi_pci_propagate_wakeup(dev->bus, enable);
}
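
/*
 * Note on the two helpers above: a device that can wake the system by
 * itself is armed directly; otherwise acpi_pci_propagate_wakeup() walks
 * up the bus hierarchy and arms the first bridge (or, failing that, the
 * root bridge) that ACPI says is wakeup-capable.
 */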

static bool acpi_pci_need_resume(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

        /*
         * In some cases (e.g. Samsung 305V4A) leaving a bridge in suspend
         * over system-wide suspend/resume confuses the platform firmware,
         * so avoid doing that.  According to Section 16.1.6 of ACPI 6.2,
         * endpoint devices are expected to be in D3 before invoking the S3
         * entry path from the firmware, so they should not be affected by
         * this issue.
         */
        if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
                return true;

        if (!adev || !acpi_device_power_manageable(adev))
                return false;

        if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
                return true;

        if (acpi_target_system_state() == ACPI_STATE_S0)
                return false;

        return !!adev->power.flags.dsw_present;
}

static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
        .bridge_d3 = acpi_pci_bridge_d3,
        .is_manageable = acpi_pci_power_manageable,
        .set_state = acpi_pci_set_power_state,
        .get_state = acpi_pci_get_power_state,
        .choose_state = acpi_pci_choose_state,
        .set_wakeup = acpi_pci_wakeup,
        .need_resume = acpi_pci_need_resume,
};

void acpi_pci_add_bus(struct pci_bus *bus)
{
        union acpi_object *obj;
        struct pci_host_bridge *bridge;

        if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
                return;

        acpi_pci_slot_enumerate(bus);
        acpiphp_enumerate_slots(bus);

        /*
         * For a host bridge, check its _DSM for function 8 and if
         * that is available, mark it in pci_host_bridge.
         */
        if (!pci_is_root_bus(bus))
                return;

        obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
                                RESET_DELAY_DSM, NULL);
        if (!obj)
                return;

        if (obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 1) {
                bridge = pci_find_host_bridge(bus);
                bridge->ignore_reset_delay = 1;
        }
        ACPI_FREE(obj);
}

void acpi_pci_remove_bus(struct pci_bus *bus)
{
        if (acpi_pci_disabled || !bus->bridge)
                return;

        acpiphp_remove_slots(bus);
        acpi_pci_slot_remove(bus);
}

/* ACPI bus type */
static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        bool check_children;
        u64 addr;

        check_children = pci_is_bridge(pci_dev);
        /* See the ACPI spec for the syntax of _ADR */
        addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
        return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
                                      check_children);
}
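
/*
 * _ADR encoding example: device (slot) 0x1c, function 3 encodes as
 * (0x1c << 16) | 0x3 == 0x001C0003, matching a firmware node declared
 * with Name (_ADR, 0x001C0003).
 */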

/**
 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
 * @pdev: the PCI device whose delay is to be updated
 * @handle: ACPI handle of this device
 *
 * Update the d3_delay and d3cold_delay of a PCI device from the ACPI _DSM
 * control method of either the device itself or the PCI host bridge.
 *
 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
 * host bridge.  If it returns one, the OS may assume that all devices in
 * the hierarchy have already completed power-on reset delays.
 *
 * Function 9, "Device Readiness Durations," applies only to the object
 * where it is located.  It returns delay durations required after various
 * events if the device requires less time than the spec requires.  Delays
 * from this function take precedence over the Reset Delay function.
 *
 * These _DSM functions are defined by the draft ECN of January 28, 2014,
 * titled "ACPI additions for FW latency optimizations."
 */
static void pci_acpi_optimize_delay(struct pci_dev *pdev,
                                    acpi_handle handle)
{
        struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
        int value;
        union acpi_object *obj, *elements;

        if (bridge->ignore_reset_delay)
                pdev->d3cold_delay = 0;

        obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
                                FUNCTION_DELAY_DSM, NULL);
        if (!obj)
                return;

        if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 5) {
                elements = obj->package.elements;
                if (elements[0].type == ACPI_TYPE_INTEGER) {
                        value = (int)elements[0].integer.value / 1000;
                        if (value < PCI_PM_D3COLD_WAIT)
                                pdev->d3cold_delay = value;
                }
                if (elements[3].type == ACPI_TYPE_INTEGER) {
                        value = (int)elements[3].integer.value / 1000;
                        if (value < PCI_PM_D3_WAIT)
                                pdev->d3_delay = value;
                }
        }
        ACPI_FREE(obj);
}

static void pci_acpi_set_untrusted(struct pci_dev *dev)
{
        u8 val;

        if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
                return;
        if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val))
                return;

        /*
         * These root ports expose PCIe (including DMA) outside of the
         * system so make sure we treat them and everything behind as
         * untrusted.
         */
        if (val)
                dev->untrusted = 1;
}

static void pci_acpi_setup(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct acpi_device *adev = ACPI_COMPANION(dev);

        if (!adev)
                return;

        pci_acpi_optimize_delay(pci_dev, adev->handle);
        pci_acpi_set_untrusted(pci_dev);

        pci_acpi_add_pm_notifier(adev, pci_dev);
        if (!adev->wakeup.flags.valid)
                return;

        device_set_wakeup_capable(dev, true);
        /*
         * For bridges that can do D3 we enable wake automatically (as
         * we do for the power management itself in that case). The
         * reason is that the bridge may have additional methods such as
         * _DSW that need to be called.
         */
        if (pci_dev->bridge_d3)
                device_wakeup_enable(dev);

        acpi_pci_wakeup(pci_dev, false);
}

static void pci_acpi_cleanup(struct device *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(dev);
        struct pci_dev *pci_dev = to_pci_dev(dev);

        if (!adev)
                return;

        pci_acpi_remove_pm_notifier(adev);
        if (adev->wakeup.flags.valid) {
                if (pci_dev->bridge_d3)
                        device_wakeup_disable(dev);

                device_set_wakeup_capable(dev, false);
        }
}

static bool pci_acpi_bus_match(struct device *dev)
{
        return dev_is_pci(dev);
}

static struct acpi_bus_type acpi_pci_bus = {
        .name = "PCI",
        .match = pci_acpi_bus_match,
        .find_companion = acpi_pci_find_companion,
        .setup = pci_acpi_setup,
        .cleanup = pci_acpi_cleanup,
};

static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);

/**
 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
 * @fn: Callback matching a device to a fwnode that identifies a PCI
 *      MSI domain.
 *
 * This should be called by the irqchip driver that parents the MSI
 * domain, to provide a callback interface for querying the fwnode.
 */
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
{
        pci_msi_get_fwnode_cb = fn;
}
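
/*
 * Illustrative registration from an irqchip driver (the names here are
 * hypothetical):
 *
 *      static struct fwnode_handle *my_pci_msi_fwnode(struct device *dev)
 *      {
 *              return my_msi_controller_fwnode;
 *      }
 *
 *      pci_msi_register_fwnode_provider(&my_pci_msi_fwnode);
 */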

/**
 * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
 * @bus: The PCI host bridge bus.
 *
 * This function uses the callback function registered by
 * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
 * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
 * This returns NULL on error or when the domain is not found.
 */
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
{
        struct fwnode_handle *fwnode;

        if (!pci_msi_get_fwnode_cb)
                return NULL;

        fwnode = pci_msi_get_fwnode_cb(&bus->dev);
        if (!fwnode)
                return NULL;

        return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
}

static int __init acpi_pci_init(void)
{
        int ret;

        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
                pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
                pci_no_msi();
        }

        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
                pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
                pcie_no_aspm();
        }

        ret = register_acpi_bus_type(&acpi_pci_bus);
        if (ret)
                return 0;

        pci_set_platform_pm(&acpi_pci_platform_pm);
        acpi_pci_slot_init();
        acpiphp_init();

        return 0;
}
arch_initcall(acpi_pci_init);