// SPDX-License-Identifier: GPL-2.0
/*
 * PCI support in ACPI
 *
 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
 * Copyright (C) 2004 Intel Corp.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/pci_hotplug.h>
#include <linux/module.h>
#include <linux/pci-aspm.h>
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include "pci.h"

/*
 * The GUID is defined in the PCI Firmware Specification available here:
 * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
 */
const guid_t pci_acpi_dsm_guid =
	GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
		  0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);

#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
{
	struct device *dev = &adev->dev;
	struct resource_entry *entry;
	struct list_head list;
	unsigned long flags;
	int ret;

	INIT_LIST_HEAD(&list);
	flags = IORESOURCE_MEM;
	ret = acpi_dev_get_resources(adev, &list,
				     acpi_dev_filter_resource_type_cb,
				     (void *) flags);
	if (ret < 0) {
		dev_err(dev, "failed to parse _CRS method, error code %d\n",
			ret);
		return ret;
	}

	if (ret == 0) {
		dev_err(dev, "no memory resources present in _CRS\n");
		return -EINVAL;
	}

	entry = list_first_entry(&list, struct resource_entry, node);
	*res = *entry->res;
	acpi_dev_free_resource_list(&list);
	return 0;
}

static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
				 void **retval)
{
	u16 *segment = context;
	unsigned long long uid;
	acpi_status status;

	status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
	if (ACPI_FAILURE(status) || uid != *segment)
		return AE_CTRL_DEPTH;

	*(acpi_handle *)retval = handle;
	return AE_CTRL_TERMINATE;
}

int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
			  struct resource *res)
{
	struct acpi_device *adev;
	acpi_status status;
	acpi_handle handle;
	int ret;

	status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "can't find _HID %s device to locate resources\n",
			hid);
		return -ENODEV;
	}

	ret = acpi_bus_get_device(handle, &adev);
	if (ret)
		return ret;

	ret = acpi_get_rc_addr(adev, res);
	if (ret) {
		dev_err(dev, "can't get resource from %s\n",
			dev_name(&adev->dev));
		return ret;
	}

	return 0;
}
#endif

phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
{
	acpi_status status = AE_NOT_EXIST;
	unsigned long long mcfg_addr;

	if (handle)
		status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
					       NULL, &mcfg_addr);
	if (ACPI_FAILURE(status))
		return 0;

	return (phys_addr_t)mcfg_addr;
}

static acpi_status decode_type0_hpx_record(union acpi_object *record,
					   struct hpp_type0 *hpx0)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 6)
			return AE_ERROR;
		for (i = 2; i < 6; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx0->revision = revision;
		hpx0->cache_line_size = fields[2].integer.value;
		hpx0->latency_timer = fields[3].integer.value;
		hpx0->enable_serr = fields[4].integer.value;
		hpx0->enable_perr = fields[5].integer.value;
		break;
	default:
		pr_warn("%s: Type 0 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}
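
/*
 * Illustrative shape of the Type 0 setting record decoded above, as it
 * arrives from AML (the values are made-up examples, not real firmware
 * data):
 *
 *	Package () {
 *		0x00,	// Type 0 setting record
 *		0x01,	// Revision 1
 *		0x08,	// Cache line size, in DWORDs
 *		0x40,	// Latency timer, in PCI clocks
 *		0x01,	// Enable SERR
 *		0x00	// Enable PERR
 *	}
 */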

static acpi_status decode_type1_hpx_record(union acpi_object *record,
					   struct hpp_type1 *hpx1)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 5)
			return AE_ERROR;
		for (i = 2; i < 5; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx1->revision = revision;
		hpx1->max_mem_read = fields[2].integer.value;
		hpx1->avg_max_split = fields[3].integer.value;
		hpx1->tot_max_split = fields[4].integer.value;
		break;
	default:
		pr_warn("%s: Type 1 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

static acpi_status decode_type2_hpx_record(union acpi_object *record,
					   struct hpp_type2 *hpx2)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 18)
			return AE_ERROR;
		for (i = 2; i < 18; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx2->revision = revision;
		hpx2->unc_err_mask_and = fields[2].integer.value;
		hpx2->unc_err_mask_or = fields[3].integer.value;
		hpx2->unc_err_sever_and = fields[4].integer.value;
		hpx2->unc_err_sever_or = fields[5].integer.value;
		hpx2->cor_err_mask_and = fields[6].integer.value;
		hpx2->cor_err_mask_or = fields[7].integer.value;
		hpx2->adv_err_cap_and = fields[8].integer.value;
		hpx2->adv_err_cap_or = fields[9].integer.value;
		hpx2->pci_exp_devctl_and = fields[10].integer.value;
		hpx2->pci_exp_devctl_or = fields[11].integer.value;
		hpx2->pci_exp_lnkctl_and = fields[12].integer.value;
		hpx2->pci_exp_lnkctl_or = fields[13].integer.value;
		hpx2->sec_unc_err_sever_and = fields[14].integer.value;
		hpx2->sec_unc_err_sever_or = fields[15].integer.value;
		hpx2->sec_unc_err_mask_and = fields[16].integer.value;
		hpx2->sec_unc_err_mask_or = fields[17].integer.value;
		break;
	default:
		pr_warn("%s: Type 2 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
				union acpi_object *reg_fields)
{
	hpx3_reg->device_type = reg_fields[0].integer.value;
	hpx3_reg->function_type = reg_fields[1].integer.value;
	hpx3_reg->config_space_location = reg_fields[2].integer.value;
	hpx3_reg->pci_exp_cap_id = reg_fields[3].integer.value;
	hpx3_reg->pci_exp_cap_ver = reg_fields[4].integer.value;
	hpx3_reg->pci_exp_vendor_id = reg_fields[5].integer.value;
	hpx3_reg->dvsec_id = reg_fields[6].integer.value;
	hpx3_reg->dvsec_rev = reg_fields[7].integer.value;
	hpx3_reg->match_offset = reg_fields[8].integer.value;
	hpx3_reg->match_mask_and = reg_fields[9].integer.value;
	hpx3_reg->match_value = reg_fields[10].integer.value;
	hpx3_reg->reg_offset = reg_fields[11].integer.value;
	hpx3_reg->reg_mask_and = reg_fields[12].integer.value;
	hpx3_reg->reg_mask_or = reg_fields[13].integer.value;
}
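
/*
 * A Type 3 _HPX record, decoded below, is framed as three header
 * integers -- the type (3), a revision, and a descriptor count --
 * followed by fourteen integers per descriptor, in the field order
 * parsed above.
 */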

static acpi_status program_type3_hpx_record(struct pci_dev *dev,
					    union acpi_object *record,
					    const struct hotplug_program_ops *hp_ops)
{
	union acpi_object *fields = record->package.elements;
	u32 desc_count, expected_length, revision;
	union acpi_object *reg_fields;
	struct hpx_type3 hpx3;
	int i;

	revision = fields[1].integer.value;
	switch (revision) {
	case 1:
		desc_count = fields[2].integer.value;
		expected_length = 3 + desc_count * 14;

		if (record->package.count != expected_length)
			return AE_ERROR;

		for (i = 2; i < expected_length; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;

		for (i = 0; i < desc_count; i++) {
			reg_fields = fields + 3 + i * 14;
			parse_hpx3_register(&hpx3, reg_fields);
			hp_ops->program_type3(dev, &hpx3);
		}

		break;
	default:
		pr_warn("%s: Type 3 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle,
				const struct hotplug_program_ops *hp_ops)
{
	acpi_status status;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *package, *record, *fields;
	struct hpp_type0 hpx0;
	struct hpp_type1 hpx1;
	struct hpp_type2 hpx2;
	u32 type;
	int i;

	status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	package = (union acpi_object *)buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE) {
		status = AE_ERROR;
		goto exit;
	}

	for (i = 0; i < package->package.count; i++) {
		record = &package->package.elements[i];
		if (record->type != ACPI_TYPE_PACKAGE) {
			status = AE_ERROR;
			goto exit;
		}

		fields = record->package.elements;
		if (fields[0].type != ACPI_TYPE_INTEGER ||
		    fields[1].type != ACPI_TYPE_INTEGER) {
			status = AE_ERROR;
			goto exit;
		}

		type = fields[0].integer.value;
		switch (type) {
		case 0:
			memset(&hpx0, 0, sizeof(hpx0));
			status = decode_type0_hpx_record(record, &hpx0);
			if (ACPI_FAILURE(status))
				goto exit;
			hp_ops->program_type0(dev, &hpx0);
			break;
		case 1:
			memset(&hpx1, 0, sizeof(hpx1));
			status = decode_type1_hpx_record(record, &hpx1);
			if (ACPI_FAILURE(status))
				goto exit;
			hp_ops->program_type1(dev, &hpx1);
			break;
		case 2:
			memset(&hpx2, 0, sizeof(hpx2));
			status = decode_type2_hpx_record(record, &hpx2);
			if (ACPI_FAILURE(status))
				goto exit;
			hp_ops->program_type2(dev, &hpx2);
			break;
		case 3:
			status = program_type3_hpx_record(dev, record, hp_ops);
			if (ACPI_FAILURE(status))
				goto exit;
			break;
		default:
			pr_err("%s: Type %d record not supported\n",
			       __func__, type);
			status = AE_ERROR;
			goto exit;
		}
	}
exit:
	kfree(buffer.pointer);
	return status;
}

static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle,
				const struct hotplug_program_ops *hp_ops)
{
	acpi_status status;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *package, *fields;
	struct hpp_type0 hpp0;
	int i;

	memset(&hpp0, 0, sizeof(hpp0));

	status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	package = (union acpi_object *) buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE ||
	    package->package.count != 4) {
		status = AE_ERROR;
		goto exit;
	}

	fields = package->package.elements;
	for (i = 0; i < 4; i++) {
		if (fields[i].type != ACPI_TYPE_INTEGER) {
			status = AE_ERROR;
			goto exit;
		}
	}

	hpp0.revision = 1;
	hpp0.cache_line_size = fields[0].integer.value;
	hpp0.latency_timer = fields[1].integer.value;
	hpp0.enable_serr = fields[2].integer.value;
	hpp0.enable_perr = fields[3].integer.value;

	hp_ops->program_type0(dev, &hpp0);

exit:
	kfree(buffer.pointer);
	return status;
}
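
/*
 * Illustrative _HPP object handled above: a bare package of four
 * integers with no type/revision header (example values only):
 *
 *	Method (_HPP, 0) {
 *		Return (Package () { 0x08, 0x40, 0x01, 0x00 })
 *	}
 *
 * in the order: cache line size, latency timer, enable SERR, enable PERR.
 */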

/**
 * pci_acpi_program_hp_params - program hotplug parameters from ACPI
 * @dev: the pci_dev for which we want parameters
 * @hp_ops: hotplug operations provided by the caller to program the
 *	    decoded _HPX/_HPP settings into @dev
 *
 * Returns 0 on success or -ENODEV if no applicable _HPX or _HPP object
 * is found for @dev or any of its parents.
 */
int pci_acpi_program_hp_params(struct pci_dev *dev,
			       const struct hotplug_program_ops *hp_ops)
{
	acpi_status status;
	acpi_handle handle, phandle;
	struct pci_bus *pbus;

	if (acpi_pci_disabled)
		return -ENODEV;

	handle = NULL;
	for (pbus = dev->bus; pbus; pbus = pbus->parent) {
		handle = acpi_pci_get_bridge_handle(pbus);
		if (handle)
			break;
	}

	/*
	 * _HPP settings apply to all child buses, until another _HPP is
	 * encountered. If we don't find an _HPP for the input pci dev,
	 * look for it in the parent device scope since that would apply to
	 * this pci dev.
	 */
	while (handle) {
		status = acpi_run_hpx(dev, handle, hp_ops);
		if (ACPI_SUCCESS(status))
			return 0;
		status = acpi_run_hpp(dev, handle, hp_ops);
		if (ACPI_SUCCESS(status))
			return 0;
		if (acpi_is_root_bridge(handle))
			break;
		status = acpi_get_parent(handle, &phandle);
		if (ACPI_FAILURE(status))
			break;
		handle = phandle;
	}
	return -ENODEV;
}
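
/*
 * Usage sketch for the function above (the callback below is a
 * hypothetical example, not the PCI core's actual helper):
 *
 *	static void program_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
 *	{
 *		pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
 *				      hpp->cache_line_size);
 *		// ... latency timer, SERR/PERR enable bits, etc.
 *	}
 *
 *	static const struct hotplug_program_ops hp_ops = {
 *		.program_type0 = program_type0,
 *		// .program_type1/2/3 wired up likewise
 *	};
 *
 *	pci_acpi_program_hp_params(dev, &hp_ops);
 */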

/**
 * pciehp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native PCIe hotplug
 * driver.
 */
bool pciehp_is_native(struct pci_dev *bridge)
{
	const struct pci_host_bridge *host;
	u32 slot_cap;

	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
		return false;

	pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
	if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
		return false;

	if (pcie_ports_native)
		return true;

	host = pci_find_host_bridge(bridge->bus);
	return host->native_pcie_hotplug;
}

/**
 * shpchp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native SHPC hotplug
 * driver.
 */
bool shpchp_is_native(struct pci_dev *bridge)
{
	return bridge->shpc_managed;
}

/**
 * pci_acpi_wake_bus - Root bus wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
{
	struct acpi_device *adev;
	struct acpi_pci_root *root;

	adev = container_of(context, struct acpi_device, wakeup.context);
	root = acpi_driver_data(adev);
	pci_pme_wakeup_bus(root->bus);
}

/**
 * pci_acpi_wake_dev - PCI device wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(context->dev);

	if (pci_dev->pme_poll)
		pci_dev->pme_poll = false;

	if (pci_dev->current_state == PCI_D3cold) {
		pci_wakeup_event(pci_dev);
		pm_request_resume(&pci_dev->dev);
		return;
	}

	/* Clear PME Status if set. */
	if (pci_dev->pme_support)
		pci_check_pme_status(pci_dev);

	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);

	pci_pme_wakeup_bus(pci_dev->subordinate);
}

/**
 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
 * @dev: PCI root bridge ACPI device.
 */
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
{
	return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
}

/**
 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
 * @dev: ACPI device to add the notifier for.
 * @pci_dev: PCI device to check for the PME status if an event is signaled.
 */
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
				     struct pci_dev *pci_dev)
{
	return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}

/*
 * _SxD returns the D-state with the highest power
 * (lowest D-state number) supported in the S-state "x".
 *
 * If the device does not have a _PRW
 * (Power Resources for Wake) supporting system wakeup from "x",
 * then the OS is free to choose a lower power (higher number
 * D-state) than the return value from _SxD.
 *
 * But if _PRW is enabled at S-state "x", the OS
 * must not choose a power lower than _SxD --
 * unless the device has an _SxW method specifying
 * the lowest power (highest D-state number) the device
 * may enter while still able to wake the system.
 *
 * I.e., depending on global OS policy:
 *
 * if (_PRW at S-state x)
 *	choose from highest power _SxD to lowest power _SxW
 * else // no _PRW at S-state x
 *	choose highest power _SxD or any lower power
 */

static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
	int acpi_state, d_max;

	if (pdev->no_d3cold)
		d_max = ACPI_STATE_D3_HOT;
	else
		d_max = ACPI_STATE_D3_COLD;
	acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
	if (acpi_state < 0)
		return PCI_POWER_ERROR;

	switch (acpi_state) {
	case ACPI_STATE_D0:
		return PCI_D0;
	case ACPI_STATE_D1:
		return PCI_D1;
	case ACPI_STATE_D2:
		return PCI_D2;
	case ACPI_STATE_D3_HOT:
		return PCI_D3hot;
	case ACPI_STATE_D3_COLD:
		return PCI_D3cold;
	}
	return PCI_POWER_ERROR;
}
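
/*
 * A worked example of the policy above, with illustrative values: if
 * _S3D returns 1 and a _PRW valid for S3 exists with _S3W returning 3,
 * the OS may pick D1, D2 or D3hot while wakeup is enabled; if no _PRW
 * covers S3, it may pick D1 or any lower-power state, including D3cold.
 */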

static struct acpi_device *acpi_pci_find_companion(struct device *dev);

static bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
	const struct fwnode_handle *fwnode;
	struct acpi_device *adev;
	struct pci_dev *root;
	u8 val;

	if (!dev->is_hotplug_bridge)
		return false;

	/*
	 * Look for a special _DSD property for the root port and if it
	 * is set we know the hierarchy behind it supports D3 just fine.
	 */
	root = pci_find_pcie_root_port(dev);
	if (!root)
		return false;

	adev = ACPI_COMPANION(&root->dev);
	if (root == dev) {
		/*
		 * It is possible that the ACPI companion is not yet bound
		 * for the root port so look it up manually here.
		 */
		if (!adev && !pci_dev_is_added(root))
			adev = acpi_pci_find_companion(&root->dev);
	}

	if (!adev)
		return false;

	fwnode = acpi_fwnode_handle(adev);
	if (fwnode_property_read_u8(fwnode, "HotPlugSupportInD3", &val))
		return false;

	return val == 1;
}
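
/*
 * A sketch of the firmware side of the property tested above, assuming
 * the standard _DSD device-properties UUID under the root port:
 *
 *	Name (_DSD, Package () {
 *		ToUUID ("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
 *		Package () {
 *			Package () { "HotPlugSupportInD3", 1 }
 *		}
 *	})
 */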

static bool acpi_pci_power_manageable(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	return adev ? acpi_device_power_manageable(adev) : false;
}

static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	static const u8 state_conv[] = {
		[PCI_D0] = ACPI_STATE_D0,
		[PCI_D1] = ACPI_STATE_D1,
		[PCI_D2] = ACPI_STATE_D2,
		[PCI_D3hot] = ACPI_STATE_D3_HOT,
		[PCI_D3cold] = ACPI_STATE_D3_COLD,
	};
	int error = -EINVAL;

	/* If the ACPI device has _EJ0, ignore the device */
	if (!adev || acpi_has_method(adev->handle, "_EJ0"))
		return -ENODEV;

	switch (state) {
	case PCI_D3cold:
		if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
				PM_QOS_FLAGS_ALL) {
			error = -EBUSY;
			break;
		}
		/* Fall through */
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
		error = acpi_device_set_power(adev, state_conv[state]);
	}

	if (!error)
		pci_dbg(dev, "power state changed by ACPI to %s\n",
			acpi_power_state_string(state_conv[state]));

	return error;
}

static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	static const pci_power_t state_conv[] = {
		[ACPI_STATE_D0] = PCI_D0,
		[ACPI_STATE_D1] = PCI_D1,
		[ACPI_STATE_D2] = PCI_D2,
		[ACPI_STATE_D3_HOT] = PCI_D3hot,
		[ACPI_STATE_D3_COLD] = PCI_D3cold,
	};
	int state;

	if (!adev || !acpi_device_power_manageable(adev))
		return PCI_UNKNOWN;

	state = adev->power.state;
	if (state == ACPI_STATE_UNKNOWN)
		return PCI_UNKNOWN;

	return state_conv[state];
}

static void acpi_pci_refresh_power_state(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	if (adev && acpi_device_power_manageable(adev))
		acpi_device_update_power(adev, NULL);
}

static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
{
	while (bus->parent) {
		if (acpi_pm_device_can_wakeup(&bus->self->dev))
			return acpi_pm_set_bridge_wakeup(&bus->self->dev, enable);

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge) {
		if (acpi_pm_device_can_wakeup(bus->bridge))
			return acpi_pm_set_bridge_wakeup(bus->bridge, enable);
	}
	return 0;
}

static int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
{
	if (acpi_pm_device_can_wakeup(&dev->dev))
		return acpi_pm_set_device_wakeup(&dev->dev, enable);

	return acpi_pci_propagate_wakeup(dev->bus, enable);
}

static bool acpi_pci_need_resume(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	/*
	 * In some cases (e.g. Samsung 305V4A) leaving a bridge in suspend over
	 * system-wide suspend/resume confuses the platform firmware, so avoid
	 * doing that. According to Section 16.1.6 of ACPI 6.2, endpoint
	 * devices are expected to be in D3 before invoking the S3 entry path
	 * from the firmware, so they should not be affected by this issue.
	 */
	if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
		return true;

	if (!adev || !acpi_device_power_manageable(adev))
		return false;

	if (adev->wakeup.flags.valid &&
	    device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
		return true;

	if (acpi_target_system_state() == ACPI_STATE_S0)
		return false;

	return !!adev->power.flags.dsw_present;
}

static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
	.bridge_d3 = acpi_pci_bridge_d3,
	.is_manageable = acpi_pci_power_manageable,
	.set_state = acpi_pci_set_power_state,
	.get_state = acpi_pci_get_power_state,
	.refresh_state = acpi_pci_refresh_power_state,
	.choose_state = acpi_pci_choose_state,
	.set_wakeup = acpi_pci_wakeup,
	.need_resume = acpi_pci_need_resume,
};

void acpi_pci_add_bus(struct pci_bus *bus)
{
	union acpi_object *obj;
	struct pci_host_bridge *bridge;

	if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
		return;

	acpi_pci_slot_enumerate(bus);
	acpiphp_enumerate_slots(bus);

	/*
	 * For a host bridge, check its _DSM for function 8 and if
	 * that is available, mark it in pci_host_bridge.
	 */
	if (!pci_is_root_bus(bus))
		return;

	obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
				RESET_DELAY_DSM, NULL);
	if (!obj)
		return;

	if (obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 1) {
		bridge = pci_find_host_bridge(bus);
		bridge->ignore_reset_delay = 1;
	}
	ACPI_FREE(obj);
}

void acpi_pci_remove_bus(struct pci_bus *bus)
{
	if (acpi_pci_disabled || !bus->bridge)
		return;

	acpiphp_remove_slots(bus);
	acpi_pci_slot_remove(bus);
}

/* ACPI bus type */
static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	bool check_children;
	u64 addr;

	check_children = pci_is_bridge(pci_dev);
	/* Refer to the ACPI spec for the syntax of _ADR */
	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
	return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
				      check_children);
}
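
/*
 * Example of the _ADR encoding built above: device (slot) 0x1c, function
 * 3 yields _ADR 0x001C0003 -- the device number in the high word and the
 * function number in the low word.
 */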

/**
 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
 * @pdev: the PCI device whose delay is to be updated
 * @handle: ACPI handle of this device
 *
 * Update the d3_delay and d3cold_delay of a PCI device from the ACPI _DSM
 * control method of either the device itself or the PCI host bridge.
 *
 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
 * host bridge. If it returns one, the OS may assume that all devices in
 * the hierarchy have already completed power-on reset delays.
 *
 * Function 9, "Device Readiness Durations," applies only to the object
 * where it is located. It returns delay durations required after various
 * events if the device requires less time than the spec requires. Delays
 * from this function take precedence over the Reset Delay function.
 *
 * These _DSM functions are defined by the draft ECN of January 28, 2014,
 * titled "ACPI additions for FW latency optimizations."
 */
static void pci_acpi_optimize_delay(struct pci_dev *pdev,
				    acpi_handle handle)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
	int value;
	union acpi_object *obj, *elements;

	if (bridge->ignore_reset_delay)
		pdev->d3cold_delay = 0;

	obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
				FUNCTION_DELAY_DSM, NULL);
	if (!obj)
		return;

	if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 5) {
		elements = obj->package.elements;
		if (elements[0].type == ACPI_TYPE_INTEGER) {
			/* _DSM durations are in us; the delays are in ms */
			value = (int)elements[0].integer.value / 1000;
			if (value < PCI_PM_D3COLD_WAIT)
				pdev->d3cold_delay = value;
		}
		if (elements[3].type == ACPI_TYPE_INTEGER) {
			value = (int)elements[3].integer.value / 1000;
			if (value < PCI_PM_D3_WAIT)
				pdev->d3_delay = value;
		}
	}
	ACPI_FREE(obj);
}

static void pci_acpi_set_untrusted(struct pci_dev *dev)
{
	u8 val;

	if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		return;
	if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val))
		return;

	/*
	 * These root ports expose PCIe (including DMA) outside of the
	 * system so make sure we treat them and everything behind as
	 * untrusted.
	 */
	if (val)
		dev->untrusted = 1;
}

static void pci_acpi_setup(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct acpi_device *adev = ACPI_COMPANION(dev);

	if (!adev)
		return;

	pci_acpi_optimize_delay(pci_dev, adev->handle);
	pci_acpi_set_untrusted(pci_dev);

	pci_acpi_add_pm_notifier(adev, pci_dev);
	if (!adev->wakeup.flags.valid)
		return;

	device_set_wakeup_capable(dev, true);
	/*
	 * For bridges that can do D3 we enable wake automatically (as
	 * we do for the power management itself in that case). The
	 * reason is that the bridge may have additional methods such as
	 * _DSW that need to be called.
	 */
	if (pci_dev->bridge_d3)
		device_wakeup_enable(dev);

	acpi_pci_wakeup(pci_dev, false);
	acpi_device_power_add_dependent(adev, dev);
}

static void pci_acpi_cleanup(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	struct pci_dev *pci_dev = to_pci_dev(dev);

	if (!adev)
		return;

	pci_acpi_remove_pm_notifier(adev);
	if (adev->wakeup.flags.valid) {
		acpi_device_power_remove_dependent(adev, dev);
		if (pci_dev->bridge_d3)
			device_wakeup_disable(dev);

		device_set_wakeup_capable(dev, false);
	}
}

static bool pci_acpi_bus_match(struct device *dev)
{
	return dev_is_pci(dev);
}

static struct acpi_bus_type acpi_pci_bus = {
	.name = "PCI",
	.match = pci_acpi_bus_match,
	.find_companion = acpi_pci_find_companion,
	.setup = pci_acpi_setup,
	.cleanup = pci_acpi_cleanup,
};

static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);

/**
 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
 * @fn: Callback matching a device to a fwnode that identifies a PCI
 *	MSI domain.
 *
 * This should be called by the irqchip driver that parents the MSI
 * domain, to provide the callback used to look up the fwnode.
 */
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
{
	pci_msi_get_fwnode_cb = fn;
}

/**
 * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
 * @bus: The PCI host bridge bus.
 *
 * This function uses the callback function registered by
 * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
 * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
 * This returns NULL on error or when the domain is not found.
 */
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
{
	struct fwnode_handle *fwnode;

	if (!pci_msi_get_fwnode_cb)
		return NULL;

	fwnode = pci_msi_get_fwnode_cb(&bus->dev);
	if (!fwnode)
		return NULL;

	return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
}
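
/*
 * Usage sketch (hypothetical names): the irqchip driver that parents the
 * MSI domain registers its lookup callback once during initialization:
 *
 *	static struct fwnode_handle *my_msi_get_fwnode(struct device *dev)
 *	{
 *		return my_msi_chip_fwnode;	// fwnode of the MSI controller
 *	}
 *
 *	pci_msi_register_fwnode_provider(&my_msi_get_fwnode);
 *
 * pci_host_bridge_acpi_msi_domain() can then resolve that fwnode to the
 * DOMAIN_BUS_PCI_MSI irq_domain when the host bridge is scanned.
 */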

static int __init acpi_pci_init(void)
{
	int ret;

	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
		pr_info("ACPI FADT declares the system doesn't support MSI, so disabling it\n");
		pci_no_msi();
	}

	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
		pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disabling it\n");
		pcie_no_aspm();
	}

	ret = register_acpi_bus_type(&acpi_pci_bus);
	if (ret)
		return 0;

	pci_set_platform_pm(&acpi_pci_platform_pm);
	acpi_pci_slot_init();
	acpiphp_init();

	return 0;
}
arch_initcall(acpi_pci_init);