// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include <linux/bitfield.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

#ifdef CONFIG_X86_32
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);
#endif

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

/*
 * Following exit from Conventional Reset, devices must be ready within 1 sec
 * (PCIe r6.0 sec 6.6.1). A D3cold to D0 transition implies a Conventional
 * Reset (PCIe r6.0 sec 5.8).
 */
#define PCI_RESET_WAIT 1000 /* msec */

/*
 * Devices may extend the 1 sec period through Request Retry Status
 * completions (PCIe r6.0 sec 2.3.1). The spec does not provide an upper
 * limit, but 60 sec ought to be enough for any device to become
 * responsive.
 */
#define PCIE_RESET_READY_POLL_MS 60000 /* msec */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay);
	unsigned int upper;

	if (delay_ms) {
		/* Use a 20% upper bound, 1ms minimum */
		upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U);
		usleep_range(delay_ms * USEC_PER_MSEC,
			     (delay_ms + upper) * USEC_PER_MSEC);
	}
}

bool pci_reset_supported(struct pci_dev *dev)
{
	return dev->reset_methods[0] != 0;
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
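
/*
 * Example (illustrative, not part of the original file): the hotplug window
 * sizes above can be overridden from the kernel command line, e.g.
 *
 *	pci=hpmmiosize=8M,hpmmioprefsize=64M
 *
 * to reserve 8 MB of non-prefetchable and 64 MB of prefetchable MMIO per
 * hotplug bridge, or pci=hpmemsize=64M to override both at once, as the
 * comments above describe.
 */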

/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value. Arch can override either
 * the dfl or actual value as it sees fit. Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */
int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
	u16 status;
	int ret;

	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
	if (ret != PCIBIOS_SUCCESSFUL)
		return -EIO;

	status &= PCI_STATUS_ERROR_BITS;
	if (status)
		pci_write_config_word(pdev, PCI_STATUS, status);

	return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);

#ifdef CONFIG_HAS_IOMEM
static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
					    bool write_combine)
{
	struct resource *res = &pdev->resource[bar];
	resource_size_t start = res->start;
	resource_size_t size = resource_size(res);

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}

	if (write_combine)
		return ioremap_wc(start, size);

	return ioremap(start, size);
}

void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, false);
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, true);
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
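
/*
 * Example (illustrative sketch, not part of the original file): mapping a
 * device's BAR 0 from a driver probe path. The function name, BAR number
 * and register offset are assumptions made up for the example.
 */
#ifdef CONFIG_HAS_IOMEM
static int __maybe_unused pci_example_map_bar0(struct pci_dev *pdev)
{
	void __iomem *regs;

	/* Returns NULL if BAR 0 is unset or is an I/O port BAR */
	regs = pci_ioremap_bar(pdev, 0);
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x10);	/* hypothetical register at 0x10 */
	iounmap(regs);
	return 0;
}
#endif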

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'. Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	unsigned int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or due to changes caused by kernel parameters. If the domain is
 * left unspecified, it is taken to be 0. In order to be robust against
 * bus renumbering issues, a path of PCI device/function numbers may be used
 * to address the specific device. The path for a device can be determined
 * through the use of 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices. (Note: this differs from
 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 * legacy reasons and convenience so users don't have to specify
 * FFFFFFFFs on the command line.)
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
		     subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
		     subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}

static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				  u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			      u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
				   unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM      Power Management
 *  %PCI_CAP_ID_AGP     Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD     Vital Product Data
 *  %PCI_CAP_ID_SLOTID  Slot Identification
 *  %PCI_CAP_ID_MSI     Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP   CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX    PCI-X
 *  %PCI_CAP_ID_EXP     PCI Express
 */
u8 pci_find_capability(struct pci_dev *dev, int cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	u8 hdr_type, pos;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it. Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
	u32 header;
	int ttl;
	u16 pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
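
/*
 * Example (illustrative sketch, not part of the original file): locating
 * the Power Management capability with pci_find_capability() and reading
 * a register inside it. The function name is an assumption.
 */
static void __maybe_unused pci_example_read_pmc(struct pci_dev *pdev)
{
	u8 pm;
	u16 pmc;

	pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (!pm)
		return;		/* device has no PM capability */

	/* Registers within the capability are addressed relative to it */
	pci_read_config_word(pdev, pm + PCI_PM_PMC, &pmc);
	pci_dbg(pdev, "PMC register: %#06x\n", pmc);
}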

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR  Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC   Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN  Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR  Power Budgeting
 */
u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */
u64 pci_get_dsn(struct pci_dev *dev)
{
	u32 dword;
	u64 dsn;
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return 0;

	/*
	 * The Device Serial Number is two dwords offset 4 bytes from the
	 * capability position. The specification says that the first dword is
	 * the lower half, and the second dword is the upper half.
	 */
	pos += 4;
	pci_read_config_dword(dev, pos, &dword);
	dsn = (u64)dword;
	pci_read_config_dword(dev, pos + 4, &dword);
	dsn |= ((u64)dword) << 32;

	return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);
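
/*
 * Example (illustrative, not from the original file): a driver that wants a
 * stable per-device identifier can prefer the DSN when the capability is
 * present. The function name is an assumption.
 */
static void __maybe_unused pci_example_log_dsn(struct pci_dev *pdev)
{
	u64 dsn = pci_get_dsn(pdev);

	if (dsn)
		pci_info(pdev, "Device Serial Number: %016llx\n", dsn);
	else
		pci_info(pdev, "no DSN capability\n");
}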
708 */ 709 u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap) 710 { 711 u8 pos; 712 713 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type); 714 if (pos) 715 pos = __pci_find_next_ht_cap(dev, pos, ht_cap); 716 717 return pos; 718 } 719 EXPORT_SYMBOL_GPL(pci_find_ht_capability); 720 721 /** 722 * pci_find_vsec_capability - Find a vendor-specific extended capability 723 * @dev: PCI device to query 724 * @vendor: Vendor ID for which capability is defined 725 * @cap: Vendor-specific capability ID 726 * 727 * If @dev has Vendor ID @vendor, search for a VSEC capability with 728 * VSEC ID @cap. If found, return the capability offset in 729 * config space; otherwise return 0. 730 */ 731 u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap) 732 { 733 u16 vsec = 0; 734 u32 header; 735 736 if (vendor != dev->vendor) 737 return 0; 738 739 while ((vsec = pci_find_next_ext_capability(dev, vsec, 740 PCI_EXT_CAP_ID_VNDR))) { 741 if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, 742 &header) == PCIBIOS_SUCCESSFUL && 743 PCI_VNDR_HEADER_ID(header) == cap) 744 return vsec; 745 } 746 747 return 0; 748 } 749 EXPORT_SYMBOL_GPL(pci_find_vsec_capability); 750 751 /** 752 * pci_find_dvsec_capability - Find DVSEC for vendor 753 * @dev: PCI device to query 754 * @vendor: Vendor ID to match for the DVSEC 755 * @dvsec: Designated Vendor-specific capability ID 756 * 757 * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability 758 * offset in config space; otherwise return 0. 759 */ 760 u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec) 761 { 762 int pos; 763 764 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC); 765 if (!pos) 766 return 0; 767 768 while (pos) { 769 u16 v, id; 770 771 pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v); 772 pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id); 773 if (vendor == v && dvsec == id) 774 return pos; 775 776 pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC); 777 } 778 779 return 0; 780 } 781 EXPORT_SYMBOL_GPL(pci_find_dvsec_capability); 782 783 /** 784 * pci_find_parent_resource - return resource region of parent bus of given 785 * region 786 * @dev: PCI device structure contains resources to be searched 787 * @res: child resource record for which parent is sought 788 * 789 * For given resource region of given device, return the resource region of 790 * parent bus the given region is contained in. 791 */ 792 struct resource *pci_find_parent_resource(const struct pci_dev *dev, 793 struct resource *res) 794 { 795 const struct pci_bus *bus = dev->bus; 796 struct resource *r; 797 798 pci_bus_for_each_resource(bus, r) { 799 if (!r) 800 continue; 801 if (resource_contains(r, res)) { 802 803 /* 804 * If the window is prefetchable but the BAR is 805 * not, the allocator made a mistake. 806 */ 807 if (r->flags & IORESOURCE_PREFETCH && 808 !(res->flags & IORESOURCE_PREFETCH)) 809 return NULL; 810 811 /* 812 * If we're below a transparent bridge, there may 813 * be both a positively-decoded aperture and a 814 * subtractively-decoded region that contain the BAR. 815 * We want the positively-decoded one, so this depends 816 * on pci_bus_for_each_resource() giving us those 817 * first. 
818 */ 819 return r; 820 } 821 } 822 return NULL; 823 } 824 EXPORT_SYMBOL(pci_find_parent_resource); 825 826 /** 827 * pci_find_resource - Return matching PCI device resource 828 * @dev: PCI device to query 829 * @res: Resource to look for 830 * 831 * Goes over standard PCI resources (BARs) and checks if the given resource 832 * is partially or fully contained in any of them. In that case the 833 * matching resource is returned, %NULL otherwise. 834 */ 835 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res) 836 { 837 int i; 838 839 for (i = 0; i < PCI_STD_NUM_BARS; i++) { 840 struct resource *r = &dev->resource[i]; 841 842 if (r->start && resource_contains(r, res)) 843 return r; 844 } 845 846 return NULL; 847 } 848 EXPORT_SYMBOL(pci_find_resource); 849 850 /** 851 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos 852 * @dev: the PCI device to operate on 853 * @pos: config space offset of status word 854 * @mask: mask of bit(s) to care about in status word 855 * 856 * Return 1 when mask bit(s) in status word clear, 0 otherwise. 857 */ 858 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask) 859 { 860 int i; 861 862 /* Wait for Transaction Pending bit clean */ 863 for (i = 0; i < 4; i++) { 864 u16 status; 865 if (i) 866 msleep((1 << (i - 1)) * 100); 867 868 pci_read_config_word(dev, pos, &status); 869 if (!(status & mask)) 870 return 1; 871 } 872 873 return 0; 874 } 875 876 static int pci_acs_enable; 877 878 /** 879 * pci_request_acs - ask for ACS to be enabled if supported 880 */ 881 void pci_request_acs(void) 882 { 883 pci_acs_enable = 1; 884 } 885 886 static const char *disable_acs_redir_param; 887 888 /** 889 * pci_disable_acs_redir - disable ACS redirect capabilities 890 * @dev: the PCI device 891 * 892 * For only devices specified in the disable_acs_redir parameter. 
893 */ 894 static void pci_disable_acs_redir(struct pci_dev *dev) 895 { 896 int ret = 0; 897 const char *p; 898 int pos; 899 u16 ctrl; 900 901 if (!disable_acs_redir_param) 902 return; 903 904 p = disable_acs_redir_param; 905 while (*p) { 906 ret = pci_dev_str_match(dev, p, &p); 907 if (ret < 0) { 908 pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n", 909 disable_acs_redir_param); 910 911 break; 912 } else if (ret == 1) { 913 /* Found a match */ 914 break; 915 } 916 917 if (*p != ';' && *p != ',') { 918 /* End of param or invalid format */ 919 break; 920 } 921 p++; 922 } 923 924 if (ret != 1) 925 return; 926 927 if (!pci_dev_specific_disable_acs_redir(dev)) 928 return; 929 930 pos = dev->acs_cap; 931 if (!pos) { 932 pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n"); 933 return; 934 } 935 936 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); 937 938 /* P2P Request & Completion Redirect */ 939 ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC); 940 941 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); 942 943 pci_info(dev, "disabled ACS redirect\n"); 944 } 945 946 /** 947 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities 948 * @dev: the PCI device 949 */ 950 static void pci_std_enable_acs(struct pci_dev *dev) 951 { 952 int pos; 953 u16 cap; 954 u16 ctrl; 955 956 pos = dev->acs_cap; 957 if (!pos) 958 return; 959 960 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap); 961 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); 962 963 /* Source Validation */ 964 ctrl |= (cap & PCI_ACS_SV); 965 966 /* P2P Request Redirect */ 967 ctrl |= (cap & PCI_ACS_RR); 968 969 /* P2P Completion Redirect */ 970 ctrl |= (cap & PCI_ACS_CR); 971 972 /* Upstream Forwarding */ 973 ctrl |= (cap & PCI_ACS_UF); 974 975 /* Enable Translation Blocking for external devices and noats */ 976 if (pci_ats_disabled() || dev->external_facing || dev->untrusted) 977 ctrl |= (cap & PCI_ACS_TB); 978 979 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); 980 } 981 982 /** 983 * pci_enable_acs - enable ACS if hardware support it 984 * @dev: the PCI device 985 */ 986 static void pci_enable_acs(struct pci_dev *dev) 987 { 988 if (!pci_acs_enable) 989 goto disable_acs_redir; 990 991 if (!pci_dev_specific_enable_acs(dev)) 992 goto disable_acs_redir; 993 994 pci_std_enable_acs(dev); 995 996 disable_acs_redir: 997 /* 998 * Note: pci_disable_acs_redir() must be called even if ACS was not 999 * enabled by the kernel because it may have been enabled by 1000 * platform firmware. So if we are told to disable it, we should 1001 * always disable it after setting the kernel's default 1002 * preferences. 1003 */ 1004 pci_disable_acs_redir(dev); 1005 } 1006 1007 /** 1008 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up) 1009 * @dev: PCI device to have its BARs restored 1010 * 1011 * Restore the BAR values for a given device, so as to make it 1012 * accessible by its driver. 
1013 */ 1014 static void pci_restore_bars(struct pci_dev *dev) 1015 { 1016 int i; 1017 1018 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) 1019 pci_update_resource(dev, i); 1020 } 1021 1022 static inline bool platform_pci_power_manageable(struct pci_dev *dev) 1023 { 1024 if (pci_use_mid_pm()) 1025 return true; 1026 1027 return acpi_pci_power_manageable(dev); 1028 } 1029 1030 static inline int platform_pci_set_power_state(struct pci_dev *dev, 1031 pci_power_t t) 1032 { 1033 if (pci_use_mid_pm()) 1034 return mid_pci_set_power_state(dev, t); 1035 1036 return acpi_pci_set_power_state(dev, t); 1037 } 1038 1039 static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev) 1040 { 1041 if (pci_use_mid_pm()) 1042 return mid_pci_get_power_state(dev); 1043 1044 return acpi_pci_get_power_state(dev); 1045 } 1046 1047 static inline void platform_pci_refresh_power_state(struct pci_dev *dev) 1048 { 1049 if (!pci_use_mid_pm()) 1050 acpi_pci_refresh_power_state(dev); 1051 } 1052 1053 static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev) 1054 { 1055 if (pci_use_mid_pm()) 1056 return PCI_POWER_ERROR; 1057 1058 return acpi_pci_choose_state(dev); 1059 } 1060 1061 static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable) 1062 { 1063 if (pci_use_mid_pm()) 1064 return PCI_POWER_ERROR; 1065 1066 return acpi_pci_wakeup(dev, enable); 1067 } 1068 1069 static inline bool platform_pci_need_resume(struct pci_dev *dev) 1070 { 1071 if (pci_use_mid_pm()) 1072 return false; 1073 1074 return acpi_pci_need_resume(dev); 1075 } 1076 1077 static inline bool platform_pci_bridge_d3(struct pci_dev *dev) 1078 { 1079 if (pci_use_mid_pm()) 1080 return false; 1081 1082 return acpi_pci_bridge_d3(dev); 1083 } 1084 1085 /** 1086 * pci_update_current_state - Read power state of given device and cache it 1087 * @dev: PCI device to handle. 1088 * @state: State to cache in case the device doesn't have the PM capability 1089 * 1090 * The power state is read from the PMCSR register, which however is 1091 * inaccessible in D3cold. The platform firmware is therefore queried first 1092 * to detect accessibility of the register. In case the platform firmware 1093 * reports an incorrect state or the device isn't power manageable by the 1094 * platform at all, we try to detect D3cold by testing accessibility of the 1095 * vendor ID in config space. 1096 */ 1097 void pci_update_current_state(struct pci_dev *dev, pci_power_t state) 1098 { 1099 if (platform_pci_get_power_state(dev) == PCI_D3cold) { 1100 dev->current_state = PCI_D3cold; 1101 } else if (dev->pm_cap) { 1102 u16 pmcsr; 1103 1104 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); 1105 if (PCI_POSSIBLE_ERROR(pmcsr)) { 1106 dev->current_state = PCI_D3cold; 1107 return; 1108 } 1109 dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK; 1110 } else { 1111 dev->current_state = state; 1112 } 1113 } 1114 1115 /** 1116 * pci_refresh_power_state - Refresh the given device's power state data 1117 * @dev: Target PCI device. 1118 * 1119 * Ask the platform to refresh the devices power state information and invoke 1120 * pci_update_current_state() to update its current PCI power state. 1121 */ 1122 void pci_refresh_power_state(struct pci_dev *dev) 1123 { 1124 platform_pci_refresh_power_state(dev); 1125 pci_update_current_state(dev, dev->current_state); 1126 } 1127 1128 /** 1129 * pci_platform_power_transition - Use platform to change device power state 1130 * @dev: PCI device to handle. 1131 * @state: State to put the device into. 
1132 */ 1133 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state) 1134 { 1135 int error; 1136 1137 error = platform_pci_set_power_state(dev, state); 1138 if (!error) 1139 pci_update_current_state(dev, state); 1140 else if (!dev->pm_cap) /* Fall back to PCI_D0 */ 1141 dev->current_state = PCI_D0; 1142 1143 return error; 1144 } 1145 EXPORT_SYMBOL_GPL(pci_platform_power_transition); 1146 1147 static int pci_resume_one(struct pci_dev *pci_dev, void *ign) 1148 { 1149 pm_request_resume(&pci_dev->dev); 1150 return 0; 1151 } 1152 1153 /** 1154 * pci_resume_bus - Walk given bus and runtime resume devices on it 1155 * @bus: Top bus of the subtree to walk. 1156 */ 1157 void pci_resume_bus(struct pci_bus *bus) 1158 { 1159 if (bus) 1160 pci_walk_bus(bus, pci_resume_one, NULL); 1161 } 1162 1163 static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) 1164 { 1165 int delay = 1; 1166 bool retrain = false; 1167 struct pci_dev *bridge; 1168 1169 if (pci_is_pcie(dev)) { 1170 bridge = pci_upstream_bridge(dev); 1171 if (bridge) 1172 retrain = true; 1173 } 1174 1175 /* 1176 * After reset, the device should not silently discard config 1177 * requests, but it may still indicate that it needs more time by 1178 * responding to them with CRS completions. The Root Port will 1179 * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete 1180 * the read (except when CRS SV is enabled and the read was for the 1181 * Vendor ID; in that case it synthesizes 0x0001 data). 1182 * 1183 * Wait for the device to return a non-CRS completion. Read the 1184 * Command register instead of Vendor ID so we don't have to 1185 * contend with the CRS SV value. 1186 */ 1187 for (;;) { 1188 u32 id; 1189 1190 pci_read_config_dword(dev, PCI_COMMAND, &id); 1191 if (!PCI_POSSIBLE_ERROR(id)) 1192 break; 1193 1194 if (delay > timeout) { 1195 pci_warn(dev, "not ready %dms after %s; giving up\n", 1196 delay - 1, reset_type); 1197 return -ENOTTY; 1198 } 1199 1200 if (delay > PCI_RESET_WAIT) { 1201 if (retrain) { 1202 retrain = false; 1203 if (pcie_failed_link_retrain(bridge)) { 1204 delay = 1; 1205 continue; 1206 } 1207 } 1208 pci_info(dev, "not ready %dms after %s; waiting\n", 1209 delay - 1, reset_type); 1210 } 1211 1212 msleep(delay); 1213 delay *= 2; 1214 } 1215 1216 if (delay > PCI_RESET_WAIT) 1217 pci_info(dev, "ready %dms after %s\n", delay - 1, 1218 reset_type); 1219 1220 return 0; 1221 } 1222 1223 /** 1224 * pci_power_up - Put the given device into D0 1225 * @dev: PCI device to power up 1226 * 1227 * On success, return 0 or 1, depending on whether or not it is necessary to 1228 * restore the device's BARs subsequently (1 is returned in that case). 
1229 */ 1230 int pci_power_up(struct pci_dev *dev) 1231 { 1232 bool need_restore; 1233 pci_power_t state; 1234 u16 pmcsr; 1235 1236 platform_pci_set_power_state(dev, PCI_D0); 1237 1238 if (!dev->pm_cap) { 1239 state = platform_pci_get_power_state(dev); 1240 if (state == PCI_UNKNOWN) 1241 dev->current_state = PCI_D0; 1242 else 1243 dev->current_state = state; 1244 1245 if (state == PCI_D0) 1246 return 0; 1247 1248 return -EIO; 1249 } 1250 1251 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); 1252 if (PCI_POSSIBLE_ERROR(pmcsr)) { 1253 pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n", 1254 pci_power_name(dev->current_state)); 1255 dev->current_state = PCI_D3cold; 1256 return -EIO; 1257 } 1258 1259 state = pmcsr & PCI_PM_CTRL_STATE_MASK; 1260 1261 need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) && 1262 !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET); 1263 1264 if (state == PCI_D0) 1265 goto end; 1266 1267 /* 1268 * Force the entire word to 0. This doesn't affect PME_Status, disables 1269 * PME_En, and sets PowerState to 0. 1270 */ 1271 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0); 1272 1273 /* Mandatory transition delays; see PCI PM 1.2. */ 1274 if (state == PCI_D3hot) 1275 pci_dev_d3_sleep(dev); 1276 else if (state == PCI_D2) 1277 udelay(PCI_PM_D2_DELAY); 1278 1279 end: 1280 dev->current_state = PCI_D0; 1281 if (need_restore) 1282 return 1; 1283 1284 return 0; 1285 } 1286 1287 /** 1288 * pci_set_full_power_state - Put a PCI device into D0 and update its state 1289 * @dev: PCI device to power up 1290 * 1291 * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register 1292 * to confirm the state change, restore its BARs if they might be lost and 1293 * reconfigure ASPM in acordance with the new power state. 1294 * 1295 * If pci_restore_state() is going to be called right after a power state change 1296 * to D0, it is more efficient to use pci_power_up() directly instead of this 1297 * function. 1298 */ 1299 static int pci_set_full_power_state(struct pci_dev *dev) 1300 { 1301 u16 pmcsr; 1302 int ret; 1303 1304 ret = pci_power_up(dev); 1305 if (ret < 0) 1306 return ret; 1307 1308 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); 1309 dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK; 1310 if (dev->current_state != PCI_D0) { 1311 pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n", 1312 pci_power_name(dev->current_state)); 1313 } else if (ret > 0) { 1314 /* 1315 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT 1316 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning 1317 * from D3hot to D0 _may_ perform an internal reset, thereby 1318 * going to "D0 Uninitialized" rather than "D0 Initialized". 1319 * For example, at least some versions of the 3c905B and the 1320 * 3c556B exhibit this behaviour. 1321 * 1322 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave 1323 * devices in a D3hot state at boot. Consequently, we need to 1324 * restore at least the BARs so that the device will be 1325 * accessible to its driver. 
1326 */ 1327 pci_restore_bars(dev); 1328 } 1329 1330 return 0; 1331 } 1332 1333 /** 1334 * __pci_dev_set_current_state - Set current state of a PCI device 1335 * @dev: Device to handle 1336 * @data: pointer to state to be set 1337 */ 1338 static int __pci_dev_set_current_state(struct pci_dev *dev, void *data) 1339 { 1340 pci_power_t state = *(pci_power_t *)data; 1341 1342 dev->current_state = state; 1343 return 0; 1344 } 1345 1346 /** 1347 * pci_bus_set_current_state - Walk given bus and set current state of devices 1348 * @bus: Top bus of the subtree to walk. 1349 * @state: state to be set 1350 */ 1351 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state) 1352 { 1353 if (bus) 1354 pci_walk_bus(bus, __pci_dev_set_current_state, &state); 1355 } 1356 1357 /** 1358 * pci_set_low_power_state - Put a PCI device into a low-power state. 1359 * @dev: PCI device to handle. 1360 * @state: PCI power state (D1, D2, D3hot) to put the device into. 1361 * 1362 * Use the device's PCI_PM_CTRL register to put it into a low-power state. 1363 * 1364 * RETURN VALUE: 1365 * -EINVAL if the requested state is invalid. 1366 * -EIO if device does not support PCI PM or its PM capabilities register has a 1367 * wrong version, or device doesn't support the requested state. 1368 * 0 if device already is in the requested state. 1369 * 0 if device's power state has been successfully changed. 1370 */ 1371 static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state) 1372 { 1373 u16 pmcsr; 1374 1375 if (!dev->pm_cap) 1376 return -EIO; 1377 1378 /* 1379 * Validate transition: We can enter D0 from any state, but if 1380 * we're already in a low-power state, we can only go deeper. E.g., 1381 * we can go from D1 to D3, but we can't go directly from D3 to D1; 1382 * we'd have to go from D3 to D0, then to D1. 1383 */ 1384 if (dev->current_state <= PCI_D3cold && dev->current_state > state) { 1385 pci_dbg(dev, "Invalid power transition (from %s to %s)\n", 1386 pci_power_name(dev->current_state), 1387 pci_power_name(state)); 1388 return -EINVAL; 1389 } 1390 1391 /* Check if this device supports the desired state */ 1392 if ((state == PCI_D1 && !dev->d1_support) 1393 || (state == PCI_D2 && !dev->d2_support)) 1394 return -EIO; 1395 1396 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); 1397 if (PCI_POSSIBLE_ERROR(pmcsr)) { 1398 pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n", 1399 pci_power_name(dev->current_state), 1400 pci_power_name(state)); 1401 dev->current_state = PCI_D3cold; 1402 return -EIO; 1403 } 1404 1405 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 1406 pmcsr |= state; 1407 1408 /* Enter specified state */ 1409 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); 1410 1411 /* Mandatory power management transition delays; see PCI PM 1.2. */ 1412 if (state == PCI_D3hot) 1413 pci_dev_d3_sleep(dev); 1414 else if (state == PCI_D2) 1415 udelay(PCI_PM_D2_DELAY); 1416 1417 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); 1418 dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK; 1419 if (dev->current_state != state) 1420 pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n", 1421 pci_power_name(dev->current_state), 1422 pci_power_name(state)); 1423 1424 return 0; 1425 } 1426 1427 /** 1428 * pci_set_power_state - Set the power state of a PCI device 1429 * @dev: PCI device to handle. 1430 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. 
1431 * 1432 * Transition a device to a new power state, using the platform firmware and/or 1433 * the device's PCI PM registers. 1434 * 1435 * RETURN VALUE: 1436 * -EINVAL if the requested state is invalid. 1437 * -EIO if device does not support PCI PM or its PM capabilities register has a 1438 * wrong version, or device doesn't support the requested state. 1439 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported. 1440 * 0 if device already is in the requested state. 1441 * 0 if the transition is to D3 but D3 is not supported. 1442 * 0 if device's power state has been successfully changed. 1443 */ 1444 int pci_set_power_state(struct pci_dev *dev, pci_power_t state) 1445 { 1446 int error; 1447 1448 /* Bound the state we're entering */ 1449 if (state > PCI_D3cold) 1450 state = PCI_D3cold; 1451 else if (state < PCI_D0) 1452 state = PCI_D0; 1453 else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev)) 1454 1455 /* 1456 * If the device or the parent bridge do not support PCI 1457 * PM, ignore the request if we're doing anything other 1458 * than putting it into D0 (which would only happen on 1459 * boot). 1460 */ 1461 return 0; 1462 1463 /* Check if we're already there */ 1464 if (dev->current_state == state) 1465 return 0; 1466 1467 if (state == PCI_D0) 1468 return pci_set_full_power_state(dev); 1469 1470 /* 1471 * This device is quirked not to be put into D3, so don't put it in 1472 * D3 1473 */ 1474 if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3)) 1475 return 0; 1476 1477 if (state == PCI_D3cold) { 1478 /* 1479 * To put the device in D3cold, put it into D3hot in the native 1480 * way, then put it into D3cold using platform ops. 1481 */ 1482 error = pci_set_low_power_state(dev, PCI_D3hot); 1483 1484 if (pci_platform_power_transition(dev, PCI_D3cold)) 1485 return error; 1486 1487 /* Powering off a bridge may power off the whole hierarchy */ 1488 if (dev->current_state == PCI_D3cold) 1489 pci_bus_set_current_state(dev->subordinate, PCI_D3cold); 1490 } else { 1491 error = pci_set_low_power_state(dev, state); 1492 1493 if (pci_platform_power_transition(dev, state)) 1494 return error; 1495 } 1496 1497 return 0; 1498 } 1499 EXPORT_SYMBOL(pci_set_power_state); 1500 1501 #define PCI_EXP_SAVE_REGS 7 1502 1503 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev, 1504 u16 cap, bool extended) 1505 { 1506 struct pci_cap_saved_state *tmp; 1507 1508 hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) { 1509 if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap) 1510 return tmp; 1511 } 1512 return NULL; 1513 } 1514 1515 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap) 1516 { 1517 return _pci_find_saved_cap(dev, cap, false); 1518 } 1519 1520 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap) 1521 { 1522 return _pci_find_saved_cap(dev, cap, true); 1523 } 1524 1525 static int pci_save_pcie_state(struct pci_dev *dev) 1526 { 1527 int i = 0; 1528 struct pci_cap_saved_state *save_state; 1529 u16 *cap; 1530 1531 if (!pci_is_pcie(dev)) 1532 return 0; 1533 1534 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); 1535 if (!save_state) { 1536 pci_err(dev, "buffer not found in %s\n", __func__); 1537 return -ENOMEM; 1538 } 1539 1540 cap = (u16 *)&save_state->cap.data[0]; 1541 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]); 1542 pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]); 1543 pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]); 1544 

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
							u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

void pci_bridge_reconfigure_ltr(struct pci_dev *dev)
{
#ifdef CONFIG_PCIEASPM
	struct pci_dev *bridge;
	u32 ctl;

	bridge = pci_upstream_bridge(dev);
	if (bridge && bridge->ltr_path) {
		pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
		if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
			pci_dbg(bridge, "re-enabling LTR\n");
			pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
						 PCI_EXP_DEVCTL2_LTR_EN);
		}
	}
#endif
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	/*
	 * Downstream ports reset the LTR enable bit when link goes down.
	 * Check and re-configure the bit here before restoring device.
	 * PCIe r5.0, sec 7.5.3.16.
	 */
	pci_bridge_reconfigure_ltr(dev);

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

static void pci_save_ltr_state(struct pci_dev *dev)
{
	int ltr;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	if (!pci_is_pcie(dev))
		return;

	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!ltr)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state) {
		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
		return;
	}

	/* Some broken devices only support dword access to LTR */
	cap = &save_state->cap.data[0];
	pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
}

static void pci_restore_ltr_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	int ltr;
	u32 *cap;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state || !ltr)
		return;

	/* Some broken devices only support dword access to LTR */
	cap = &save_state->cap.data[0];
	pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
}

/**
 * pci_save_state - save the PCI configuration space of a device before
 *		    suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++) {
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
		pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
			i * 4, dev->saved_config_space[i]);
	}
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	pci_save_ltr_state(dev);
	pci_save_dpc_state(dev);
	pci_save_aer_state(dev);
	pci_save_ptm_state(dev);
	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry, bool force)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (!force && val == saved_val)
		return;

	for (;;) {
		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
			offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry,
					   bool force)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry, force);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0, false);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10, false);
		pci_restore_config_space_range(pdev, 0, 3, 0, false);
	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		pci_restore_config_space_range(pdev, 12, 15, 0, false);

		/*
		 * Force rewriting of prefetch registers to avoid S3 resume
		 * issues on Intel PCI bridges that occur when these
		 * registers are not explicitly written.
		 */
		pci_restore_config_space_range(pdev, 9, 11, 0, true);
		pci_restore_config_space_range(pdev, 0, 8, 0, false);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0, false);
	}
}

static void pci_restore_rebar_state(struct pci_dev *pdev)
{
	unsigned int pos, nbars, i;
	u32 ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
	if (!pos)
		return;

	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
		    PCI_REBAR_CTRL_NBAR_SHIFT;

	for (i = 0; i < nbars; i++, pos += 8) {
		struct resource *res;
		int bar_idx, size;

		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
		res = pdev->resource + bar_idx;
		size = pci_rebar_bytes_to_size(resource_size(res));
		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
		ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/*
	 * Restore max latencies (in the LTR capability) before enabling
	 * LTR itself (in the PCIe capability).
	 */
	pci_restore_ltr_state(dev);

	pci_restore_pcie_state(dev);
	pci_restore_pasid_state(dev);
	pci_restore_pri_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);
	pci_restore_rebar_state(dev);
	pci_restore_dpc_state(dev);
	pci_restore_ptm_state(dev);

	pci_aer_clear_status(dev);
	pci_restore_aer_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	/* Restore ACS and IOV configuration state */
	pci_enable_acs(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);
1841 */ 1842 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev) 1843 { 1844 struct pci_saved_state *state; 1845 struct pci_cap_saved_state *tmp; 1846 struct pci_cap_saved_data *cap; 1847 size_t size; 1848 1849 if (!dev->state_saved) 1850 return NULL; 1851 1852 size = sizeof(*state) + sizeof(struct pci_cap_saved_data); 1853 1854 hlist_for_each_entry(tmp, &dev->saved_cap_space, next) 1855 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size; 1856 1857 state = kzalloc(size, GFP_KERNEL); 1858 if (!state) 1859 return NULL; 1860 1861 memcpy(state->config_space, dev->saved_config_space, 1862 sizeof(state->config_space)); 1863 1864 cap = state->cap; 1865 hlist_for_each_entry(tmp, &dev->saved_cap_space, next) { 1866 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size; 1867 memcpy(cap, &tmp->cap, len); 1868 cap = (struct pci_cap_saved_data *)((u8 *)cap + len); 1869 } 1870 /* Empty cap_save terminates list */ 1871 1872 return state; 1873 } 1874 EXPORT_SYMBOL_GPL(pci_store_saved_state); 1875 1876 /** 1877 * pci_load_saved_state - Reload the provided save state into struct pci_dev. 1878 * @dev: PCI device that we're dealing with 1879 * @state: Saved state returned from pci_store_saved_state() 1880 */ 1881 int pci_load_saved_state(struct pci_dev *dev, 1882 struct pci_saved_state *state) 1883 { 1884 struct pci_cap_saved_data *cap; 1885 1886 dev->state_saved = false; 1887 1888 if (!state) 1889 return 0; 1890 1891 memcpy(dev->saved_config_space, state->config_space, 1892 sizeof(state->config_space)); 1893 1894 cap = state->cap; 1895 while (cap->size) { 1896 struct pci_cap_saved_state *tmp; 1897 1898 tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended); 1899 if (!tmp || tmp->cap.size != cap->size) 1900 return -EINVAL; 1901 1902 memcpy(tmp->cap.data, cap->data, tmp->cap.size); 1903 cap = (struct pci_cap_saved_data *)((u8 *)cap + 1904 sizeof(struct pci_cap_saved_data) + cap->size); 1905 } 1906 1907 dev->state_saved = true; 1908 return 0; 1909 } 1910 EXPORT_SYMBOL_GPL(pci_load_saved_state); 1911 1912 /** 1913 * pci_load_and_free_saved_state - Reload the save state pointed to by state, 1914 * and free the memory allocated for it. 
1915 * @dev: PCI device that we're dealing with 1916 * @state: Pointer to saved state returned from pci_store_saved_state() 1917 */ 1918 int pci_load_and_free_saved_state(struct pci_dev *dev, 1919 struct pci_saved_state **state) 1920 { 1921 int ret = pci_load_saved_state(dev, *state); 1922 kfree(*state); 1923 *state = NULL; 1924 return ret; 1925 } 1926 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state); 1927 1928 int __weak pcibios_enable_device(struct pci_dev *dev, int bars) 1929 { 1930 return pci_enable_resources(dev, bars); 1931 } 1932 1933 static int do_pci_enable_device(struct pci_dev *dev, int bars) 1934 { 1935 int err; 1936 struct pci_dev *bridge; 1937 u16 cmd; 1938 u8 pin; 1939 1940 err = pci_set_power_state(dev, PCI_D0); 1941 if (err < 0 && err != -EIO) 1942 return err; 1943 1944 bridge = pci_upstream_bridge(dev); 1945 if (bridge) 1946 pcie_aspm_powersave_config_link(bridge); 1947 1948 err = pcibios_enable_device(dev, bars); 1949 if (err < 0) 1950 return err; 1951 pci_fixup_device(pci_fixup_enable, dev); 1952 1953 if (dev->msi_enabled || dev->msix_enabled) 1954 return 0; 1955 1956 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); 1957 if (pin) { 1958 pci_read_config_word(dev, PCI_COMMAND, &cmd); 1959 if (cmd & PCI_COMMAND_INTX_DISABLE) 1960 pci_write_config_word(dev, PCI_COMMAND, 1961 cmd & ~PCI_COMMAND_INTX_DISABLE); 1962 } 1963 1964 return 0; 1965 } 1966 1967 /** 1968 * pci_reenable_device - Resume abandoned device 1969 * @dev: PCI device to be resumed 1970 * 1971 * NOTE: This function is a backend of pci_default_resume() and is not supposed 1972 * to be called by normal code, write proper resume handler and use it instead. 1973 */ 1974 int pci_reenable_device(struct pci_dev *dev) 1975 { 1976 if (pci_is_enabled(dev)) 1977 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1); 1978 return 0; 1979 } 1980 EXPORT_SYMBOL(pci_reenable_device); 1981 1982 static void pci_enable_bridge(struct pci_dev *dev) 1983 { 1984 struct pci_dev *bridge; 1985 int retval; 1986 1987 bridge = pci_upstream_bridge(dev); 1988 if (bridge) 1989 pci_enable_bridge(bridge); 1990 1991 if (pci_is_enabled(dev)) { 1992 if (!dev->is_busmaster) 1993 pci_set_master(dev); 1994 return; 1995 } 1996 1997 retval = pci_enable_device(dev); 1998 if (retval) 1999 pci_err(dev, "Error enabling bridge (%d), continuing\n", 2000 retval); 2001 pci_set_master(dev); 2002 } 2003 2004 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) 2005 { 2006 struct pci_dev *bridge; 2007 int err; 2008 int i, bars = 0; 2009 2010 /* 2011 * Power state could be unknown at this point, either due to a fresh 2012 * boot or a device removal call. So get the current power state 2013 * so that things like MSI message writing will behave as expected 2014 * (e.g. if the device really is in D0 at enable time). 
2015 	 */
2016 	pci_update_current_state(dev, dev->current_state);
2017 
2018 	if (atomic_inc_return(&dev->enable_cnt) > 1)
2019 		return 0;		/* already enabled */
2020 
2021 	bridge = pci_upstream_bridge(dev);
2022 	if (bridge)
2023 		pci_enable_bridge(bridge);
2024 
2025 	/* Include all resources except the SR-IOV (VF BAR) range */
2026 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
2027 		if (dev->resource[i].flags & flags)
2028 			bars |= (1 << i);
2029 	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
2030 		if (dev->resource[i].flags & flags)
2031 			bars |= (1 << i);
2032 
2033 	err = do_pci_enable_device(dev, bars);
2034 	if (err < 0)
2035 		atomic_dec(&dev->enable_cnt);
2036 	return err;
2037 }
2038 
2039 /**
2040 * pci_enable_device_io - Initialize a device for use with IO space
2041 * @dev: PCI device to be initialized
2042 *
2043 * Initialize device before it's used by a driver. Ask low-level code
2044 * to enable I/O resources. Wake up the device if it was suspended.
2045 * Beware, this function can fail.
2046 */
2047 int pci_enable_device_io(struct pci_dev *dev)
2048 {
2049 	return pci_enable_device_flags(dev, IORESOURCE_IO);
2050 }
2051 EXPORT_SYMBOL(pci_enable_device_io);
2052 
2053 /**
2054 * pci_enable_device_mem - Initialize a device for use with Memory space
2055 * @dev: PCI device to be initialized
2056 *
2057 * Initialize device before it's used by a driver. Ask low-level code
2058 * to enable Memory resources. Wake up the device if it was suspended.
2059 * Beware, this function can fail.
2060 */
2061 int pci_enable_device_mem(struct pci_dev *dev)
2062 {
2063 	return pci_enable_device_flags(dev, IORESOURCE_MEM);
2064 }
2065 EXPORT_SYMBOL(pci_enable_device_mem);
2066 
2067 /**
2068 * pci_enable_device - Initialize device before it's used by a driver.
2069 * @dev: PCI device to be initialized
2070 *
2071 * Initialize device before it's used by a driver. Ask low-level code
2072 * to enable I/O and memory. Wake up the device if it was suspended.
2073 * Beware, this function can fail.
2074 *
2075 * Note we don't actually enable the device many times if we call
2076 * this function repeatedly (we just increment the count).
2077 */
2078 int pci_enable_device(struct pci_dev *dev)
2079 {
2080 	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
2081 }
2082 EXPORT_SYMBOL(pci_enable_device);
2083 
2084 /*
2085 * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
2086 * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
2087 * there's no need to track it separately. pci_devres is initialized
2088 * when a device is enabled using managed PCI device enable interface.
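*
* A minimal, illustrative sketch of a probe routine using this
* interface (the foo_* names are hypothetical):
*
*	static int foo_probe(struct pci_dev *pdev,
*			     const struct pci_device_id *id)
*	{
*		int rc;
*
*		rc = pcim_enable_device(pdev);
*		if (rc)
*			return rc;
*
*		return 0;
*	}
*
* No cleanup for the enable is needed in foo_remove(): the device is
* disabled automatically when the driver detaches.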
2089 */
2090 struct pci_devres {
2091 	unsigned int enabled:1;
2092 	unsigned int pinned:1;
2093 	unsigned int orig_intx:1;
2094 	unsigned int restore_intx:1;
2095 	unsigned int mwi:1;
2096 	u32 region_mask;
2097 };
2098 
2099 static void pcim_release(struct device *gendev, void *res)
2100 {
2101 	struct pci_dev *dev = to_pci_dev(gendev);
2102 	struct pci_devres *this = res;
2103 	int i;
2104 
2105 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
2106 		if (this->region_mask & (1 << i))
2107 			pci_release_region(dev, i);
2108 
2109 	if (this->mwi)
2110 		pci_clear_mwi(dev);
2111 
2112 	if (this->restore_intx)
2113 		pci_intx(dev, this->orig_intx);
2114 
2115 	if (this->enabled && !this->pinned)
2116 		pci_disable_device(dev);
2117 }
2118 
2119 static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
2120 {
2121 	struct pci_devres *dr, *new_dr;
2122 
2123 	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
2124 	if (dr)
2125 		return dr;
2126 
2127 	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
2128 	if (!new_dr)
2129 		return NULL;
2130 	return devres_get(&pdev->dev, new_dr, NULL, NULL);
2131 }
2132 
2133 static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
2134 {
2135 	if (pci_is_managed(pdev))
2136 		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
2137 	return NULL;
2138 }
2139 
2140 /**
2141 * pcim_enable_device - Managed pci_enable_device()
2142 * @pdev: PCI device to be initialized
2143 *
2144 * Managed counterpart of pci_enable_device(); the device is disabled automatically on driver detach unless pinned.
2145 */
2146 int pcim_enable_device(struct pci_dev *pdev)
2147 {
2148 	struct pci_devres *dr;
2149 	int rc;
2150 
2151 	dr = get_pci_dr(pdev);
2152 	if (unlikely(!dr))
2153 		return -ENOMEM;
2154 	if (dr->enabled)
2155 		return 0;
2156 
2157 	rc = pci_enable_device(pdev);
2158 	if (!rc) {
2159 		pdev->is_managed = 1;
2160 		dr->enabled = 1;
2161 	}
2162 	return rc;
2163 }
2164 EXPORT_SYMBOL(pcim_enable_device);
2165 
2166 /**
2167 * pcim_pin_device - Pin managed PCI device
2168 * @pdev: PCI device to pin
2169 *
2170 * Pin managed PCI device @pdev. Pinned device won't be disabled on
2171 * driver detach. @pdev must have been enabled with
2172 * pcim_enable_device().
2173 */
2174 void pcim_pin_device(struct pci_dev *pdev)
2175 {
2176 	struct pci_devres *dr;
2177 
2178 	dr = find_pci_dr(pdev);
2179 	WARN_ON(!dr || !dr->enabled);
2180 	if (dr)
2181 		dr->pinned = 1;
2182 }
2183 EXPORT_SYMBOL(pcim_pin_device);
2184 
2185 /**
2186 * pcibios_device_add - provide arch specific hooks when adding device dev
2187 * @dev: the PCI device being added
2188 *
2189 * Permits the platform to provide architecture specific functionality when
2190 * devices are added. This is the default implementation. Architecture
2191 * implementations can override this.
2192 */
2193 int __weak pcibios_device_add(struct pci_dev *dev)
2194 {
2195 	return 0;
2196 }
2197 
2198 /**
2199 * pcibios_release_device - provide arch specific hooks when releasing
2200 * device dev
2201 * @dev: the PCI device being released
2202 *
2203 * Permits the platform to provide architecture specific functionality when
2204 * devices are released. This is the default implementation. Architecture
2205 * implementations can override this.
2206 */
2207 void __weak pcibios_release_device(struct pci_dev *dev) {}
2208 
2209 /**
2210 * pcibios_disable_device - disable arch specific PCI resources for device dev
2211 * @dev: the PCI device to disable
2212 *
2213 * Disables architecture specific PCI resources for the device. This
2214 * is the default implementation. Architecture implementations can
2215 * override this.
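*
* An architecture overrides this default simply by providing a
* non-__weak definition of the same symbol, e.g. (hypothetical
* sketch; foo_arch_disable_irqs() is not a real helper):
*
*	void pcibios_disable_device(struct pci_dev *dev)
*	{
*		foo_arch_disable_irqs(dev);
*	}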
2216 */
2217 void __weak pcibios_disable_device(struct pci_dev *dev) {}
2218 
2219 /**
2220 * pcibios_penalize_isa_irq - penalize an ISA IRQ
2221 * @irq: ISA IRQ to penalize
2222 * @active: IRQ active or not
2223 *
2224 * Permits the platform to provide architecture-specific functionality when
2225 * penalizing ISA IRQs. This is the default implementation. Architecture
2226 * implementations can override this.
2227 */
2228 void __weak pcibios_penalize_isa_irq(int irq, int active) {}
2229 
2230 static void do_pci_disable_device(struct pci_dev *dev)
2231 {
2232 	u16 pci_command;
2233 
2234 	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
2235 	if (pci_command & PCI_COMMAND_MASTER) {
2236 		pci_command &= ~PCI_COMMAND_MASTER;
2237 		pci_write_config_word(dev, PCI_COMMAND, pci_command);
2238 	}
2239 
2240 	pcibios_disable_device(dev);
2241 }
2242 
2243 /**
2244 * pci_disable_enabled_device - Disable device without updating enable_cnt
2245 * @dev: PCI device to disable
2246 *
2247 * NOTE: This function is a backend of PCI power management routines and is
2248 * not supposed to be called by drivers.
2249 */
2250 void pci_disable_enabled_device(struct pci_dev *dev)
2251 {
2252 	if (pci_is_enabled(dev))
2253 		do_pci_disable_device(dev);
2254 }
2255 
2256 /**
2257 * pci_disable_device - Disable PCI device after use
2258 * @dev: PCI device to be disabled
2259 *
2260 * Signal to the system that the PCI device is not in use by the system
2261 * anymore. This only involves disabling PCI bus-mastering, if active.
2262 *
2263 * Note we don't actually disable the device until all callers of
2264 * pci_enable_device() have called pci_disable_device().
2265 */
2266 void pci_disable_device(struct pci_dev *dev)
2267 {
2268 	struct pci_devres *dr;
2269 
2270 	dr = find_pci_dr(dev);
2271 	if (dr)
2272 		dr->enabled = 0;
2273 
2274 	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2275 		      "disabling already-disabled device");
2276 
2277 	if (atomic_dec_return(&dev->enable_cnt) != 0)
2278 		return;
2279 
2280 	do_pci_disable_device(dev);
2281 
2282 	dev->is_busmaster = 0;
2283 }
2284 EXPORT_SYMBOL(pci_disable_device);
2285 
2286 /**
2287 * pcibios_set_pcie_reset_state - set reset state for device dev
2288 * @dev: the PCIe device being reset
2289 * @state: Reset state to enter into
2290 *
2291 * Set the PCIe reset state for the device. This is the default
2292 * implementation. Architecture implementations can override this.
2293 */
2294 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2295 					enum pcie_reset_state state)
2296 {
2297 	return -EINVAL;
2298 }
2299 
2300 /**
2301 * pci_set_pcie_reset_state - set reset state for device dev
2302 * @dev: the PCIe device being reset
2303 * @state: Reset state to enter into
2304 *
2305 * Sets the PCIe reset state for the device.
2306 */
2307 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2308 {
2309 	return pcibios_set_pcie_reset_state(dev, state);
2310 }
2311 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
2312 
2313 #ifdef CONFIG_PCIEAER
2314 void pcie_clear_device_status(struct pci_dev *dev)
2315 {
2316 	u16 sta;
2317 
2318 	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
2319 	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
2320 }
2321 #endif
2322 
2323 /**
2324 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2325 * @dev: PCIe root port or event collector.
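*
* Note that PCI_EXP_RTSTA_PME is a "write 1 to clear" (RW1C) bit, so
* the helper below clears it by *setting* the bit.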
2326 */
2327 void pcie_clear_root_pme_status(struct pci_dev *dev)
2328 {
2329 	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2330 }
2331 
2332 /**
2333 * pci_check_pme_status - Check if given device has generated PME.
2334 * @dev: Device to check.
2335 *
2336 * Check the PME status of the device and if set, clear it and clear PME enable
2337 * (if set). Return 'true' if PME status and PME enable were both set or
2338 * 'false' otherwise.
2339 */
2340 bool pci_check_pme_status(struct pci_dev *dev)
2341 {
2342 	int pmcsr_pos;
2343 	u16 pmcsr;
2344 	bool ret = false;
2345 
2346 	if (!dev->pm_cap)
2347 		return false;
2348 
2349 	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2350 	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2351 	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2352 		return false;
2353 
2354 	/* Clear PME status. */
2355 	pmcsr |= PCI_PM_CTRL_PME_STATUS;
2356 	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2357 		/* Disable PME to avoid interrupt flood. */
2358 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2359 		ret = true;
2360 	}
2361 
2362 	pci_write_config_word(dev, pmcsr_pos, pmcsr);
2363 
2364 	return ret;
2365 }
2366 
2367 /**
2368 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2369 * @dev: Device to handle.
2370 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2371 *
2372 * Check if @dev has generated PME and queue a resume request for it in that
2373 * case.
2374 */
2375 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2376 {
2377 	if (pme_poll_reset && dev->pme_poll)
2378 		dev->pme_poll = false;
2379 
2380 	if (pci_check_pme_status(dev)) {
2381 		pci_wakeup_event(dev);
2382 		pm_request_resume(&dev->dev);
2383 	}
2384 	return 0;
2385 }
2386 
2387 /**
2388 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2389 * @bus: Top bus of the subtree to walk.
2390 */
2391 void pci_pme_wakeup_bus(struct pci_bus *bus)
2392 {
2393 	if (bus)
2394 		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2395 }
2396 
2397 
2398 /**
2399 * pci_pme_capable - check the capability of PCI device to generate PME#
2400 * @dev: PCI device to handle.
2401 * @state: PCI state from which device will issue PME#.
2402 */
2403 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2404 {
2405 	if (!dev->pm_cap)
2406 		return false;
2407 
2408 	return !!(dev->pme_support & (1 << state));
2409 }
2410 EXPORT_SYMBOL(pci_pme_capable);
2411 
2412 static void pci_pme_list_scan(struct work_struct *work)
2413 {
2414 	struct pci_pme_device *pme_dev, *n;
2415 
2416 	mutex_lock(&pci_pme_list_mutex);
2417 	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2418 		if (pme_dev->dev->pme_poll) {
2419 			struct pci_dev *bridge;
2420 
2421 			bridge = pme_dev->dev->bus->self;
2422 			/*
2423 			 * If bridge is in low power state, the
2424 			 * configuration space of subordinate devices
2425 			 * may not be accessible
2426 			 */
2427 			if (bridge && bridge->current_state != PCI_D0)
2428 				continue;
2429 			/*
2430 			 * If the device is in D3cold it should not be
2431 			 * polled either.
2432 */ 2433 if (pme_dev->dev->current_state == PCI_D3cold) 2434 continue; 2435 2436 pci_pme_wakeup(pme_dev->dev, NULL); 2437 } else { 2438 list_del(&pme_dev->list); 2439 kfree(pme_dev); 2440 } 2441 } 2442 if (!list_empty(&pci_pme_list)) 2443 queue_delayed_work(system_freezable_wq, &pci_pme_work, 2444 msecs_to_jiffies(PME_TIMEOUT)); 2445 mutex_unlock(&pci_pme_list_mutex); 2446 } 2447 2448 static void __pci_pme_active(struct pci_dev *dev, bool enable) 2449 { 2450 u16 pmcsr; 2451 2452 if (!dev->pme_support) 2453 return; 2454 2455 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); 2456 /* Clear PME_Status by writing 1 to it and enable PME# */ 2457 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE; 2458 if (!enable) 2459 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; 2460 2461 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); 2462 } 2463 2464 /** 2465 * pci_pme_restore - Restore PME configuration after config space restore. 2466 * @dev: PCI device to update. 2467 */ 2468 void pci_pme_restore(struct pci_dev *dev) 2469 { 2470 u16 pmcsr; 2471 2472 if (!dev->pme_support) 2473 return; 2474 2475 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); 2476 if (dev->wakeup_prepared) { 2477 pmcsr |= PCI_PM_CTRL_PME_ENABLE; 2478 pmcsr &= ~PCI_PM_CTRL_PME_STATUS; 2479 } else { 2480 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; 2481 pmcsr |= PCI_PM_CTRL_PME_STATUS; 2482 } 2483 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); 2484 } 2485 2486 /** 2487 * pci_pme_active - enable or disable PCI device's PME# function 2488 * @dev: PCI device to handle. 2489 * @enable: 'true' to enable PME# generation; 'false' to disable it. 2490 * 2491 * The caller must verify that the device is capable of generating PME# before 2492 * calling this function with @enable equal to 'true'. 2493 */ 2494 void pci_pme_active(struct pci_dev *dev, bool enable) 2495 { 2496 __pci_pme_active(dev, enable); 2497 2498 /* 2499 * PCI (as opposed to PCIe) PME requires that the device have 2500 * its PME# line hooked up correctly. Not all hardware vendors 2501 * do this, so the PME never gets delivered and the device 2502 * remains asleep. The easiest way around this is to 2503 * periodically walk the list of suspended devices and check 2504 * whether any have their PME flag set. The assumption is that 2505 * we'll wake up often enough anyway that this won't be a huge 2506 * hit, and the power savings from the devices will still be a 2507 * win. 2508 * 2509 * Although PCIe uses in-band PME message instead of PME# line 2510 * to report PME, PME does not work for some PCIe devices in 2511 * reality. For example, there are devices that set their PME 2512 * status bits, but don't really bother to send a PME message; 2513 * there are PCI Express Root Ports that don't bother to 2514 * trigger interrupts when they receive PME messages from the 2515 * devices below. So PME poll is used for PCIe devices too. 
2516 	 */
2517 
2518 	if (dev->pme_poll) {
2519 		struct pci_pme_device *pme_dev;
2520 		if (enable) {
2521 			pme_dev = kmalloc(sizeof(struct pci_pme_device),
2522 					  GFP_KERNEL);
2523 			if (!pme_dev) {
2524 				pci_warn(dev, "can't enable PME#\n");
2525 				return;
2526 			}
2527 			pme_dev->dev = dev;
2528 			mutex_lock(&pci_pme_list_mutex);
2529 			list_add(&pme_dev->list, &pci_pme_list);
2530 			if (list_is_singular(&pci_pme_list))
2531 				queue_delayed_work(system_freezable_wq,
2532 						   &pci_pme_work,
2533 						   msecs_to_jiffies(PME_TIMEOUT));
2534 			mutex_unlock(&pci_pme_list_mutex);
2535 		} else {
2536 			mutex_lock(&pci_pme_list_mutex);
2537 			list_for_each_entry(pme_dev, &pci_pme_list, list) {
2538 				if (pme_dev->dev == dev) {
2539 					list_del(&pme_dev->list);
2540 					kfree(pme_dev);
2541 					break;
2542 				}
2543 			}
2544 			mutex_unlock(&pci_pme_list_mutex);
2545 		}
2546 	}
2547 
2548 	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2549 }
2550 EXPORT_SYMBOL(pci_pme_active);
2551 
2552 /**
2553 * __pci_enable_wake - enable PCI device as wakeup event source
2554 * @dev: PCI device affected
2555 * @state: PCI state from which device will issue wakeup events
2556 * @enable: True to enable event generation; false to disable
2557 *
2558 * This enables the device as a wakeup event source, or disables it.
2559 * When such events involve platform-specific hooks, those hooks are
2560 * called automatically by this routine.
2561 *
2562 * Devices with legacy power management (no standard PCI PM capabilities)
2563 * always require such platform hooks.
2564 *
2565 * RETURN VALUE:
2566 * 0 is returned on success
2567 * -EINVAL is returned if device is not supposed to wake up the system
2568 * Error code depending on the platform is returned if both the platform and
2569 * the native mechanism fail to enable the generation of wake-up events
2570 */
2571 static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2572 {
2573 	int ret = 0;
2574 
2575 	/*
2576 	 * Bridges that are not power-manageable directly only signal
2577 	 * wakeup on behalf of subordinate devices which is set up
2578 	 * elsewhere, so skip them. However, bridges that are
2579 	 * power-manageable may signal wakeup for themselves (for example,
2580 	 * on a hotplug event) and they need to be covered here.
2581 	 */
2582 	if (!pci_power_manageable(dev))
2583 		return 0;
2584 
2585 	/* Don't do the same thing twice in a row for one device. */
2586 	if (!!enable == !!dev->wakeup_prepared)
2587 		return 0;
2588 
2589 	/*
2590 	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2591 	 * Anderson we should be doing PME# wake enable followed by ACPI wake
2592 	 * enable. To disable wake-up we call the platform first, for symmetry.
2593 	 */
2594 
2595 	if (enable) {
2596 		int error;
2597 
2598 		/*
2599 		 * Enable PME signaling if the device can signal PME from
2600 		 * D3cold regardless of whether or not it can signal PME from
2601 		 * the current target state, because that will allow it to
2602 		 * signal PME when the hierarchy above it goes into D3cold and
2603 		 * the device itself ends up in D3cold as a result of that.
2604 */ 2605 if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold)) 2606 pci_pme_active(dev, true); 2607 else 2608 ret = 1; 2609 error = platform_pci_set_wakeup(dev, true); 2610 if (ret) 2611 ret = error; 2612 if (!ret) 2613 dev->wakeup_prepared = true; 2614 } else { 2615 platform_pci_set_wakeup(dev, false); 2616 pci_pme_active(dev, false); 2617 dev->wakeup_prepared = false; 2618 } 2619 2620 return ret; 2621 } 2622 2623 /** 2624 * pci_enable_wake - change wakeup settings for a PCI device 2625 * @pci_dev: Target device 2626 * @state: PCI state from which device will issue wakeup events 2627 * @enable: Whether or not to enable event generation 2628 * 2629 * If @enable is set, check device_may_wakeup() for the device before calling 2630 * __pci_enable_wake() for it. 2631 */ 2632 int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable) 2633 { 2634 if (enable && !device_may_wakeup(&pci_dev->dev)) 2635 return -EINVAL; 2636 2637 return __pci_enable_wake(pci_dev, state, enable); 2638 } 2639 EXPORT_SYMBOL(pci_enable_wake); 2640 2641 /** 2642 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold 2643 * @dev: PCI device to prepare 2644 * @enable: True to enable wake-up event generation; false to disable 2645 * 2646 * Many drivers want the device to wake up the system from D3_hot or D3_cold 2647 * and this function allows them to set that up cleanly - pci_enable_wake() 2648 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI 2649 * ordering constraints. 2650 * 2651 * This function only returns error code if the device is not allowed to wake 2652 * up the system from sleep or it is not capable of generating PME# from both 2653 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it. 2654 */ 2655 int pci_wake_from_d3(struct pci_dev *dev, bool enable) 2656 { 2657 return pci_pme_capable(dev, PCI_D3cold) ? 2658 pci_enable_wake(dev, PCI_D3cold, enable) : 2659 pci_enable_wake(dev, PCI_D3hot, enable); 2660 } 2661 EXPORT_SYMBOL(pci_wake_from_d3); 2662 2663 /** 2664 * pci_target_state - find an appropriate low power state for a given PCI dev 2665 * @dev: PCI device 2666 * @wakeup: Whether or not wakeup functionality will be enabled for the device. 2667 * 2668 * Use underlying platform code to find a supported low power state for @dev. 2669 * If the platform can't manage @dev, return the deepest state from which it 2670 * can generate wake events, based on any available PME info. 2671 */ 2672 static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup) 2673 { 2674 if (platform_pci_power_manageable(dev)) { 2675 /* 2676 * Call the platform to find the target state for the device. 2677 */ 2678 pci_power_t state = platform_pci_choose_state(dev); 2679 2680 switch (state) { 2681 case PCI_POWER_ERROR: 2682 case PCI_UNKNOWN: 2683 return PCI_D3hot; 2684 2685 case PCI_D1: 2686 case PCI_D2: 2687 if (pci_no_d1d2(dev)) 2688 return PCI_D3hot; 2689 } 2690 2691 return state; 2692 } 2693 2694 /* 2695 * If the device is in D3cold even though it's not power-manageable by 2696 * the platform, it may have been powered down by non-standard means. 2697 * Best to let it slumber. 2698 */ 2699 if (dev->current_state == PCI_D3cold) 2700 return PCI_D3cold; 2701 else if (!dev->pm_cap) 2702 return PCI_D0; 2703 2704 if (wakeup && dev->pme_support) { 2705 pci_power_t state = PCI_D3hot; 2706 2707 /* 2708 * Find the deepest state from which the device can generate 2709 * PME#. 
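*
* Worked example: with dev->pme_support == 0x09 (PME# from D0 and
* D3hot only, i.e. bits 0 and 3 set) the loop below stops at
* PCI_D3hot; with pme_support == 0x01 it walks down to 0 and the
* fallback returns PCI_D0.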
2710 */ 2711 while (state && !(dev->pme_support & (1 << state))) 2712 state--; 2713 2714 if (state) 2715 return state; 2716 else if (dev->pme_support & 1) 2717 return PCI_D0; 2718 } 2719 2720 return PCI_D3hot; 2721 } 2722 2723 /** 2724 * pci_prepare_to_sleep - prepare PCI device for system-wide transition 2725 * into a sleep state 2726 * @dev: Device to handle. 2727 * 2728 * Choose the power state appropriate for the device depending on whether 2729 * it can wake up the system and/or is power manageable by the platform 2730 * (PCI_D3hot is the default) and put the device into that state. 2731 */ 2732 int pci_prepare_to_sleep(struct pci_dev *dev) 2733 { 2734 bool wakeup = device_may_wakeup(&dev->dev); 2735 pci_power_t target_state = pci_target_state(dev, wakeup); 2736 int error; 2737 2738 if (target_state == PCI_POWER_ERROR) 2739 return -EIO; 2740 2741 pci_enable_wake(dev, target_state, wakeup); 2742 2743 error = pci_set_power_state(dev, target_state); 2744 2745 if (error) 2746 pci_enable_wake(dev, target_state, false); 2747 2748 return error; 2749 } 2750 EXPORT_SYMBOL(pci_prepare_to_sleep); 2751 2752 /** 2753 * pci_back_from_sleep - turn PCI device on during system-wide transition 2754 * into working state 2755 * @dev: Device to handle. 2756 * 2757 * Disable device's system wake-up capability and put it into D0. 2758 */ 2759 int pci_back_from_sleep(struct pci_dev *dev) 2760 { 2761 int ret = pci_set_power_state(dev, PCI_D0); 2762 2763 if (ret) 2764 return ret; 2765 2766 pci_enable_wake(dev, PCI_D0, false); 2767 return 0; 2768 } 2769 EXPORT_SYMBOL(pci_back_from_sleep); 2770 2771 /** 2772 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend. 2773 * @dev: PCI device being suspended. 2774 * 2775 * Prepare @dev to generate wake-up events at run time and put it into a low 2776 * power state. 2777 */ 2778 int pci_finish_runtime_suspend(struct pci_dev *dev) 2779 { 2780 pci_power_t target_state; 2781 int error; 2782 2783 target_state = pci_target_state(dev, device_can_wakeup(&dev->dev)); 2784 if (target_state == PCI_POWER_ERROR) 2785 return -EIO; 2786 2787 __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev)); 2788 2789 error = pci_set_power_state(dev, target_state); 2790 2791 if (error) 2792 pci_enable_wake(dev, target_state, false); 2793 2794 return error; 2795 } 2796 2797 /** 2798 * pci_dev_run_wake - Check if device can generate run-time wake-up events. 2799 * @dev: Device to check. 2800 * 2801 * Return true if the device itself is capable of generating wake-up events 2802 * (through the platform or using the native PCIe PME) or if the device supports 2803 * PME and one of its upstream bridges can generate wake-up events. 2804 */ 2805 bool pci_dev_run_wake(struct pci_dev *dev) 2806 { 2807 struct pci_bus *bus = dev->bus; 2808 2809 if (!dev->pme_support) 2810 return false; 2811 2812 /* PME-capable in principle, but not from the target power state */ 2813 if (!pci_pme_capable(dev, pci_target_state(dev, true))) 2814 return false; 2815 2816 if (device_can_wakeup(&dev->dev)) 2817 return true; 2818 2819 while (bus->parent) { 2820 struct pci_dev *bridge = bus->self; 2821 2822 if (device_can_wakeup(&bridge->dev)) 2823 return true; 2824 2825 bus = bus->parent; 2826 } 2827 2828 /* We have reached the root bus. */ 2829 if (bus->bridge) 2830 return device_can_wakeup(bus->bridge); 2831 2832 return false; 2833 } 2834 EXPORT_SYMBOL_GPL(pci_dev_run_wake); 2835 2836 /** 2837 * pci_dev_need_resume - Check if it is necessary to resume the device. 2838 * @pci_dev: Device to check. 
2839 * 2840 * Return 'true' if the device is not runtime-suspended or it has to be 2841 * reconfigured due to wakeup settings difference between system and runtime 2842 * suspend, or the current power state of it is not suitable for the upcoming 2843 * (system-wide) transition. 2844 */ 2845 bool pci_dev_need_resume(struct pci_dev *pci_dev) 2846 { 2847 struct device *dev = &pci_dev->dev; 2848 pci_power_t target_state; 2849 2850 if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev)) 2851 return true; 2852 2853 target_state = pci_target_state(pci_dev, device_may_wakeup(dev)); 2854 2855 /* 2856 * If the earlier platform check has not triggered, D3cold is just power 2857 * removal on top of D3hot, so no need to resume the device in that 2858 * case. 2859 */ 2860 return target_state != pci_dev->current_state && 2861 target_state != PCI_D3cold && 2862 pci_dev->current_state != PCI_D3hot; 2863 } 2864 2865 /** 2866 * pci_dev_adjust_pme - Adjust PME setting for a suspended device. 2867 * @pci_dev: Device to check. 2868 * 2869 * If the device is suspended and it is not configured for system wakeup, 2870 * disable PME for it to prevent it from waking up the system unnecessarily. 2871 * 2872 * Note that if the device's power state is D3cold and the platform check in 2873 * pci_dev_need_resume() has not triggered, the device's configuration need not 2874 * be changed. 2875 */ 2876 void pci_dev_adjust_pme(struct pci_dev *pci_dev) 2877 { 2878 struct device *dev = &pci_dev->dev; 2879 2880 spin_lock_irq(&dev->power.lock); 2881 2882 if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) && 2883 pci_dev->current_state < PCI_D3cold) 2884 __pci_pme_active(pci_dev, false); 2885 2886 spin_unlock_irq(&dev->power.lock); 2887 } 2888 2889 /** 2890 * pci_dev_complete_resume - Finalize resume from system sleep for a device. 2891 * @pci_dev: Device to handle. 2892 * 2893 * If the device is runtime suspended and wakeup-capable, enable PME for it as 2894 * it might have been disabled during the prepare phase of system suspend if 2895 * the device was not configured for system wakeup. 2896 */ 2897 void pci_dev_complete_resume(struct pci_dev *pci_dev) 2898 { 2899 struct device *dev = &pci_dev->dev; 2900 2901 if (!pci_dev_run_wake(pci_dev)) 2902 return; 2903 2904 spin_lock_irq(&dev->power.lock); 2905 2906 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold) 2907 __pci_pme_active(pci_dev, true); 2908 2909 spin_unlock_irq(&dev->power.lock); 2910 } 2911 2912 /** 2913 * pci_choose_state - Choose the power state of a PCI device. 2914 * @dev: Target PCI device. 2915 * @state: Target state for the whole system. 2916 * 2917 * Returns PCI power state suitable for @dev and @state. 2918 */ 2919 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) 2920 { 2921 if (state.event == PM_EVENT_ON) 2922 return PCI_D0; 2923 2924 return pci_target_state(dev, false); 2925 } 2926 EXPORT_SYMBOL(pci_choose_state); 2927 2928 void pci_config_pm_runtime_get(struct pci_dev *pdev) 2929 { 2930 struct device *dev = &pdev->dev; 2931 struct device *parent = dev->parent; 2932 2933 if (parent) 2934 pm_runtime_get_sync(parent); 2935 pm_runtime_get_noresume(dev); 2936 /* 2937 * pdev->current_state is set to PCI_D3cold during suspending, 2938 * so wait until suspending completes 2939 */ 2940 pm_runtime_barrier(dev); 2941 /* 2942 * Only need to resume devices in D3cold, because config 2943 * registers are still accessible for devices suspended but 2944 * not in D3cold. 
2945 */ 2946 if (pdev->current_state == PCI_D3cold) 2947 pm_runtime_resume(dev); 2948 } 2949 2950 void pci_config_pm_runtime_put(struct pci_dev *pdev) 2951 { 2952 struct device *dev = &pdev->dev; 2953 struct device *parent = dev->parent; 2954 2955 pm_runtime_put(dev); 2956 if (parent) 2957 pm_runtime_put_sync(parent); 2958 } 2959 2960 static const struct dmi_system_id bridge_d3_blacklist[] = { 2961 #ifdef CONFIG_X86 2962 { 2963 /* 2964 * Gigabyte X299 root port is not marked as hotplug capable 2965 * which allows Linux to power manage it. However, this 2966 * confuses the BIOS SMI handler so don't power manage root 2967 * ports on that system. 2968 */ 2969 .ident = "X299 DESIGNARE EX-CF", 2970 .matches = { 2971 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), 2972 DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"), 2973 }, 2974 }, 2975 { 2976 /* 2977 * Downstream device is not accessible after putting a root port 2978 * into D3cold and back into D0 on Elo Continental Z2 board 2979 */ 2980 .ident = "Elo Continental Z2", 2981 .matches = { 2982 DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"), 2983 DMI_MATCH(DMI_BOARD_NAME, "Geminilake"), 2984 DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"), 2985 }, 2986 }, 2987 #endif 2988 { } 2989 }; 2990 2991 /** 2992 * pci_bridge_d3_possible - Is it possible to put the bridge into D3 2993 * @bridge: Bridge to check 2994 * 2995 * This function checks if it is possible to move the bridge to D3. 2996 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt. 2997 */ 2998 bool pci_bridge_d3_possible(struct pci_dev *bridge) 2999 { 3000 if (!pci_is_pcie(bridge)) 3001 return false; 3002 3003 switch (pci_pcie_type(bridge)) { 3004 case PCI_EXP_TYPE_ROOT_PORT: 3005 case PCI_EXP_TYPE_UPSTREAM: 3006 case PCI_EXP_TYPE_DOWNSTREAM: 3007 if (pci_bridge_d3_disable) 3008 return false; 3009 3010 /* 3011 * Hotplug ports handled by firmware in System Management Mode 3012 * may not be put into D3 by the OS (Thunderbolt on non-Macs). 3013 */ 3014 if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge)) 3015 return false; 3016 3017 if (pci_bridge_d3_force) 3018 return true; 3019 3020 /* Even the oldest 2010 Thunderbolt controller supports D3. */ 3021 if (bridge->is_thunderbolt) 3022 return true; 3023 3024 /* Platform might know better if the bridge supports D3 */ 3025 if (platform_pci_bridge_d3(bridge)) 3026 return true; 3027 3028 /* 3029 * Hotplug ports handled natively by the OS were not validated 3030 * by vendors for runtime D3 at least until 2018 because there 3031 * was no OS support. 3032 */ 3033 if (bridge->is_hotplug_bridge) 3034 return false; 3035 3036 if (dmi_check_system(bridge_d3_blacklist)) 3037 return false; 3038 3039 /* 3040 * It should be safe to put PCIe ports from 2015 or newer 3041 * to D3. 3042 */ 3043 if (dmi_get_bios_year() >= 2015) 3044 return true; 3045 break; 3046 } 3047 3048 return false; 3049 } 3050 3051 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data) 3052 { 3053 bool *d3cold_ok = data; 3054 3055 if (/* The device needs to be allowed to go D3cold ... */ 3056 dev->no_d3cold || !dev->d3cold_allowed || 3057 3058 /* ... and if it is wakeup capable to do so from D3cold. */ 3059 (device_may_wakeup(&dev->dev) && 3060 !pci_pme_capable(dev, PCI_D3cold)) || 3061 3062 /* If it is a bridge it must be allowed to go to D3. 
*/
3063 	    !pci_power_manageable(dev))
3064 
3065 		*d3cold_ok = false;
3066 
3067 	return !*d3cold_ok;
3068 }
3069 
3070 /**
3071 * pci_bridge_d3_update - Update bridge D3 capabilities
3072 * @dev: PCI device which is changed
3073 *
3074 * Update upstream bridge PM capabilities depending on whether the device's
3075 * PM configuration was changed or the device is being removed. The
3076 * change is also propagated upstream.
3077 */
3078 void pci_bridge_d3_update(struct pci_dev *dev)
3079 {
3080 	bool remove = !device_is_registered(&dev->dev);
3081 	struct pci_dev *bridge;
3082 	bool d3cold_ok = true;
3083 
3084 	bridge = pci_upstream_bridge(dev);
3085 	if (!bridge || !pci_bridge_d3_possible(bridge))
3086 		return;
3087 
3088 	/*
3089 	 * If D3 is currently allowed for the bridge, removing one of its
3090 	 * children won't change that.
3091 	 */
3092 	if (remove && bridge->bridge_d3)
3093 		return;
3094 
3095 	/*
3096 	 * If D3 is currently allowed for the bridge and a child is added or
3097 	 * changed, disallowance of D3 can only be caused by that child, so
3098 	 * we only need to check that single device, not any of its siblings.
3099 	 *
3100 	 * If D3 is currently not allowed for the bridge, checking the device
3101 	 * first may allow us to skip checking its siblings.
3102 	 */
3103 	if (!remove)
3104 		pci_dev_check_d3cold(dev, &d3cold_ok);
3105 
3106 	/*
3107 	 * If D3 is currently not allowed for the bridge, this may be caused
3108 	 * either by the device being changed/removed or any of its siblings,
3109 	 * so we need to go through all children to find out if one of them
3110 	 * continues to block D3.
3111 	 */
3112 	if (d3cold_ok && !bridge->bridge_d3)
3113 		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
3114 			     &d3cold_ok);
3115 
3116 	if (bridge->bridge_d3 != d3cold_ok) {
3117 		bridge->bridge_d3 = d3cold_ok;
3118 		/* Propagate change to upstream bridges */
3119 		pci_bridge_d3_update(bridge);
3120 	}
3121 }
3122 
3123 /**
3124 * pci_d3cold_enable - Enable D3cold for device
3125 * @dev: PCI device to handle
3126 *
3127 * This function can be used in drivers to enable D3cold from the device
3128 * they handle. It also updates upstream PCI bridge PM capabilities
3129 * accordingly.
3130 */
3131 void pci_d3cold_enable(struct pci_dev *dev)
3132 {
3133 	if (dev->no_d3cold) {
3134 		dev->no_d3cold = false;
3135 		pci_bridge_d3_update(dev);
3136 	}
3137 }
3138 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
3139 
3140 /**
3141 * pci_d3cold_disable - Disable D3cold for device
3142 * @dev: PCI device to handle
3143 *
3144 * This function can be used in drivers to disable D3cold from the device
3145 * they handle. It also updates upstream PCI bridge PM capabilities
3146 * accordingly.
3147 */
3148 void pci_d3cold_disable(struct pci_dev *dev)
3149 {
3150 	if (!dev->no_d3cold) {
3151 		dev->no_d3cold = true;
3152 		pci_bridge_d3_update(dev);
3153 	}
3154 }
3155 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
3156 
3157 /**
3158 * pci_pm_init - Initialize PM functions of given PCI device
3159 * @dev: PCI device to handle.
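*
* Called by the PCI core during enumeration, before the device is
* bound to a driver, to discover the PM capability and initialize
* runtime PM for the device.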
3160 */ 3161 void pci_pm_init(struct pci_dev *dev) 3162 { 3163 int pm; 3164 u16 status; 3165 u16 pmc; 3166 3167 pm_runtime_forbid(&dev->dev); 3168 pm_runtime_set_active(&dev->dev); 3169 pm_runtime_enable(&dev->dev); 3170 device_enable_async_suspend(&dev->dev); 3171 dev->wakeup_prepared = false; 3172 3173 dev->pm_cap = 0; 3174 dev->pme_support = 0; 3175 3176 /* find PCI PM capability in list */ 3177 pm = pci_find_capability(dev, PCI_CAP_ID_PM); 3178 if (!pm) 3179 return; 3180 /* Check device's ability to generate PME# */ 3181 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc); 3182 3183 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { 3184 pci_err(dev, "unsupported PM cap regs version (%u)\n", 3185 pmc & PCI_PM_CAP_VER_MASK); 3186 return; 3187 } 3188 3189 dev->pm_cap = pm; 3190 dev->d3hot_delay = PCI_PM_D3HOT_WAIT; 3191 dev->d3cold_delay = PCI_PM_D3COLD_WAIT; 3192 dev->bridge_d3 = pci_bridge_d3_possible(dev); 3193 dev->d3cold_allowed = true; 3194 3195 dev->d1_support = false; 3196 dev->d2_support = false; 3197 if (!pci_no_d1d2(dev)) { 3198 if (pmc & PCI_PM_CAP_D1) 3199 dev->d1_support = true; 3200 if (pmc & PCI_PM_CAP_D2) 3201 dev->d2_support = true; 3202 3203 if (dev->d1_support || dev->d2_support) 3204 pci_info(dev, "supports%s%s\n", 3205 dev->d1_support ? " D1" : "", 3206 dev->d2_support ? " D2" : ""); 3207 } 3208 3209 pmc &= PCI_PM_CAP_PME_MASK; 3210 if (pmc) { 3211 pci_info(dev, "PME# supported from%s%s%s%s%s\n", 3212 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", 3213 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", 3214 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", 3215 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "", 3216 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); 3217 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; 3218 dev->pme_poll = true; 3219 /* 3220 * Make device's PM flags reflect the wake-up capability, but 3221 * let the user space enable it to wake up the system as needed. 
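* (User space typically does that by writing "enabled" to the
* device's power/wakeup attribute in sysfs.)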
3222 */ 3223 device_set_wakeup_capable(&dev->dev, true); 3224 /* Disable the PME# generation functionality */ 3225 pci_pme_active(dev, false); 3226 } 3227 3228 pci_read_config_word(dev, PCI_STATUS, &status); 3229 if (status & PCI_STATUS_IMM_READY) 3230 dev->imm_ready = 1; 3231 } 3232 3233 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop) 3234 { 3235 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI; 3236 3237 switch (prop) { 3238 case PCI_EA_P_MEM: 3239 case PCI_EA_P_VF_MEM: 3240 flags |= IORESOURCE_MEM; 3241 break; 3242 case PCI_EA_P_MEM_PREFETCH: 3243 case PCI_EA_P_VF_MEM_PREFETCH: 3244 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; 3245 break; 3246 case PCI_EA_P_IO: 3247 flags |= IORESOURCE_IO; 3248 break; 3249 default: 3250 return 0; 3251 } 3252 3253 return flags; 3254 } 3255 3256 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei, 3257 u8 prop) 3258 { 3259 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO) 3260 return &dev->resource[bei]; 3261 #ifdef CONFIG_PCI_IOV 3262 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 && 3263 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH)) 3264 return &dev->resource[PCI_IOV_RESOURCES + 3265 bei - PCI_EA_BEI_VF_BAR0]; 3266 #endif 3267 else if (bei == PCI_EA_BEI_ROM) 3268 return &dev->resource[PCI_ROM_RESOURCE]; 3269 else 3270 return NULL; 3271 } 3272 3273 /* Read an Enhanced Allocation (EA) entry */ 3274 static int pci_ea_read(struct pci_dev *dev, int offset) 3275 { 3276 struct resource *res; 3277 int ent_size, ent_offset = offset; 3278 resource_size_t start, end; 3279 unsigned long flags; 3280 u32 dw0, bei, base, max_offset; 3281 u8 prop; 3282 bool support_64 = (sizeof(resource_size_t) >= 8); 3283 3284 pci_read_config_dword(dev, ent_offset, &dw0); 3285 ent_offset += 4; 3286 3287 /* Entry size field indicates DWORDs after 1st */ 3288 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2; 3289 3290 if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */ 3291 goto out; 3292 3293 bei = (dw0 & PCI_EA_BEI) >> 4; 3294 prop = (dw0 & PCI_EA_PP) >> 8; 3295 3296 /* 3297 * If the Property is in the reserved range, try the Secondary 3298 * Property instead. 
3299 */ 3300 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED) 3301 prop = (dw0 & PCI_EA_SP) >> 16; 3302 if (prop > PCI_EA_P_BRIDGE_IO) 3303 goto out; 3304 3305 res = pci_ea_get_resource(dev, bei, prop); 3306 if (!res) { 3307 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei); 3308 goto out; 3309 } 3310 3311 flags = pci_ea_flags(dev, prop); 3312 if (!flags) { 3313 pci_err(dev, "Unsupported EA properties: %#x\n", prop); 3314 goto out; 3315 } 3316 3317 /* Read Base */ 3318 pci_read_config_dword(dev, ent_offset, &base); 3319 start = (base & PCI_EA_FIELD_MASK); 3320 ent_offset += 4; 3321 3322 /* Read MaxOffset */ 3323 pci_read_config_dword(dev, ent_offset, &max_offset); 3324 ent_offset += 4; 3325 3326 /* Read Base MSBs (if 64-bit entry) */ 3327 if (base & PCI_EA_IS_64) { 3328 u32 base_upper; 3329 3330 pci_read_config_dword(dev, ent_offset, &base_upper); 3331 ent_offset += 4; 3332 3333 flags |= IORESOURCE_MEM_64; 3334 3335 /* entry starts above 32-bit boundary, can't use */ 3336 if (!support_64 && base_upper) 3337 goto out; 3338 3339 if (support_64) 3340 start |= ((u64)base_upper << 32); 3341 } 3342 3343 end = start + (max_offset | 0x03); 3344 3345 /* Read MaxOffset MSBs (if 64-bit entry) */ 3346 if (max_offset & PCI_EA_IS_64) { 3347 u32 max_offset_upper; 3348 3349 pci_read_config_dword(dev, ent_offset, &max_offset_upper); 3350 ent_offset += 4; 3351 3352 flags |= IORESOURCE_MEM_64; 3353 3354 /* entry too big, can't use */ 3355 if (!support_64 && max_offset_upper) 3356 goto out; 3357 3358 if (support_64) 3359 end += ((u64)max_offset_upper << 32); 3360 } 3361 3362 if (end < start) { 3363 pci_err(dev, "EA Entry crosses address boundary\n"); 3364 goto out; 3365 } 3366 3367 if (ent_size != ent_offset - offset) { 3368 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n", 3369 ent_size, ent_offset - offset); 3370 goto out; 3371 } 3372 3373 res->name = pci_name(dev); 3374 res->start = start; 3375 res->end = end; 3376 res->flags = flags; 3377 3378 if (bei <= PCI_EA_BEI_BAR5) 3379 pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", 3380 bei, res, prop); 3381 else if (bei == PCI_EA_BEI_ROM) 3382 pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n", 3383 res, prop); 3384 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5) 3385 pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", 3386 bei - PCI_EA_BEI_VF_BAR0, res, prop); 3387 else 3388 pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n", 3389 bei, res, prop); 3390 3391 out: 3392 return offset + ent_size; 3393 } 3394 3395 /* Enhanced Allocation Initialization */ 3396 void pci_ea_init(struct pci_dev *dev) 3397 { 3398 int ea; 3399 u8 num_ent; 3400 int offset; 3401 int i; 3402 3403 /* find PCI EA capability in list */ 3404 ea = pci_find_capability(dev, PCI_CAP_ID_EA); 3405 if (!ea) 3406 return; 3407 3408 /* determine the number of entries */ 3409 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT, 3410 &num_ent); 3411 num_ent &= PCI_EA_NUM_ENT_MASK; 3412 3413 offset = ea + PCI_EA_FIRST_ENT; 3414 3415 /* Skip DWORD 2 for type 1 functions */ 3416 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) 3417 offset += 4; 3418 3419 /* parse each EA entry */ 3420 for (i = 0; i < num_ent; ++i) 3421 offset = pci_ea_read(dev, offset); 3422 } 3423 3424 static void pci_add_saved_cap(struct pci_dev *pci_dev, 3425 struct pci_cap_saved_state *new_cap) 3426 { 3427 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space); 3428 } 3429 
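/*
 * Illustrative note: the save-buffer helpers below preallocate, at
 * enumeration time, the memory that a later save/restore cycle will
 * reuse, so the suspend path never has to allocate. For example,
 * pci_allocate_cap_save_buffers() issues calls such as:
 *
 *	pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
 *				PCI_EXP_SAVE_REGS * sizeof(u16));
 */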
3430 /** 3431 * _pci_add_cap_save_buffer - allocate buffer for saving given 3432 * capability registers 3433 * @dev: the PCI device 3434 * @cap: the capability to allocate the buffer for 3435 * @extended: Standard or Extended capability ID 3436 * @size: requested size of the buffer 3437 */ 3438 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap, 3439 bool extended, unsigned int size) 3440 { 3441 int pos; 3442 struct pci_cap_saved_state *save_state; 3443 3444 if (extended) 3445 pos = pci_find_ext_capability(dev, cap); 3446 else 3447 pos = pci_find_capability(dev, cap); 3448 3449 if (!pos) 3450 return 0; 3451 3452 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL); 3453 if (!save_state) 3454 return -ENOMEM; 3455 3456 save_state->cap.cap_nr = cap; 3457 save_state->cap.cap_extended = extended; 3458 save_state->cap.size = size; 3459 pci_add_saved_cap(dev, save_state); 3460 3461 return 0; 3462 } 3463 3464 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size) 3465 { 3466 return _pci_add_cap_save_buffer(dev, cap, false, size); 3467 } 3468 3469 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size) 3470 { 3471 return _pci_add_cap_save_buffer(dev, cap, true, size); 3472 } 3473 3474 /** 3475 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities 3476 * @dev: the PCI device 3477 */ 3478 void pci_allocate_cap_save_buffers(struct pci_dev *dev) 3479 { 3480 int error; 3481 3482 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 3483 PCI_EXP_SAVE_REGS * sizeof(u16)); 3484 if (error) 3485 pci_err(dev, "unable to preallocate PCI Express save buffer\n"); 3486 3487 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16)); 3488 if (error) 3489 pci_err(dev, "unable to preallocate PCI-X save buffer\n"); 3490 3491 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR, 3492 2 * sizeof(u16)); 3493 if (error) 3494 pci_err(dev, "unable to allocate suspend buffer for LTR\n"); 3495 3496 pci_allocate_vc_save_buffers(dev); 3497 } 3498 3499 void pci_free_cap_save_buffers(struct pci_dev *dev) 3500 { 3501 struct pci_cap_saved_state *tmp; 3502 struct hlist_node *n; 3503 3504 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next) 3505 kfree(tmp); 3506 } 3507 3508 /** 3509 * pci_configure_ari - enable or disable ARI forwarding 3510 * @dev: the PCI device 3511 * 3512 * If @dev and its upstream bridge both support ARI, enable ARI in the 3513 * bridge. Otherwise, disable ARI in the bridge. 3514 */ 3515 void pci_configure_ari(struct pci_dev *dev) 3516 { 3517 u32 cap; 3518 struct pci_dev *bridge; 3519 3520 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn) 3521 return; 3522 3523 bridge = dev->bus->self; 3524 if (!bridge) 3525 return; 3526 3527 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap); 3528 if (!(cap & PCI_EXP_DEVCAP2_ARI)) 3529 return; 3530 3531 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) { 3532 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, 3533 PCI_EXP_DEVCTL2_ARI); 3534 bridge->ari_enabled = 1; 3535 } else { 3536 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2, 3537 PCI_EXP_DEVCTL2_ARI); 3538 bridge->ari_enabled = 0; 3539 } 3540 } 3541 3542 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags) 3543 { 3544 int pos; 3545 u16 cap, ctrl; 3546 3547 pos = pdev->acs_cap; 3548 if (!pos) 3549 return false; 3550 3551 /* 3552 * Except for egress control, capabilities are either required 3553 * or only required if controllable. 
Features missing from the 3554 * capability field can therefore be assumed as hard-wired enabled. 3555 */ 3556 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap); 3557 acs_flags &= (cap | PCI_ACS_EC); 3558 3559 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl); 3560 return (ctrl & acs_flags) == acs_flags; 3561 } 3562 3563 /** 3564 * pci_acs_enabled - test ACS against required flags for a given device 3565 * @pdev: device to test 3566 * @acs_flags: required PCI ACS flags 3567 * 3568 * Return true if the device supports the provided flags. Automatically 3569 * filters out flags that are not implemented on multifunction devices. 3570 * 3571 * Note that this interface checks the effective ACS capabilities of the 3572 * device rather than the actual capabilities. For instance, most single 3573 * function endpoints are not required to support ACS because they have no 3574 * opportunity for peer-to-peer access. We therefore return 'true' 3575 * regardless of whether the device exposes an ACS capability. This makes 3576 * it much easier for callers of this function to ignore the actual type 3577 * or topology of the device when testing ACS support. 3578 */ 3579 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) 3580 { 3581 int ret; 3582 3583 ret = pci_dev_specific_acs_enabled(pdev, acs_flags); 3584 if (ret >= 0) 3585 return ret > 0; 3586 3587 /* 3588 * Conventional PCI and PCI-X devices never support ACS, either 3589 * effectively or actually. The shared bus topology implies that 3590 * any device on the bus can receive or snoop DMA. 3591 */ 3592 if (!pci_is_pcie(pdev)) 3593 return false; 3594 3595 switch (pci_pcie_type(pdev)) { 3596 /* 3597 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec, 3598 * but since their primary interface is PCI/X, we conservatively 3599 * handle them as we would a non-PCIe device. 3600 */ 3601 case PCI_EXP_TYPE_PCIE_BRIDGE: 3602 /* 3603 * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never 3604 * applicable... must never implement an ACS Extended Capability...". 3605 * This seems arbitrary, but we take a conservative interpretation 3606 * of this statement. 3607 */ 3608 case PCI_EXP_TYPE_PCI_BRIDGE: 3609 case PCI_EXP_TYPE_RC_EC: 3610 return false; 3611 /* 3612 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should 3613 * implement ACS in order to indicate their peer-to-peer capabilities, 3614 * regardless of whether they are single- or multi-function devices. 3615 */ 3616 case PCI_EXP_TYPE_DOWNSTREAM: 3617 case PCI_EXP_TYPE_ROOT_PORT: 3618 return pci_acs_flags_enabled(pdev, acs_flags); 3619 /* 3620 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be 3621 * implemented by the remaining PCIe types to indicate peer-to-peer 3622 * capabilities, but only when they are part of a multifunction 3623 * device. The footnote for section 6.12 indicates the specific 3624 * PCIe types included here. 3625 */ 3626 case PCI_EXP_TYPE_ENDPOINT: 3627 case PCI_EXP_TYPE_UPSTREAM: 3628 case PCI_EXP_TYPE_LEG_END: 3629 case PCI_EXP_TYPE_RC_END: 3630 if (!pdev->multifunction) 3631 break; 3632 3633 return pci_acs_flags_enabled(pdev, acs_flags); 3634 } 3635 3636 /* 3637 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable 3638 * to single function devices with the exception of downstream ports. 
3639 */
3640 	return true;
3641 }
3642 
3643 /**
3644 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3645 * @start: starting downstream device
3646 * @end: ending upstream device or NULL to search to the root bus
3647 * @acs_flags: required flags
3648 *
3649 * Walk up a device tree from start to end testing PCI ACS support. If
3650 * any step along the way does not support the required flags, return false.
3651 */
3652 bool pci_acs_path_enabled(struct pci_dev *start,
3653 			  struct pci_dev *end, u16 acs_flags)
3654 {
3655 	struct pci_dev *pdev, *parent = start;
3656 
3657 	do {
3658 		pdev = parent;
3659 
3660 		if (!pci_acs_enabled(pdev, acs_flags))
3661 			return false;
3662 
3663 		if (pci_is_root_bus(pdev->bus))
3664 			return (end == NULL);
3665 
3666 		parent = pdev->bus->self;
3667 	} while (pdev != end);
3668 
3669 	return true;
3670 }
3671 
3672 /**
3673 * pci_acs_init - Initialize ACS if hardware supports it
3674 * @dev: the PCI device
3675 */
3676 void pci_acs_init(struct pci_dev *dev)
3677 {
3678 	dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3679 
3680 	/*
3681 	 * Attempt to enable ACS regardless of capability because some Root
3682 	 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
3683 	 * the standard ACS capability but still support ACS via those
3684 	 * quirks.
3685 	 */
3686 	pci_enable_acs(dev);
3687 }
3688 
3689 /**
3690 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3691 * @pdev: PCI device
3692 * @bar: BAR to find
3693 *
3694 * Helper to find the position of the ctrl register for a BAR.
3695 * Returns -ENOTSUPP if resizable BARs are not supported at all.
3696 * Returns -ENOENT if no ctrl register for the BAR could be found.
3697 */
3698 static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3699 {
3700 	unsigned int pos, nbars, i;
3701 	u32 ctrl;
3702 
3703 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3704 	if (!pos)
3705 		return -ENOTSUPP;
3706 
3707 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3708 	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3709 		PCI_REBAR_CTRL_NBAR_SHIFT;
3710 
3711 	for (i = 0; i < nbars; i++, pos += 8) {
3712 		int bar_idx;
3713 
3714 		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3715 		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3716 		if (bar_idx == bar)
3717 			return pos;
3718 	}
3719 
3720 	return -ENOENT;
3721 }
3722 
3723 /**
3724 * pci_rebar_get_possible_sizes - get possible sizes for BAR
3725 * @pdev: PCI device
3726 * @bar: BAR to query
3727 *
3728 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
3729 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3730 */
3731 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3732 {
3733 	int pos;
3734 	u32 cap;
3735 
3736 	pos = pci_rebar_find_pos(pdev, bar);
3737 	if (pos < 0)
3738 		return 0;
3739 
3740 	pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3741 	cap &= PCI_REBAR_CAP_SIZES;
3742 
3743 	/* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
3744 	if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3745 	    bar == 0 && cap == 0x7000)
3746 		cap = 0x3f000;
3747 
3748 	return cap >> 4;
3749 }
3750 EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
3751 
3752 /**
3753 * pci_rebar_get_current_size - get the current size of a BAR
3754 * @pdev: PCI device
3755 * @bar: BAR to query
3756 *
3757 * Read the size of a BAR from the resizable BAR config.
3758 * Returns size if found or negative error code.
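*
* The value uses the spec encoding: the BAR size in bytes is
* (1 MB << size), so 0 means 1 MB and 19 means 512 GB.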
3759 */ 3760 int pci_rebar_get_current_size(struct pci_dev *pdev, int bar) 3761 { 3762 int pos; 3763 u32 ctrl; 3764 3765 pos = pci_rebar_find_pos(pdev, bar); 3766 if (pos < 0) 3767 return pos; 3768 3769 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl); 3770 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT; 3771 } 3772 3773 /** 3774 * pci_rebar_set_size - set a new size for a BAR 3775 * @pdev: PCI device 3776 * @bar: BAR to set size to 3777 * @size: new size as defined in the spec (0=1MB, 19=512GB) 3778 * 3779 * Set the new size of a BAR as defined in the spec. 3780 * Returns zero if resizing was successful, error code otherwise. 3781 */ 3782 int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size) 3783 { 3784 int pos; 3785 u32 ctrl; 3786 3787 pos = pci_rebar_find_pos(pdev, bar); 3788 if (pos < 0) 3789 return pos; 3790 3791 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl); 3792 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE; 3793 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT; 3794 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl); 3795 return 0; 3796 } 3797 3798 /** 3799 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port 3800 * @dev: the PCI device 3801 * @cap_mask: mask of desired AtomicOp sizes, including one or more of: 3802 * PCI_EXP_DEVCAP2_ATOMIC_COMP32 3803 * PCI_EXP_DEVCAP2_ATOMIC_COMP64 3804 * PCI_EXP_DEVCAP2_ATOMIC_COMP128 3805 * 3806 * Return 0 if all upstream bridges support AtomicOp routing, egress 3807 * blocking is disabled on all upstream ports, and the root port supports 3808 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit 3809 * AtomicOp completion), or negative otherwise. 3810 */ 3811 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask) 3812 { 3813 struct pci_bus *bus = dev->bus; 3814 struct pci_dev *bridge; 3815 u32 cap, ctl2; 3816 3817 /* 3818 * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit 3819 * in Device Control 2 is reserved in VFs and the PF value applies 3820 * to all associated VFs. 3821 */ 3822 if (dev->is_virtfn) 3823 return -EINVAL; 3824 3825 if (!pci_is_pcie(dev)) 3826 return -EINVAL; 3827 3828 /* 3829 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be 3830 * AtomicOp requesters. For now, we only support endpoints as 3831 * requesters and root ports as completers. No endpoints as 3832 * completers, and no peer-to-peer. 
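*
* A typical call from an endpoint driver that needs 64-bit AtomicOp
* completion at the Root Port might look like (illustrative):
*
*	if (pci_enable_atomic_ops_to_root(pdev,
*					  PCI_EXP_DEVCAP2_ATOMIC_COMP64))
*		pci_warn(pdev, "AtomicOps to root port not available\n");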
3833 */
3834 
3835 	switch (pci_pcie_type(dev)) {
3836 	case PCI_EXP_TYPE_ENDPOINT:
3837 	case PCI_EXP_TYPE_LEG_END:
3838 	case PCI_EXP_TYPE_RC_END:
3839 		break;
3840 	default:
3841 		return -EINVAL;
3842 	}
3843 
3844 	while (bus->parent) {
3845 		bridge = bus->self;
3846 
3847 		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3848 
3849 		switch (pci_pcie_type(bridge)) {
3850 		/* Ensure switch ports support AtomicOp routing */
3851 		case PCI_EXP_TYPE_UPSTREAM:
3852 		case PCI_EXP_TYPE_DOWNSTREAM:
3853 			if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3854 				return -EINVAL;
3855 			break;
3856 
3857 		/* Ensure root port supports all the sizes we care about */
3858 		case PCI_EXP_TYPE_ROOT_PORT:
3859 			if ((cap & cap_mask) != cap_mask)
3860 				return -EINVAL;
3861 			break;
3862 		}
3863 
3864 		/* Ensure upstream ports don't block AtomicOps on egress */
3865 		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3866 			pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3867 						   &ctl2);
3868 			if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3869 				return -EINVAL;
3870 		}
3871 
3872 		bus = bus->parent;
3873 	}
3874 
3875 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3876 				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3877 	return 0;
3878 }
3879 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
3880 
3881 /**
3882 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
3883 * @dev: the PCI device
3884 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3885 *
3886 * Perform INTx swizzling for a device behind one level of bridge. This is
3887 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
3888 * behind bridges on add-in cards. For devices with ARI enabled, the slot
3889 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
3890 * the PCI Express Base Specification, Revision 2.1)
*
* For example, INTB (pin 2) on a device in slot 2 swizzles to
* ((2 - 1) + 2) % 4 + 1 = 4, i.e. INTD at the upstream bridge.
3891 */
3892 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3893 {
3894 	int slot;
3895 
3896 	if (pci_ari_enabled(dev->bus))
3897 		slot = 0;
3898 	else
3899 		slot = PCI_SLOT(dev->devfn);
3900 
3901 	return (((pin - 1) + slot) % 4) + 1;
3902 }
3903 
3904 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3905 {
3906 	u8 pin;
3907 
3908 	pin = dev->pin;
3909 	if (!pin)
3910 		return -1;
3911 
3912 	while (!pci_is_root_bus(dev->bus)) {
3913 		pin = pci_swizzle_interrupt_pin(dev, pin);
3914 		dev = dev->bus->self;
3915 	}
3916 	*bridge = dev;
3917 	return pin;
3918 }
3919 
3920 /**
3921 * pci_common_swizzle - swizzle INTx all the way to root bridge
3922 * @dev: the PCI device
3923 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3924 *
3925 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
3926 * bridges all the way up to a PCI root bus.
3927 */
3928 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3929 {
3930 	u8 pin = *pinp;
3931 
3932 	while (!pci_is_root_bus(dev->bus)) {
3933 		pin = pci_swizzle_interrupt_pin(dev, pin);
3934 		dev = dev->bus->self;
3935 	}
3936 	*pinp = pin;
3937 	return PCI_SLOT(dev->devfn);
3938 }
3939 EXPORT_SYMBOL_GPL(pci_common_swizzle);
3940 
3941 /**
3942 * pci_release_region - Release a PCI BAR
3943 * @pdev: PCI device whose resources were previously reserved by
3944 *	  pci_request_region()
3945 * @bar: BAR to release
3946 *
3947 * Releases the PCI I/O and memory resources previously reserved by a
3948 * successful call to pci_request_region(). Call this function only
3949 * after all use of the PCI regions has ceased.
3950 */ 3951 void pci_release_region(struct pci_dev *pdev, int bar) 3952 { 3953 struct pci_devres *dr; 3954 3955 if (pci_resource_len(pdev, bar) == 0) 3956 return; 3957 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) 3958 release_region(pci_resource_start(pdev, bar), 3959 pci_resource_len(pdev, bar)); 3960 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) 3961 release_mem_region(pci_resource_start(pdev, bar), 3962 pci_resource_len(pdev, bar)); 3963 3964 dr = find_pci_dr(pdev); 3965 if (dr) 3966 dr->region_mask &= ~(1 << bar); 3967 } 3968 EXPORT_SYMBOL(pci_release_region); 3969 3970 /** 3971 * __pci_request_region - Reserve PCI I/O and memory resource 3972 * @pdev: PCI device whose resources are to be reserved 3973 * @bar: BAR to be reserved 3974 * @res_name: Name to be associated with resource. 3975 * @exclusive: whether the region access is exclusive or not 3976 * 3977 * Mark the PCI region associated with PCI device @pdev BAR @bar as 3978 * being reserved by owner @res_name. Do not access any 3979 * address inside the PCI regions unless this call returns 3980 * successfully. 3981 * 3982 * If @exclusive is set, then the region is marked so that userspace 3983 * is explicitly not allowed to map the resource via /dev/mem or 3984 * sysfs MMIO access. 3985 * 3986 * Returns 0 on success, or %EBUSY on error. A warning 3987 * message is also printed on failure. 3988 */ 3989 static int __pci_request_region(struct pci_dev *pdev, int bar, 3990 const char *res_name, int exclusive) 3991 { 3992 struct pci_devres *dr; 3993 3994 if (pci_resource_len(pdev, bar) == 0) 3995 return 0; 3996 3997 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) { 3998 if (!request_region(pci_resource_start(pdev, bar), 3999 pci_resource_len(pdev, bar), res_name)) 4000 goto err_out; 4001 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { 4002 if (!__request_mem_region(pci_resource_start(pdev, bar), 4003 pci_resource_len(pdev, bar), res_name, 4004 exclusive)) 4005 goto err_out; 4006 } 4007 4008 dr = find_pci_dr(pdev); 4009 if (dr) 4010 dr->region_mask |= 1 << bar; 4011 4012 return 0; 4013 4014 err_out: 4015 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar, 4016 &pdev->resource[bar]); 4017 return -EBUSY; 4018 } 4019 4020 /** 4021 * pci_request_region - Reserve PCI I/O and memory resource 4022 * @pdev: PCI device whose resources are to be reserved 4023 * @bar: BAR to be reserved 4024 * @res_name: Name to be associated with resource 4025 * 4026 * Mark the PCI region associated with PCI device @pdev BAR @bar as 4027 * being reserved by owner @res_name. Do not access any 4028 * address inside the PCI regions unless this call returns 4029 * successfully. 4030 * 4031 * Returns 0 on success, or %EBUSY on error. A warning 4032 * message is also printed on failure. 4033 */ 4034 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) 4035 { 4036 return __pci_request_region(pdev, bar, res_name, 0); 4037 } 4038 EXPORT_SYMBOL(pci_request_region); 4039 4040 /** 4041 * pci_release_selected_regions - Release selected PCI I/O and memory resources 4042 * @pdev: PCI device whose resources were previously reserved 4043 * @bars: Bitmask of BARs to be released 4044 * 4045 * Release selected PCI I/O and memory resources previously reserved. 4046 * Call this function only after all use of the PCI regions has ceased.
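 *
 * The @bars argument is a bitmask with one bit per BAR index. A hedged
 * sketch that releases only BARs 0 and 2 (indices chosen purely for
 * illustration)::
 *
 *	pci_release_selected_regions(pdev, BIT(0) | BIT(2));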
4047 */ 4048 void pci_release_selected_regions(struct pci_dev *pdev, int bars) 4049 { 4050 int i; 4051 4052 for (i = 0; i < PCI_STD_NUM_BARS; i++) 4053 if (bars & (1 << i)) 4054 pci_release_region(pdev, i); 4055 } 4056 EXPORT_SYMBOL(pci_release_selected_regions); 4057 4058 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars, 4059 const char *res_name, int excl) 4060 { 4061 int i; 4062 4063 for (i = 0; i < PCI_STD_NUM_BARS; i++) 4064 if (bars & (1 << i)) 4065 if (__pci_request_region(pdev, i, res_name, excl)) 4066 goto err_out; 4067 return 0; 4068 4069 err_out: 4070 while (--i >= 0) 4071 if (bars & (1 << i)) 4072 pci_release_region(pdev, i); 4073 4074 return -EBUSY; 4075 } 4076 4077 4078 /** 4079 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources 4080 * @pdev: PCI device whose resources are to be reserved 4081 * @bars: Bitmask of BARs to be requested 4082 * @res_name: Name to be associated with resource 4083 */ 4084 int pci_request_selected_regions(struct pci_dev *pdev, int bars, 4085 const char *res_name) 4086 { 4087 return __pci_request_selected_regions(pdev, bars, res_name, 0); 4088 } 4089 EXPORT_SYMBOL(pci_request_selected_regions); 4090 4091 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars, 4092 const char *res_name) 4093 { 4094 return __pci_request_selected_regions(pdev, bars, res_name, 4095 IORESOURCE_EXCLUSIVE); 4096 } 4097 EXPORT_SYMBOL(pci_request_selected_regions_exclusive); 4098 4099 /** 4100 * pci_release_regions - Release reserved PCI I/O and memory resources 4101 * @pdev: PCI device whose resources were previously reserved by 4102 * pci_request_regions() 4103 * 4104 * Releases all PCI I/O and memory resources previously reserved by a 4105 * successful call to pci_request_regions(). Call this function only 4106 * after all use of the PCI regions has ceased. 4107 */ 4108 4109 void pci_release_regions(struct pci_dev *pdev) 4110 { 4111 pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1); 4112 } 4113 EXPORT_SYMBOL(pci_release_regions); 4114 4115 /** 4116 * pci_request_regions - Reserve PCI I/O and memory resources 4117 * @pdev: PCI device whose resources are to be reserved 4118 * @res_name: Name to be associated with resource. 4119 * 4120 * Mark all PCI regions associated with PCI device @pdev as 4121 * being reserved by owner @res_name. Do not access any 4122 * address inside the PCI regions unless this call returns 4123 * successfully. 4124 * 4125 * Returns 0 on success, or %EBUSY on error. A warning 4126 * message is also printed on failure. 4127 */ 4128 int pci_request_regions(struct pci_dev *pdev, const char *res_name) 4129 { 4130 return pci_request_selected_regions(pdev, 4131 ((1 << PCI_STD_NUM_BARS) - 1), res_name); 4132 } 4133 EXPORT_SYMBOL(pci_request_regions); 4134 4135 /** 4136 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources 4137 * @pdev: PCI device whose resources are to be reserved 4138 * @res_name: Name to be associated with resource. 4139 * 4140 * Mark all PCI regions associated with PCI device @pdev as being reserved 4141 * by owner @res_name. Do not access any address inside the PCI regions 4142 * unless this call returns successfully. 4143 * 4144 * pci_request_regions_exclusive() will mark the region so that /dev/mem 4145 * and the sysfs MMIO access will not be allowed. 4146 * 4147 * Returns 0 on success, or %EBUSY on error. A warning message is also 4148 * printed on failure. 
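 *
 * A possible probe-path sketch (the owner string is an arbitrary
 * placeholder)::
 *
 *	rc = pci_request_regions_exclusive(pdev, "example_drv");
 *	if (rc)
 *		return rc;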
4149 */ 4150 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name) 4151 { 4152 return pci_request_selected_regions_exclusive(pdev, 4153 ((1 << PCI_STD_NUM_BARS) - 1), res_name); 4154 } 4155 EXPORT_SYMBOL(pci_request_regions_exclusive); 4156 4157 /* 4158 * Record the PCI IO range (expressed as CPU physical address + size). 4159 * Return a negative value if an error has occurred, zero otherwise 4160 */ 4161 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr, 4162 resource_size_t size) 4163 { 4164 int ret = 0; 4165 #ifdef PCI_IOBASE 4166 struct logic_pio_hwaddr *range; 4167 4168 if (!size || addr + size < addr) 4169 return -EINVAL; 4170 4171 range = kzalloc(sizeof(*range), GFP_ATOMIC); 4172 if (!range) 4173 return -ENOMEM; 4174 4175 range->fwnode = fwnode; 4176 range->size = size; 4177 range->hw_start = addr; 4178 range->flags = LOGIC_PIO_CPU_MMIO; 4179 4180 ret = logic_pio_register_range(range); 4181 if (ret) 4182 kfree(range); 4183 4184 /* Ignore duplicates due to deferred probing */ 4185 if (ret == -EEXIST) 4186 ret = 0; 4187 #endif 4188 4189 return ret; 4190 } 4191 4192 phys_addr_t pci_pio_to_address(unsigned long pio) 4193 { 4194 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR; 4195 4196 #ifdef PCI_IOBASE 4197 if (pio >= MMIO_UPPER_LIMIT) 4198 return address; 4199 4200 address = logic_pio_to_hwaddr(pio); 4201 #endif 4202 4203 return address; 4204 } 4205 EXPORT_SYMBOL_GPL(pci_pio_to_address); 4206 4207 unsigned long __weak pci_address_to_pio(phys_addr_t address) 4208 { 4209 #ifdef PCI_IOBASE 4210 return logic_pio_trans_cpuaddr(address); 4211 #else 4212 if (address > IO_SPACE_LIMIT) 4213 return (unsigned long)-1; 4214 4215 return (unsigned long) address; 4216 #endif 4217 } 4218 4219 /** 4220 * pci_remap_iospace - Remap the memory mapped I/O space 4221 * @res: Resource describing the I/O space 4222 * @phys_addr: physical address of range to be mapped 4223 * 4224 * Remap the memory mapped I/O space described by the @res and the CPU 4225 * physical address @phys_addr into virtual address space. Only 4226 * architectures that have memory mapped IO functions defined (and the 4227 * PCI_IOBASE value defined) should call this function. 4228 */ 4229 #ifndef pci_remap_iospace 4230 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) 4231 { 4232 #if defined(PCI_IOBASE) && defined(CONFIG_MMU) 4233 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; 4234 4235 if (!(res->flags & IORESOURCE_IO)) 4236 return -EINVAL; 4237 4238 if (res->end > IO_SPACE_LIMIT) 4239 return -EINVAL; 4240 4241 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr, 4242 pgprot_device(PAGE_KERNEL)); 4243 #else 4244 /* 4245 * This architecture does not have memory mapped I/O space, 4246 * so this function should never be called 4247 */ 4248 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n"); 4249 return -ENODEV; 4250 #endif 4251 } 4252 EXPORT_SYMBOL(pci_remap_iospace); 4253 #endif 4254 4255 /** 4256 * pci_unmap_iospace - Unmap the memory mapped I/O space 4257 * @res: resource to be unmapped 4258 * 4259 * Unmap the CPU virtual address @res from virtual address space. Only 4260 * architectures that have memory mapped IO functions defined (and the 4261 * PCI_IOBASE value defined) should call this function. 
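 *
 * A hedged sketch of the expected pairing with pci_remap_iospace(), where
 * @res describes the I/O window and phys_addr stands in for the CPU
 * physical address backing it (both placeholders)::
 *
 *	err = pci_remap_iospace(res, phys_addr);
 *	if (err)
 *		return err;
 *	...
 *	pci_unmap_iospace(res);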
4262 */ 4263 void pci_unmap_iospace(struct resource *res) 4264 { 4265 #if defined(PCI_IOBASE) && defined(CONFIG_MMU) 4266 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; 4267 4268 vunmap_range(vaddr, vaddr + resource_size(res)); 4269 #endif 4270 } 4271 EXPORT_SYMBOL(pci_unmap_iospace); 4272 4273 static void devm_pci_unmap_iospace(struct device *dev, void *ptr) 4274 { 4275 struct resource **res = ptr; 4276 4277 pci_unmap_iospace(*res); 4278 } 4279 4280 /** 4281 * devm_pci_remap_iospace - Managed pci_remap_iospace() 4282 * @dev: Generic device to remap IO address for 4283 * @res: Resource describing the I/O space 4284 * @phys_addr: physical address of range to be mapped 4285 * 4286 * Managed pci_remap_iospace(). Map is automatically unmapped on driver 4287 * detach. 4288 */ 4289 int devm_pci_remap_iospace(struct device *dev, const struct resource *res, 4290 phys_addr_t phys_addr) 4291 { 4292 const struct resource **ptr; 4293 int error; 4294 4295 ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL); 4296 if (!ptr) 4297 return -ENOMEM; 4298 4299 error = pci_remap_iospace(res, phys_addr); 4300 if (error) { 4301 devres_free(ptr); 4302 } else { 4303 *ptr = res; 4304 devres_add(dev, ptr); 4305 } 4306 4307 return error; 4308 } 4309 EXPORT_SYMBOL(devm_pci_remap_iospace); 4310 4311 /** 4312 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace() 4313 * @dev: Generic device to remap IO address for 4314 * @offset: Resource address to map 4315 * @size: Size of map 4316 * 4317 * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver 4318 * detach. 4319 */ 4320 void __iomem *devm_pci_remap_cfgspace(struct device *dev, 4321 resource_size_t offset, 4322 resource_size_t size) 4323 { 4324 void __iomem **ptr, *addr; 4325 4326 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL); 4327 if (!ptr) 4328 return NULL; 4329 4330 addr = pci_remap_cfgspace(offset, size); 4331 if (addr) { 4332 *ptr = addr; 4333 devres_add(dev, ptr); 4334 } else 4335 devres_free(ptr); 4336 4337 return addr; 4338 } 4339 EXPORT_SYMBOL(devm_pci_remap_cfgspace); 4340 4341 /** 4342 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource 4343 * @dev: generic device to handle the resource for 4344 * @res: configuration space resource to be handled 4345 * 4346 * Checks that a resource is a valid memory region, requests the memory 4347 * region and ioremaps it with the pci_remap_cfgspace() API, which ensures 4348 * that the proper PCI configuration space memory attributes are applied. 4349 * 4350 * All operations are managed and will be undone on driver detach. 4351 * 4352 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code 4353 * on failure.
Usage example:: 4354 * 4355 * res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 4356 * base = devm_pci_remap_cfg_resource(&pdev->dev, res); 4357 * if (IS_ERR(base)) 4358 * return PTR_ERR(base); 4359 */ 4360 void __iomem *devm_pci_remap_cfg_resource(struct device *dev, 4361 struct resource *res) 4362 { 4363 resource_size_t size; 4364 const char *name; 4365 void __iomem *dest_ptr; 4366 4367 BUG_ON(!dev); 4368 4369 if (!res || resource_type(res) != IORESOURCE_MEM) { 4370 dev_err(dev, "invalid resource\n"); 4371 return IOMEM_ERR_PTR(-EINVAL); 4372 } 4373 4374 size = resource_size(res); 4375 4376 if (res->name) 4377 name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev), 4378 res->name); 4379 else 4380 name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL); 4381 if (!name) 4382 return IOMEM_ERR_PTR(-ENOMEM); 4383 4384 if (!devm_request_mem_region(dev, res->start, size, name)) { 4385 dev_err(dev, "can't request region for resource %pR\n", res); 4386 return IOMEM_ERR_PTR(-EBUSY); 4387 } 4388 4389 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size); 4390 if (!dest_ptr) { 4391 dev_err(dev, "ioremap failed for resource %pR\n", res); 4392 devm_release_mem_region(dev, res->start, size); 4393 dest_ptr = IOMEM_ERR_PTR(-ENOMEM); 4394 } 4395 4396 return dest_ptr; 4397 } 4398 EXPORT_SYMBOL(devm_pci_remap_cfg_resource); 4399 4400 static void __pci_set_master(struct pci_dev *dev, bool enable) 4401 { 4402 u16 old_cmd, cmd; 4403 4404 pci_read_config_word(dev, PCI_COMMAND, &old_cmd); 4405 if (enable) 4406 cmd = old_cmd | PCI_COMMAND_MASTER; 4407 else 4408 cmd = old_cmd & ~PCI_COMMAND_MASTER; 4409 if (cmd != old_cmd) { 4410 pci_dbg(dev, "%s bus mastering\n", 4411 enable ? "enabling" : "disabling"); 4412 pci_write_config_word(dev, PCI_COMMAND, cmd); 4413 } 4414 dev->is_busmaster = enable; 4415 } 4416 4417 /** 4418 * pcibios_setup - process "pci=" kernel boot arguments 4419 * @str: string used to pass in "pci=" kernel boot arguments 4420 * 4421 * Process kernel boot arguments. This is the default implementation. 4422 * Architecture specific implementations can override this as necessary. 4423 */ 4424 char * __weak __init pcibios_setup(char *str) 4425 { 4426 return str; 4427 } 4428 4429 /** 4430 * pcibios_set_master - enable PCI bus-mastering for device dev 4431 * @dev: the PCI device to enable 4432 * 4433 * Enables PCI bus-mastering for the device. This is the default 4434 * implementation. Architecture specific implementations can override 4435 * this if necessary. 4436 */ 4437 void __weak pcibios_set_master(struct pci_dev *dev) 4438 { 4439 u8 lat; 4440 4441 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */ 4442 if (pci_is_pcie(dev)) 4443 return; 4444 4445 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); 4446 if (lat < 16) 4447 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency; 4448 else if (lat > pcibios_max_latency) 4449 lat = pcibios_max_latency; 4450 else 4451 return; 4452 4453 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); 4454 } 4455 4456 /** 4457 * pci_set_master - enables bus-mastering for device dev 4458 * @dev: the PCI device to enable 4459 * 4460 * Enables bus-mastering on the device and calls pcibios_set_master() 4461 * to do the needed arch specific settings. 
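 *
 * Typically called after pci_enable_device() and before any DMA is
 * started. A brief sketch (error handling beyond the enable call is
 * elided)::
 *
 *	rc = pci_enable_device(pdev);
 *	if (rc)
 *		return rc;
 *	pci_set_master(pdev);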
4462 */ 4463 void pci_set_master(struct pci_dev *dev) 4464 { 4465 __pci_set_master(dev, true); 4466 pcibios_set_master(dev); 4467 } 4468 EXPORT_SYMBOL(pci_set_master); 4469 4470 /** 4471 * pci_clear_master - disables bus-mastering for device dev 4472 * @dev: the PCI device to disable 4473 */ 4474 void pci_clear_master(struct pci_dev *dev) 4475 { 4476 __pci_set_master(dev, false); 4477 } 4478 EXPORT_SYMBOL(pci_clear_master); 4479 4480 /** 4481 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed 4482 * @dev: the PCI device for which MWI is to be enabled 4483 * 4484 * Helper function for pci_set_mwi. 4485 * Originally copied from drivers/net/acenic.c. 4486 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. 4487 * 4488 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 4489 */ 4490 int pci_set_cacheline_size(struct pci_dev *dev) 4491 { 4492 u8 cacheline_size; 4493 4494 if (!pci_cache_line_size) 4495 return -EINVAL; 4496 4497 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be 4498 equal to or a multiple of the right value. */ 4499 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size); 4500 if (cacheline_size >= pci_cache_line_size && 4501 (cacheline_size % pci_cache_line_size) == 0) 4502 return 0; 4503 4504 /* Write the correct value. */ 4505 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size); 4506 /* Read it back. */ 4507 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size); 4508 if (cacheline_size == pci_cache_line_size) 4509 return 0; 4510 4511 pci_dbg(dev, "cache line size of %d is not supported\n", 4512 pci_cache_line_size << 2); 4513 4514 return -EINVAL; 4515 } 4516 EXPORT_SYMBOL_GPL(pci_set_cacheline_size); 4517 4518 /** 4519 * pci_set_mwi - enables memory-write-invalidate PCI transaction 4520 * @dev: the PCI device for which MWI is enabled 4521 * 4522 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND. 4523 * 4524 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 4525 */ 4526 int pci_set_mwi(struct pci_dev *dev) 4527 { 4528 #ifdef PCI_DISABLE_MWI 4529 return 0; 4530 #else 4531 int rc; 4532 u16 cmd; 4533 4534 rc = pci_set_cacheline_size(dev); 4535 if (rc) 4536 return rc; 4537 4538 pci_read_config_word(dev, PCI_COMMAND, &cmd); 4539 if (!(cmd & PCI_COMMAND_INVALIDATE)) { 4540 pci_dbg(dev, "enabling Mem-Wr-Inval\n"); 4541 cmd |= PCI_COMMAND_INVALIDATE; 4542 pci_write_config_word(dev, PCI_COMMAND, cmd); 4543 } 4544 return 0; 4545 #endif 4546 } 4547 EXPORT_SYMBOL(pci_set_mwi); 4548 4549 /** 4550 * pcim_set_mwi - a device-managed pci_set_mwi() 4551 * @dev: the PCI device for which MWI is enabled 4552 * 4553 * Managed pci_set_mwi(). 4554 * 4555 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 4556 */ 4557 int pcim_set_mwi(struct pci_dev *dev) 4558 { 4559 struct pci_devres *dr; 4560 4561 dr = find_pci_dr(dev); 4562 if (!dr) 4563 return -ENOMEM; 4564 4565 dr->mwi = 1; 4566 return pci_set_mwi(dev); 4567 } 4568 EXPORT_SYMBOL(pcim_set_mwi); 4569 4570 /** 4571 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction 4572 * @dev: the PCI device for which MWI is enabled 4573 * 4574 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND. 4575 * Callers are not required to check the return value. 4576 * 4577 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
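 *
 * Because MWI is purely an optimization, the return value may safely be
 * ignored; a caller sketch is simply::
 *
 *	pci_try_set_mwi(pdev);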
4578 */ 4579 int pci_try_set_mwi(struct pci_dev *dev) 4580 { 4581 #ifdef PCI_DISABLE_MWI 4582 return 0; 4583 #else 4584 return pci_set_mwi(dev); 4585 #endif 4586 } 4587 EXPORT_SYMBOL(pci_try_set_mwi); 4588 4589 /** 4590 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev 4591 * @dev: the PCI device to disable 4592 * 4593 * Disables PCI Memory-Write-Invalidate transaction on the device 4594 */ 4595 void pci_clear_mwi(struct pci_dev *dev) 4596 { 4597 #ifndef PCI_DISABLE_MWI 4598 u16 cmd; 4599 4600 pci_read_config_word(dev, PCI_COMMAND, &cmd); 4601 if (cmd & PCI_COMMAND_INVALIDATE) { 4602 cmd &= ~PCI_COMMAND_INVALIDATE; 4603 pci_write_config_word(dev, PCI_COMMAND, cmd); 4604 } 4605 #endif 4606 } 4607 EXPORT_SYMBOL(pci_clear_mwi); 4608 4609 /** 4610 * pci_disable_parity - disable parity checking for device 4611 * @dev: the PCI device to operate on 4612 * 4613 * Disable parity checking for device @dev 4614 */ 4615 void pci_disable_parity(struct pci_dev *dev) 4616 { 4617 u16 cmd; 4618 4619 pci_read_config_word(dev, PCI_COMMAND, &cmd); 4620 if (cmd & PCI_COMMAND_PARITY) { 4621 cmd &= ~PCI_COMMAND_PARITY; 4622 pci_write_config_word(dev, PCI_COMMAND, cmd); 4623 } 4624 } 4625 4626 /** 4627 * pci_intx - enables/disables PCI INTx for device dev 4628 * @pdev: the PCI device to operate on 4629 * @enable: boolean: whether to enable or disable PCI INTx 4630 * 4631 * Enables/disables PCI INTx for device @pdev 4632 */ 4633 void pci_intx(struct pci_dev *pdev, int enable) 4634 { 4635 u16 pci_command, new; 4636 4637 pci_read_config_word(pdev, PCI_COMMAND, &pci_command); 4638 4639 if (enable) 4640 new = pci_command & ~PCI_COMMAND_INTX_DISABLE; 4641 else 4642 new = pci_command | PCI_COMMAND_INTX_DISABLE; 4643 4644 if (new != pci_command) { 4645 struct pci_devres *dr; 4646 4647 pci_write_config_word(pdev, PCI_COMMAND, new); 4648 4649 dr = find_pci_dr(pdev); 4650 if (dr && !dr->restore_intx) { 4651 dr->restore_intx = 1; 4652 dr->orig_intx = !enable; 4653 } 4654 } 4655 } 4656 EXPORT_SYMBOL_GPL(pci_intx); 4657 4658 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask) 4659 { 4660 struct pci_bus *bus = dev->bus; 4661 bool mask_updated = true; 4662 u32 cmd_status_dword; 4663 u16 origcmd, newcmd; 4664 unsigned long flags; 4665 bool irq_pending; 4666 4667 /* 4668 * We do a single dword read to retrieve both command and status. 4669 * Document assumptions that make this possible. 4670 */ 4671 BUILD_BUG_ON(PCI_COMMAND % 4); 4672 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS); 4673 4674 raw_spin_lock_irqsave(&pci_lock, flags); 4675 4676 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword); 4677 4678 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT; 4679 4680 /* 4681 * Check interrupt status register to see whether our device 4682 * triggered the interrupt (when masking) or the next IRQ is 4683 * already pending (when unmasking). 
4684 */ 4685 if (mask != irq_pending) { 4686 mask_updated = false; 4687 goto done; 4688 } 4689 4690 origcmd = cmd_status_dword; 4691 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE; 4692 if (mask) 4693 newcmd |= PCI_COMMAND_INTX_DISABLE; 4694 if (newcmd != origcmd) 4695 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd); 4696 4697 done: 4698 raw_spin_unlock_irqrestore(&pci_lock, flags); 4699 4700 return mask_updated; 4701 } 4702 4703 /** 4704 * pci_check_and_mask_intx - mask INTx on pending interrupt 4705 * @dev: the PCI device to operate on 4706 * 4707 * Check if the device dev has its INTx line asserted, mask it and return 4708 * true in that case. False is returned if no interrupt was pending. 4709 */ 4710 bool pci_check_and_mask_intx(struct pci_dev *dev) 4711 { 4712 return pci_check_and_set_intx_mask(dev, true); 4713 } 4714 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx); 4715 4716 /** 4717 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending 4718 * @dev: the PCI device to operate on 4719 * 4720 * Check if the device dev has its INTx line asserted, unmask it if not and 4721 * return true. False is returned and the mask remains active if there was 4722 * still an interrupt pending. 4723 */ 4724 bool pci_check_and_unmask_intx(struct pci_dev *dev) 4725 { 4726 return pci_check_and_set_intx_mask(dev, false); 4727 } 4728 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx); 4729 4730 /** 4731 * pci_wait_for_pending_transaction - wait for pending transaction 4732 * @dev: the PCI device to operate on 4733 * 4734 * Return 0 if transaction is pending, 1 otherwise. 4735 */ 4736 int pci_wait_for_pending_transaction(struct pci_dev *dev) 4737 { 4738 if (!pci_is_pcie(dev)) 4739 return 1; 4740 4741 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA, 4742 PCI_EXP_DEVSTA_TRPND); 4743 } 4744 EXPORT_SYMBOL(pci_wait_for_pending_transaction); 4745 4746 /** 4747 * pcie_flr - initiate a PCIe function level reset 4748 * @dev: device to reset 4749 * 4750 * Initiate a function level reset unconditionally on @dev without 4751 * checking any flags or DEVCAP. 4752 */ 4753 int pcie_flr(struct pci_dev *dev) 4754 { 4755 if (!pci_wait_for_pending_transaction(dev)) 4756 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n"); 4757 4758 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); 4759 4760 if (dev->imm_ready) 4761 return 0; 4762 4763 /* 4764 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within 4765 * 100ms, but may silently discard requests while the FLR is in 4766 * progress. Wait 100ms before trying to access the device. 4767 */ 4768 msleep(100); 4769 4770 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS); 4771 } 4772 EXPORT_SYMBOL_GPL(pcie_flr); 4773 4774 /** 4775 * pcie_reset_flr - initiate a PCIe function level reset 4776 * @dev: device to reset 4777 * @probe: if true, return 0 if device can be reset this way 4778 * 4779 * Initiate a function level reset on @dev.
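 *
 * Callers typically probe first and only then perform the reset; a
 * hedged sketch using the PCI_RESET_PROBE/PCI_RESET_DO_RESET arguments
 * used throughout this file::
 *
 *	if (pcie_reset_flr(pdev, PCI_RESET_PROBE) == 0)
 *		rc = pcie_reset_flr(pdev, PCI_RESET_DO_RESET);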
4780 */ 4781 int pcie_reset_flr(struct pci_dev *dev, bool probe) 4782 { 4783 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET) 4784 return -ENOTTY; 4785 4786 if (!(dev->devcap & PCI_EXP_DEVCAP_FLR)) 4787 return -ENOTTY; 4788 4789 if (probe) 4790 return 0; 4791 4792 return pcie_flr(dev); 4793 } 4794 EXPORT_SYMBOL_GPL(pcie_reset_flr); 4795 4796 static int pci_af_flr(struct pci_dev *dev, bool probe) 4797 { 4798 int pos; 4799 u8 cap; 4800 4801 pos = pci_find_capability(dev, PCI_CAP_ID_AF); 4802 if (!pos) 4803 return -ENOTTY; 4804 4805 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET) 4806 return -ENOTTY; 4807 4808 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap); 4809 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR)) 4810 return -ENOTTY; 4811 4812 if (probe) 4813 return 0; 4814 4815 /* 4816 * Wait for Transaction Pending bit to clear. A word-aligned test 4817 * is used, so we use the control offset rather than status and shift 4818 * the test bit to match. 4819 */ 4820 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL, 4821 PCI_AF_STATUS_TP << 8)) 4822 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n"); 4823 4824 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR); 4825 4826 if (dev->imm_ready) 4827 return 0; 4828 4829 /* 4830 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006, 4831 * updated 27 July 2006; a device must complete an FLR within 4832 * 100ms, but may silently discard requests while the FLR is in 4833 * progress. Wait 100ms before trying to access the device. 4834 */ 4835 msleep(100); 4836 4837 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS); 4838 } 4839 4840 /** 4841 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0. 4842 * @dev: Device to reset. 4843 * @probe: if true, return 0 if the device can be reset this way. 4844 * 4845 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is 4846 * unset, it will be reinitialized internally when going from PCI_D3hot to 4847 * PCI_D0. If that's the case and the device is not in a low-power state 4848 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset. 4849 * 4850 * NOTE: This causes the caller to sleep for twice the device power transition 4851 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms 4852 * by default (i.e. unless the @dev's d3hot_delay field has a different value). 4853 * Moreover, only devices in D0 can be reset by this function. 4854 */ 4855 static int pci_pm_reset(struct pci_dev *dev, bool probe) 4856 { 4857 u16 csr; 4858 4859 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET) 4860 return -ENOTTY; 4861 4862 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr); 4863 if (csr & PCI_PM_CTRL_NO_SOFT_RESET) 4864 return -ENOTTY; 4865 4866 if (probe) 4867 return 0; 4868 4869 if (dev->current_state != PCI_D0) 4870 return -EINVAL; 4871 4872 csr &= ~PCI_PM_CTRL_STATE_MASK; 4873 csr |= PCI_D3hot; 4874 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); 4875 pci_dev_d3_sleep(dev); 4876 4877 csr &= ~PCI_PM_CTRL_STATE_MASK; 4878 csr |= PCI_D0; 4879 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); 4880 pci_dev_d3_sleep(dev); 4881 4882 return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS); 4883 } 4884 4885 /** 4886 * pcie_wait_for_link_status - Wait for link status change 4887 * @pdev: Device whose link to wait for. 4888 * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE. 
* @active: Waiting for active or inactive? 4890 * 4891 * Return 0 if successful, or -ETIMEDOUT if status has not changed within 4892 * PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds. 4893 */ 4894 static int pcie_wait_for_link_status(struct pci_dev *pdev, 4895 bool use_lt, bool active) 4896 { 4897 u16 lnksta_mask, lnksta_match; 4898 unsigned long end_jiffies; 4899 u16 lnksta; 4900 4901 lnksta_mask = use_lt ? PCI_EXP_LNKSTA_LT : PCI_EXP_LNKSTA_DLLLA; 4902 lnksta_match = active ? lnksta_mask : 0; 4903 4904 end_jiffies = jiffies + msecs_to_jiffies(PCIE_LINK_RETRAIN_TIMEOUT_MS); 4905 do { 4906 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta); 4907 if ((lnksta & lnksta_mask) == lnksta_match) 4908 return 0; 4909 msleep(1); 4910 } while (time_before(jiffies, end_jiffies)); 4911 4912 return -ETIMEDOUT; 4913 } 4914 4915 /** 4916 * pcie_retrain_link - Request a link retrain and wait for it to complete 4917 * @pdev: Device whose link to retrain. 4918 * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status. 4919 * 4920 * Retrain completion status is retrieved from the Link Status Register 4921 * according to @use_lt. It is not verified whether the use of the DLLLA 4922 * bit is valid. 4923 * 4924 * Return 0 if successful, or -ETIMEDOUT if training has not completed 4925 * within PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds. 4926 */ 4927 int pcie_retrain_link(struct pci_dev *pdev, bool use_lt) 4928 { 4929 int rc; 4930 u16 lnkctl; 4931 4932 /* 4933 * Ensure the updated LNKCTL parameters are used during link 4934 * training by checking that there is no ongoing link training to 4935 * avoid LTSSM race as recommended in Implementation Note at the 4936 * end of PCIe r6.0.1 sec 7.5.3.7. Waiting for the LT bit to clear 4937 * (rather than honoring @use_lt here) is what that check requires. 4938 */ 4938 rc = pcie_wait_for_link_status(pdev, true, false); 4939 if (rc) 4940 return rc; 4941 4942 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl); 4943 lnkctl |= PCI_EXP_LNKCTL_RL; 4944 pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnkctl); 4945 if (pdev->clear_retrain_link) { 4946 /* 4947 * Due to an erratum in some devices the Retrain Link bit 4948 * needs to be cleared again manually to allow the link 4949 * training to succeed. 4950 */ 4951 lnkctl &= ~PCI_EXP_LNKCTL_RL; 4952 pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnkctl); 4953 } 4954 4955 return pcie_wait_for_link_status(pdev, use_lt, !use_lt); 4956 } 4957 4958 /** 4959 * pcie_wait_for_link_delay - Wait until link is active or inactive 4960 * @pdev: Bridge device 4961 * @active: waiting for active or inactive? 4962 * @delay: Delay to wait after link has become active (in ms) 4963 * 4964 * Use this to wait till link becomes active or inactive. 4965 */ 4966 static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, 4967 int delay) 4968 { 4969 int rc; 4970 4971 /* 4972 * Some controllers might not implement link active reporting. In this 4973 * case, we wait for 1000 ms + any delay requested by the caller. 4974 */ 4975 if (!pdev->link_active_reporting) { 4976 msleep(PCIE_LINK_RETRAIN_TIMEOUT_MS + delay); 4977 return true; 4978 } 4979 4980 /* 4981 * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20 ms, 4982 * after which we should expect the link to become active if the reset was 4983 * successful. If so, software must wait a minimum of 100 ms before sending 4984 * configuration requests to devices downstream of this port. 4985 * 4986 * If the link fails to activate, either the device was physically 4987 * removed or the link has permanently failed.
4988 */ 4989 if (active) 4990 msleep(20); 4991 rc = pcie_wait_for_link_status(pdev, false, active); 4992 if (active) { 4993 if (rc) 4994 rc = pcie_failed_link_retrain(pdev); 4995 if (rc) 4996 return false; 4997 4998 msleep(delay); 4999 return true; 5000 } 5001 5002 if (rc) 5003 return false; 5004 5005 return true; 5006 } 5007 5008 /** 5009 * pcie_wait_for_link - Wait until link is active or inactive 5010 * @pdev: Bridge device 5011 * @active: waiting for active or inactive? 5012 * 5013 * Use this to wait till link becomes active or inactive. 5014 */ 5015 bool pcie_wait_for_link(struct pci_dev *pdev, bool active) 5016 { 5017 return pcie_wait_for_link_delay(pdev, active, 100); 5018 } 5019 5020 /* 5021 * Find maximum D3cold delay required by all the devices on the bus. The 5022 * spec says 100 ms, but firmware can lower it and we allow drivers to 5023 * increase it as well. 5024 * 5025 * Called with @pci_bus_sem locked for reading. 5026 */ 5027 static int pci_bus_max_d3cold_delay(const struct pci_bus *bus) 5028 { 5029 const struct pci_dev *pdev; 5030 int min_delay = 100; 5031 int max_delay = 0; 5032 5033 list_for_each_entry(pdev, &bus->devices, bus_list) { 5034 if (pdev->d3cold_delay < min_delay) 5035 min_delay = pdev->d3cold_delay; 5036 if (pdev->d3cold_delay > max_delay) 5037 max_delay = pdev->d3cold_delay; 5038 } 5039 5040 return max(min_delay, max_delay); 5041 } 5042 5043 /** 5044 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible 5045 * @dev: PCI bridge 5046 * @reset_type: reset type in human-readable form 5047 * 5048 * Handle necessary delays before access to the devices on the secondary 5049 * side of the bridge are permitted after D3cold to D0 transition 5050 * or Conventional Reset. 5051 * 5052 * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For 5053 * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section 5054 * 4.3.2. 5055 * 5056 * Return 0 on success or -ENOTTY if the first device on the secondary bus 5057 * failed to become accessible. 5058 */ 5059 int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type) 5060 { 5061 struct pci_dev *child; 5062 int delay; 5063 5064 if (pci_dev_is_disconnected(dev)) 5065 return 0; 5066 5067 if (!pci_is_bridge(dev)) 5068 return 0; 5069 5070 down_read(&pci_bus_sem); 5071 5072 /* 5073 * We only deal with devices that are present currently on the bus. 5074 * For any hot-added devices the access delay is handled in pciehp 5075 * board_added(). In case of ACPI hotplug the firmware is expected 5076 * to configure the devices before OS is notified. 5077 */ 5078 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) { 5079 up_read(&pci_bus_sem); 5080 return 0; 5081 } 5082 5083 /* Take d3cold_delay requirements into account */ 5084 delay = pci_bus_max_d3cold_delay(dev->subordinate); 5085 if (!delay) { 5086 up_read(&pci_bus_sem); 5087 return 0; 5088 } 5089 5090 child = list_first_entry(&dev->subordinate->devices, struct pci_dev, 5091 bus_list); 5092 up_read(&pci_bus_sem); 5093 5094 /* 5095 * For conventional PCI and PCI-X, we need to wait Tpvrh + Trhfa before 5096 * accessing the device after reset (that is, 1000 ms + 100 ms). 5097 */ 5098 if (!pci_is_pcie(dev)) { 5099 pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay); 5100 msleep(1000 + delay); 5101 return 0; 5102 } 5103 5104 /* 5105 * PCIe downstream ports and root ports that do not support speeds 5106 * greater than 5 GT/s need to wait a minimum of 100 ms.
For higher 5107 * speeds (gen3) we need to wait first for the data link layer to 5108 * become active. 5109 * 5110 * However, 100 ms is the minimum and the PCIe spec says the 5111 * software must allow at least 1s before it can determine that the 5112 * device that did not respond is a broken device. Also, a device can 5113 * take longer than that to respond if it indicates so through Request 5114 * Retry Status completions. 5115 * 5116 * Therefore we wait for 100 ms and check for the device presence 5117 * until the timeout expires. 5118 */ 5119 if (!pcie_downstream_port(dev)) 5120 return 0; 5121 5122 if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) { 5123 u16 status; 5124 5125 pci_dbg(dev, "waiting %d ms for downstream link\n", delay); 5126 msleep(delay); 5127 5128 if (!pci_dev_wait(child, reset_type, PCI_RESET_WAIT - delay)) 5129 return 0; 5130 5131 /* 5132 * If the port supports active link reporting, we now check 5133 * whether the link is active and, if not, bail out early with 5134 * the assumption that the device is not present anymore. 5135 */ 5136 if (!dev->link_active_reporting) 5137 return -ENOTTY; 5138 5139 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &status); 5140 if (!(status & PCI_EXP_LNKSTA_DLLLA)) 5141 return -ENOTTY; 5142 5143 return pci_dev_wait(child, reset_type, 5144 PCIE_RESET_READY_POLL_MS - PCI_RESET_WAIT); 5145 } 5146 5147 pci_dbg(dev, "waiting %d ms for downstream link, after activation\n", 5148 delay); 5149 if (!pcie_wait_for_link_delay(dev, true, delay)) { 5150 /* Did not train, no need to wait any further */ 5151 pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n"); 5152 return -ENOTTY; 5153 } 5154 5155 return pci_dev_wait(child, reset_type, 5156 PCIE_RESET_READY_POLL_MS - delay); 5157 } 5158 5159 void pci_reset_secondary_bus(struct pci_dev *dev) 5160 { 5161 u16 ctrl; 5162 5163 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl); 5164 ctrl |= PCI_BRIDGE_CTL_BUS_RESET; 5165 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); 5166 5167 /* 5168 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double 5169 * this to 2ms to ensure that we meet the minimum requirement. 5170 */ 5171 msleep(2); 5172 5173 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; 5174 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); 5175 } 5176 5177 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev) 5178 { 5179 pci_reset_secondary_bus(dev); 5180 } 5181 5182 /** 5183 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge. 5184 * @dev: Bridge device 5185 * 5186 * Use the bridge control register to assert reset on the secondary bus. 5187 * Devices on the secondary bus are left in power-on state.
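 *
 * As an illustrative sketch, for resetting everything below the upstream
 * bridge of a device @dev, the caller passes dev->bus->self (this mirrors
 * the call made by pci_parent_bus_reset() later in this file)::
 *
 *	rc = pci_bridge_secondary_bus_reset(dev->bus->self);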
5188 */ 5189 int pci_bridge_secondary_bus_reset(struct pci_dev *dev) 5190 { 5191 pcibios_reset_secondary_bus(dev); 5192 5193 return pci_bridge_wait_for_secondary_bus(dev, "bus reset"); 5194 } 5195 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset); 5196 5197 static int pci_parent_bus_reset(struct pci_dev *dev, bool probe) 5198 { 5199 struct pci_dev *pdev; 5200 5201 if (pci_is_root_bus(dev->bus) || dev->subordinate || 5202 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET) 5203 return -ENOTTY; 5204 5205 list_for_each_entry(pdev, &dev->bus->devices, bus_list) 5206 if (pdev != dev) 5207 return -ENOTTY; 5208 5209 if (probe) 5210 return 0; 5211 5212 return pci_bridge_secondary_bus_reset(dev->bus->self); 5213 } 5214 5215 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe) 5216 { 5217 int rc = -ENOTTY; 5218 5219 if (!hotplug || !try_module_get(hotplug->owner)) 5220 return rc; 5221 5222 if (hotplug->ops->reset_slot) 5223 rc = hotplug->ops->reset_slot(hotplug, probe); 5224 5225 module_put(hotplug->owner); 5226 5227 return rc; 5228 } 5229 5230 static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe) 5231 { 5232 if (dev->multifunction || dev->subordinate || !dev->slot || 5233 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET) 5234 return -ENOTTY; 5235 5236 return pci_reset_hotplug_slot(dev->slot->hotplug, probe); 5237 } 5238 5239 static int pci_reset_bus_function(struct pci_dev *dev, bool probe) 5240 { 5241 int rc; 5242 5243 rc = pci_dev_reset_slot_function(dev, probe); 5244 if (rc != -ENOTTY) 5245 return rc; 5246 return pci_parent_bus_reset(dev, probe); 5247 } 5248 5249 void pci_dev_lock(struct pci_dev *dev) 5250 { 5251 /* block PM suspend, driver probe, etc. */ 5252 device_lock(&dev->dev); 5253 pci_cfg_access_lock(dev); 5254 } 5255 EXPORT_SYMBOL_GPL(pci_dev_lock); 5256 5257 /* Return 1 on successful lock, 0 on contention */ 5258 int pci_dev_trylock(struct pci_dev *dev) 5259 { 5260 if (device_trylock(&dev->dev)) { 5261 if (pci_cfg_access_trylock(dev)) 5262 return 1; 5263 device_unlock(&dev->dev); 5264 } 5265 5266 return 0; 5267 } 5268 EXPORT_SYMBOL_GPL(pci_dev_trylock); 5269 5270 void pci_dev_unlock(struct pci_dev *dev) 5271 { 5272 pci_cfg_access_unlock(dev); 5273 device_unlock(&dev->dev); 5274 } 5275 EXPORT_SYMBOL_GPL(pci_dev_unlock); 5276 5277 static void pci_dev_save_and_disable(struct pci_dev *dev) 5278 { 5279 const struct pci_error_handlers *err_handler = 5280 dev->driver ? dev->driver->err_handler : NULL; 5281 5282 /* 5283 * dev->driver->err_handler->reset_prepare() is protected against 5284 * races with ->remove() by the device lock, which must be held by 5285 * the caller. 5286 */ 5287 if (err_handler && err_handler->reset_prepare) 5288 err_handler->reset_prepare(dev); 5289 5290 /* 5291 * Wake-up device prior to save. PM registers default to D0 after 5292 * reset and a simple register restore doesn't reliably return 5293 * to a non-D0 state anyway. 5294 */ 5295 pci_set_power_state(dev, PCI_D0); 5296 5297 pci_save_state(dev); 5298 /* 5299 * Disable the device by clearing the Command register, except for 5300 * INTx-disable which is set. This not only disables MMIO and I/O port 5301 * BARs, but also prevents the device from being Bus Master, preventing 5302 * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3 5303 * compliant devices, INTx-disable prevents legacy interrupts. 
5304 */ 5305 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); 5306 } 5307 5308 static void pci_dev_restore(struct pci_dev *dev) 5309 { 5310 const struct pci_error_handlers *err_handler = 5311 dev->driver ? dev->driver->err_handler : NULL; 5312 5313 pci_restore_state(dev); 5314 5315 /* 5316 * dev->driver->err_handler->reset_done() is protected against 5317 * races with ->remove() by the device lock, which must be held by 5318 * the caller. 5319 */ 5320 if (err_handler && err_handler->reset_done) 5321 err_handler->reset_done(dev); 5322 } 5323 5324 /* dev->reset_methods[] is a 0-terminated list of indices into this array */ 5325 static const struct pci_reset_fn_method pci_reset_fn_methods[] = { 5326 { }, 5327 { pci_dev_specific_reset, .name = "device_specific" }, 5328 { pci_dev_acpi_reset, .name = "acpi" }, 5329 { pcie_reset_flr, .name = "flr" }, 5330 { pci_af_flr, .name = "af_flr" }, 5331 { pci_pm_reset, .name = "pm" }, 5332 { pci_reset_bus_function, .name = "bus" }, 5333 }; 5334 5335 static ssize_t reset_method_show(struct device *dev, 5336 struct device_attribute *attr, char *buf) 5337 { 5338 struct pci_dev *pdev = to_pci_dev(dev); 5339 ssize_t len = 0; 5340 int i, m; 5341 5342 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) { 5343 m = pdev->reset_methods[i]; 5344 if (!m) 5345 break; 5346 5347 len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "", 5348 pci_reset_fn_methods[m].name); 5349 } 5350 5351 if (len) 5352 len += sysfs_emit_at(buf, len, "\n"); 5353 5354 return len; 5355 } 5356 5357 static int reset_method_lookup(const char *name) 5358 { 5359 int m; 5360 5361 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) { 5362 if (sysfs_streq(name, pci_reset_fn_methods[m].name)) 5363 return m; 5364 } 5365 5366 return 0; /* not found */ 5367 } 5368 5369 static ssize_t reset_method_store(struct device *dev, 5370 struct device_attribute *attr, 5371 const char *buf, size_t count) 5372 { 5373 struct pci_dev *pdev = to_pci_dev(dev); 5374 char *options, *name; 5375 int m, n; 5376 u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 }; 5377 5378 if (sysfs_streq(buf, "")) { 5379 pdev->reset_methods[0] = 0; 5380 pci_warn(pdev, "All device reset methods disabled by user"); 5381 return count; 5382 } 5383 5384 if (sysfs_streq(buf, "default")) { 5385 pci_init_reset_methods(pdev); 5386 return count; 5387 } 5388 5389 options = kstrndup(buf, count, GFP_KERNEL); 5390 if (!options) 5391 return -ENOMEM; 5392 5393 n = 0; 5394 while ((name = strsep(&options, " ")) != NULL) { 5395 if (sysfs_streq(name, "")) 5396 continue; 5397 5398 name = strim(name); 5399 5400 m = reset_method_lookup(name); 5401 if (!m) { 5402 pci_err(pdev, "Invalid reset method '%s'", name); 5403 goto error; 5404 } 5405 5406 if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) { 5407 pci_err(pdev, "Unsupported reset method '%s'", name); 5408 goto error; 5409 } 5410 5411 if (n == PCI_NUM_RESET_METHODS - 1) { 5412 pci_err(pdev, "Too many reset methods\n"); 5413 goto error; 5414 } 5415 5416 reset_methods[n++] = m; 5417 } 5418 5419 reset_methods[n] = 0; 5420 5421 /* Warn if dev-specific supported but not highest priority */ 5422 if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 && 5423 reset_methods[0] != 1) 5424 pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user"); 5425 memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods)); 5426 kfree(options); 5427 return count; 5428 5429 error: 5430 /* Leave previous methods unchanged */ 5431 kfree(options); 5432 return -EINVAL; 5433 } 5434 static 
DEVICE_ATTR_RW(reset_method); 5435 5436 static struct attribute *pci_dev_reset_method_attrs[] = { 5437 &dev_attr_reset_method.attr, 5438 NULL, 5439 }; 5440 5441 static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj, 5442 struct attribute *a, int n) 5443 { 5444 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); 5445 5446 if (!pci_reset_supported(pdev)) 5447 return 0; 5448 5449 return a->mode; 5450 } 5451 5452 const struct attribute_group pci_dev_reset_method_attr_group = { 5453 .attrs = pci_dev_reset_method_attrs, 5454 .is_visible = pci_dev_reset_method_attr_is_visible, 5455 }; 5456 5457 /** 5458 * __pci_reset_function_locked - reset a PCI device function while holding 5459 * the @dev mutex lock. 5460 * @dev: PCI device to reset 5461 * 5462 * Some devices allow an individual function to be reset without affecting 5463 * other functions in the same device. The PCI device must be responsive 5464 * to PCI config space in order to use this function. 5465 * 5466 * The device function is presumed to be unused and the caller is holding 5467 * the device mutex lock when this function is called. 5468 * 5469 * Resetting the device will make the contents of PCI configuration space 5470 * random, so any caller of this must be prepared to reinitialise the 5471 * device including MSI, bus mastering, BARs, decoding IO and memory spaces, 5472 * etc. 5473 * 5474 * Returns 0 if the device function was successfully reset or negative if the 5475 * device doesn't support resetting a single function. 5476 */ 5477 int __pci_reset_function_locked(struct pci_dev *dev) 5478 { 5479 int i, m, rc; 5480 5481 might_sleep(); 5482 5483 /* 5484 * A reset method returns -ENOTTY if it doesn't support this device and 5485 * we should try the next method. 5486 * 5487 * If it returns 0 (success), we're finished. If it returns any other 5488 * error, we're also finished: this indicates that further reset 5489 * mechanisms might be broken on the device. 5490 */ 5491 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) { 5492 m = dev->reset_methods[i]; 5493 if (!m) 5494 return -ENOTTY; 5495 5496 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET); 5497 if (!rc) 5498 return 0; 5499 if (rc != -ENOTTY) 5500 return rc; 5501 } 5502 5503 return -ENOTTY; 5504 } 5505 EXPORT_SYMBOL_GPL(__pci_reset_function_locked); 5506 5507 /** 5508 * pci_init_reset_methods - check whether device can be safely reset 5509 * and store supported reset mechanisms. 5510 * @dev: PCI device to check for reset mechanisms 5511 * 5512 * Some devices allow an individual function to be reset without affecting 5513 * other functions in the same device. The PCI device must be in D0-D3hot 5514 * state. 5515 * 5516 * Stores reset mechanisms supported by device in reset_methods byte array 5517 * which is a member of struct pci_dev. 5518 */ 5519 void pci_init_reset_methods(struct pci_dev *dev) 5520 { 5521 int m, i, rc; 5522 5523 BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS); 5524 5525 might_sleep(); 5526 5527 i = 0; 5528 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) { 5529 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE); 5530 if (!rc) 5531 dev->reset_methods[i++] = m; 5532 else if (rc != -ENOTTY) 5533 break; 5534 } 5535 5536 dev->reset_methods[i] = 0; 5537 } 5538 5539 /** 5540 * pci_reset_function - quiesce and reset a PCI device function 5541 * @dev: PCI device to reset 5542 * 5543 * Some devices allow an individual function to be reset without affecting 5544 * other functions in the same device. 
The PCI device must be responsive 5545 * to PCI config space in order to use this function. 5546 * 5547 * This function does not just reset the PCI portion of a device, but 5548 * clears all the state associated with the device. This function differs 5549 * from __pci_reset_function_locked() in that it saves and restores device state 5550 * over the reset and takes the PCI device lock. 5551 * 5552 * Returns 0 if the device function was successfully reset or negative if the 5553 * device doesn't support resetting a single function. 5554 */ 5555 int pci_reset_function(struct pci_dev *dev) 5556 { 5557 int rc; 5558 5559 if (!pci_reset_supported(dev)) 5560 return -ENOTTY; 5561 5562 pci_dev_lock(dev); 5563 pci_dev_save_and_disable(dev); 5564 5565 rc = __pci_reset_function_locked(dev); 5566 5567 pci_dev_restore(dev); 5568 pci_dev_unlock(dev); 5569 5570 return rc; 5571 } 5572 EXPORT_SYMBOL_GPL(pci_reset_function); 5573 5574 /** 5575 * pci_reset_function_locked - quiesce and reset a PCI device function 5576 * @dev: PCI device to reset 5577 * 5578 * Some devices allow an individual function to be reset without affecting 5579 * other functions in the same device. The PCI device must be responsive 5580 * to PCI config space in order to use this function. 5581 * 5582 * This function does not just reset the PCI portion of a device, but 5583 * clears all the state associated with the device. This function differs 5584 * from __pci_reset_function_locked() in that it saves and restores device state 5585 * over the reset. It also differs from pci_reset_function() in that it 5586 * requires the PCI device lock to be held. 5587 * 5588 * Returns 0 if the device function was successfully reset or negative if the 5589 * device doesn't support resetting a single function. 5590 */ 5591 int pci_reset_function_locked(struct pci_dev *dev) 5592 { 5593 int rc; 5594 5595 if (!pci_reset_supported(dev)) 5596 return -ENOTTY; 5597 5598 pci_dev_save_and_disable(dev); 5599 5600 rc = __pci_reset_function_locked(dev); 5601 5602 pci_dev_restore(dev); 5603 5604 return rc; 5605 } 5606 EXPORT_SYMBOL_GPL(pci_reset_function_locked); 5607 5608 /** 5609 * pci_try_reset_function - quiesce and reset a PCI device function 5610 * @dev: PCI device to reset 5611 * 5612 * Same as above, except return -EAGAIN if unable to lock device. 5613 */ 5614 int pci_try_reset_function(struct pci_dev *dev) 5615 { 5616 int rc; 5617 5618 if (!pci_reset_supported(dev)) 5619 return -ENOTTY; 5620 5621 if (!pci_dev_trylock(dev)) 5622 return -EAGAIN; 5623 5624 pci_dev_save_and_disable(dev); 5625 rc = __pci_reset_function_locked(dev); 5626 pci_dev_restore(dev); 5627 pci_dev_unlock(dev); 5628 5629 return rc; 5630 } 5631 EXPORT_SYMBOL_GPL(pci_try_reset_function); 5632 5633 /* Do any devices on or below this bus prevent a bus reset? 
*/ 5634 static bool pci_bus_resetable(struct pci_bus *bus) 5635 { 5636 struct pci_dev *dev; 5637 5638 5639 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)) 5640 return false; 5641 5642 list_for_each_entry(dev, &bus->devices, bus_list) { 5643 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || 5644 (dev->subordinate && !pci_bus_resetable(dev->subordinate))) 5645 return false; 5646 } 5647 5648 return true; 5649 } 5650 5651 /* Lock devices from the top of the tree down */ 5652 static void pci_bus_lock(struct pci_bus *bus) 5653 { 5654 struct pci_dev *dev; 5655 5656 list_for_each_entry(dev, &bus->devices, bus_list) { 5657 pci_dev_lock(dev); 5658 if (dev->subordinate) 5659 pci_bus_lock(dev->subordinate); 5660 } 5661 } 5662 5663 /* Unlock devices from the bottom of the tree up */ 5664 static void pci_bus_unlock(struct pci_bus *bus) 5665 { 5666 struct pci_dev *dev; 5667 5668 list_for_each_entry(dev, &bus->devices, bus_list) { 5669 if (dev->subordinate) 5670 pci_bus_unlock(dev->subordinate); 5671 pci_dev_unlock(dev); 5672 } 5673 } 5674 5675 /* Return 1 on successful lock, 0 on contention */ 5676 static int pci_bus_trylock(struct pci_bus *bus) 5677 { 5678 struct pci_dev *dev; 5679 5680 list_for_each_entry(dev, &bus->devices, bus_list) { 5681 if (!pci_dev_trylock(dev)) 5682 goto unlock; 5683 if (dev->subordinate) { 5684 if (!pci_bus_trylock(dev->subordinate)) { 5685 pci_dev_unlock(dev); 5686 goto unlock; 5687 } 5688 } 5689 } 5690 return 1; 5691 5692 unlock: 5693 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) { 5694 if (dev->subordinate) 5695 pci_bus_unlock(dev->subordinate); 5696 pci_dev_unlock(dev); 5697 } 5698 return 0; 5699 } 5700 5701 /* Do any devices on or below this slot prevent a bus reset? */ 5702 static bool pci_slot_resetable(struct pci_slot *slot) 5703 { 5704 struct pci_dev *dev; 5705 5706 if (slot->bus->self && 5707 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)) 5708 return false; 5709 5710 list_for_each_entry(dev, &slot->bus->devices, bus_list) { 5711 if (!dev->slot || dev->slot != slot) 5712 continue; 5713 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || 5714 (dev->subordinate && !pci_bus_resetable(dev->subordinate))) 5715 return false; 5716 } 5717 5718 return true; 5719 } 5720 5721 /* Lock devices from the top of the tree down */ 5722 static void pci_slot_lock(struct pci_slot *slot) 5723 { 5724 struct pci_dev *dev; 5725 5726 list_for_each_entry(dev, &slot->bus->devices, bus_list) { 5727 if (!dev->slot || dev->slot != slot) 5728 continue; 5729 pci_dev_lock(dev); 5730 if (dev->subordinate) 5731 pci_bus_lock(dev->subordinate); 5732 } 5733 } 5734 5735 /* Unlock devices from the bottom of the tree up */ 5736 static void pci_slot_unlock(struct pci_slot *slot) 5737 { 5738 struct pci_dev *dev; 5739 5740 list_for_each_entry(dev, &slot->bus->devices, bus_list) { 5741 if (!dev->slot || dev->slot != slot) 5742 continue; 5743 if (dev->subordinate) 5744 pci_bus_unlock(dev->subordinate); 5745 pci_dev_unlock(dev); 5746 } 5747 } 5748 5749 /* Return 1 on successful lock, 0 on contention */ 5750 static int pci_slot_trylock(struct pci_slot *slot) 5751 { 5752 struct pci_dev *dev; 5753 5754 list_for_each_entry(dev, &slot->bus->devices, bus_list) { 5755 if (!dev->slot || dev->slot != slot) 5756 continue; 5757 if (!pci_dev_trylock(dev)) 5758 goto unlock; 5759 if (dev->subordinate) { 5760 if (!pci_bus_trylock(dev->subordinate)) { 5761 pci_dev_unlock(dev); 5762 goto unlock; 5763 } 5764 } 5765 } 5766 return 1; 5767 5768 unlock: 5769 
list_for_each_entry_continue_reverse(dev, 5770 &slot->bus->devices, bus_list) { 5771 if (!dev->slot || dev->slot != slot) 5772 continue; 5773 if (dev->subordinate) 5774 pci_bus_unlock(dev->subordinate); 5775 pci_dev_unlock(dev); 5776 } 5777 return 0; 5778 } 5779 5780 /* 5781 * Save and disable devices from the top of the tree down while holding 5782 * the @dev mutex lock for the entire tree. 5783 */ 5784 static void pci_bus_save_and_disable_locked(struct pci_bus *bus) 5785 { 5786 struct pci_dev *dev; 5787 5788 list_for_each_entry(dev, &bus->devices, bus_list) { 5789 pci_dev_save_and_disable(dev); 5790 if (dev->subordinate) 5791 pci_bus_save_and_disable_locked(dev->subordinate); 5792 } 5793 } 5794 5795 /* 5796 * Restore devices from top of the tree down while holding @dev mutex lock 5797 * for the entire tree. Parent bridges need to be restored before we can 5798 * get to subordinate devices. 5799 */ 5800 static void pci_bus_restore_locked(struct pci_bus *bus) 5801 { 5802 struct pci_dev *dev; 5803 5804 list_for_each_entry(dev, &bus->devices, bus_list) { 5805 pci_dev_restore(dev); 5806 if (dev->subordinate) 5807 pci_bus_restore_locked(dev->subordinate); 5808 } 5809 } 5810 5811 /* 5812 * Save and disable devices from the top of the tree down while holding 5813 * the @dev mutex lock for the entire tree. 5814 */ 5815 static void pci_slot_save_and_disable_locked(struct pci_slot *slot) 5816 { 5817 struct pci_dev *dev; 5818 5819 list_for_each_entry(dev, &slot->bus->devices, bus_list) { 5820 if (!dev->slot || dev->slot != slot) 5821 continue; 5822 pci_dev_save_and_disable(dev); 5823 if (dev->subordinate) 5824 pci_bus_save_and_disable_locked(dev->subordinate); 5825 } 5826 } 5827 5828 /* 5829 * Restore devices from top of the tree down while holding @dev mutex lock 5830 * for the entire tree. Parent bridges need to be restored before we can 5831 * get to subordinate devices. 5832 */ 5833 static void pci_slot_restore_locked(struct pci_slot *slot) 5834 { 5835 struct pci_dev *dev; 5836 5837 list_for_each_entry(dev, &slot->bus->devices, bus_list) { 5838 if (!dev->slot || dev->slot != slot) 5839 continue; 5840 pci_dev_restore(dev); 5841 if (dev->subordinate) 5842 pci_bus_restore_locked(dev->subordinate); 5843 } 5844 } 5845 5846 static int pci_slot_reset(struct pci_slot *slot, bool probe) 5847 { 5848 int rc; 5849 5850 if (!slot || !pci_slot_resetable(slot)) 5851 return -ENOTTY; 5852 5853 if (!probe) 5854 pci_slot_lock(slot); 5855 5856 might_sleep(); 5857 5858 rc = pci_reset_hotplug_slot(slot->hotplug, probe); 5859 5860 if (!probe) 5861 pci_slot_unlock(slot); 5862 5863 return rc; 5864 } 5865 5866 /** 5867 * pci_probe_reset_slot - probe whether a PCI slot can be reset 5868 * @slot: PCI slot to probe 5869 * 5870 * Return 0 if slot can be reset, negative if a slot reset is not supported. 5871 */ 5872 int pci_probe_reset_slot(struct pci_slot *slot) 5873 { 5874 return pci_slot_reset(slot, PCI_RESET_PROBE); 5875 } 5876 EXPORT_SYMBOL_GPL(pci_probe_reset_slot); 5877 5878 /** 5879 * __pci_reset_slot - Try to reset a PCI slot 5880 * @slot: PCI slot to reset 5881 * 5882 * A PCI bus may host multiple slots; each slot may support a reset mechanism 5883 * independent of other slots. For instance, some slots may support slot power 5884 * control. In the case of a 1:1 bus to slot architecture, this function may 5885 * wrap the bus reset to avoid spurious slot related events such as hotplug. 5886 * Generally a slot reset should be attempted before a bus reset.
All of the 5887 * function of the slot and any subordinate buses behind the slot are reset 5888 * through this function. PCI config space of all devices in the slot and 5889 * behind the slot is saved before and restored after reset. 5890 * 5891 * Same as above except return -EAGAIN if the slot cannot be locked 5892 */ 5893 static int __pci_reset_slot(struct pci_slot *slot) 5894 { 5895 int rc; 5896 5897 rc = pci_slot_reset(slot, PCI_RESET_PROBE); 5898 if (rc) 5899 return rc; 5900 5901 if (pci_slot_trylock(slot)) { 5902 pci_slot_save_and_disable_locked(slot); 5903 might_sleep(); 5904 rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET); 5905 pci_slot_restore_locked(slot); 5906 pci_slot_unlock(slot); 5907 } else 5908 rc = -EAGAIN; 5909 5910 return rc; 5911 } 5912 5913 static int pci_bus_reset(struct pci_bus *bus, bool probe) 5914 { 5915 int ret; 5916 5917 if (!bus->self || !pci_bus_resetable(bus)) 5918 return -ENOTTY; 5919 5920 if (probe) 5921 return 0; 5922 5923 pci_bus_lock(bus); 5924 5925 might_sleep(); 5926 5927 ret = pci_bridge_secondary_bus_reset(bus->self); 5928 5929 pci_bus_unlock(bus); 5930 5931 return ret; 5932 } 5933 5934 /** 5935 * pci_bus_error_reset - reset the bridge's subordinate bus 5936 * @bridge: The parent device that connects to the bus to reset 5937 * 5938 * This function will first try to reset the slots on this bus if the method is 5939 * available. If slot reset fails or is not available, this will fall back to a 5940 * secondary bus reset. 5941 */ 5942 int pci_bus_error_reset(struct pci_dev *bridge) 5943 { 5944 struct pci_bus *bus = bridge->subordinate; 5945 struct pci_slot *slot; 5946 5947 if (!bus) 5948 return -ENOTTY; 5949 5950 mutex_lock(&pci_slot_mutex); 5951 if (list_empty(&bus->slots)) 5952 goto bus_reset; 5953 5954 list_for_each_entry(slot, &bus->slots, list) 5955 if (pci_probe_reset_slot(slot)) 5956 goto bus_reset; 5957 5958 list_for_each_entry(slot, &bus->slots, list) 5959 if (pci_slot_reset(slot, PCI_RESET_DO_RESET)) 5960 goto bus_reset; 5961 5962 mutex_unlock(&pci_slot_mutex); 5963 return 0; 5964 bus_reset: 5965 mutex_unlock(&pci_slot_mutex); 5966 return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET); 5967 } 5968 5969 /** 5970 * pci_probe_reset_bus - probe whether a PCI bus can be reset 5971 * @bus: PCI bus to probe 5972 * 5973 * Return 0 if bus can be reset, negative if a bus reset is not supported. 5974 */ 5975 int pci_probe_reset_bus(struct pci_bus *bus) 5976 { 5977 return pci_bus_reset(bus, PCI_RESET_PROBE); 5978 } 5979 EXPORT_SYMBOL_GPL(pci_probe_reset_bus); 5980 5981 /** 5982 * __pci_reset_bus - Try to reset a PCI bus 5983 * @bus: top level PCI bus to reset 5984 * 5985 * Same as above except return -EAGAIN if the bus cannot be locked 5986 */ 5987 static int __pci_reset_bus(struct pci_bus *bus) 5988 { 5989 int rc; 5990 5991 rc = pci_bus_reset(bus, PCI_RESET_PROBE); 5992 if (rc) 5993 return rc; 5994 5995 if (pci_bus_trylock(bus)) { 5996 pci_bus_save_and_disable_locked(bus); 5997 might_sleep(); 5998 rc = pci_bridge_secondary_bus_reset(bus->self); 5999 pci_bus_restore_locked(bus); 6000 pci_bus_unlock(bus); 6001 } else 6002 rc = -EAGAIN; 6003 6004 return rc; 6005 } 6006 6007 /** 6008 * pci_reset_bus - Try to reset a PCI bus 6009 * @pdev: top level PCI device to reset via slot/bus 6010 * 6011 * Same as above except return -EAGAIN if the bus cannot be locked 6012 */ 6013 int pci_reset_bus(struct pci_dev *pdev) 6014 { 6015 return (!pci_probe_reset_slot(pdev->slot)) ? 
6016 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus); 6017 } 6018 EXPORT_SYMBOL_GPL(pci_reset_bus); 6019 6020 /** 6021 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count 6022 * @dev: PCI device to query 6023 * 6024 * Returns mmrbc: maximum designed memory read count in bytes or 6025 * appropriate error value. 6026 */ 6027 int pcix_get_max_mmrbc(struct pci_dev *dev) 6028 { 6029 int cap; 6030 u32 stat; 6031 6032 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 6033 if (!cap) 6034 return -EINVAL; 6035 6036 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) 6037 return -EINVAL; 6038 6039 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21); 6040 } 6041 EXPORT_SYMBOL(pcix_get_max_mmrbc); 6042 6043 /** 6044 * pcix_get_mmrbc - get PCI-X maximum memory read byte count 6045 * @dev: PCI device to query 6046 * 6047 * Returns mmrbc: maximum memory read count in bytes or appropriate error 6048 * value. 6049 */ 6050 int pcix_get_mmrbc(struct pci_dev *dev) 6051 { 6052 int cap; 6053 u16 cmd; 6054 6055 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 6056 if (!cap) 6057 return -EINVAL; 6058 6059 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) 6060 return -EINVAL; 6061 6062 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2); 6063 } 6064 EXPORT_SYMBOL(pcix_get_mmrbc); 6065 6066 /** 6067 * pcix_set_mmrbc - set PCI-X maximum memory read byte count 6068 * @dev: PCI device to query 6069 * @mmrbc: maximum memory read count in bytes 6070 * valid values are 512, 1024, 2048, 4096 6071 * 6072 * If possible sets maximum memory read byte count, some bridges have errata 6073 * that prevent this. 6074 */ 6075 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) 6076 { 6077 int cap; 6078 u32 stat, v, o; 6079 u16 cmd; 6080 6081 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc)) 6082 return -EINVAL; 6083 6084 v = ffs(mmrbc) - 10; 6085 6086 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 6087 if (!cap) 6088 return -EINVAL; 6089 6090 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) 6091 return -EINVAL; 6092 6093 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21) 6094 return -E2BIG; 6095 6096 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) 6097 return -EINVAL; 6098 6099 o = (cmd & PCI_X_CMD_MAX_READ) >> 2; 6100 if (o != v) { 6101 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC)) 6102 return -EIO; 6103 6104 cmd &= ~PCI_X_CMD_MAX_READ; 6105 cmd |= v << 2; 6106 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd)) 6107 return -EIO; 6108 } 6109 return 0; 6110 } 6111 EXPORT_SYMBOL(pcix_set_mmrbc); 6112 6113 /** 6114 * pcie_get_readrq - get PCI Express read request size 6115 * @dev: PCI device to query 6116 * 6117 * Returns maximum memory read request in bytes or appropriate error value. 
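 *
 * A caller might pair this with pcie_set_readrq() to clamp MRRS, e.g.
 * (illustrative sketch only; "pdev" stands for the caller's device):
 *
 *	if (pcie_get_readrq(pdev) > 512)
 *		ret = pcie_set_readrq(pdev, 512);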
 */
int pcie_get_readrq(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}
EXPORT_SYMBOL(pcie_get_readrq);

/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to configure
 * @rq: maximum memory read count in bytes
 *	valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible, set the maximum memory read request size in bytes.
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
	u16 v;
	int ret;
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);

	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
		return -EINVAL;

	/*
	 * If using the "performance" PCIe config, we clamp the read rq
	 * size to the max packet size to keep the host bridge from
	 * generating requests larger than we can cope with.
	 */
	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		int mps = pcie_get_mps(dev);

		if (mps < rq)
			rq = mps;
	}

	v = (ffs(rq) - 8) << 12;

	if (bridge->no_inc_mrrs) {
		int max_mrrs = pcie_get_readrq(dev);

		if (rq > max_mrrs) {
			pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs);
			return -EINVAL;
		}
	}

	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						 PCI_EXP_DEVCTL_READRQ, v);

	return pcibios_err_to_errno(ret);
}
EXPORT_SYMBOL(pcie_set_readrq);

/**
 * pcie_get_mps - get PCI Express maximum payload size
 * @dev: PCI device to query
 *
 * Returns maximum payload size in bytes.
 */
int pcie_get_mps(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
}
EXPORT_SYMBOL(pcie_get_mps);

/**
 * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to configure
 * @mps: maximum payload size in bytes
 *	valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible, set the maximum payload size in bytes.
 */
int pcie_set_mps(struct pci_dev *dev, int mps)
{
	u16 v;
	int ret;

	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
		return -EINVAL;

	v = ffs(mps) - 8;
	if (v > dev->pcie_mpss)
		return -EINVAL;
	v <<= 5;

	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						 PCI_EXP_DEVCTL_PAYLOAD, v);

	return pcibios_err_to_errno(ret);
}
EXPORT_SYMBOL(pcie_set_mps);

/**
 * pcie_bandwidth_available - determine minimum link settings of a PCIe
 *			      device and its bandwidth limitation
 * @dev: PCI device to query
 * @limiting_dev: storage for device causing the bandwidth limitation
 * @speed: storage for speed of limiting device
 * @width: storage for width of limiting device
 *
 * Walk up the PCI device chain and find the point where the minimum
 * bandwidth is available. Return the bandwidth available there and (if
 * limiting_dev, speed, and width pointers are supplied) information about
 * that point. The bandwidth returned is in Mb/s, i.e., megabits/second of
 * raw bandwidth.
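 *
 * For example, a driver can log its effective link budget like this
 * (illustrative sketch; "pdev" is a hypothetical device pointer):
 *
 *	enum pci_bus_speed speed;
 *	enum pcie_link_width width;
 *	struct pci_dev *lim = NULL;
 *	u32 bw = pcie_bandwidth_available(pdev, &lim, &speed, &width);
 *
 *	pci_info(pdev, "%u Mb/s available (%s x%d)\n", bw,
 *		 pci_speed_string(speed), width);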
 */
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
			     enum pci_bus_speed *speed,
			     enum pcie_link_width *width)
{
	u16 lnksta;
	enum pci_bus_speed next_speed;
	enum pcie_link_width next_width;
	u32 bw, next_bw;

	if (speed)
		*speed = PCI_SPEED_UNKNOWN;
	if (width)
		*width = PCIE_LNK_WIDTH_UNKNOWN;

	bw = 0;

	while (dev) {
		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);

		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
			PCI_EXP_LNKSTA_NLW_SHIFT;

		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);

		/* Check if current device limits the total bandwidth */
		if (!bw || next_bw <= bw) {
			bw = next_bw;

			if (limiting_dev)
				*limiting_dev = dev;
			if (speed)
				*speed = next_speed;
			if (width)
				*width = next_width;
		}

		dev = pci_upstream_bridge(dev);
	}

	return bw;
}
EXPORT_SYMBOL(pcie_bandwidth_available);

/**
 * pcie_get_speed_cap - query for the PCI device's link speed capability
 * @dev: PCI device to query
 *
 * Query the PCI device speed capability. Return the maximum link speed
 * supported by the device.
 */
enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
{
	u32 lnkcap2, lnkcap;

	/*
	 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The
	 * implementation note there recommends using the Supported Link
	 * Speeds Vector in Link Capabilities 2 when supported.
	 *
	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
	 * should use the Supported Link Speeds field in Link Capabilities,
	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
	 */
	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);

	/* PCIe r3.0-compliant */
	if (lnkcap2)
		return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);

	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
		return PCIE_SPEED_5_0GT;
	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
		return PCIE_SPEED_2_5GT;

	return PCI_SPEED_UNKNOWN;
}
EXPORT_SYMBOL(pcie_get_speed_cap);

/**
 * pcie_get_width_cap - query for the PCI device's link width capability
 * @dev: PCI device to query
 *
 * Query the PCI device width capability. Return the maximum link width
 * supported by the device.
 */
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
{
	u32 lnkcap;

	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
	if (lnkcap)
		return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;

	return PCIE_LNK_WIDTH_UNKNOWN;
}
EXPORT_SYMBOL(pcie_get_width_cap);

/**
 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
 * @dev: PCI device
 * @speed: storage for link speed
 * @width: storage for link width
 *
 * Calculate a PCI device's link bandwidth by querying for its link speed
 * and width, multiplying them, and applying encoding overhead. The result
 * is in Mb/s, i.e., megabits/second of raw bandwidth.
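 *
 * For instance, PCIE_SPEED2MBS_ENC() rates an 8.0 GT/s lane at 7877 Mb/s
 * after 128b/130b encoding overhead, so an 8.0 GT/s x4 capable device
 * yields 4 * 7877 = 31508 here.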
 */
u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
			   enum pcie_link_width *width)
{
	*speed = pcie_get_speed_cap(dev);
	*width = pcie_get_width_cap(dev);

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
		return 0;

	return *width * PCIE_SPEED2MBS_ENC(*speed);
}

/**
 * __pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 * @verbose: Print info even when enough bandwidth is available
 *
 * If the available bandwidth at the device is less than the device is
 * capable of, report the device's maximum possible bandwidth and the
 * upstream link that limits its performance. If @verbose, always print
 * the available bandwidth, even if the device isn't constrained.
 */
void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	struct pci_dev *limiting_dev = NULL;
	u32 bw_avail, bw_cap;

	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);

	if (bw_avail >= bw_cap && verbose)
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
			 bw_cap / 1000, bw_cap % 1000,
			 pci_speed_string(speed_cap), width_cap);
	else if (bw_avail < bw_cap)
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
			 bw_avail / 1000, bw_avail % 1000,
			 pci_speed_string(speed), width,
			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
			 bw_cap / 1000, bw_cap % 1000,
			 pci_speed_string(speed_cap), width_cap);
}

/**
 * pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 *
 * Report the available bandwidth at the device.
 */
void pcie_print_link_status(struct pci_dev *dev)
{
	__pcie_print_link_status(dev, true);
}
EXPORT_SYMBOL(pcie_print_link_status);

/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes a BAR mask from the given resource type.
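 *
 * Typical use is to request all memory BARs of a device in one go
 * (illustrative sketch; "pdev" and the region name are hypothetical):
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *	int err = pci_request_selected_regions(pdev, bars, "mydrv");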
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;

	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);
	return bars;
}
EXPORT_SYMBOL(pci_select_bars);

/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables */
}

static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
				  unsigned int command_bits, u32 flags)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
					  flags);
	return 0;
}

/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: traverse ancestors and change bridges
 *	CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
		(command_bits & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}

#ifdef CONFIG_ACPI
bool pci_pr3_present(struct pci_dev *pdev)
{
	struct acpi_device *adev;

	if (acpi_disabled)
		return false;

	adev = ACPI_COMPANION(&pdev->dev);
	if (!adev)
		return false;

	return adev->power.flags.power_resources &&
		acpi_has_method(adev->handle, "_PR3");
}
EXPORT_SYMBOL_GPL(pci_pr3_present);
#endif

/**
 * pci_add_dma_alias - Add a DMA devfn alias for a device
 * @dev: the PCI device for which alias is added
 * @devfn_from: alias slot and function
 * @nr_devfns: number of subsequent devfns to alias
 *
 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
 * which is used to program permissible bus-devfn source addresses for DMA
 * requests in an IOMMU. These aliases factor into IOMMU group creation
 * and are useful for devices generating DMA requests beyond or different
 * from their logical bus-devfn. Examples include device quirks where the
 * device simply uses the wrong devfn, as well as non-transparent bridges
 * where the alias may be a proxy for devices in another domain.
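 *
 * A header fixup quirk might use it like this (illustrative sketch; the
 * vendor/device IDs are hypothetical):
 *
 *	static void quirk_dma_alias(struct pci_dev *dev)
 *	{
 *		pci_add_dma_alias(dev, PCI_DEVFN(0, 0), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_dma_alias);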
 *
 * IOMMU group creation is performed during device discovery or addition,
 * prior to any potential DMA mapping and therefore prior to driver probing
 * (especially for userspace assigned devices where IOMMU group definition
 * cannot be left as a userspace activity). DMA aliases should therefore
 * be configured via quirks, such as the PCI fixup header quirk.
 */
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
		       unsigned int nr_devfns)
{
	int devfn_to;

	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
	devfn_to = devfn_from + nr_devfns - 1;

	if (!dev->dma_alias_mask)
		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
	if (!dev->dma_alias_mask) {
		pci_warn(dev, "Unable to allocate DMA alias mask\n");
		return;
	}

	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);

	if (nr_devfns == 1)
		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
	else if (nr_devfns > 1)
		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
			 PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
}

bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
{
	return (dev1->dma_alias_mask &&
		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
	       (dev2->dma_alias_mask &&
		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
	       pci_real_dma_dev(dev1) == dev2 ||
	       pci_real_dma_dev(dev2) == dev1;
}

bool pci_device_is_present(struct pci_dev *pdev)
{
	u32 v;

	/* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */
	pdev = pci_physfn(pdev);
	if (pci_dev_is_disconnected(pdev))
		return false;
	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
}
EXPORT_SYMBOL_GPL(pci_device_is_present);

void pci_ignore_hotplug(struct pci_dev *dev)
{
	struct pci_dev *bridge = dev->bus->self;

	dev->ignore_hotplug = 1;
	/* Propagate the "ignore hotplug" setting to the parent bridge. */
	if (bridge)
		bridge->ignore_hotplug = 1;
}
EXPORT_SYMBOL_GPL(pci_ignore_hotplug);

/**
 * pci_real_dma_dev - Get PCI DMA device for PCI device
 * @dev: the PCI device that may have a PCI DMA alias
 *
 * Permits the platform to provide architecture-specific functionality to
 * devices needing to alias DMA to another PCI device on another PCI bus.
 * If the PCI device is on the same bus, it is recommended to use
 * pci_add_dma_alias(). This is the default implementation. Architecture
 * implementations can override this.
 */
struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
{
	return dev;
}

resource_size_t __weak pcibios_default_alignment(void)
{
	return 0;
}

/*
 * Arches that don't want to expose struct resource to userland as-is in
 * sysfs and /proc can implement their own pci_resource_to_user().
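 *
 * A minimal arch override might translate CPU-visible addresses back to
 * raw bus addresses, e.g. (sketch; arch_pci_bus_offset() is hypothetical):
 *
 *	void pci_resource_to_user(const struct pci_dev *dev, int bar,
 *				  const struct resource *rsrc,
 *				  resource_size_t *start, resource_size_t *end)
 *	{
 *		*start = rsrc->start - arch_pci_bus_offset(dev);
 *		*end = rsrc->end - arch_pci_bus_offset(dev);
 *	}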
 */
void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
				 const struct resource *rsrc,
				 resource_size_t *start, resource_size_t *end)
{
	*start = rsrc->start;
	*end = rsrc->end;
}

static char *resource_alignment_param;
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to check
 * @resize: whether or not to change resources' size when reassigning alignment
 *
 * RETURNS: Resource alignment if it is specified.
 *	    Zero if it is not specified.
 */
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
							bool *resize)
{
	int align_order, count;
	resource_size_t align = pcibios_default_alignment();
	const char *p;
	int ret;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	if (!p || !*p)
		goto out;
	if (pci_has_flag(PCI_PROBE_ONLY)) {
		align = 0;
		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
		goto out;
	}

	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
			if (align_order > 63) {
				pr_err("PCI: Invalid requested alignment (order %d)\n",
				       align_order);
				align_order = PAGE_SHIFT;
			}
		} else {
			align_order = PAGE_SHIFT;
		}

		ret = pci_dev_str_match(dev, p, &p);
		if (ret == 1) {
			*resize = true;
			align = 1ULL << align_order;
			break;
		} else if (ret < 0) {
			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
			       p);
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
out:
	spin_unlock(&resource_alignment_lock);
	return align;
}

static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
					   resource_size_t align, bool resize)
{
	struct resource *r = &dev->resource[bar];
	resource_size_t size;

	if (!(r->flags & IORESOURCE_MEM))
		return;

	if (r->flags & IORESOURCE_PCI_FIXED) {
		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
			 bar, r, (unsigned long long)align);
		return;
	}

	size = resource_size(r);
	if (size >= align)
		return;

	/*
	 * Increase the alignment of the resource. There are two ways we
	 * can do this:
	 *
	 * 1) Increase the size of the resource. BARs are aligned on their
	 *    size, so when we reallocate space for this resource, we'll
	 *    allocate it with the larger alignment. This also prevents
	 *    assignment of any other BARs inside the alignment region, so
	 *    if we're requesting page alignment, this means no other BARs
	 *    will share the page.
	 *
	 *    The disadvantage is that this makes the resource larger than
	 *    the hardware BAR, which may break drivers that compute things
	 *    based on the resource size, e.g., to find registers at a
	 *    fixed offset before the end of the BAR.
	 *
	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
	 *    set r->start to the desired alignment. By itself this
	 *    doesn't prevent other BARs being put inside the alignment
	 *    region, but if we realign *every* resource of every device in
	 *    the system, none of them will share an alignment region.
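	 *
	 * For example, given a 16 KB BAR and a requested 64 KB alignment,
	 * method 1 grows the resource to 64 KB, while method 2 keeps the
	 * 16 KB size but asks the allocator for a 64 KB-aligned start
	 * address.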
	 *
	 * When the user has requested alignment for only some devices via
	 * the "pci=resource_alignment" argument, "resize" is true and we
	 * use the first method. Otherwise we assume we're aligning all
	 * devices and we use the second.
	 */

	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
		 bar, r, (unsigned long long)align);

	if (resize) {
		r->start = 0;
		r->end = align - 1;
	} else {
		r->flags &= ~IORESOURCE_SIZEALIGN;
		r->flags |= IORESOURCE_STARTALIGN;
		r->start = align;
		r->end = r->start + size - 1;
	}
	r->flags |= IORESOURCE_UNSET;
}

/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by the kernel's boot parameter
 * 'pci=resource_alignment='. It also rounds up the size to the specified
 * alignment. Later on, the kernel will assign page-aligned memory
 * resources back to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align;
	u16 command;
	bool resize = false;

	/*
	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
	 * 3.4.1.11. Their resources are allocated from the space
	 * described by the VF BARx register in the PF's SR-IOV capability.
	 * We can't influence their alignment here.
	 */
	if (dev->is_virtfn)
		return;

	/* Check whether the specified PCI device is a reassignment target */
	align = pci_specified_resource_alignment(dev, &resize);
	if (!align)
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		pci_warn(dev, "Can't reassign resources to host bridge\n");
		return;
	}

	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		pci_request_resource_alignment(dev, i, align, resize);

	/*
	 * Need to disable the bridge's resource windows so that the
	 * kernel can reassign new resource windows later on.
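	 *
	 * For example, after booting with
	 * "pci=resource_alignment=16@0000:00:01.0" (a hypothetical bridge
	 * address), the bridge's windows are released here and reassigned
	 * with 64 KB (2^16-byte) alignment later in boot.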
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->flags |= IORESOURCE_UNSET;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}

static ssize_t resource_alignment_show(const struct bus_type *bus, char *buf)
{
	size_t count = 0;

	spin_lock(&resource_alignment_lock);
	if (resource_alignment_param)
		count = sysfs_emit(buf, "%s\n", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);

	return count;
}

static ssize_t resource_alignment_store(const struct bus_type *bus,
					const char *buf, size_t count)
{
	char *param, *old, *end;

	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	param = kstrndup(buf, count, GFP_KERNEL);
	if (!param)
		return -ENOMEM;

	end = strchr(param, '\n');
	if (end)
		*end = '\0';

	spin_lock(&resource_alignment_lock);
	old = resource_alignment_param;
	if (strlen(param)) {
		resource_alignment_param = param;
	} else {
		kfree(param);
		resource_alignment_param = NULL;
	}
	spin_unlock(&resource_alignment_lock);

	kfree(old);

	return count;
}

static BUS_ATTR_RW(resource_alignment);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
			       &bus_attr_resource_alignment);
}
late_initcall(pci_resource_alignment_sysfs_init);

static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

#ifdef CONFIG_PCI_DOMAINS_GENERIC
static DEFINE_IDA(pci_domain_nr_static_ida);
static DEFINE_IDA(pci_domain_nr_dynamic_ida);

static void of_pci_reserve_static_domain_nr(void)
{
	struct device_node *np;
	int domain_nr;

	for_each_node_by_type(np, "pci") {
		domain_nr = of_get_pci_domain_nr(np);
		if (domain_nr < 0)
			continue;
		/*
		 * Permanently allocate domain_nr in dynamic_ida to
		 * prevent it from being allocated dynamically later.
		 */
		ida_alloc_range(&pci_domain_nr_dynamic_ida,
				domain_nr, domain_nr, GFP_KERNEL);
	}
}

static int of_pci_bus_find_domain_nr(struct device *parent)
{
	static bool static_domains_reserved = false;
	int domain_nr;

	/* On the first call scan device tree for static allocations. */
	if (!static_domains_reserved) {
		of_pci_reserve_static_domain_nr();
		static_domains_reserved = true;
	}

	if (parent) {
		/*
		 * If domain is in DT, allocate it in static IDA. This
		 * prevents duplicate static allocations in case of errors
		 * in DT.
		 */
		domain_nr = of_get_pci_domain_nr(parent->of_node);
		if (domain_nr >= 0)
			return ida_alloc_range(&pci_domain_nr_static_ida,
					       domain_nr, domain_nr,
					       GFP_KERNEL);
	}

	/*
	 * If domain was not specified in DT, choose a free ID from dynamic
	 * allocations. All domain numbers from DT are permanently in
	 * dynamic allocations to prevent assigning them to other DT nodes
	 * that lack a static domain.
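	 *
	 * For reference, a DT node pins its domain statically with the
	 * "linux,pci-domain" property, e.g. (illustrative sketch):
	 *
	 *	pcie@40000000 {
	 *		linux,pci-domain = <2>;
	 *	};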
	 */
	return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL);
}

static void of_pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
{
	if (bus->domain_nr < 0)
		return;

	/* Release domain from IDA where it was allocated. */
	if (of_get_pci_domain_nr(parent->of_node) == bus->domain_nr)
		ida_free(&pci_domain_nr_static_ida, bus->domain_nr);
	else
		ida_free(&pci_domain_nr_dynamic_ida, bus->domain_nr);
}

int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
{
	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
			       acpi_pci_bus_find_domain_nr(bus);
}

void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
{
	if (!acpi_disabled)
		return;
	of_pci_bus_release_domain_nr(bus, parent);
}
#endif

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strncmp(str, "noats", 5)) {
				pr_info("PCIe: ATS is disabled\n");
				pcie_ats_disabled = true;
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "earlydump")) {
				pci_early_dump = true;
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				resource_alignment_param = str + 19;
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmmiosize=", 11)) {
				pci_hotplug_mmio_size = memparse(str + 11, &str);
			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mmio_size = memparse(str + 10, &str);
				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
			} else if (!strncmp(str, "hpbussize=", 10)) {
				pci_hotplug_bus_size =
					simple_strtoul(str + 10, &str, 0);
				if (pci_hotplug_bus_size > 0xff)
					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
				disable_acs_redir_param = str + 18;
			} else {
				pr_err("PCI: Unknown option `%s'\n", str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);

/*
 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
 * in pci_setup(), above, to point to data in the __initdata section which
 * will be freed after the init sequence is complete. We can't allocate memory
 * in pci_setup() because some architectures do not have any memory allocation
 * service available during an early_param() call. So we allocate memory and
 * copy the variables here before the init section is freed.
 */
static int __init pci_realloc_setup_params(void)
{
	resource_alignment_param = kstrdup(resource_alignment_param,
					   GFP_KERNEL);
	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);

	return 0;
}
pure_initcall(pci_realloc_setup_params);