/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include "pci.h"

const char *pci_power_names[] = {
        "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
        struct list_head list;
        struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
        unsigned int delay = dev->d3_delay;

        if (delay < pci_pm_d3_delay)
                delay = pci_pm_d3_delay;

        msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE         (256)
#define DEFAULT_CARDBUS_MEM_SIZE        (64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE         (256)
#define DEFAULT_HOTPLUG_MEM_SIZE        (2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
        struct list_head *tmp;
        unsigned char max, n;

        max = bus->busn_res.end;
        list_for_each(tmp, &bus->children) {
                n = pci_bus_max_busnr(pci_bus_b(tmp));
                if (n > max)
                        max = n;
        }
        return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
        /*
         * Make sure the BAR is actually a memory resource, not an IO resource
         */
        if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
                WARN_ON(1);
                return NULL;
        }
        return ioremap_nocache(pci_resource_start(pdev, bar),
                               pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif

#define PCI_FIND_CAP_TTL        48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
                                   u8 pos, int cap, int *ttl)
{
        u8 id;

        while ((*ttl)--) {
                pci_bus_read_config_byte(bus, devfn, pos, &pos);
                if (pos < 0x40)
                        break;
                pos &= ~3;
                pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
                                         &id);
                if (id == 0xff)
                        break;
                if (id == cap)
                        return pos;
                pos += PCI_CAP_LIST_NEXT;
        }
        return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
                               u8 pos, int cap)
{
        int ttl = PCI_FIND_CAP_TTL;

        return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
        return __pci_find_next_cap(dev->bus, dev->devfn,
                                   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
                                    unsigned int devfn, u8 hdr_type)
{
        u16 status;

        pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
        if (!(status & PCI_STATUS_CAP_LIST))
                return 0;

        switch (hdr_type) {
        case PCI_HEADER_TYPE_NORMAL:
        case PCI_HEADER_TYPE_BRIDGE:
                return PCI_CAPABILITY_LIST;
        case PCI_HEADER_TYPE_CARDBUS:
                return PCI_CB_CAPABILITY_LIST;
        default:
                return 0;
        }

        return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
        int pos;

        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
        if (pos)
                pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

        return pos;
}
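/*
 * Usage sketch (hypothetical driver code, not part of this file): a driver
 * that wants to access its Power Management capability registers directly
 * would typically locate the capability first.  "pdev" is assumed to be a
 * bound struct pci_dev:
 *
 *      u16 pmc;
 *      int pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *
 *      if (pos)
 *              pci_read_config_word(pdev, pos + PCI_PM_PMC, &pmc);
 */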
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
        int pos;
        u8 hdr_type;

        pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

        pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
        if (pos)
                pos = __pci_find_next_cap(bus, devfn, pos, cap);

        return pos;
}

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
        u32 header;
        int ttl;
        int pos = PCI_CFG_SPACE_SIZE;

        /* minimum 8 bytes per capability */
        ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

        if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
                return 0;

        if (start)
                pos = start;

        if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
                return 0;

        /*
         * If we have no capabilities, this is indicated by cap ID,
         * cap version and next pointer all being 0.
         */
        if (header == 0)
                return 0;

        while (ttl-- > 0) {
                if (PCI_EXT_CAP_ID(header) == cap && pos != start)
                        return pos;

                pos = PCI_EXT_CAP_NEXT(header);
                if (pos < PCI_CFG_SPACE_SIZE)
                        break;

                if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
                        break;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
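/*
 * Usage sketch (hypothetical, not part of this file): since extended
 * capabilities such as the vendor-specific one may appear more than once,
 * a caller can walk all instances by feeding each result back in as the
 * new start offset:
 *
 *      int pos = 0;
 *
 *      while ((pos = pci_find_next_ext_capability(pdev, pos,
 *                                                 PCI_EXT_CAP_ID_VNDR)))
 *              handle_vendor_cap(pdev, pos);   // hypothetical helper
 */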
/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR      Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC       Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN      Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR      Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
        return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
        int rc, ttl = PCI_FIND_CAP_TTL;
        u8 cap, mask;

        if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
                mask = HT_3BIT_CAP_MASK;
        else
                mask = HT_5BIT_CAP_MASK;

        pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
                                      PCI_CAP_ID_HT, &ttl);
        while (pos) {
                rc = pci_read_config_byte(dev, pos + 3, &cap);
                if (rc != PCIBIOS_SUCCESSFUL)
                        return 0;

                if ((cap & mask) == ht_cap)
                        return pos;

                pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
                                              pos + PCI_CAP_LIST_NEXT,
                                              PCI_CAP_ID_HT, &ttl);
        }

        return 0;
}

/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
        return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
        int pos;

        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
        if (pos)
                pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

        return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);
/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource
 * region of parent bus the given region is contained in or where
 * it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
        const struct pci_bus *bus = dev->bus;
        int i;
        struct resource *best = NULL, *r;

        pci_bus_for_each_resource(bus, r, i) {
                if (!r)
                        continue;
                if (res->start && !(res->start >= r->start && res->end <= r->end))
                        continue;       /* Not contained */
                if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
                        continue;       /* Wrong type */
                if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
                        return r;       /* Exact match */
                /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
                if (r->flags & IORESOURCE_PREFETCH)
                        continue;
                /* .. but we can put a prefetchable resource inside a non-prefetchable one */
                if (!best)
                        best = r;
        }
        return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
        int i;

        for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
                pci_update_resource(dev, i);
}

static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
        if (!ops->is_manageable || !ops->set_state || !ops->choose_state
            || !ops->sleep_wake)
                return -EINVAL;
        pci_platform_pm = ops;
        return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
        return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
                                               pci_power_t t)
{
        return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
        return pci_platform_pm ?
                        pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
        return pci_platform_pm ?
                        pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
        return pci_platform_pm ?
                        pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}
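/*
 * Registration sketch (hypothetical, not part of this file): a platform
 * layer such as ACPI would supply all of the mandatory callbacks checked
 * in pci_set_platform_pm() above and register them once, early at boot.
 * The "example_*" names are placeholders:
 *
 *      static struct pci_platform_pm_ops example_pci_platform_pm = {
 *              .is_manageable  = example_is_manageable,
 *              .set_state      = example_set_state,
 *              .choose_state   = example_choose_state,
 *              .sleep_wake     = example_sleep_wake,
 *              .run_wake       = example_run_wake,
 *      };
 *
 *      pci_set_platform_pm(&example_pci_platform_pm);
 */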
/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        u16 pmcsr;
        bool need_restore = false;

        /* Check if we're already there */
        if (dev->current_state == state)
                return 0;

        if (!dev->pm_cap)
                return -EIO;

        if (state < PCI_D0 || state > PCI_D3hot)
                return -EINVAL;

        /* Validate current state:
         * Can enter D0 from any state, but we can only go deeper
         * to sleep if we're already in a low power state
         */
        if (state != PCI_D0 && dev->current_state <= PCI_D3cold
            && dev->current_state > state) {
                dev_err(&dev->dev, "invalid power transition "
                        "(from state %d to %d)\n", dev->current_state, state);
                return -EINVAL;
        }

        /* check if this device supports the desired state */
        if ((state == PCI_D1 && !dev->d1_support)
           || (state == PCI_D2 && !dev->d2_support))
                return -EIO;

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

        /* If we're (effectively) in D3, force entire word to 0.
         * This doesn't affect PME_Status, disables PME_En, and
         * sets PowerState to 0.
         */
        switch (dev->current_state) {
        case PCI_D0:
        case PCI_D1:
        case PCI_D2:
                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                pmcsr |= state;
                break;
        case PCI_D3hot:
        case PCI_D3cold:
        case PCI_UNKNOWN: /* Boot-up */
                if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
                 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
                        need_restore = true;
                /* Fall-through: force to D0 */
        default:
                pmcsr = 0;
                break;
        }

        /* enter specified state */
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

        /* Mandatory power management transition delays */
        /* see PCI PM 1.1 5.6.1 table 18 */
        if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
                pci_dev_d3_sleep(dev);
        else if (state == PCI_D2 || dev->current_state == PCI_D2)
                udelay(PCI_PM_D2_DELAY);

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        if (dev->current_state != state && printk_ratelimit())
                dev_info(&dev->dev, "Refused to change power state, "
                        "currently in D%d\n", dev->current_state);

        /*
         * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
         * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
         * from D3hot to D0 _may_ perform an internal reset, thereby
         * going to "D0 Uninitialized" rather than "D0 Initialized".
         * For example, at least some versions of the 3c905B and the
         * 3c556B exhibit this behaviour.
         *
         * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
         * devices in a D3hot state at boot.  Consequently, we need to
         * restore at least the BARs so that the device will be
         * accessible to its driver.
         */
        if (need_restore)
                pci_restore_bars(dev);

        if (dev->bus->self)
                pcie_aspm_pm_state_change(dev->bus->self);

        return 0;
}
/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
        if (dev->pm_cap) {
                u16 pmcsr;

                /*
                 * Configuration space is not accessible for device in
                 * D3cold, so just keep or set D3cold for safety
                 */
                if (dev->current_state == PCI_D3cold)
                        return;
                if (state == PCI_D3cold) {
                        dev->current_state = PCI_D3cold;
                        return;
                }
                pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
                dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        } else {
                dev->current_state = state;
        }
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
        if (platform_pci_power_manageable(dev))
                platform_pci_set_power_state(dev, PCI_D0);

        pci_raw_set_power_state(dev, PCI_D0);
        pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
        int error;

        if (platform_pci_power_manageable(dev)) {
                error = platform_pci_set_power_state(dev, state);
                if (!error)
                        pci_update_current_state(dev, state);
                /* Fall back to PCI_D0 if native PM is not supported */
                if (!dev->pm_cap)
                        dev->current_state = PCI_D0;
        } else {
                error = -ENODEV;
                /* Fall back to PCI_D0 if native PM is not supported */
                if (!dev->pm_cap)
                        dev->current_state = PCI_D0;
        }

        return error;
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
        if (state == PCI_D0) {
                pci_platform_power_transition(dev, PCI_D0);
                /*
                 * Mandatory power management transition delays, see
                 * PCI Express Base Specification Revision 2.0 Section
                 * 6.6.1: Conventional Reset.  Do not delay for
                 * devices powered on/off by the corresponding bridge,
                 * because we have already delayed for the bridge.
                 */
                if (dev->runtime_d3cold) {
                        msleep(dev->d3cold_delay);
                        /*
                         * When powering on a bridge from D3cold, the
                         * whole hierarchy may be powered on into
                         * D0uninitialized state, resume them to give
                         * them a chance to suspend again
                         */
                        pci_wakeup_bus(dev->subordinate);
                }
        }
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
        pci_power_t state = *(pci_power_t *)data;

        dev->current_state = state;
        return 0;
}

/**
 * __pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
        if (bus)
                pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}
/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
        int ret;

        if (state <= PCI_D0)
                return -EINVAL;
        ret = pci_platform_power_transition(dev, state);
        /* Powering off a bridge may power off the whole hierarchy */
        if (!ret && state == PCI_D3cold)
                __pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
        return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        int error;

        /* bound the state we're entering */
        if (state > PCI_D3cold)
                state = PCI_D3cold;
        else if (state < PCI_D0)
                state = PCI_D0;
        else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
                /*
                 * If the device or the parent bridge do not support PCI PM,
                 * ignore the request if we're doing anything other than putting
                 * it into D0 (which would only happen on boot).
                 */
                return 0;

        /* Check if we're already there */
        if (dev->current_state == state)
                return 0;

        __pci_start_power_transition(dev, state);

        /* This device is quirked not to be put into D3, so
           don't put it in D3 */
        if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
                return 0;

        /*
         * To put the device in D3cold, we put the device into D3hot in the
         * native way, then put the device into D3cold with platform ops.
         */
        error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
                                        PCI_D3hot : state);

        if (!__pci_complete_power_transition(dev, state))
                error = 0;
        /*
         * When aspm_policy is "powersave" this call ensures
         * that ASPM is configured.
         */
        if (!error && dev->bus->self)
                pcie_aspm_powersave_config_link(dev->bus->self);

        return error;
}
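/*
 * Usage sketch (hypothetical legacy-PM driver code, not part of this file):
 * a driver's suspend hook usually picks the state with pci_choose_state()
 * (below) and then enters it:
 *
 *      static int example_suspend(struct pci_dev *pdev, pm_message_t msg)
 *      {
 *              pci_save_state(pdev);
 *              pci_disable_device(pdev);
 *              return pci_set_power_state(pdev, pci_choose_state(pdev, msg));
 *      }
 */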
/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *      that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
        pci_power_t ret;

        if (!pci_find_capability(dev, PCI_CAP_ID_PM))
                return PCI_D0;

        ret = platform_pci_choose_state(dev);
        if (ret != PCI_POWER_ERROR)
                return ret;

        switch (state.event) {
        case PM_EVENT_ON:
                return PCI_D0;
        case PM_EVENT_FREEZE:
        case PM_EVENT_PRETHAW:
                /* REVISIT both freeze and pre-thaw "should" use D0 */
        case PM_EVENT_SUSPEND:
        case PM_EVENT_HIBERNATE:
                return PCI_D3hot;
        default:
                dev_info(&dev->dev, "unrecognized suspend event %d\n",
                         state.event);
                BUG();
        }
        return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);

#define PCI_EXP_SAVE_REGS       7

static struct pci_cap_saved_state *pci_find_saved_cap(
        struct pci_dev *pci_dev, char cap)
{
        struct pci_cap_saved_state *tmp;

        hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
                if (tmp->cap.cap_nr == cap)
                        return tmp;
        }
        return NULL;
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
        int i = 0;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        if (!pci_is_pcie(dev))
                return 0;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
        if (!save_state) {
                dev_err(&dev->dev, "buffer not found in %s\n", __func__);
                return -ENOMEM;
        }

        cap = (u16 *)&save_state->cap.data[0];
        pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

        return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
        int i = 0;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
        if (!save_state)
                return;

        cap = (u16 *)&save_state->cap.data[0];
        pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
        int pos;
        struct pci_cap_saved_state *save_state;

        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (pos <= 0)
                return 0;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
        if (!save_state) {
                dev_err(&dev->dev, "buffer not found in %s\n", __func__);
                return -ENOMEM;
        }

        pci_read_config_word(dev, pos + PCI_X_CMD,
                             (u16 *)save_state->cap.data);

        return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
        int i = 0, pos;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!save_state || pos <= 0)
                return;
        cap = (u16 *)&save_state->cap.data[0];

        pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}
/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
        int i;
        /* XXX: 100% dword access ok here? */
        for (i = 0; i < 16; i++)
                pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
        dev->state_saved = true;
        if ((i = pci_save_pcie_state(dev)) != 0)
                return i;
        if ((i = pci_save_pcix_state(dev)) != 0)
                return i;
        return 0;
}

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
                                     u32 saved_val, int retry)
{
        u32 val;

        pci_read_config_dword(pdev, offset, &val);
        if (val == saved_val)
                return;

        for (;;) {
                dev_dbg(&pdev->dev, "restoring config space at offset "
                        "%#x (was %#x, writing %#x)\n", offset, val, saved_val);
                pci_write_config_dword(pdev, offset, saved_val);
                if (retry-- <= 0)
                        return;

                pci_read_config_dword(pdev, offset, &val);
                if (val == saved_val)
                        return;

                mdelay(1);
        }
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
                                           int start, int end, int retry)
{
        int index;

        for (index = end; index >= start; index--)
                pci_restore_config_dword(pdev, 4 * index,
                                         pdev->saved_config_space[index],
                                         retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
        if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
                pci_restore_config_space_range(pdev, 10, 15, 0);
                /* Restore BARs before the command register. */
                pci_restore_config_space_range(pdev, 4, 9, 10);
                pci_restore_config_space_range(pdev, 0, 3, 0);
        } else {
                pci_restore_config_space_range(pdev, 0, 15, 0);
        }
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
        if (!dev->state_saved)
                return;

        /* PCI Express register must be restored first */
        pci_restore_pcie_state(dev);
        pci_restore_ats_state(dev);

        pci_restore_config_space(dev);

        pci_restore_pcix_state(dev);
        pci_restore_msi_state(dev);
        pci_restore_iov_state(dev);

        dev->state_saved = false;
}
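/*
 * Usage sketch (hypothetical, not part of this file): pci_save_state() and
 * pci_restore_state() are meant to be used as a pair around a suspend/resume
 * or reset cycle:
 *
 *      static int example_resume(struct pci_dev *pdev)
 *      {
 *              pci_set_power_state(pdev, PCI_D0);
 *              pci_restore_state(pdev);        // no-op unless state was saved
 *              return pci_enable_device(pdev);
 *      }
 */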
struct pci_saved_state {
        u32 config_space[16];
        struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *                         the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
        struct pci_saved_state *state;
        struct pci_cap_saved_state *tmp;
        struct pci_cap_saved_data *cap;
        size_t size;

        if (!dev->state_saved)
                return NULL;

        size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

        hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
                size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

        state = kzalloc(size, GFP_KERNEL);
        if (!state)
                return NULL;

        memcpy(state->config_space, dev->saved_config_space,
               sizeof(state->config_space));

        cap = state->cap;
        hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
                size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
                memcpy(cap, &tmp->cap, len);
                cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
        }
        /* Empty cap_save terminates list */

        return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
        struct pci_cap_saved_data *cap;

        dev->state_saved = false;

        if (!state)
                return 0;

        memcpy(dev->saved_config_space, state->config_space,
               sizeof(state->config_space));

        cap = state->cap;
        while (cap->size) {
                struct pci_cap_saved_state *tmp;

                tmp = pci_find_saved_cap(dev, cap->cap_nr);
                if (!tmp || tmp->cap.size != cap->size)
                        return -EINVAL;

                memcpy(tmp->cap.data, cap->data, tmp->cap.size);
                cap = (struct pci_cap_saved_data *)((u8 *)cap +
                       sizeof(struct pci_cap_saved_data) + cap->size);
        }

        dev->state_saved = true;
        return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *                                 and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
                                  struct pci_saved_state **state)
{
        int ret = pci_load_saved_state(dev, *state);
        kfree(*state);
        *state = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
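/*
 * Usage sketch (hypothetical, not part of this file): a caller such as a
 * device-assignment backend can snapshot config space across an operation
 * that clobbers it, e.g. a reset performed while the device is handed out:
 *
 *      struct pci_saved_state *saved;
 *
 *      pci_save_state(pdev);
 *      saved = pci_store_saved_state(pdev);
 *      ... hand the device out, reset it, etc. ...
 *      pci_load_and_free_saved_state(pdev, &saved);
 *      pci_restore_state(pdev);
 */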
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
        int err;

        err = pci_set_power_state(dev, PCI_D0);
        if (err < 0 && err != -EIO)
                return err;
        err = pcibios_enable_device(dev, bars);
        if (err < 0)
                return err;
        pci_fixup_device(pci_fixup_enable, dev);

        return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code; write a proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
        if (pci_is_enabled(dev))
                return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
        return 0;
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
        int err;
        int i, bars = 0;

        /*
         * Power state could be unknown at this point, either due to a fresh
         * boot or a device removal call.  So get the current power state
         * so that things like MSI message writing will behave as expected
         * (e.g. if the device really is in D0 at enable time).
         */
        if (dev->pm_cap) {
                u16 pmcsr;
                pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
                dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        }

        if (atomic_inc_return(&dev->enable_cnt) > 1)
                return 0;               /* already enabled */

        /* Walk all resources, skipping only the SR-IOV ones */
        for (i = 0; i <= PCI_ROM_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);
        for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);

        err = do_pci_enable_device(dev, bars);
        if (err < 0)
                atomic_dec(&dev->enable_cnt);
        return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
        return pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
        return pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
        return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
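/*
 * Usage sketch (hypothetical, not part of this file): the canonical probe
 * pattern pairs pci_enable_device() with pci_disable_device() on the error
 * and teardown paths:
 *
 *      static int example_probe(struct pci_dev *pdev,
 *                               const struct pci_device_id *id)
 *      {
 *              int err = pci_enable_device(pdev);
 *
 *              if (err)
 *                      return err;
 *              err = pci_request_regions(pdev, "example");
 *              if (err)
 *                      pci_disable_device(pdev);
 *              return err;
 *      }
 */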
/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
        unsigned int enabled:1;
        unsigned int pinned:1;
        unsigned int orig_intx:1;
        unsigned int restore_intx:1;
        u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
        struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
        struct pci_devres *this = res;
        int i;

        if (dev->msi_enabled)
                pci_disable_msi(dev);
        if (dev->msix_enabled)
                pci_disable_msix(dev);

        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
                if (this->region_mask & (1 << i))
                        pci_release_region(dev, i);

        if (this->restore_intx)
                pci_intx(dev, this->orig_intx);

        if (this->enabled && !this->pinned)
                pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
        struct pci_devres *dr, *new_dr;

        dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
        if (dr)
                return dr;

        new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
        if (!new_dr)
                return NULL;
        return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
        if (pci_is_managed(pdev))
                return devres_find(&pdev->dev, pcim_release, NULL, NULL);
        return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
        struct pci_devres *dr;
        int rc;

        dr = get_pci_dr(pdev);
        if (unlikely(!dr))
                return -ENOMEM;
        if (dr->enabled)
                return 0;

        rc = pci_enable_device(pdev);
        if (!rc) {
                pdev->is_managed = 1;
                dr->enabled = 1;
        }
        return rc;
}

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
        struct pci_devres *dr;

        dr = find_pci_dr(pdev);
        WARN_ON(!dr || !dr->enabled);
        if (dr)
                dr->pinned = 1;
}
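/*
 * Usage sketch (hypothetical, not part of this file): with the managed
 * variant, the disable happens automatically through devres when the
 * driver detaches, so no explicit error/teardown pairing is needed:
 *
 *      static int example_managed_probe(struct pci_dev *pdev,
 *                                       const struct pci_device_id *id)
 *      {
 *              int err = pcim_enable_device(pdev);
 *
 *              if (err)
 *                      return err;
 *              return 0;       // no matching pci_disable_device() required
 *      }
 */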
/**
 * pcibios_add_device - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pcibios_add_device(struct pci_dev *dev)
{
        return 0;
}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
        u16 pci_command;

        pci_read_config_word(dev, PCI_COMMAND, &pci_command);
        if (pci_command & PCI_COMMAND_MASTER) {
                pci_command &= ~PCI_COMMAND_MASTER;
                pci_write_config_word(dev, PCI_COMMAND, pci_command);
        }

        pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
        if (pci_is_enabled(dev))
                do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
        struct pci_devres *dr;

        dr = find_pci_dr(dev);
        if (dr)
                dr->enabled = 0;

        dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
                      "disabling already-disabled device");

        if (atomic_dec_return(&dev->enable_cnt) != 0)
                return;

        do_pci_disable_device(dev);

        dev->is_busmaster = 0;
}

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
                                        enum pcie_reset_state state)
{
        return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
        return pcibios_set_pcie_reset_state(dev, state);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
        int pmcsr_pos;
        u16 pmcsr;
        bool ret = false;

        if (!dev->pm_cap)
                return false;

        pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
        pci_read_config_word(dev, pmcsr_pos, &pmcsr);
        if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
                return false;

        /* Clear PME status. */
        pmcsr |= PCI_PM_CTRL_PME_STATUS;
        if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
                /* Disable PME to avoid interrupt flood. */
                pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
                ret = true;
        }

        pci_write_config_word(dev, pmcsr_pos, pmcsr);

        return ret;
}
/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
        if (pme_poll_reset && dev->pme_poll)
                dev->pme_poll = false;

        if (pci_check_pme_status(dev)) {
                pci_wakeup_event(dev);
                pm_request_resume(&dev->dev);
        }
        return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
        if (bus)
                pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
        pci_wakeup_event(pci_dev);
        pm_request_resume(&pci_dev->dev);
        return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
        if (bus)
                pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
        if (!dev->pm_cap)
                return false;

        return !!(dev->pme_support & (1 << state));
}
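/*
 * Usage sketch (hypothetical, not part of this file): drivers normally check
 * PME# capability for the state they are about to enter before asking for
 * wake-up events, e.g.:
 *
 *      if (pci_pme_capable(pdev, PCI_D3hot))
 *              pci_enable_wake(pdev, PCI_D3hot, true);
 */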
static void pci_pme_list_scan(struct work_struct *work)
{
        struct pci_pme_device *pme_dev, *n;

        mutex_lock(&pci_pme_list_mutex);
        if (!list_empty(&pci_pme_list)) {
                list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
                        if (pme_dev->dev->pme_poll) {
                                struct pci_dev *bridge;

                                bridge = pme_dev->dev->bus->self;
                                /*
                                 * If the bridge is in a low power state, the
                                 * configuration space of subordinate devices
                                 * may not be accessible
                                 */
                                if (bridge && bridge->current_state != PCI_D0)
                                        continue;
                                pci_pme_wakeup(pme_dev->dev, NULL);
                        } else {
                                list_del(&pme_dev->list);
                                kfree(pme_dev);
                        }
                }
                if (!list_empty(&pci_pme_list))
                        schedule_delayed_work(&pci_pme_work,
                                              msecs_to_jiffies(PME_TIMEOUT));
        }
        mutex_unlock(&pci_pme_list_mutex);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
        u16 pmcsr;

        if (!dev->pm_cap)
                return;

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        /* Clear PME_Status by writing 1 to it and enable PME# */
        pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
        if (!enable)
                pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

        /*
         * PCI (as opposed to PCIe) PME requires that the device have
         * its PME# line hooked up correctly.  Not all hardware vendors
         * do this, so the PME never gets delivered and the device
         * remains asleep.  The easiest way around this is to
         * periodically walk the list of suspended devices and check
         * whether any have their PME flag set.  The assumption is that
         * we'll wake up often enough anyway that this won't be a huge
         * hit, and the power savings from the devices will still be a
         * win.
         *
         * Although PCIe uses in-band PME message instead of PME# line
         * to report PME, PME does not work for some PCIe devices in
         * reality.  For example, there are devices that set their PME
         * status bits, but don't really bother to send a PME message;
         * there are PCI Express Root Ports that don't bother to
         * trigger interrupts when they receive PME messages from the
         * devices below.  So PME poll is used for PCIe devices too.
         */

        if (dev->pme_poll) {
                struct pci_pme_device *pme_dev;
                if (enable) {
                        pme_dev = kmalloc(sizeof(struct pci_pme_device),
                                          GFP_KERNEL);
                        if (!pme_dev)
                                goto out;
                        pme_dev->dev = dev;
                        mutex_lock(&pci_pme_list_mutex);
                        list_add(&pme_dev->list, &pci_pme_list);
                        if (list_is_singular(&pci_pme_list))
                                schedule_delayed_work(&pci_pme_work,
                                                      msecs_to_jiffies(PME_TIMEOUT));
                        mutex_unlock(&pci_pme_list_mutex);
                } else {
                        mutex_lock(&pci_pme_list_mutex);
                        list_for_each_entry(pme_dev, &pci_pme_list, list) {
                                if (pme_dev->dev == dev) {
                                        list_del(&pme_dev->list);
                                        kfree(pme_dev);
                                        break;
                                }
                        }
                        mutex_unlock(&pci_pme_list_mutex);
                }
        }

out:
        dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
                      bool runtime, bool enable)
{
        int ret = 0;

        if (enable && !runtime && !device_may_wakeup(&dev->dev))
                return -EINVAL;

        /* Don't do the same thing twice in a row for one device. */
        if (!!enable == !!dev->wakeup_prepared)
                return 0;

        /*
         * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
         * Anderson we should be doing PME# wake enable followed by ACPI wake
         * enable.  To disable wake-up we call the platform first, for symmetry.
         */

        if (enable) {
                int error;

                if (pci_pme_capable(dev, state))
                        pci_pme_active(dev, true);
                else
                        ret = 1;
                error = runtime ? platform_pci_run_wake(dev, true) :
                                  platform_pci_sleep_wake(dev, true);
                if (ret)
                        ret = error;
                if (!ret)
                        dev->wakeup_prepared = true;
        } else {
                if (runtime)
                        platform_pci_run_wake(dev, false);
                else
                        platform_pci_sleep_wake(dev, false);
                pci_pme_active(dev, false);
                dev->wakeup_prepared = false;
        }

        return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns an error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
        return pci_pme_capable(dev, PCI_D3cold) ?
                        pci_enable_wake(dev, PCI_D3cold, enable) :
                        pci_enable_wake(dev, PCI_D3hot, enable);
}
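/*
 * Usage sketch (hypothetical, not part of this file): a network driver
 * implementing Wake-on-LAN would typically call this from its suspend path
 * once the hardware has been armed to generate wake events:
 *
 *      if (wol_enabled)        // hypothetical driver flag
 *              pci_wake_from_d3(pdev, true);
 *      else
 *              pci_wake_from_d3(pdev, false);
 */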
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
        pci_power_t target_state = PCI_D3hot;

        if (platform_pci_power_manageable(dev)) {
                /*
                 * Call the platform to choose the target state of the device
                 * and enable wake-up from this state if supported.
                 */
                pci_power_t state = platform_pci_choose_state(dev);

                switch (state) {
                case PCI_POWER_ERROR:
                case PCI_UNKNOWN:
                        break;
                case PCI_D1:
                case PCI_D2:
                        if (pci_no_d1d2(dev))
                                break;
                default:
                        target_state = state;
                }
        } else if (!dev->pm_cap) {
                target_state = PCI_D0;
        } else if (device_may_wakeup(&dev->dev)) {
                /*
                 * Find the deepest state from which the device can generate
                 * wake-up events, make it the target state and enable device
                 * to generate PME#.
                 */
                if (dev->pme_support) {
                        while (target_state
                              && !(dev->pme_support & (1 << target_state)))
                                target_state--;
                }
        }

        return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
        pci_power_t target_state = pci_target_state(dev);
        int error;

        if (target_state == PCI_POWER_ERROR)
                return -EIO;

        /* D3cold during system suspend/hibernate is not supported */
        if (target_state > PCI_D3hot)
                target_state = PCI_D3hot;

        pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

        error = pci_set_power_state(dev, target_state);

        if (error)
                pci_enable_wake(dev, target_state, false);

        return error;
}

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
        pci_enable_wake(dev, PCI_D0, false);
        return pci_set_power_state(dev, PCI_D0);
}

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
        pci_power_t target_state = pci_target_state(dev);
        int error;

        if (target_state == PCI_POWER_ERROR)
                return -EIO;

        dev->runtime_d3cold = target_state == PCI_D3cold;

        __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));

        error = pci_set_power_state(dev, target_state);

        if (error) {
                __pci_enable_wake(dev, target_state, true, false);
                dev->runtime_d3cold = false;
        }

        return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device supports
 * PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
        struct pci_bus *bus = dev->bus;

        if (device_run_wake(&dev->dev))
                return true;

        if (!dev->pme_support)
                return false;

        while (bus->parent) {
                struct pci_dev *bridge = bus->self;

                if (device_run_wake(&bridge->dev))
                        return true;

                bus = bus->parent;
        }

        /* We have reached the root bus. */
        if (bus->bridge)
                return device_run_wake(bus->bridge);

        return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1896 */ 1897 if (pdev->current_state == PCI_D3cold) 1898 pm_runtime_resume(dev); 1899 } 1900 1901 void pci_config_pm_runtime_put(struct pci_dev *pdev) 1902 { 1903 struct device *dev = &pdev->dev; 1904 struct device *parent = dev->parent; 1905 1906 pm_runtime_put(dev); 1907 if (parent) 1908 pm_runtime_put_sync(parent); 1909 } 1910 1911 /** 1912 * pci_pm_init - Initialize PM functions of given PCI device 1913 * @dev: PCI device to handle. 1914 */ 1915 void pci_pm_init(struct pci_dev *dev) 1916 { 1917 int pm; 1918 u16 pmc; 1919 1920 pm_runtime_forbid(&dev->dev); 1921 pm_runtime_set_active(&dev->dev); 1922 pm_runtime_enable(&dev->dev); 1923 device_enable_async_suspend(&dev->dev); 1924 dev->wakeup_prepared = false; 1925 1926 dev->pm_cap = 0; 1927 1928 /* find PCI PM capability in list */ 1929 pm = pci_find_capability(dev, PCI_CAP_ID_PM); 1930 if (!pm) 1931 return; 1932 /* Check device's ability to generate PME# */ 1933 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc); 1934 1935 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { 1936 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n", 1937 pmc & PCI_PM_CAP_VER_MASK); 1938 return; 1939 } 1940 1941 dev->pm_cap = pm; 1942 dev->d3_delay = PCI_PM_D3_WAIT; 1943 dev->d3cold_delay = PCI_PM_D3COLD_WAIT; 1944 dev->d3cold_allowed = true; 1945 1946 dev->d1_support = false; 1947 dev->d2_support = false; 1948 if (!pci_no_d1d2(dev)) { 1949 if (pmc & PCI_PM_CAP_D1) 1950 dev->d1_support = true; 1951 if (pmc & PCI_PM_CAP_D2) 1952 dev->d2_support = true; 1953 1954 if (dev->d1_support || dev->d2_support) 1955 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n", 1956 dev->d1_support ? " D1" : "", 1957 dev->d2_support ? " D2" : ""); 1958 } 1959 1960 pmc &= PCI_PM_CAP_PME_MASK; 1961 if (pmc) { 1962 dev_printk(KERN_DEBUG, &dev->dev, 1963 "PME# supported from%s%s%s%s%s\n", 1964 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", 1965 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", 1966 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", 1967 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", 1968 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); 1969 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; 1970 dev->pme_poll = true; 1971 /* 1972 * Make device's PM flags reflect the wake-up capability, but 1973 * let the user space enable it to wake up the system as needed. 
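 * (User space toggles that via the device's "power/wakeup" sysfs
 * attribute.)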
1974 */ 1975 device_set_wakeup_capable(&dev->dev, true); 1976 /* Disable the PME# generation functionality */ 1977 pci_pme_active(dev, false); 1978 } else { 1979 dev->pme_support = 0; 1980 } 1981 } 1982 1983 static void pci_add_saved_cap(struct pci_dev *pci_dev, 1984 struct pci_cap_saved_state *new_cap) 1985 { 1986 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space); 1987 } 1988 1989 /** 1990 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers 1991 * @dev: the PCI device 1992 * @cap: the capability to allocate the buffer for 1993 * @size: requested size of the buffer 1994 */ 1995 static int pci_add_cap_save_buffer( 1996 struct pci_dev *dev, char cap, unsigned int size) 1997 { 1998 int pos; 1999 struct pci_cap_saved_state *save_state; 2000 2001 pos = pci_find_capability(dev, cap); 2002 if (pos <= 0) 2003 return 0; 2004 2005 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL); 2006 if (!save_state) 2007 return -ENOMEM; 2008 2009 save_state->cap.cap_nr = cap; 2010 save_state->cap.size = size; 2011 pci_add_saved_cap(dev, save_state); 2012 2013 return 0; 2014 } 2015 2016 /** 2017 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities 2018 * @dev: the PCI device 2019 */ 2020 void pci_allocate_cap_save_buffers(struct pci_dev *dev) 2021 { 2022 int error; 2023 2024 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 2025 PCI_EXP_SAVE_REGS * sizeof(u16)); 2026 if (error) 2027 dev_err(&dev->dev, 2028 "unable to preallocate PCI Express save buffer\n"); 2029 2030 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16)); 2031 if (error) 2032 dev_err(&dev->dev, 2033 "unable to preallocate PCI-X save buffer\n"); 2034 } 2035 2036 void pci_free_cap_save_buffers(struct pci_dev *dev) 2037 { 2038 struct pci_cap_saved_state *tmp; 2039 struct hlist_node *n; 2040 2041 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next) 2042 kfree(tmp); 2043 } 2044 2045 /** 2046 * pci_configure_ari - enable or disable ARI forwarding 2047 * @dev: the PCI device 2048 * 2049 * If @dev and its upstream bridge both support ARI, enable ARI in the 2050 * bridge. Otherwise, disable ARI in the bridge. 2051 */ 2052 void pci_configure_ari(struct pci_dev *dev) 2053 { 2054 u32 cap; 2055 struct pci_dev *bridge; 2056 2057 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn) 2058 return; 2059 2060 bridge = dev->bus->self; 2061 if (!bridge) 2062 return; 2063 2064 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap); 2065 if (!(cap & PCI_EXP_DEVCAP2_ARI)) 2066 return; 2067 2068 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) { 2069 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, 2070 PCI_EXP_DEVCTL2_ARI); 2071 bridge->ari_enabled = 1; 2072 } else { 2073 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2, 2074 PCI_EXP_DEVCTL2_ARI); 2075 bridge->ari_enabled = 0; 2076 } 2077 } 2078 2079 /** 2080 * pci_enable_ido - enable ID-based Ordering on a device 2081 * @dev: the PCI device 2082 * @type: which types of IDO to enable 2083 * 2084 * Enable ID-based ordering on @dev. @type can contain the bits 2085 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate 2086 * which types of transactions are allowed to be re-ordered.
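 *
 * Illustrative sketch (not part of the original source; a hypothetical
 * driver that can tolerate reordering of both request and completion
 * traffic):
 *
 *	pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);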
2087 */ 2088 void pci_enable_ido(struct pci_dev *dev, unsigned long type) 2089 { 2090 u16 ctrl = 0; 2091 2092 if (type & PCI_EXP_IDO_REQUEST) 2093 ctrl |= PCI_EXP_IDO_REQ_EN; 2094 if (type & PCI_EXP_IDO_COMPLETION) 2095 ctrl |= PCI_EXP_IDO_CMP_EN; 2096 if (ctrl) 2097 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl); 2098 } 2099 EXPORT_SYMBOL(pci_enable_ido); 2100 2101 /** 2102 * pci_disable_ido - disable ID-based ordering on a device 2103 * @dev: the PCI device 2104 * @type: which types of IDO to disable 2105 */ 2106 void pci_disable_ido(struct pci_dev *dev, unsigned long type) 2107 { 2108 u16 ctrl = 0; 2109 2110 if (type & PCI_EXP_IDO_REQUEST) 2111 ctrl |= PCI_EXP_IDO_REQ_EN; 2112 if (type & PCI_EXP_IDO_COMPLETION) 2113 ctrl |= PCI_EXP_IDO_CMP_EN; 2114 if (ctrl) 2115 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl); 2116 } 2117 EXPORT_SYMBOL(pci_disable_ido); 2118 2119 /** 2120 * pci_enable_obff - enable optimized buffer flush/fill 2121 * @dev: PCI device 2122 * @type: type of signaling to use 2123 * 2124 * Try to enable @type OBFF signaling on @dev. It will try using WAKE# 2125 * signaling if possible, falling back to message signaling only if 2126 * WAKE# isn't supported. @type should indicate whether the PCIe link 2127 * should be brought out of L0s or L1 to send the message. It should be 2128 * either %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_EXP_OBFF_SIGNAL_L0. 2129 * 2130 * If your device can benefit from receiving all messages, even at the 2131 * power cost of bringing the link back up from a low power state, use 2132 * %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0 (the 2133 * preferred type). 2134 * 2135 * RETURNS: 2136 * Zero on success, appropriate error number on failure. 2137 */ 2138 int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type) 2139 { 2140 u32 cap; 2141 u16 ctrl; 2142 int ret; 2143 2144 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap); 2145 if (!(cap & PCI_EXP_OBFF_MASK)) 2146 return -ENOTSUPP; /* no OBFF support at all */ 2147 2148 /* Make sure the topology supports OBFF as well */ 2149 if (dev->bus->self) { 2150 ret = pci_enable_obff(dev->bus->self, type); 2151 if (ret) 2152 return ret; 2153 } 2154 2155 pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl); 2156 if (cap & PCI_EXP_OBFF_WAKE) 2157 ctrl |= PCI_EXP_OBFF_WAKE_EN; 2158 else { 2159 switch (type) { 2160 case PCI_EXP_OBFF_SIGNAL_L0: 2161 if (!(ctrl & PCI_EXP_OBFF_WAKE_EN)) 2162 ctrl |= PCI_EXP_OBFF_MSGA_EN; 2163 break; 2164 case PCI_EXP_OBFF_SIGNAL_ALWAYS: 2165 ctrl &= ~PCI_EXP_OBFF_WAKE_EN; 2166 ctrl |= PCI_EXP_OBFF_MSGB_EN; 2167 break; 2168 default: 2169 WARN(1, "bad OBFF signal type\n"); 2170 return -ENOTSUPP; 2171 } 2172 } 2173 pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, ctrl); 2174 2175 return 0; 2176 } 2177 EXPORT_SYMBOL(pci_enable_obff); 2178 2179 /** 2180 * pci_disable_obff - disable optimized buffer flush/fill 2181 * @dev: PCI device 2182 * 2183 * Disable OBFF on @dev. 2184 */ 2185 void pci_disable_obff(struct pci_dev *dev) 2186 { 2187 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_OBFF_WAKE_EN); 2188 } 2189 EXPORT_SYMBOL(pci_disable_obff); 2190 2191 /** 2192 * pci_ltr_supported - check whether a device supports LTR 2193 * @dev: PCI device 2194 * 2195 * RETURNS: 2196 * True if @dev supports latency tolerance reporting, false otherwise.
2197 */ 2198 static bool pci_ltr_supported(struct pci_dev *dev) 2199 { 2200 u32 cap; 2201 2202 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap); 2203 2204 return cap & PCI_EXP_DEVCAP2_LTR; 2205 } 2206 2207 /** 2208 * pci_enable_ltr - enable latency tolerance reporting 2209 * @dev: PCI device 2210 * 2211 * Enable LTR on @dev if possible, which means enabling it first on 2212 * upstream ports. 2213 * 2214 * RETURNS: 2215 * Zero on success, errno on failure. 2216 */ 2217 int pci_enable_ltr(struct pci_dev *dev) 2218 { 2219 int ret; 2220 2221 /* Only primary function can enable/disable LTR */ 2222 if (PCI_FUNC(dev->devfn) != 0) 2223 return -EINVAL; 2224 2225 if (!pci_ltr_supported(dev)) 2226 return -ENOTSUPP; 2227 2228 /* Enable upstream ports first */ 2229 if (dev->bus->self) { 2230 ret = pci_enable_ltr(dev->bus->self); 2231 if (ret) 2232 return ret; 2233 } 2234 2235 return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN); 2236 } 2237 EXPORT_SYMBOL(pci_enable_ltr); 2238 2239 /** 2240 * pci_disable_ltr - disable latency tolerance reporting 2241 * @dev: PCI device 2242 */ 2243 void pci_disable_ltr(struct pci_dev *dev) 2244 { 2245 /* Only primary function can enable/disable LTR */ 2246 if (PCI_FUNC(dev->devfn) != 0) 2247 return; 2248 2249 if (!pci_ltr_supported(dev)) 2250 return; 2251 2252 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN); 2253 } 2254 EXPORT_SYMBOL(pci_disable_ltr); 2255 2256 static int __pci_ltr_scale(int *val) 2257 { 2258 int scale = 0; 2259 2260 while (*val > 1023) { 2261 *val = (*val + 31) / 32; 2262 scale++; 2263 } 2264 return scale; 2265 } 2266 2267 /** 2268 * pci_set_ltr - set LTR latency values 2269 * @dev: PCI device 2270 * @snoop_lat_ns: snoop latency in nanoseconds 2271 * @nosnoop_lat_ns: nosnoop latency in nanoseconds 2272 * 2273 * Figure out the scale and set the LTR values accordingly. 
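 *
 * Worked example (not part of the original source): a requested snoop
 * latency of 3000 ns exceeds the 1023 maximum of the value field, so
 * __pci_ltr_scale() reduces it to 94 with scale 1, i.e. 94 * 32 ns =
 * 3008 ns is what actually gets programmed:
 *
 *	ret = pci_set_ltr(pdev, 3000, 3000);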
2274 */ 2275 int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns) 2276 { 2277 int pos, ret, snoop_scale, nosnoop_scale; 2278 u16 val; 2279 2280 if (!pci_ltr_supported(dev)) 2281 return -ENOTSUPP; 2282 2283 snoop_scale = __pci_ltr_scale(&snoop_lat_ns); 2284 nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns); 2285 2286 if (snoop_lat_ns > PCI_LTR_VALUE_MASK || 2287 nosnoop_lat_ns > PCI_LTR_VALUE_MASK) 2288 return -EINVAL; 2289 2290 if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) || 2291 (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT))) 2292 return -EINVAL; 2293 2294 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); 2295 if (!pos) 2296 return -ENOTSUPP; 2297 2298 val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns; 2299 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val); 2300 if (ret) 2301 return -EIO; 2302 2303 val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns; 2304 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val); 2305 if (ret) 2306 return -EIO; 2307 2308 return 0; 2309 } 2310 EXPORT_SYMBOL(pci_set_ltr); 2311 2312 static int pci_acs_enable; 2313 2314 /** 2315 * pci_request_acs - ask for ACS to be enabled if supported 2316 */ 2317 void pci_request_acs(void) 2318 { 2319 pci_acs_enable = 1; 2320 } 2321 2322 /** 2323 * pci_enable_acs - enable ACS if hardware supports it 2324 * @dev: the PCI device 2325 */ 2326 void pci_enable_acs(struct pci_dev *dev) 2327 { 2328 int pos; 2329 u16 cap; 2330 u16 ctrl; 2331 2332 if (!pci_acs_enable) 2333 return; 2334 2335 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); 2336 if (!pos) 2337 return; 2338 2339 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap); 2340 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); 2341 2342 /* Source Validation */ 2343 ctrl |= (cap & PCI_ACS_SV); 2344 2345 /* P2P Request Redirect */ 2346 ctrl |= (cap & PCI_ACS_RR); 2347 2348 /* P2P Completion Redirect */ 2349 ctrl |= (cap & PCI_ACS_CR); 2350 2351 /* Upstream Forwarding */ 2352 ctrl |= (cap & PCI_ACS_UF); 2353 2354 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); 2355 } 2356 2357 /** 2358 * pci_acs_enabled - test ACS against required flags for a given device 2359 * @pdev: device to test 2360 * @acs_flags: required PCI ACS flags 2361 * 2362 * Return true if the device supports the provided flags. Automatically 2363 * filters out flags that are not implemented on multifunction devices.
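 *
 * Illustrative sketch (not part of the original source; this is the kind
 * of check IOMMU grouping code builds on):
 *
 *	if (pci_acs_enabled(pdev, PCI_ACS_SV | PCI_ACS_RR |
 *				  PCI_ACS_CR | PCI_ACS_UF))
 *		dev_info(&pdev->dev, "peer-to-peer DMA is redirected upstream\n");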
2364 */ 2365 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) 2366 { 2367 int pos, ret; 2368 u16 ctrl; 2369 2370 ret = pci_dev_specific_acs_enabled(pdev, acs_flags); 2371 if (ret >= 0) 2372 return ret > 0; 2373 2374 if (!pci_is_pcie(pdev)) 2375 return false; 2376 2377 /* Filter out flags not applicable to multifunction */ 2378 if (pdev->multifunction) 2379 acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | 2380 PCI_ACS_EC | PCI_ACS_DT); 2381 2382 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM || 2383 pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || 2384 pdev->multifunction) { 2385 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS); 2386 if (!pos) 2387 return false; 2388 2389 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl); 2390 if ((ctrl & acs_flags) != acs_flags) 2391 return false; 2392 } 2393 2394 return true; 2395 } 2396 2397 /** 2398 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy 2399 * @start: starting downstream device 2400 * @end: ending upstream device or NULL to search to the root bus 2401 * @acs_flags: required flags 2402 * 2403 * Walk up a device tree from start to end testing PCI ACS support. If 2404 * any step along the way does not support the required flags, return false. 2405 */ 2406 bool pci_acs_path_enabled(struct pci_dev *start, 2407 struct pci_dev *end, u16 acs_flags) 2408 { 2409 struct pci_dev *pdev, *parent = start; 2410 2411 do { 2412 pdev = parent; 2413 2414 if (!pci_acs_enabled(pdev, acs_flags)) 2415 return false; 2416 2417 if (pci_is_root_bus(pdev->bus)) 2418 return (end == NULL); 2419 2420 parent = pdev->bus->self; 2421 } while (pdev != end); 2422 2423 return true; 2424 } 2425 2426 /** 2427 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge 2428 * @dev: the PCI device 2429 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) 2430 * 2431 * Perform INTx swizzling for a device behind one level of bridge. This is 2432 * required by section 9.1 of the PCI-to-PCI bridge specification for devices 2433 * behind bridges on add-in cards. For devices with ARI enabled, the slot 2434 * number is always 0 (see the Implementation Note in section 2.2.8.1 of 2435 * the PCI Express Base Specification, Revision 2.1) 2436 */ 2437 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin) 2438 { 2439 int slot; 2440 2441 if (pci_ari_enabled(dev->bus)) 2442 slot = 0; 2443 else 2444 slot = PCI_SLOT(dev->devfn); 2445 2446 return (((pin - 1) + slot) % 4) + 1; 2447 } 2448 2449 int 2450 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) 2451 { 2452 u8 pin; 2453 2454 pin = dev->pin; 2455 if (!pin) 2456 return -1; 2457 2458 while (!pci_is_root_bus(dev->bus)) { 2459 pin = pci_swizzle_interrupt_pin(dev, pin); 2460 dev = dev->bus->self; 2461 } 2462 *bridge = dev; 2463 return pin; 2464 } 2465 2466 /** 2467 * pci_common_swizzle - swizzle INTx all the way to root bridge 2468 * @dev: the PCI device 2469 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD) 2470 * 2471 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI 2472 * bridges all the way up to a PCI root bus.
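 *
 * Worked example (not part of the original source): a device in slot 3
 * behind a single bridge, asserting INTB (pin 2), appears at the bridge
 * as pin ((2 - 1) + 3) % 4 + 1 = 1, i.e. INTA.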
2473 */ 2474 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp) 2475 { 2476 u8 pin = *pinp; 2477 2478 while (!pci_is_root_bus(dev->bus)) { 2479 pin = pci_swizzle_interrupt_pin(dev, pin); 2480 dev = dev->bus->self; 2481 } 2482 *pinp = pin; 2483 return PCI_SLOT(dev->devfn); 2484 } 2485 2486 /** 2487 * pci_release_region - Release a PCI BAR 2488 * @pdev: PCI device whose resources were previously reserved by pci_request_region 2489 * @bar: BAR to release 2490 * 2491 * Releases the PCI I/O and memory resources previously reserved by a 2492 * successful call to pci_request_region. Call this function only 2493 * after all use of the PCI regions has ceased. 2494 */ 2495 void pci_release_region(struct pci_dev *pdev, int bar) 2496 { 2497 struct pci_devres *dr; 2498 2499 if (pci_resource_len(pdev, bar) == 0) 2500 return; 2501 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) 2502 release_region(pci_resource_start(pdev, bar), 2503 pci_resource_len(pdev, bar)); 2504 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) 2505 release_mem_region(pci_resource_start(pdev, bar), 2506 pci_resource_len(pdev, bar)); 2507 2508 dr = find_pci_dr(pdev); 2509 if (dr) 2510 dr->region_mask &= ~(1 << bar); 2511 } 2512 2513 /** 2514 * __pci_request_region - Reserve PCI I/O and memory resource 2515 * @pdev: PCI device whose resources are to be reserved 2516 * @bar: BAR to be reserved 2517 * @res_name: Name to be associated with resource. 2518 * @exclusive: whether the region access is exclusive or not 2519 * 2520 * Mark the PCI region associated with PCI device @pdev BAR @bar as 2521 * being reserved by owner @res_name. Do not access any 2522 * address inside the PCI regions unless this call returns 2523 * successfully. 2524 * 2525 * If @exclusive is set, then the region is marked so that userspace 2526 * is explicitly not allowed to map the resource via /dev/mem or 2527 * sysfs MMIO access. 2528 * 2529 * Returns 0 on success, or %EBUSY on error. A warning 2530 * message is also printed on failure. 2531 */ 2532 static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name, 2533 int exclusive) 2534 { 2535 struct pci_devres *dr; 2536 2537 if (pci_resource_len(pdev, bar) == 0) 2538 return 0; 2539 2540 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) { 2541 if (!request_region(pci_resource_start(pdev, bar), 2542 pci_resource_len(pdev, bar), res_name)) 2543 goto err_out; 2544 } 2545 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { 2546 if (!__request_mem_region(pci_resource_start(pdev, bar), 2547 pci_resource_len(pdev, bar), res_name, 2548 exclusive)) 2549 goto err_out; 2550 } 2551 2552 dr = find_pci_dr(pdev); 2553 if (dr) 2554 dr->region_mask |= 1 << bar; 2555 2556 return 0; 2557 2558 err_out: 2559 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar, 2560 &pdev->resource[bar]); 2561 return -EBUSY; 2562 } 2563 2564 /** 2565 * pci_request_region - Reserve PCI I/O and memory resource 2566 * @pdev: PCI device whose resources are to be reserved 2567 * @bar: BAR to be reserved 2568 * @res_name: Name to be associated with resource 2569 * 2570 * Mark the PCI region associated with PCI device @pdev BAR @bar as 2571 * being reserved by owner @res_name. Do not access any 2572 * address inside the PCI regions unless this call returns 2573 * successfully. 2574 * 2575 * Returns 0 on success, or %EBUSY on error. A warning 2576 * message is also printed on failure.
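 *
 * Illustrative sketch (not part of the original source; a hypothetical
 * probe routine claiming BAR 0 before mapping it):
 *
 *	err = pci_request_region(pdev, 0, "foo_drv");
 *	if (err)
 *		return err;
 *	regs = pci_ioremap_bar(pdev, 0);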
2577 */ 2578 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) 2579 { 2580 return __pci_request_region(pdev, bar, res_name, 0); 2581 } 2582 2583 /** 2584 * pci_request_region_exclusive - Reserve PCI I/O and memory resource 2585 * @pdev: PCI device whose resources are to be reserved 2586 * @bar: BAR to be reserved 2587 * @res_name: Name to be associated with resource. 2588 * 2589 * Mark the PCI region associated with PCI device @pdev BAR @bar as 2590 * being reserved by owner @res_name. Do not access any 2591 * address inside the PCI regions unless this call returns 2592 * successfully. 2593 * 2594 * Returns 0 on success, or %EBUSY on error. A warning 2595 * message is also printed on failure. 2596 * 2597 * The key difference that _exclusive makes is that userspace is 2598 * explicitly not allowed to map the resource via /dev/mem or 2599 * sysfs. 2600 */ 2601 int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name) 2602 { 2603 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE); 2604 } 2605 /** 2606 * pci_release_selected_regions - Release selected PCI I/O and memory resources 2607 * @pdev: PCI device whose resources were previously reserved 2608 * @bars: Bitmask of BARs to be released 2609 * 2610 * Release selected PCI I/O and memory resources previously reserved. 2611 * Call this function only after all use of the PCI regions has ceased. 2612 */ 2613 void pci_release_selected_regions(struct pci_dev *pdev, int bars) 2614 { 2615 int i; 2616 2617 for (i = 0; i < 6; i++) 2618 if (bars & (1 << i)) 2619 pci_release_region(pdev, i); 2620 } 2621 2622 int __pci_request_selected_regions(struct pci_dev *pdev, int bars, 2623 const char *res_name, int excl) 2624 { 2625 int i; 2626 2627 for (i = 0; i < 6; i++) 2628 if (bars & (1 << i)) 2629 if (__pci_request_region(pdev, i, res_name, excl)) 2630 goto err_out; 2631 return 0; 2632 2633 err_out: 2634 while (--i >= 0) 2635 if (bars & (1 << i)) 2636 pci_release_region(pdev, i); 2637 2638 return -EBUSY; 2639 } 2640 2641 2642 /** 2643 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources 2644 * @pdev: PCI device whose resources are to be reserved 2645 * @bars: Bitmask of BARs to be requested 2646 * @res_name: Name to be associated with resource 2647 */ 2648 int pci_request_selected_regions(struct pci_dev *pdev, int bars, 2649 const char *res_name) 2650 { 2651 return __pci_request_selected_regions(pdev, bars, res_name, 0); 2652 } 2653 2654 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, 2655 int bars, const char *res_name) 2656 { 2657 return __pci_request_selected_regions(pdev, bars, res_name, 2658 IORESOURCE_EXCLUSIVE); 2659 } 2660 2661 /** 2662 * pci_release_regions - Release reserved PCI I/O and memory resources 2663 * @pdev: PCI device whose resources were previously reserved by pci_request_regions 2664 * 2665 * Releases all PCI I/O and memory resources previously reserved by a 2666 * successful call to pci_request_regions. Call this function only 2667 * after all use of the PCI regions has ceased. 2668 */ 2669 2670 void pci_release_regions(struct pci_dev *pdev) 2671 { 2672 pci_release_selected_regions(pdev, (1 << 6) - 1); 2673 } 2674 2675 /** 2676 * pci_request_regions - Reserve PCI I/O and memory resources 2677 * @pdev: PCI device whose resources are to be reserved 2678 * @res_name: Name to be associated with resource. 2679 * 2680 * Mark all PCI regions associated with PCI device @pdev as 2681 * being reserved by owner @res_name.
Do not access any 2682 * address inside the PCI regions unless this call returns 2683 * successfully. 2684 * 2685 * Returns 0 on success, or %EBUSY on error. A warning 2686 * message is also printed on failure. 2687 */ 2688 int pci_request_regions(struct pci_dev *pdev, const char *res_name) 2689 { 2690 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name); 2691 } 2692 2693 /** 2694 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources 2695 * @pdev: PCI device whose resources are to be reserved 2696 * @res_name: Name to be associated with resource. 2697 * 2698 * Mark all PCI regions associated with PCI device @pdev as 2699 * being reserved by owner @res_name. Do not access any 2700 * address inside the PCI regions unless this call returns 2701 * successfully. 2702 * 2703 * pci_request_regions_exclusive() will mark the region so that 2704 * /dev/mem and the sysfs MMIO access will not be allowed. 2705 * 2706 * Returns 0 on success, or %EBUSY on error. A warning 2707 * message is also printed on failure. 2708 */ 2709 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name) 2710 { 2711 return pci_request_selected_regions_exclusive(pdev, 2712 ((1 << 6) - 1), res_name); 2713 } 2714 2715 static void __pci_set_master(struct pci_dev *dev, bool enable) 2716 { 2717 u16 old_cmd, cmd; 2718 2719 pci_read_config_word(dev, PCI_COMMAND, &old_cmd); 2720 if (enable) 2721 cmd = old_cmd | PCI_COMMAND_MASTER; 2722 else 2723 cmd = old_cmd & ~PCI_COMMAND_MASTER; 2724 if (cmd != old_cmd) { 2725 dev_dbg(&dev->dev, "%s bus mastering\n", 2726 enable ? "enabling" : "disabling"); 2727 pci_write_config_word(dev, PCI_COMMAND, cmd); 2728 } 2729 dev->is_busmaster = enable; 2730 } 2731 2732 /** 2733 * pcibios_setup - process "pci=" kernel boot arguments 2734 * @str: string used to pass in "pci=" kernel boot arguments 2735 * 2736 * Process kernel boot arguments. This is the default implementation. 2737 * Architecture specific implementations can override this as necessary. 2738 */ 2739 char * __weak __init pcibios_setup(char *str) 2740 { 2741 return str; 2742 } 2743 2744 /** 2745 * pcibios_set_master - enable PCI bus-mastering for device dev 2746 * @dev: the PCI device to enable 2747 * 2748 * Enables PCI bus-mastering for the device. This is the default 2749 * implementation. Architecture specific implementations can override 2750 * this if necessary. 2751 */ 2752 void __weak pcibios_set_master(struct pci_dev *dev) 2753 { 2754 u8 lat; 2755 2756 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */ 2757 if (pci_is_pcie(dev)) 2758 return; 2759 2760 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); 2761 if (lat < 16) 2762 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency; 2763 else if (lat > pcibios_max_latency) 2764 lat = pcibios_max_latency; 2765 else 2766 return; 2767 dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat); 2768 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); 2769 } 2770 2771 /** 2772 * pci_set_master - enables bus-mastering for device dev 2773 * @dev: the PCI device to enable 2774 * 2775 * Enables bus-mastering on the device and calls pcibios_set_master() 2776 * to do the needed arch specific settings.
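 *
 * Illustrative sketch (not part of the original source): the usual order
 * in a DMA-capable driver's probe path is
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);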
2777 */ 2778 void pci_set_master(struct pci_dev *dev) 2779 { 2780 __pci_set_master(dev, true); 2781 pcibios_set_master(dev); 2782 } 2783 2784 /** 2785 * pci_clear_master - disables bus-mastering for device dev 2786 * @dev: the PCI device to disable 2787 */ 2788 void pci_clear_master(struct pci_dev *dev) 2789 { 2790 __pci_set_master(dev, false); 2791 } 2792 2793 /** 2794 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed 2795 * @dev: the PCI device for which MWI is to be enabled 2796 * 2797 * Helper function for pci_set_mwi. 2798 * Originally copied from drivers/net/acenic.c. 2799 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. 2800 * 2801 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 2802 */ 2803 int pci_set_cacheline_size(struct pci_dev *dev) 2804 { 2805 u8 cacheline_size; 2806 2807 if (!pci_cache_line_size) 2808 return -EINVAL; 2809 2810 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be 2811 equal to or multiple of the right value. */ 2812 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size); 2813 if (cacheline_size >= pci_cache_line_size && 2814 (cacheline_size % pci_cache_line_size) == 0) 2815 return 0; 2816 2817 /* Write the correct value. */ 2818 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size); 2819 /* Read it back. */ 2820 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size); 2821 if (cacheline_size == pci_cache_line_size) 2822 return 0; 2823 2824 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not " 2825 "supported\n", pci_cache_line_size << 2); 2826 2827 return -EINVAL; 2828 } 2829 EXPORT_SYMBOL_GPL(pci_set_cacheline_size); 2830 2831 #ifdef PCI_DISABLE_MWI 2832 int pci_set_mwi(struct pci_dev *dev) 2833 { 2834 return 0; 2835 } 2836 2837 int pci_try_set_mwi(struct pci_dev *dev) 2838 { 2839 return 0; 2840 } 2841 2842 void pci_clear_mwi(struct pci_dev *dev) 2843 { 2844 } 2845 2846 #else 2847 2848 /** 2849 * pci_set_mwi - enables memory-write-invalidate PCI transaction 2850 * @dev: the PCI device for which MWI is enabled 2851 * 2852 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND. 2853 * 2854 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 2855 */ 2856 int 2857 pci_set_mwi(struct pci_dev *dev) 2858 { 2859 int rc; 2860 u16 cmd; 2861 2862 rc = pci_set_cacheline_size(dev); 2863 if (rc) 2864 return rc; 2865 2866 pci_read_config_word(dev, PCI_COMMAND, &cmd); 2867 if (! (cmd & PCI_COMMAND_INVALIDATE)) { 2868 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n"); 2869 cmd |= PCI_COMMAND_INVALIDATE; 2870 pci_write_config_word(dev, PCI_COMMAND, cmd); 2871 } 2872 2873 return 0; 2874 } 2875 2876 /** 2877 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction 2878 * @dev: the PCI device for which MWI is enabled 2879 * 2880 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND. 2881 * Callers are not required to check the return value. 2882 * 2883 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 
2884 */ 2885 int pci_try_set_mwi(struct pci_dev *dev) 2886 { 2887 int rc = pci_set_mwi(dev); 2888 return rc; 2889 } 2890 2891 /** 2892 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev 2893 * @dev: the PCI device to disable 2894 * 2895 * Disables PCI Memory-Write-Invalidate transaction on the device 2896 */ 2897 void 2898 pci_clear_mwi(struct pci_dev *dev) 2899 { 2900 u16 cmd; 2901 2902 pci_read_config_word(dev, PCI_COMMAND, &cmd); 2903 if (cmd & PCI_COMMAND_INVALIDATE) { 2904 cmd &= ~PCI_COMMAND_INVALIDATE; 2905 pci_write_config_word(dev, PCI_COMMAND, cmd); 2906 } 2907 } 2908 #endif /* ! PCI_DISABLE_MWI */ 2909 2910 /** 2911 * pci_intx - enables/disables PCI INTx for device dev 2912 * @pdev: the PCI device to operate on 2913 * @enable: boolean: whether to enable or disable PCI INTx 2914 * 2915 * Enables/disables PCI INTx for device dev 2916 */ 2917 void 2918 pci_intx(struct pci_dev *pdev, int enable) 2919 { 2920 u16 pci_command, new; 2921 2922 pci_read_config_word(pdev, PCI_COMMAND, &pci_command); 2923 2924 if (enable) { 2925 new = pci_command & ~PCI_COMMAND_INTX_DISABLE; 2926 } else { 2927 new = pci_command | PCI_COMMAND_INTX_DISABLE; 2928 } 2929 2930 if (new != pci_command) { 2931 struct pci_devres *dr; 2932 2933 pci_write_config_word(pdev, PCI_COMMAND, new); 2934 2935 dr = find_pci_dr(pdev); 2936 if (dr && !dr->restore_intx) { 2937 dr->restore_intx = 1; 2938 dr->orig_intx = !enable; 2939 } 2940 } 2941 } 2942 2943 /** 2944 * pci_intx_mask_supported - probe for INTx masking support 2945 * @dev: the PCI device to operate on 2946 * 2947 * Check if the device dev supports INTx masking via the config space 2948 * command word. 2949 */ 2950 bool pci_intx_mask_supported(struct pci_dev *dev) 2951 { 2952 bool mask_supported = false; 2953 u16 orig, new; 2954 2955 if (dev->broken_intx_masking) 2956 return false; 2957 2958 pci_cfg_access_lock(dev); 2959 2960 pci_read_config_word(dev, PCI_COMMAND, &orig); 2961 pci_write_config_word(dev, PCI_COMMAND, 2962 orig ^ PCI_COMMAND_INTX_DISABLE); 2963 pci_read_config_word(dev, PCI_COMMAND, &new); 2964 2965 /* 2966 * There's no way to protect against hardware bugs or detect them 2967 * reliably, but as long as we know what the value should be, let's 2968 * go ahead and check it. 2969 */ 2970 if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) { 2971 dev_err(&dev->dev, "Command register changed from " 2972 "0x%x to 0x%x: driver or hardware bug?\n", orig, new); 2973 } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) { 2974 mask_supported = true; 2975 pci_write_config_word(dev, PCI_COMMAND, orig); 2976 } 2977 2978 pci_cfg_access_unlock(dev); 2979 return mask_supported; 2980 } 2981 EXPORT_SYMBOL_GPL(pci_intx_mask_supported); 2982 2983 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask) 2984 { 2985 struct pci_bus *bus = dev->bus; 2986 bool mask_updated = true; 2987 u32 cmd_status_dword; 2988 u16 origcmd, newcmd; 2989 unsigned long flags; 2990 bool irq_pending; 2991 2992 /* 2993 * We do a single dword read to retrieve both command and status. 2994 * Document assumptions that make this possible.
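 * Namely, that PCI_COMMAND is dword-aligned and that PCI_STATUS is the
 * 16-bit word immediately following it; the BUILD_BUG_ON()s below verify
 * both at compile time.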
2995 */ 2996 BUILD_BUG_ON(PCI_COMMAND % 4); 2997 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS); 2998 2999 raw_spin_lock_irqsave(&pci_lock, flags); 3000 3001 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword); 3002 3003 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT; 3004 3005 /* 3006 * Check interrupt status register to see whether our device 3007 * triggered the interrupt (when masking) or the next IRQ is 3008 * already pending (when unmasking). 3009 */ 3010 if (mask != irq_pending) { 3011 mask_updated = false; 3012 goto done; 3013 } 3014 3015 origcmd = cmd_status_dword; 3016 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE; 3017 if (mask) 3018 newcmd |= PCI_COMMAND_INTX_DISABLE; 3019 if (newcmd != origcmd) 3020 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd); 3021 3022 done: 3023 raw_spin_unlock_irqrestore(&pci_lock, flags); 3024 3025 return mask_updated; 3026 } 3027 3028 /** 3029 * pci_check_and_mask_intx - mask INTx on pending interrupt 3030 * @dev: the PCI device to operate on 3031 * 3032 * Check if the device dev has its INTx line asserted, mask it and 3033 * return true in that case. False is returned if no interrupt was 3034 * pending. 3035 */ 3036 bool pci_check_and_mask_intx(struct pci_dev *dev) 3037 { 3038 return pci_check_and_set_intx_mask(dev, true); 3039 } 3040 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx); 3041 3042 /** 3043 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending 3044 * @dev: the PCI device to operate on 3045 * 3046 * Check if the device dev has its INTx line asserted, unmask it if not 3047 * and return true. False is returned and the mask remains active if 3048 * there was still an interrupt pending. 3049 */ 3050 bool pci_check_and_unmask_intx(struct pci_dev *dev) 3051 { 3052 return pci_check_and_set_intx_mask(dev, false); 3053 } 3054 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx); 3055 3056 /** 3057 * pci_msi_off - disables any msi or msix capabilities 3058 * @dev: the PCI device to operate on 3059 * 3060 * If you want to use msi see pci_enable_msi and friends. 3061 * This is a lower level primitive that allows us to disable 3062 * msi operation at the device level.
3063 */ 3064 void pci_msi_off(struct pci_dev *dev) 3065 { 3066 int pos; 3067 u16 control; 3068 3069 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 3070 if (pos) { 3071 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); 3072 control &= ~PCI_MSI_FLAGS_ENABLE; 3073 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); 3074 } 3075 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 3076 if (pos) { 3077 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); 3078 control &= ~PCI_MSIX_FLAGS_ENABLE; 3079 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); 3080 } 3081 } 3082 EXPORT_SYMBOL_GPL(pci_msi_off); 3083 3084 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size) 3085 { 3086 return dma_set_max_seg_size(&dev->dev, size); 3087 } 3088 EXPORT_SYMBOL(pci_set_dma_max_seg_size); 3089 3090 int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask) 3091 { 3092 return dma_set_seg_boundary(&dev->dev, mask); 3093 } 3094 EXPORT_SYMBOL(pci_set_dma_seg_boundary); 3095 3096 static int pcie_flr(struct pci_dev *dev, int probe) 3097 { 3098 int i; 3099 u32 cap; 3100 u16 status; 3101 3102 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap); 3103 if (!(cap & PCI_EXP_DEVCAP_FLR)) 3104 return -ENOTTY; 3105 3106 if (probe) 3107 return 0; 3108 3109 /* Wait for Transaction Pending bit to clear */ 3110 for (i = 0; i < 4; i++) { 3111 if (i) 3112 msleep((1 << (i - 1)) * 100); 3113 3114 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); 3115 if (!(status & PCI_EXP_DEVSTA_TRPND)) 3116 goto clear; 3117 } 3118 3119 dev_err(&dev->dev, "transaction is not cleared; " 3120 "proceeding with reset anyway\n"); 3121 3122 clear: 3123 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); 3124 3125 msleep(100); 3126 3127 return 0; 3128 } 3129 3130 static int pci_af_flr(struct pci_dev *dev, int probe) 3131 { 3132 int i; 3133 int pos; 3134 u8 cap; 3135 u8 status; 3136 3137 pos = pci_find_capability(dev, PCI_CAP_ID_AF); 3138 if (!pos) 3139 return -ENOTTY; 3140 3141 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap); 3142 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR)) 3143 return -ENOTTY; 3144 3145 if (probe) 3146 return 0; 3147 3148 /* Wait for Transaction Pending bit to clear */ 3149 for (i = 0; i < 4; i++) { 3150 if (i) 3151 msleep((1 << (i - 1)) * 100); 3152 3153 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status); 3154 if (!(status & PCI_AF_STATUS_TP)) 3155 goto clear; 3156 } 3157 3158 dev_err(&dev->dev, "transaction is not cleared; " 3159 "proceeding with reset anyway\n"); 3160 3161 clear: 3162 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR); 3163 msleep(100); 3164 3165 return 0; 3166 } 3167 3168 /** 3169 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0. 3170 * @dev: Device to reset. 3171 * @probe: If set, only check if the device can be reset this way. 3172 * 3173 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is 3174 * unset, it will be reinitialized internally when going from PCI_D3hot to 3175 * PCI_D0. If that's the case and the device is not in a low-power state 3176 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset. 3177 * 3178 * NOTE: This causes the caller to sleep for twice the device power transition 3179 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms 3180 * by default (i.e. unless the @dev's d3_delay field has a different value). 3181 * Moreover, only devices in D0 can be reset by this function.
3182 */ 3183 static int pci_pm_reset(struct pci_dev *dev, int probe) 3184 { 3185 u16 csr; 3186 3187 if (!dev->pm_cap) 3188 return -ENOTTY; 3189 3190 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr); 3191 if (csr & PCI_PM_CTRL_NO_SOFT_RESET) 3192 return -ENOTTY; 3193 3194 if (probe) 3195 return 0; 3196 3197 if (dev->current_state != PCI_D0) 3198 return -EINVAL; 3199 3200 csr &= ~PCI_PM_CTRL_STATE_MASK; 3201 csr |= PCI_D3hot; 3202 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); 3203 pci_dev_d3_sleep(dev); 3204 3205 csr &= ~PCI_PM_CTRL_STATE_MASK; 3206 csr |= PCI_D0; 3207 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); 3208 pci_dev_d3_sleep(dev); 3209 3210 return 0; 3211 } 3212 3213 static int pci_parent_bus_reset(struct pci_dev *dev, int probe) 3214 { 3215 u16 ctrl; 3216 struct pci_dev *pdev; 3217 3218 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self) 3219 return -ENOTTY; 3220 3221 list_for_each_entry(pdev, &dev->bus->devices, bus_list) 3222 if (pdev != dev) 3223 return -ENOTTY; 3224 3225 if (probe) 3226 return 0; 3227 3228 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl); 3229 ctrl |= PCI_BRIDGE_CTL_BUS_RESET; 3230 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl); 3231 msleep(100); 3232 3233 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; 3234 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl); 3235 msleep(100); 3236 3237 return 0; 3238 } 3239 3240 static int __pci_dev_reset(struct pci_dev *dev, int probe) 3241 { 3242 int rc; 3243 3244 might_sleep(); 3245 3246 rc = pci_dev_specific_reset(dev, probe); 3247 if (rc != -ENOTTY) 3248 goto done; 3249 3250 rc = pcie_flr(dev, probe); 3251 if (rc != -ENOTTY) 3252 goto done; 3253 3254 rc = pci_af_flr(dev, probe); 3255 if (rc != -ENOTTY) 3256 goto done; 3257 3258 rc = pci_pm_reset(dev, probe); 3259 if (rc != -ENOTTY) 3260 goto done; 3261 3262 rc = pci_parent_bus_reset(dev, probe); 3263 done: 3264 return rc; 3265 } 3266 3267 static int pci_dev_reset(struct pci_dev *dev, int probe) 3268 { 3269 int rc; 3270 3271 if (!probe) { 3272 pci_cfg_access_lock(dev); 3273 /* block PM suspend, driver probe, etc. */ 3274 device_lock(&dev->dev); 3275 } 3276 3277 rc = __pci_dev_reset(dev, probe); 3278 3279 if (!probe) { 3280 device_unlock(&dev->dev); 3281 pci_cfg_access_unlock(dev); 3282 } 3283 return rc; 3284 } 3285 /** 3286 * __pci_reset_function - reset a PCI device function 3287 * @dev: PCI device to reset 3288 * 3289 * Some devices allow an individual function to be reset without affecting 3290 * other functions in the same device. The PCI device must be responsive 3291 * to PCI config space in order to use this function. 3292 * 3293 * The device function is presumed to be unused when this function is called. 3294 * Resetting the device will make the contents of PCI configuration space 3295 * random, so any caller of this must be prepared to reinitialise the 3296 * device including MSI, bus mastering, BARs, decoding IO and memory spaces, 3297 * etc. 3298 * 3299 * Returns 0 if the device function was successfully reset or negative if the 3300 * device doesn't support resetting a single function. 3301 */ 3302 int __pci_reset_function(struct pci_dev *dev) 3303 { 3304 return pci_dev_reset(dev, 0); 3305 } 3306 EXPORT_SYMBOL_GPL(__pci_reset_function); 3307 3308 /** 3309 * __pci_reset_function_locked - reset a PCI device function while holding 3310 * the @dev mutex lock. 
3311 * @dev: PCI device to reset 3312 * 3313 * Some devices allow an individual function to be reset without affecting 3314 * other functions in the same device. The PCI device must be responsive 3315 * to PCI config space in order to use this function. 3316 * 3317 * The device function is presumed to be unused and the caller is holding 3318 * the device mutex lock when this function is called. 3319 * Resetting the device will make the contents of PCI configuration space 3320 * random, so any caller of this must be prepared to reinitialise the 3321 * device including MSI, bus mastering, BARs, decoding IO and memory spaces, 3322 * etc. 3323 * 3324 * Returns 0 if the device function was successfully reset or negative if the 3325 * device doesn't support resetting a single function. 3326 */ 3327 int __pci_reset_function_locked(struct pci_dev *dev) 3328 { 3329 return __pci_dev_reset(dev, 0); 3330 } 3331 EXPORT_SYMBOL_GPL(__pci_reset_function_locked); 3332 3333 /** 3334 * pci_probe_reset_function - check whether the device can be safely reset 3335 * @dev: PCI device to reset 3336 * 3337 * Some devices allow an individual function to be reset without affecting 3338 * other functions in the same device. The PCI device must be responsive 3339 * to PCI config space in order to use this function. 3340 * 3341 * Returns 0 if the device function can be reset or negative if the 3342 * device doesn't support resetting a single function. 3343 */ 3344 int pci_probe_reset_function(struct pci_dev *dev) 3345 { 3346 return pci_dev_reset(dev, 1); 3347 } 3348 3349 /** 3350 * pci_reset_function - quiesce and reset a PCI device function 3351 * @dev: PCI device to reset 3352 * 3353 * Some devices allow an individual function to be reset without affecting 3354 * other functions in the same device. The PCI device must be responsive 3355 * to PCI config space in order to use this function. 3356 * 3357 * This function does not just reset the PCI portion of a device, but 3358 * clears all the state associated with the device. This function differs 3359 * from __pci_reset_function in that it saves and restores device state 3360 * over the reset. 3361 * 3362 * Returns 0 if the device function was successfully reset or negative if the 3363 * device doesn't support resetting a single function. 3364 */ 3365 int pci_reset_function(struct pci_dev *dev) 3366 { 3367 int rc; 3368 3369 rc = pci_dev_reset(dev, 1); 3370 if (rc) 3371 return rc; 3372 3373 pci_save_state(dev); 3374 3375 /* 3376 * both INTx and MSI are disabled after the Interrupt Disable bit 3377 * is set and the Bus Master bit is cleared. 3378 */ 3379 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); 3380 3381 rc = pci_dev_reset(dev, 0); 3382 3383 pci_restore_state(dev); 3384 3385 return rc; 3386 } 3387 EXPORT_SYMBOL_GPL(pci_reset_function); 3388 3389 /** 3390 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count 3391 * @dev: PCI device to query 3392 * 3393 * Returns mmrbc: maximum designed memory read count in bytes 3394 * or appropriate error value. 
3395 */ 3396 int pcix_get_max_mmrbc(struct pci_dev *dev) 3397 { 3398 int cap; 3399 u32 stat; 3400 3401 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 3402 if (!cap) 3403 return -EINVAL; 3404 3405 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) 3406 return -EINVAL; 3407 3408 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21); 3409 } 3410 EXPORT_SYMBOL(pcix_get_max_mmrbc); 3411 3412 /** 3413 * pcix_get_mmrbc - get PCI-X maximum memory read byte count 3414 * @dev: PCI device to query 3415 * 3416 * Returns mmrbc: maximum memory read count in bytes 3417 * or appropriate error value. 3418 */ 3419 int pcix_get_mmrbc(struct pci_dev *dev) 3420 { 3421 int cap; 3422 u16 cmd; 3423 3424 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 3425 if (!cap) 3426 return -EINVAL; 3427 3428 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) 3429 return -EINVAL; 3430 3431 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2); 3432 } 3433 EXPORT_SYMBOL(pcix_get_mmrbc); 3434 3435 /** 3436 * pcix_set_mmrbc - set PCI-X maximum memory read byte count 3437 * @dev: PCI device to query 3438 * @mmrbc: maximum memory read count in bytes 3439 * valid values are 512, 1024, 2048, 4096 3440 * 3441 * If possible sets maximum memory read byte count, some bridges have errata 3442 * that prevent this. 3443 */ 3444 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) 3445 { 3446 int cap; 3447 u32 stat, v, o; 3448 u16 cmd; 3449 3450 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc)) 3451 return -EINVAL; 3452 3453 v = ffs(mmrbc) - 10; 3454 3455 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 3456 if (!cap) 3457 return -EINVAL; 3458 3459 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) 3460 return -EINVAL; 3461 3462 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21) 3463 return -E2BIG; 3464 3465 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) 3466 return -EINVAL; 3467 3468 o = (cmd & PCI_X_CMD_MAX_READ) >> 2; 3469 if (o != v) { 3470 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC)) 3471 return -EIO; 3472 3473 cmd &= ~PCI_X_CMD_MAX_READ; 3474 cmd |= v << 2; 3475 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd)) 3476 return -EIO; 3477 } 3478 return 0; 3479 } 3480 EXPORT_SYMBOL(pcix_set_mmrbc); 3481 3482 /** 3483 * pcie_get_readrq - get PCI Express read request size 3484 * @dev: PCI device to query 3485 * 3486 * Returns maximum memory read request in bytes 3487 * or appropriate error value.
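 *
 * Illustrative sketch (not part of the original source): the register
 * field encodes 128 << n, so a driver capping its read requests at 256
 * bytes would do
 *
 *	if (pcie_get_readrq(pdev) > 256)
 *		pcie_set_readrq(pdev, 256);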
3488 */ 3489 int pcie_get_readrq(struct pci_dev *dev) 3490 { 3491 u16 ctl; 3492 3493 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); 3494 3495 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); 3496 } 3497 EXPORT_SYMBOL(pcie_get_readrq); 3498 3499 /** 3500 * pcie_set_readrq - set PCI Express maximum memory read request 3501 * @dev: PCI device to query 3502 * @rq: maximum memory read count in bytes 3503 * valid values are 128, 256, 512, 1024, 2048, 4096 3504 * 3505 * If possible sets maximum memory read request in bytes 3506 */ 3507 int pcie_set_readrq(struct pci_dev *dev, int rq) 3508 { 3509 u16 v; 3510 3511 if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) 3512 return -EINVAL; 3513 3514 /* 3515 * If using the "performance" PCIe config, we clamp the 3516 * read rq size to the max packet size to prevent the 3517 * host bridge generating requests larger than we can 3518 * cope with 3519 */ 3520 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { 3521 int mps = pcie_get_mps(dev); 3522 3523 if (mps < 0) 3524 return mps; 3525 if (mps < rq) 3526 rq = mps; 3527 } 3528 3529 v = (ffs(rq) - 8) << 12; 3530 3531 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, 3532 PCI_EXP_DEVCTL_READRQ, v); 3533 } 3534 EXPORT_SYMBOL(pcie_set_readrq); 3535 3536 /** 3537 * pcie_get_mps - get PCI Express maximum payload size 3538 * @dev: PCI device to query 3539 * 3540 * Returns maximum payload size in bytes 3541 * or appropriate error value. 3542 */ 3543 int pcie_get_mps(struct pci_dev *dev) 3544 { 3545 u16 ctl; 3546 3547 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); 3548 3549 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); 3550 } 3551 3552 /** 3553 * pcie_set_mps - set PCI Express maximum payload size 3554 * @dev: PCI device to query 3555 * @mps: maximum payload size in bytes 3556 * valid values are 128, 256, 512, 1024, 2048, 4096 3557 * 3558 * If possible sets maximum payload size 3559 */ 3560 int pcie_set_mps(struct pci_dev *dev, int mps) 3561 { 3562 u16 v; 3563 3564 if (mps < 128 || mps > 4096 || !is_power_of_2(mps)) 3565 return -EINVAL; 3566 3567 v = ffs(mps) - 8; 3568 if (v > dev->pcie_mpss) 3569 return -EINVAL; 3570 v <<= 5; 3571 3572 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, 3573 PCI_EXP_DEVCTL_PAYLOAD, v); 3574 } 3575 3576 /** 3577 * pci_select_bars - Make BAR mask from the type of resource 3578 * @dev: the PCI device for which BAR mask is made 3579 * @flags: resource type mask to be selected 3580 * 3581 * This helper routine makes bar mask from the type of resource. 3582 */ 3583 int pci_select_bars(struct pci_dev *dev, unsigned long flags) 3584 { 3585 int i, bars = 0; 3586 for (i = 0; i < PCI_NUM_RESOURCES; i++) 3587 if (pci_resource_flags(dev, i) & flags) 3588 bars |= (1 << i); 3589 return bars; 3590 } 3591 3592 /** 3593 * pci_resource_bar - get position of the BAR associated with a resource 3594 * @dev: the PCI device 3595 * @resno: the resource number 3596 * @type: the BAR type to be filled in 3597 * 3598 * Returns BAR position in config space, or 0 if the BAR is invalid. 
3599 */ 3600 int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) 3601 { 3602 int reg; 3603 3604 if (resno < PCI_ROM_RESOURCE) { 3605 *type = pci_bar_unknown; 3606 return PCI_BASE_ADDRESS_0 + 4 * resno; 3607 } else if (resno == PCI_ROM_RESOURCE) { 3608 *type = pci_bar_mem32; 3609 return dev->rom_base_reg; 3610 } else if (resno < PCI_BRIDGE_RESOURCES) { 3611 /* device specific resource */ 3612 reg = pci_iov_resource_bar(dev, resno, type); 3613 if (reg) 3614 return reg; 3615 } 3616 3617 dev_err(&dev->dev, "BAR %d: invalid resource\n", resno); 3618 return 0; 3619 } 3620 3621 /* Some architectures require additional programming to enable VGA */ 3622 static arch_set_vga_state_t arch_set_vga_state; 3623 3624 void __init pci_register_set_vga_state(arch_set_vga_state_t func) 3625 { 3626 arch_set_vga_state = func; /* NULL disables */ 3627 } 3628 3629 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode, 3630 unsigned int command_bits, u32 flags) 3631 { 3632 if (arch_set_vga_state) 3633 return arch_set_vga_state(dev, decode, command_bits, 3634 flags); 3635 return 0; 3636 } 3637 3638 /** 3639 * pci_set_vga_state - set VGA decode state on device and parents if requested 3640 * @dev: the PCI device 3641 * @decode: true = enable decoding, false = disable decoding 3642 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY 3643 * @flags: traverse ancestors and change bridges 3644 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE 3645 */ 3646 int pci_set_vga_state(struct pci_dev *dev, bool decode, 3647 unsigned int command_bits, u32 flags) 3648 { 3649 struct pci_bus *bus; 3650 struct pci_dev *bridge; 3651 u16 cmd; 3652 int rc; 3653 3654 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY))); 3655 3656 /* ARCH specific VGA enables */ 3657 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags); 3658 if (rc) 3659 return rc; 3660 3661 if (flags & PCI_VGA_STATE_CHANGE_DECODES) { 3662 pci_read_config_word(dev, PCI_COMMAND, &cmd); 3663 if (decode == true) 3664 cmd |= command_bits; 3665 else 3666 cmd &= ~command_bits; 3667 pci_write_config_word(dev, PCI_COMMAND, cmd); 3668 } 3669 3670 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE)) 3671 return 0; 3672 3673 bus = dev->bus; 3674 while (bus) { 3675 bridge = bus->self; 3676 if (bridge) { 3677 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, 3678 &cmd); 3679 if (decode == true) 3680 cmd |= PCI_BRIDGE_CTL_VGA; 3681 else 3682 cmd &= ~PCI_BRIDGE_CTL_VGA; 3683 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, 3684 cmd); 3685 } 3686 bus = bus->parent; 3687 } 3688 return 0; 3689 } 3690 3691 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE 3692 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; 3693 static DEFINE_SPINLOCK(resource_alignment_lock); 3694 3695 /** 3696 * pci_specified_resource_alignment - get resource alignment specified by user. 3697 * @dev: the PCI device to get 3698 * 3699 * RETURNS: Resource alignment if it is specified. 3700 * Zero if it is not specified. 
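 *
 * Example (derived from the parsing below): booting with
 * "pci=resource_alignment=20@0000:00:02.0" requests 2^20 (1 MiB)
 * alignment for the device at domain 0000, bus 00, slot 02, function 0;
 * with no "<order>@" prefix the alignment defaults to PAGE_SIZE.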
3701 */ 3702 resource_size_t pci_specified_resource_alignment(struct pci_dev *dev) 3703 { 3704 int seg, bus, slot, func, align_order, count; 3705 resource_size_t align = 0; 3706 char *p; 3707 3708 spin_lock(&resource_alignment_lock); 3709 p = resource_alignment_param; 3710 while (*p) { 3711 count = 0; 3712 if (sscanf(p, "%d%n", &align_order, &count) == 1 && 3713 p[count] == '@') { 3714 p += count + 1; 3715 } else { 3716 align_order = -1; 3717 } 3718 if (sscanf(p, "%x:%x:%x.%x%n", 3719 &seg, &bus, &slot, &func, &count) != 4) { 3720 seg = 0; 3721 if (sscanf(p, "%x:%x.%x%n", 3722 &bus, &slot, &func, &count) != 3) { 3723 /* Invalid format */ 3724 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n", 3725 p); 3726 break; 3727 } 3728 } 3729 p += count; 3730 if (seg == pci_domain_nr(dev->bus) && 3731 bus == dev->bus->number && 3732 slot == PCI_SLOT(dev->devfn) && 3733 func == PCI_FUNC(dev->devfn)) { 3734 if (align_order == -1) { 3735 align = PAGE_SIZE; 3736 } else { 3737 align = 1 << align_order; 3738 } 3739 /* Found */ 3740 break; 3741 } 3742 if (*p != ';' && *p != ',') { 3743 /* End of param or invalid format */ 3744 break; 3745 } 3746 p++; 3747 } 3748 spin_unlock(&resource_alignment_lock); 3749 return align; 3750 } 3751 3752 /* 3753 * This function disables memory decoding and releases memory resources 3754 * of the device specified by kernel's boot parameter 'pci=resource_alignment='. 3755 * It also rounds up size to specified alignment. 3756 * Later on, the kernel will assign page-aligned memory resource back 3757 * to the device. 3758 */ 3759 void pci_reassigndev_resource_alignment(struct pci_dev *dev) 3760 { 3761 int i; 3762 struct resource *r; 3763 resource_size_t align, size; 3764 u16 command; 3765 3766 /* check if specified PCI is target device to reassign */ 3767 align = pci_specified_resource_alignment(dev); 3768 if (!align) 3769 return; 3770 3771 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL && 3772 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) { 3773 dev_warn(&dev->dev, 3774 "Can't reassign resources to host bridge.\n"); 3775 return; 3776 } 3777 3778 dev_info(&dev->dev, 3779 "Disabling memory decoding and releasing memory resources.\n"); 3780 pci_read_config_word(dev, PCI_COMMAND, &command); 3781 command &= ~PCI_COMMAND_MEMORY; 3782 pci_write_config_word(dev, PCI_COMMAND, command); 3783 3784 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) { 3785 r = &dev->resource[i]; 3786 if (!(r->flags & IORESOURCE_MEM)) 3787 continue; 3788 size = resource_size(r); 3789 if (size < align) { 3790 size = align; 3791 dev_info(&dev->dev, 3792 "Rounding up size of resource #%d to %#llx.\n", 3793 i, (unsigned long long)size); 3794 } 3795 r->end = size - 1; 3796 r->start = 0; 3797 } 3798 /* Need to disable bridge's resource window, 3799 * to enable the kernel to reassign new resource 3800 * window later on. 
3801 */ 3802 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && 3803 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { 3804 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { 3805 r = &dev->resource[i]; 3806 if (!(r->flags & IORESOURCE_MEM)) 3807 continue; 3808 r->end = resource_size(r) - 1; 3809 r->start = 0; 3810 } 3811 pci_disable_bridge_window(dev); 3812 } 3813 } 3814 3815 ssize_t pci_set_resource_alignment_param(const char *buf, size_t count) 3816 { 3817 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1) 3818 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1; 3819 spin_lock(&resource_alignment_lock); 3820 strncpy(resource_alignment_param, buf, count); 3821 resource_alignment_param[count] = '\0'; 3822 spin_unlock(&resource_alignment_lock); 3823 return count; 3824 } 3825 3826 ssize_t pci_get_resource_alignment_param(char *buf, size_t size) 3827 { 3828 size_t count; 3829 spin_lock(&resource_alignment_lock); 3830 count = snprintf(buf, size, "%s", resource_alignment_param); 3831 spin_unlock(&resource_alignment_lock); 3832 return count; 3833 } 3834 3835 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf) 3836 { 3837 return pci_get_resource_alignment_param(buf, PAGE_SIZE); 3838 } 3839 3840 static ssize_t pci_resource_alignment_store(struct bus_type *bus, 3841 const char *buf, size_t count) 3842 { 3843 return pci_set_resource_alignment_param(buf, count); 3844 } 3845 3846 BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show, 3847 pci_resource_alignment_store); 3848 3849 static int __init pci_resource_alignment_sysfs_init(void) 3850 { 3851 return bus_create_file(&pci_bus_type, 3852 &bus_attr_resource_alignment); 3853 } 3854 3855 late_initcall(pci_resource_alignment_sysfs_init); 3856 3857 static void pci_no_domains(void) 3858 { 3859 #ifdef CONFIG_PCI_DOMAINS 3860 pci_domains_supported = 0; 3861 #endif 3862 } 3863 3864 /** 3865 * pci_ext_cfg_avail - can we access extended PCI config space? 3866 * 3867 * Returns 1 if we can access PCI extended config space (offsets 3868 * greater than 0xff). This is the default implementation. Architecture 3869 * implementations can override this. 
static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff).  This is the default implementation.  Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');

		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
							strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
				       str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
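/*
 * For illustration, a hypothetical command line combining several of
 * the options handled by pci_setup() above.  Options are split on ','
 * by pci_setup(), so resource_alignment entries for multiple devices
 * must use the ';' separator here:
 *
 *	pci=nomsi,pcie_bus_safe,hpmemsize=8M,resource_alignment=20@01:00.0;02:00.0
 */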
EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);