/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

/*
 * Platform-wide minimum delay (ms) after putting a device into D3hot;
 * used as a floor for the per-device d3_delay in pci_dev_d3_sleep().
 */
unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

/* Devices with PME enabled that must be polled; guarded by the mutex. */
static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

/* One entry on pci_pme_list per polled device. */
struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000	/* How long between PME checks */

/*
 * Sleep for the D3hot settle time: the larger of the device's own
 * d3_delay and the platform-wide pci_pm_d3_delay minimum.
 */
static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
107 */ 108 unsigned char pci_bus_max_busnr(struct pci_bus* bus) 109 { 110 struct list_head *tmp; 111 unsigned char max, n; 112 113 max = bus->busn_res.end; 114 list_for_each(tmp, &bus->children) { 115 n = pci_bus_max_busnr(pci_bus_b(tmp)); 116 if(n > max) 117 max = n; 118 } 119 return max; 120 } 121 EXPORT_SYMBOL_GPL(pci_bus_max_busnr); 122 123 #ifdef CONFIG_HAS_IOMEM 124 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) 125 { 126 /* 127 * Make sure the BAR is actually a memory resource, not an IO resource 128 */ 129 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { 130 WARN_ON(1); 131 return NULL; 132 } 133 return ioremap_nocache(pci_resource_start(pdev, bar), 134 pci_resource_len(pdev, bar)); 135 } 136 EXPORT_SYMBOL_GPL(pci_ioremap_bar); 137 #endif 138 139 #define PCI_FIND_CAP_TTL 48 140 141 static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn, 142 u8 pos, int cap, int *ttl) 143 { 144 u8 id; 145 146 while ((*ttl)--) { 147 pci_bus_read_config_byte(bus, devfn, pos, &pos); 148 if (pos < 0x40) 149 break; 150 pos &= ~3; 151 pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID, 152 &id); 153 if (id == 0xff) 154 break; 155 if (id == cap) 156 return pos; 157 pos += PCI_CAP_LIST_NEXT; 158 } 159 return 0; 160 } 161 162 static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn, 163 u8 pos, int cap) 164 { 165 int ttl = PCI_FIND_CAP_TTL; 166 167 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); 168 } 169 170 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap) 171 { 172 return __pci_find_next_cap(dev->bus, dev->devfn, 173 pos + PCI_CAP_LIST_NEXT, cap); 174 } 175 EXPORT_SYMBOL_GPL(pci_find_next_capability); 176 177 static int __pci_bus_find_cap_start(struct pci_bus *bus, 178 unsigned int devfn, u8 hdr_type) 179 { 180 u16 status; 181 182 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status); 183 if (!(status & PCI_STATUS_CAP_LIST)) 184 return 0; 185 186 switch (hdr_type) { 187 case 
PCI_HEADER_TYPE_NORMAL: 188 case PCI_HEADER_TYPE_BRIDGE: 189 return PCI_CAPABILITY_LIST; 190 case PCI_HEADER_TYPE_CARDBUS: 191 return PCI_CB_CAPABILITY_LIST; 192 default: 193 return 0; 194 } 195 196 return 0; 197 } 198 199 /** 200 * pci_find_capability - query for devices' capabilities 201 * @dev: PCI device to query 202 * @cap: capability code 203 * 204 * Tell if a device supports a given PCI capability. 205 * Returns the address of the requested capability structure within the 206 * device's PCI configuration space or 0 in case the device does not 207 * support it. Possible values for @cap: 208 * 209 * %PCI_CAP_ID_PM Power Management 210 * %PCI_CAP_ID_AGP Accelerated Graphics Port 211 * %PCI_CAP_ID_VPD Vital Product Data 212 * %PCI_CAP_ID_SLOTID Slot Identification 213 * %PCI_CAP_ID_MSI Message Signalled Interrupts 214 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap 215 * %PCI_CAP_ID_PCIX PCI-X 216 * %PCI_CAP_ID_EXP PCI Express 217 */ 218 int pci_find_capability(struct pci_dev *dev, int cap) 219 { 220 int pos; 221 222 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type); 223 if (pos) 224 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap); 225 226 return pos; 227 } 228 229 /** 230 * pci_bus_find_capability - query for devices' capabilities 231 * @bus: the PCI bus to query 232 * @devfn: PCI device to query 233 * @cap: capability code 234 * 235 * Like pci_find_capability() but works for pci devices that do not have a 236 * pci_dev structure set up yet. 237 * 238 * Returns the address of the requested capability structure within the 239 * device's PCI configuration space or 0 in case the device does not 240 * support it. 
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	/* Mask off the multi-function bit (bit 7) of the header type. */
	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	/* Extended capabilities live only beyond the 256-byte space. */
	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		/* pos != start skips the entry we were asked to continue from */
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;	/* next pointer out of extended space: end */

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

/*
 * Walk the device's PCI_CAP_ID_HT capabilities starting at @pos and
 * return the offset of the first one whose HT capability type matches
 * @ht_cap, or 0 if none.  Slave/host capabilities use the 3-bit type
 * mask; all other types use the 5-bit mask.
 */
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		/* Byte at pos + 3 holds the HT capability type field. */
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability
code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the request capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 *  For given resource region of given device, return the resource
 *  region of parent bus the given region is contained in or where
 *  it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;
		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
		if (!best)
			best = r;
	}
	return best;
}

/**
 * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
	int i;

	/* Only the standard BARs, not the bridge windows. */
	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

/* Platform (e.g. firmware) power-management hooks, if registered. */
static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	/* All mandatory callbacks must be provided; run_wake is optional. */
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
						pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	/* This path handles only D0..D3hot; D3cold is a platform matter. */
	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * Can enter D0 from any state, but if we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		/*
		 * Without No_Soft_Reset the device may reset internally on
		 * the D3hot->D0 transition, losing its BAR programming.
		 */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	/* Read back the state the device actually entered. */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		/*
		 * Configuration space is not accessible for device in
		 * D3cold, so just keep or set D3cold for safety
		 */
		if (dev->current_state == PCI_D3cold)
			return;
		if (state == PCI_D3cold) {
			dev->current_state = PCI_D3cold;
			return;
		}
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
	/* Ask the platform first, then do the native PM transition too. */
	if (platform_pci_power_manageable(dev))
		platform_pci_set_power_state(dev, PCI_D0);

	pci_raw_set_power_state(dev, PCI_D0);
	pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0) {
		pci_platform_power_transition(dev, PCI_D0);
		/*
		 * Mandatory power management transition delays, see
		 * PCI Express Base Specification Revision 2.0 Section
		 * 6.6.1: Conventional Reset.  Do not delay for
		 * devices powered on/off by corresponding bridge,
		 * because have already delayed for the bridge.
		 */
		if (dev->runtime_d3cold) {
			msleep(dev->d3cold_delay);
			/*
			 * When powering on a bridge from D3cold, the
			 * whole hierarchy may be powered on into
			 * D0uninitialized state, resume them to give
			 * them a chance to suspend again
			 */
			pci_wakeup_bus(dev->subordinate);
		}
	}
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 *
 * pci_walk_bus() callback; always returns 0 so the walk continues.
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * __pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int ret;

	if (state <= PCI_D0)
		return -EINVAL;
	ret = pci_platform_power_transition(dev, state);
	/* Power off the bridge may power off the whole hierarchy */
	if (!ret && state == PCI_D3cold)
		__pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
	return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;
	/*
	 * When aspm_policy is "powersave" this call ensures
	 * that ASPM is configured.
	 */
	if (!error && dev->bus->self)
		pcie_aspm_powersave_config_link(dev->bus->self);

	return error;
}

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	/* A device without the PM capability always stays in D0. */
	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);

/* Number of 16-bit PCIe registers saved by pci_save_pcie_state(). */
#define PCI_EXP_SAVE_REGS	7


static struct pci_cap_saved_state *pci_find_saved_cap(
	struct pci_dev *pci_dev, char cap)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	/* Save order must match the restore order below. */
	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	/* Restore order must match the save order above. */
	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}


static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}


/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: - PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}

/*
 * Write @saved_val to config offset @offset, re-reading and re-writing up
 * to @retry times until the readback matches (retry == 0 writes once
 * without verification).
 */
static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		dev_dbg(&pdev->dev, "restoring config space at offset "
			"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

/* Restore dwords @start..@end of saved config space, highest offset first. */
static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}

/* Opaque snapshot returned by pci_store_saved_state(). */
struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];	/* zero-size-terminated array */
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	/* One extra pci_cap_saved_data acts as the zero-size terminator. */
	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	/* Walk until the zero-size terminator appended by store. */
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = pci_find_saved_cap(dev, cap->cap_nr);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
	struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	/*
	 * NOTE(review): -EIO is tolerated here — presumably it indicates the
	 * device has no usable PM support but may already be in D0; confirm
	 * against pci_set_power_state()'s error contract.
	 */
	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume and is not supposed
 * to be called by normal code, write proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call. So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}

/*
 * Managed PCI resources. This manages device on/off, intx/msi/msix
 * on/off and BAR regions. pci_dev itself records msi/msix status, so
 * there's no need to track it separately. pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;		/* device was enabled via pcim_enable_device() */
	unsigned int pinned:1;		/* don't disable on driver detach */
	unsigned int orig_intx:1;	/* INTx state to restore */
	unsigned int restore_intx:1;	/* restore orig_intx on release */
	u32 region_mask;		/* BARs claimed through managed API */
};

/* devres release callback: undo everything pcim_* set up, in reverse order. */
static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

/* Find the device's pci_devres, allocating and registering one if absent. */
static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

/* Lookup-only variant: returns NULL unless the device is managed. */
static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}

/*
 * pcibios_add_device - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pcibios_add_device (struct pci_dev *dev)
{
	return 0;
}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device (struct pci_dev *dev) {}

/* Turn off bus mastering and let the arch undo its enable-time setup. */
static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
		      "disabling already-disabled device");

	/* Only the last pci_disable_device() actually disables the hardware. */
	if (atomic_dec_return(&dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status (the bit is write-1-to-clear). */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
1473 */ 1474 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset) 1475 { 1476 if (pme_poll_reset && dev->pme_poll) 1477 dev->pme_poll = false; 1478 1479 if (pci_check_pme_status(dev)) { 1480 pci_wakeup_event(dev); 1481 pm_request_resume(&dev->dev); 1482 } 1483 return 0; 1484 } 1485 1486 /** 1487 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary. 1488 * @bus: Top bus of the subtree to walk. 1489 */ 1490 void pci_pme_wakeup_bus(struct pci_bus *bus) 1491 { 1492 if (bus) 1493 pci_walk_bus(bus, pci_pme_wakeup, (void *)true); 1494 } 1495 1496 /** 1497 * pci_wakeup - Wake up a PCI device 1498 * @pci_dev: Device to handle. 1499 * @ign: ignored parameter 1500 */ 1501 static int pci_wakeup(struct pci_dev *pci_dev, void *ign) 1502 { 1503 pci_wakeup_event(pci_dev); 1504 pm_request_resume(&pci_dev->dev); 1505 return 0; 1506 } 1507 1508 /** 1509 * pci_wakeup_bus - Walk given bus and wake up devices on it 1510 * @bus: Top bus of the subtree to walk. 1511 */ 1512 void pci_wakeup_bus(struct pci_bus *bus) 1513 { 1514 if (bus) 1515 pci_walk_bus(bus, pci_wakeup, NULL); 1516 } 1517 1518 /** 1519 * pci_pme_capable - check the capability of PCI device to generate PME# 1520 * @dev: PCI device to handle. 1521 * @state: PCI state from which device will issue PME#. 
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	/* pme_support is a bitmask indexed by pci_power_t state. */
	return !!(dev->pme_support & (1 << state));
}

/* Delayed work: poll devices whose PME must be checked by hand. */
static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	if (!list_empty(&pci_pme_list)) {
		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
			if (pme_dev->dev->pme_poll) {
				struct pci_dev *bridge;

				bridge = pme_dev->dev->bus->self;
				/*
				 * If bridge is in low power state, the
				 * configuration space of subordinate devices
				 * may be not accessible
				 */
				if (bridge && bridge->current_state != PCI_D0)
					continue;
				pci_pme_wakeup(pme_dev->dev, NULL);
			} else {
				/* Device no longer needs polling; drop it. */
				list_del(&pme_dev->list);
				kfree(pme_dev);
			}
		}
		if (!list_empty(&pci_pme_list))
			schedule_delayed_work(&pci_pme_work,
					      msecs_to_jiffies(PME_TIMEOUT));
	}
	mutex_unlock(&pci_pme_list_mutex);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep. The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set. The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 *
	 * Although PCIe uses in-band PME message instead of PME# line
	 * to report PME, PME does not work for some PCIe devices in
	 * reality. For example, there are devices that set their PME
	 * status bits, but don't really bother to send a PME message;
	 * there are PCI Express Root Ports that don't bother to
	 * trigger interrupts when they receive PME messages from the
	 * devices below. So PME poll is used for PCIe devices too.
	 */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev)
				goto out;
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			/* First entry kicks off the polling work. */
			if (list_is_singular(&pci_pme_list))
				schedule_delayed_work(&pci_pme_work,
						      msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

out:
	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involves platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable. To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = runtime ? platform_pci_run_wake(dev, true) :
					platform_pci_sleep_wake(dev, true);
		/* Platform success rescues a failed native PME enable. */
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}

/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
			/* fall through - D1/D2 is allowed, use it */
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	/* D3cold during system suspend/hibernate is not supported */
	if (target_state > PCI_D3hot)
		target_state = PCI_D3hot;

	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	dev->runtime_d3cold = target_state == PCI_D3cold;

	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error) {
		/* Roll back wake-up setup if we couldn't change power state. */
		__pci_enable_wake(dev, target_state, true, false);
		dev->runtime_d3cold = false;
	}

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device supports
 * PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (device_run_wake(&dev->dev))
		return true;

	if (!dev->pme_support)
		return false;

	/* Walk upstream: any bridge that can run-wake can deliver our PME. */
	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_run_wake(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_run_wake(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);

/*
 * Hold a runtime PM reference (and resume from D3cold if needed) so that
 * the device's config space is accessible; pair with
 * pci_config_pm_runtime_put().
 */
void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	if (parent)
		pm_runtime_get_sync(parent);
	pm_runtime_get_noresume(dev);
	/*
	 * pdev->current_state is set to PCI_D3cold during suspending,
	 * so wait until suspending completes
	 */
	pm_runtime_barrier(dev);
	/*
	 * Only need to resume devices in D3cold, because config
	 * registers are still accessible for devices suspended but
	 * not in D3cold.
	 */
	if (pdev->current_state == PCI_D3cold)
		pm_runtime_resume(dev);
}

/* Drop the references taken by pci_config_pm_runtime_get(). */
void pci_config_pm_runtime_put(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	pm_runtime_put(dev);
	if (parent)
		pm_runtime_put_sync(parent);
}

/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 pmc;

	pm_runtime_forbid(&dev->dev);
	pm_runtime_set_active(&dev->dev);
	pm_runtime_enable(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;
	dev->pme_support = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;
	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
	dev->d3cold_allowed = true;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
				   dev->d1_support ? " D1" : "",
				   dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		dev_printk(KERN_DEBUG, &dev->dev,
			 "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	}
}

/* Link a newly allocated capability save buffer into the device's list. */
static void pci_add_saved_cap(struct pci_dev *pci_dev,
	struct pci_cap_saved_state *new_cap)
{
	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
}

/**
 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @size: requested size of the buffer
 */
static int pci_add_cap_save_buffer(
	struct pci_dev *dev, char cap, unsigned int size)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, cap);
	if (pos <= 0)
		return 0;

	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
	if (!save_state)
		return -ENOMEM;

	save_state->cap.cap_nr = cap;
	save_state->cap.size = size;
	pci_add_saved_cap(dev, save_state);

	return 0;
}

/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
 * @dev: the PCI device
 */
void pci_allocate_cap_save_buffers(struct pci_dev *dev)
{
	int error;

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
					PCI_EXP_SAVE_REGS * sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI Express save buffer\n");

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI-X save buffer\n");
}

void pci_free_cap_save_buffers(struct pci_dev *dev)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *n;

	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
		kfree(tmp);
}

/**
 * pci_configure_ari - enable or disable ARI forwarding
 * @dev: the PCI device
 *
 * If @dev and its upstream bridge both support ARI, enable ARI in the
 * bridge.  Otherwise, disable ARI in the bridge.
 */
void pci_configure_ari(struct pci_dev *dev)
{
	u32 cap;
	struct pci_dev *bridge;

	/* Only configure from function 0; other functions are covered by it. */
	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
		return;

	bridge = dev->bus->self;
	if (!bridge)
		return;

	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_ARI))
		return;

	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
					 PCI_EXP_DEVCTL2_ARI);
		bridge->ari_enabled = 1;
	} else {
		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
					   PCI_EXP_DEVCTL2_ARI);
		bridge->ari_enabled = 0;
	}
}

/**
 * pci_enable_ido - enable ID-based Ordering on a device
 * @dev: the PCI device
 * @type: which types of IDO to enable
 *
 * Enable ID-based ordering on @dev.  @type can contain the bits
 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
 * which types of transactions are allowed to be re-ordered.
 */
void pci_enable_ido(struct pci_dev *dev, unsigned long type)
{
	u16 ctrl = 0;

	if (type & PCI_EXP_IDO_REQUEST)
		ctrl |= PCI_EXP_IDO_REQ_EN;
	if (type & PCI_EXP_IDO_COMPLETION)
		ctrl |= PCI_EXP_IDO_CMP_EN;
	if (ctrl)
		pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_enable_ido);

/**
 * pci_disable_ido - disable ID-based ordering on a device
 * @dev: the PCI device
 * @type: which types of IDO to disable
 */
void pci_disable_ido(struct pci_dev *dev, unsigned long type)
{
	u16 ctrl = 0;

	if (type & PCI_EXP_IDO_REQUEST)
		ctrl |= PCI_EXP_IDO_REQ_EN;
	if (type & PCI_EXP_IDO_COMPLETION)
		ctrl |= PCI_EXP_IDO_CMP_EN;
	if (ctrl)
		pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_ido);

/**
 * pci_enable_obff - enable optimized buffer flush/fill
 * @dev: PCI device
 * @type: type of signaling to use
 *
 * Try to enable @type OBFF signaling on @dev.  It will try using WAKE#
 * signaling if possible, falling back to message signaling only if
 * WAKE# isn't supported.  @type should indicate whether the PCIe link
 * be brought out of L0s or L1 to send the message.  It should be either
 * %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_OBFF_SIGNAL_L0.
 *
 * If your device can benefit from receiving all messages, even at the
 * power cost of bringing the link back up from a low power state, use
 * %PCI_EXP_OBFF_SIGNAL_ALWAYS.  Otherwise, use %PCI_OBFF_SIGNAL_L0 (the
 * preferred type).
 *
 * RETURNS:
 * Zero on success, appropriate error number on failure.
2132 */ 2133 int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type) 2134 { 2135 u32 cap; 2136 u16 ctrl; 2137 int ret; 2138 2139 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap); 2140 if (!(cap & PCI_EXP_OBFF_MASK)) 2141 return -ENOTSUPP; /* no OBFF support at all */ 2142 2143 /* Make sure the topology supports OBFF as well */ 2144 if (dev->bus->self) { 2145 ret = pci_enable_obff(dev->bus->self, type); 2146 if (ret) 2147 return ret; 2148 } 2149 2150 pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl); 2151 if (cap & PCI_EXP_OBFF_WAKE) 2152 ctrl |= PCI_EXP_OBFF_WAKE_EN; 2153 else { 2154 switch (type) { 2155 case PCI_EXP_OBFF_SIGNAL_L0: 2156 if (!(ctrl & PCI_EXP_OBFF_WAKE_EN)) 2157 ctrl |= PCI_EXP_OBFF_MSGA_EN; 2158 break; 2159 case PCI_EXP_OBFF_SIGNAL_ALWAYS: 2160 ctrl &= ~PCI_EXP_OBFF_WAKE_EN; 2161 ctrl |= PCI_EXP_OBFF_MSGB_EN; 2162 break; 2163 default: 2164 WARN(1, "bad OBFF signal type\n"); 2165 return -ENOTSUPP; 2166 } 2167 } 2168 pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, ctrl); 2169 2170 return 0; 2171 } 2172 EXPORT_SYMBOL(pci_enable_obff); 2173 2174 /** 2175 * pci_disable_obff - disable optimized buffer flush/fill 2176 * @dev: PCI device 2177 * 2178 * Disable OBFF on @dev. 2179 */ 2180 void pci_disable_obff(struct pci_dev *dev) 2181 { 2182 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_OBFF_WAKE_EN); 2183 } 2184 EXPORT_SYMBOL(pci_disable_obff); 2185 2186 /** 2187 * pci_ltr_supported - check whether a device supports LTR 2188 * @dev: PCI device 2189 * 2190 * RETURNS: 2191 * True if @dev supports latency tolerance reporting, false otherwise. 
 */
static bool pci_ltr_supported(struct pci_dev *dev)
{
        u32 cap;

        pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);

        return cap & PCI_EXP_DEVCAP2_LTR;
}

/**
 * pci_enable_ltr - enable latency tolerance reporting
 * @dev: PCI device
 *
 * Enable LTR on @dev if possible, which means enabling it first on
 * upstream ports.
 *
 * RETURNS:
 * Zero on success, errno on failure.
 */
int pci_enable_ltr(struct pci_dev *dev)
{
        int ret;

        /* Only primary function can enable/disable LTR */
        if (PCI_FUNC(dev->devfn) != 0)
                return -EINVAL;

        if (!pci_ltr_supported(dev))
                return -ENOTSUPP;

        /* Enable upstream ports first */
        if (dev->bus->self) {
                ret = pci_enable_ltr(dev->bus->self);
                if (ret)
                        return ret;
        }

        return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
}
EXPORT_SYMBOL(pci_enable_ltr);

/**
 * pci_disable_ltr - disable latency tolerance reporting
 * @dev: PCI device
 */
void pci_disable_ltr(struct pci_dev *dev)
{
        /* Only primary function can enable/disable LTR */
        if (PCI_FUNC(dev->devfn) != 0)
                return;

        if (!pci_ltr_supported(dev))
                return;

        pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
}
EXPORT_SYMBOL(pci_disable_ltr);

/*
 * Convert a latency in ns into an LTR value/scale pair: divide by 32
 * (rounding up) until the value fits the 10-bit LTR value field (<= 1023),
 * counting how many divisions were needed.
 */
static int __pci_ltr_scale(int *val)
{
        int scale = 0;

        while (*val > 1023) {
                *val = (*val + 31) / 32;
                scale++;
        }
        return scale;
}

/**
 * pci_set_ltr - set LTR latency values
 * @dev: PCI device
 * @snoop_lat_ns: snoop latency in nanoseconds
 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
 *
 * Figure out the scale and set the LTR values accordingly.
2269 */ 2270 int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns) 2271 { 2272 int pos, ret, snoop_scale, nosnoop_scale; 2273 u16 val; 2274 2275 if (!pci_ltr_supported(dev)) 2276 return -ENOTSUPP; 2277 2278 snoop_scale = __pci_ltr_scale(&snoop_lat_ns); 2279 nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns); 2280 2281 if (snoop_lat_ns > PCI_LTR_VALUE_MASK || 2282 nosnoop_lat_ns > PCI_LTR_VALUE_MASK) 2283 return -EINVAL; 2284 2285 if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) || 2286 (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT))) 2287 return -EINVAL; 2288 2289 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); 2290 if (!pos) 2291 return -ENOTSUPP; 2292 2293 val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns; 2294 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val); 2295 if (ret != 4) 2296 return -EIO; 2297 2298 val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns; 2299 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val); 2300 if (ret != 4) 2301 return -EIO; 2302 2303 return 0; 2304 } 2305 EXPORT_SYMBOL(pci_set_ltr); 2306 2307 static int pci_acs_enable; 2308 2309 /** 2310 * pci_request_acs - ask for ACS to be enabled if supported 2311 */ 2312 void pci_request_acs(void) 2313 { 2314 pci_acs_enable = 1; 2315 } 2316 2317 /** 2318 * pci_enable_acs - enable ACS if hardware support it 2319 * @dev: the PCI device 2320 */ 2321 void pci_enable_acs(struct pci_dev *dev) 2322 { 2323 int pos; 2324 u16 cap; 2325 u16 ctrl; 2326 2327 if (!pci_acs_enable) 2328 return; 2329 2330 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); 2331 if (!pos) 2332 return; 2333 2334 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap); 2335 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); 2336 2337 /* Source Validation */ 2338 ctrl |= (cap & PCI_ACS_SV); 2339 2340 /* P2P Request Redirect */ 2341 ctrl |= (cap & PCI_ACS_RR); 2342 2343 /* P2P Completion Redirect */ 2344 ctrl |= (cap & 
PCI_ACS_CR); 2345 2346 /* Upstream Forwarding */ 2347 ctrl |= (cap & PCI_ACS_UF); 2348 2349 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); 2350 } 2351 2352 /** 2353 * pci_acs_enabled - test ACS against required flags for a given device 2354 * @pdev: device to test 2355 * @acs_flags: required PCI ACS flags 2356 * 2357 * Return true if the device supports the provided flags. Automatically 2358 * filters out flags that are not implemented on multifunction devices. 2359 */ 2360 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) 2361 { 2362 int pos, ret; 2363 u16 ctrl; 2364 2365 ret = pci_dev_specific_acs_enabled(pdev, acs_flags); 2366 if (ret >= 0) 2367 return ret > 0; 2368 2369 if (!pci_is_pcie(pdev)) 2370 return false; 2371 2372 /* Filter out flags not applicable to multifunction */ 2373 if (pdev->multifunction) 2374 acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | 2375 PCI_ACS_EC | PCI_ACS_DT); 2376 2377 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM || 2378 pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || 2379 pdev->multifunction) { 2380 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS); 2381 if (!pos) 2382 return false; 2383 2384 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl); 2385 if ((ctrl & acs_flags) != acs_flags) 2386 return false; 2387 } 2388 2389 return true; 2390 } 2391 2392 /** 2393 * pci_acs_path_enable - test ACS flags from start to end in a hierarchy 2394 * @start: starting downstream device 2395 * @end: ending upstream device or NULL to search to the root bus 2396 * @acs_flags: required flags 2397 * 2398 * Walk up a device tree from start to end testing PCI ACS support. If 2399 * any step along the way does not support the required flags, return false. 
2400 */ 2401 bool pci_acs_path_enabled(struct pci_dev *start, 2402 struct pci_dev *end, u16 acs_flags) 2403 { 2404 struct pci_dev *pdev, *parent = start; 2405 2406 do { 2407 pdev = parent; 2408 2409 if (!pci_acs_enabled(pdev, acs_flags)) 2410 return false; 2411 2412 if (pci_is_root_bus(pdev->bus)) 2413 return (end == NULL); 2414 2415 parent = pdev->bus->self; 2416 } while (pdev != end); 2417 2418 return true; 2419 } 2420 2421 /** 2422 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge 2423 * @dev: the PCI device 2424 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTD, 4=INTD) 2425 * 2426 * Perform INTx swizzling for a device behind one level of bridge. This is 2427 * required by section 9.1 of the PCI-to-PCI bridge specification for devices 2428 * behind bridges on add-in cards. For devices with ARI enabled, the slot 2429 * number is always 0 (see the Implementation Note in section 2.2.8.1 of 2430 * the PCI Express Base Specification, Revision 2.1) 2431 */ 2432 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin) 2433 { 2434 int slot; 2435 2436 if (pci_ari_enabled(dev->bus)) 2437 slot = 0; 2438 else 2439 slot = PCI_SLOT(dev->devfn); 2440 2441 return (((pin - 1) + slot) % 4) + 1; 2442 } 2443 2444 int 2445 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) 2446 { 2447 u8 pin; 2448 2449 pin = dev->pin; 2450 if (!pin) 2451 return -1; 2452 2453 while (!pci_is_root_bus(dev->bus)) { 2454 pin = pci_swizzle_interrupt_pin(dev, pin); 2455 dev = dev->bus->self; 2456 } 2457 *bridge = dev; 2458 return pin; 2459 } 2460 2461 /** 2462 * pci_common_swizzle - swizzle INTx all the way to root bridge 2463 * @dev: the PCI device 2464 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTD, 4=INTD) 2465 * 2466 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI 2467 * bridges all the way up to a PCI root bus. 
 */
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
        u8 pin = *pinp;

        while (!pci_is_root_bus(dev->bus)) {
                pin = pci_swizzle_interrupt_pin(dev, pin);
                dev = dev->bus->self;
        }
        /* Report the swizzled pin and return the root-bus slot number */
        *pinp = pin;
        return PCI_SLOT(dev->devfn);
}

/**
 * pci_release_region - Release a PCI BAR
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region. Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
        struct pci_devres *dr;

        /* Zero-length BARs are unimplemented and were never requested */
        if (pci_resource_len(pdev, bar) == 0)
                return;
        if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
                release_region(pci_resource_start(pdev, bar),
                                pci_resource_len(pdev, bar));
        else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
                release_mem_region(pci_resource_start(pdev, bar),
                                pci_resource_len(pdev, bar));

        /* Keep the managed-device (pcim_) bookkeeping in sync */
        dr = find_pci_dr(pdev);
        if (dr)
                dr->region_mask &= ~(1 << bar);
}

/**
 * __pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name. Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error.
 * A warning
 * message is also printed on failure.
 */
static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
                                                                        int exclusive)
{
        struct pci_devres *dr;

        /* Zero-length BARs are unimplemented; nothing to reserve */
        if (pci_resource_len(pdev, bar) == 0)
                return 0;

        if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
                if (!request_region(pci_resource_start(pdev, bar),
                            pci_resource_len(pdev, bar), res_name))
                        goto err_out;
        }
        else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
                if (!__request_mem_region(pci_resource_start(pdev, bar),
                                        pci_resource_len(pdev, bar), res_name,
                                        exclusive))
                        goto err_out;
        }

        /* Record the claim for managed-device (pcim_) cleanup */
        dr = find_pci_dr(pdev);
        if (dr)
                dr->region_mask |= 1 << bar;

        return 0;

err_out:
        dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
                 &pdev->resource[bar]);
        return -EBUSY;
}

/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name. Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error. A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
        return __pci_request_region(pdev, bar, res_name, 0);
}

/**
 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.
 * Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error. A warning
 * message is also printed on failure.
 *
 * The key difference between _exclusive and the plain request is that
 * userspace is explicitly not allowed to map the resource via /dev/mem or
 * sysfs.
 */
int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
{
        return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}
/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
        int i;

        /* The six standard BARs are numbered 0-5 */
        for (i = 0; i < 6; i++)
                if (bars & (1 << i))
                        pci_release_region(pdev, i);
}

static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
                                 const char *res_name, int excl)
{
        int i;

        for (i = 0; i < 6; i++)
                if (bars & (1 << i))
                        if (__pci_request_region(pdev, i, res_name, excl))
                                goto err_out;
        return 0;

err_out:
        /* Roll back every region claimed before the failure */
        while(--i >= 0)
                if (bars & (1 << i))
                        pci_release_region(pdev, i);

        return -EBUSY;
}


/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
                                 const char *res_name)
{
        return __pci_request_selected_regions(pdev, bars, res_name, 0);
}

int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
                                 int bars, const char *res_name)
{
        return __pci_request_selected_regions(pdev, bars, res_name,
                        IORESOURCE_EXCLUSIVE);
}

/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions. Call this function only
 * after all use of the PCI regions has ceased.
 */

void pci_release_regions(struct pci_dev *pdev)
{
        /* (1 << 6) - 1 covers all six standard BARs */
        pci_release_selected_regions(pdev, (1 << 6) - 1);
}

/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name. Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error. A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
        return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}

/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name. Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * pci_request_regions_exclusive() will mark the region so that
 * /dev/mem and the sysfs MMIO access will not be allowed.
 *
 * Returns 0 on success, or %EBUSY on error. A warning
 * message is also printed on failure.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
        return pci_request_selected_regions_exclusive(pdev,
                                        ((1 << 6) - 1), res_name);
}

/* Set or clear the Bus Master bit in PCI_COMMAND, logging any change */
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
        u16 old_cmd, cmd;

        pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
        if (enable)
                cmd = old_cmd | PCI_COMMAND_MASTER;
        else
                cmd = old_cmd & ~PCI_COMMAND_MASTER;
        if (cmd != old_cmd) {
                dev_dbg(&dev->dev, "%s bus mastering\n",
                        enable ? "enabling" : "disabling");
                pci_write_config_word(dev, PCI_COMMAND, cmd);
        }
        dev->is_busmaster = enable;
}

/**
 * pcibios_setup - process "pci=" kernel boot arguments
 * @str: string used to pass in "pci=" kernel boot arguments
 *
 * Process kernel boot arguments. This is the default implementation.
 * Architecture specific implementations can override this as necessary.
 */
char * __weak __init pcibios_setup(char *str)
{
        return str;
}

/**
 * pcibios_set_master - enable PCI bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables PCI bus-mastering for the device. This is the default
 * implementation. Architecture specific implementations can override
 * this if necessary.
 */
void __weak pcibios_set_master(struct pci_dev *dev)
{
        u8 lat;

        /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
        if (pci_is_pcie(dev))
                return;

        pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
        if (lat < 16)
                /* too low (some BIOSes forget to program it): raise to 64,
                 * capped at the platform maximum */
                lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
        else if (lat > pcibios_max_latency)
                lat = pcibios_max_latency;
        else
                return;
        dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
        pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void pci_set_master(struct pci_dev *dev)
{
        __pci_set_master(dev, true);
        pcibios_set_master(dev);
}

/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
void pci_clear_master(struct pci_dev *dev)
{
        __pci_set_master(dev, false);
}

/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_set_cacheline_size(struct pci_dev *dev)
{
        u8 cacheline_size;

        if (!pci_cache_line_size)
                return -EINVAL;

        /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
           equal to or multiple of the right value. */
        pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
        if (cacheline_size >= pci_cache_line_size &&
            (cacheline_size % pci_cache_line_size) == 0)
                return 0;

        /* Write the correct value. */
        pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
        /* Read it back; the register is read-only on some devices. */
        pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
        if (cacheline_size == pci_cache_line_size)
                return 0;

        dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
                   "supported\n", pci_cache_line_size << 2);

        return -EINVAL;
}
EXPORT_SYMBOL_GPL(pci_set_cacheline_size);

#ifdef PCI_DISABLE_MWI
/* MWI disabled at build time: all three entry points are no-op successes */
int pci_set_mwi(struct pci_dev *dev)
{
        return 0;
}

int pci_try_set_mwi(struct pci_dev *dev)
{
        return 0;
}

void pci_clear_mwi(struct pci_dev *dev)
{
}

#else

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
        int rc;
        u16 cmd;

        /* MWI requires a valid cache line size to be programmed first */
        rc = pci_set_cacheline_size(dev);
        if (rc)
                return rc;

        pci_read_config_word(dev, PCI_COMMAND, &cmd);
        if (! (cmd & PCI_COMMAND_INVALIDATE)) {
                dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
                cmd |= PCI_COMMAND_INVALIDATE;
                pci_write_config_word(dev, PCI_COMMAND, cmd);
        }

        return 0;
}

/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2879 */ 2880 int pci_try_set_mwi(struct pci_dev *dev) 2881 { 2882 int rc = pci_set_mwi(dev); 2883 return rc; 2884 } 2885 2886 /** 2887 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev 2888 * @dev: the PCI device to disable 2889 * 2890 * Disables PCI Memory-Write-Invalidate transaction on the device 2891 */ 2892 void 2893 pci_clear_mwi(struct pci_dev *dev) 2894 { 2895 u16 cmd; 2896 2897 pci_read_config_word(dev, PCI_COMMAND, &cmd); 2898 if (cmd & PCI_COMMAND_INVALIDATE) { 2899 cmd &= ~PCI_COMMAND_INVALIDATE; 2900 pci_write_config_word(dev, PCI_COMMAND, cmd); 2901 } 2902 } 2903 #endif /* ! PCI_DISABLE_MWI */ 2904 2905 /** 2906 * pci_intx - enables/disables PCI INTx for device dev 2907 * @pdev: the PCI device to operate on 2908 * @enable: boolean: whether to enable or disable PCI INTx 2909 * 2910 * Enables/disables PCI INTx for device dev 2911 */ 2912 void 2913 pci_intx(struct pci_dev *pdev, int enable) 2914 { 2915 u16 pci_command, new; 2916 2917 pci_read_config_word(pdev, PCI_COMMAND, &pci_command); 2918 2919 if (enable) { 2920 new = pci_command & ~PCI_COMMAND_INTX_DISABLE; 2921 } else { 2922 new = pci_command | PCI_COMMAND_INTX_DISABLE; 2923 } 2924 2925 if (new != pci_command) { 2926 struct pci_devres *dr; 2927 2928 pci_write_config_word(pdev, PCI_COMMAND, new); 2929 2930 dr = find_pci_dr(pdev); 2931 if (dr && !dr->restore_intx) { 2932 dr->restore_intx = 1; 2933 dr->orig_intx = !enable; 2934 } 2935 } 2936 } 2937 2938 /** 2939 * pci_intx_mask_supported - probe for INTx masking support 2940 * @dev: the PCI device to operate on 2941 * 2942 * Check if the device dev support INTx masking via the config space 2943 * command word. 
 */
bool pci_intx_mask_supported(struct pci_dev *dev)
{
        bool mask_supported = false;
        u16 orig, new;

        if (dev->broken_intx_masking)
                return false;

        pci_cfg_access_lock(dev);

        /* Toggle the INTx-disable bit and see whether it sticks */
        pci_read_config_word(dev, PCI_COMMAND, &orig);
        pci_write_config_word(dev, PCI_COMMAND,
                              orig ^ PCI_COMMAND_INTX_DISABLE);
        pci_read_config_word(dev, PCI_COMMAND, &new);

        /*
         * There's no way to protect against hardware bugs or detect them
         * reliably, but as long as we know what the value should be, let's
         * go ahead and check it.
         */
        if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
                dev_err(&dev->dev, "Command register changed from "
                        "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
        } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
                mask_supported = true;
                /* Restore the original command word */
                pci_write_config_word(dev, PCI_COMMAND, orig);
        }

        pci_cfg_access_unlock(dev);
        return mask_supported;
}
EXPORT_SYMBOL_GPL(pci_intx_mask_supported);

/*
 * Atomically check the INTx status and, when it matches the requested
 * direction (@mask), flip the INTx-disable bit. Returns true if the mask
 * was updated, false if the pending-interrupt state didn't match.
 */
static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
{
        struct pci_bus *bus = dev->bus;
        bool mask_updated = true;
        u32 cmd_status_dword;
        u16 origcmd, newcmd;
        unsigned long flags;
        bool irq_pending;

        /*
         * We do a single dword read to retrieve both command and status.
         * Document assumptions that make this possible.
         */
        BUILD_BUG_ON(PCI_COMMAND % 4);
        BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);

        raw_spin_lock_irqsave(&pci_lock, flags);

        bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);

        /* Status word occupies the upper 16 bits of the dword */
        irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;

        /*
         * Check interrupt status register to see whether our device
         * triggered the interrupt (when masking) or the next IRQ is
         * already pending (when unmasking).
         */
        if (mask != irq_pending) {
                mask_updated = false;
                goto done;
        }

        origcmd = cmd_status_dword;
        newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
        if (mask)
                newcmd |= PCI_COMMAND_INTX_DISABLE;
        if (newcmd != origcmd)
                bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);

done:
        raw_spin_unlock_irqrestore(&pci_lock, flags);

        return mask_updated;
}

/**
 * pci_check_and_mask_intx - mask INTx on pending interrupt
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, mask it and
 * return true in that case. False is returned if no interrupt was
 * pending.
 */
bool pci_check_and_mask_intx(struct pci_dev *dev)
{
        return pci_check_and_set_intx_mask(dev, true);
}
EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);

/**
 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, unmask it if not
 * and return true. False is returned and the mask remains active if
 * there was still an interrupt pending.
 */
bool pci_check_and_unmask_intx(struct pci_dev *dev)
{
        return pci_check_and_set_intx_mask(dev, false);
}
EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);

/**
 * pci_msi_off - disables any msi or msix capabilities
 * @dev: the PCI device to operate on
 *
 * If you want to use msi see pci_enable_msi and friends.
 * This is a lower level primitive that allows us to disable
 * msi operation at the device level.
 */
void pci_msi_off(struct pci_dev *dev)
{
        int pos;
        u16 control;

        /* Clear the enable bit in the MSI capability, if present */
        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
        if (pos) {
                pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
                control &= ~PCI_MSI_FLAGS_ENABLE;
                pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
        }
        /* Likewise for MSI-X */
        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
        if (pos) {
                pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
                control &= ~PCI_MSIX_FLAGS_ENABLE;
                pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
        }
}
EXPORT_SYMBOL_GPL(pci_msi_off);

int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
        return dma_set_max_seg_size(&dev->dev, size);
}
EXPORT_SYMBOL(pci_set_dma_max_seg_size);

int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
{
        return dma_set_seg_boundary(&dev->dev, mask);
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);

/* PCIe Function Level Reset; @probe only checks whether FLR is possible */
static int pcie_flr(struct pci_dev *dev, int probe)
{
        int i;
        u32 cap;
        u16 status;

        pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
        if (!(cap & PCI_EXP_DEVCAP_FLR))
                return -ENOTTY;

        if (probe)
                return 0;

        /* Wait for Transaction Pending bit to clear: 100/200/400 ms backoff */
        for (i = 0; i < 4; i++) {
                if (i)
                        msleep((1 << (i - 1)) * 100);

                pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
                if (!(status & PCI_EXP_DEVSTA_TRPND))
                        goto clear;
        }

        dev_err(&dev->dev, "transaction is not cleared; "
                        "proceeding with reset anyway\n");

clear:
        pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);

        msleep(100);

        return 0;
}

/* Advanced Features FLR (conventional PCI); mirrors pcie_flr() above */
static int pci_af_flr(struct pci_dev *dev, int probe)
{
        int i;
        int pos;
        u8 cap;
        u8 status;

        pos = pci_find_capability(dev, PCI_CAP_ID_AF);
        if (!pos)
                return -ENOTTY;

        pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
        if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
                return -ENOTTY;

        if (probe)
                return 0;

        /* Wait for Transaction Pending bit to clear: 100/200/400 ms backoff */
        for (i = 0; i < 4; i++) {
                if (i)
                        msleep((1 << (i - 1)) * 100);

                pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
                if (!(status & PCI_AF_STATUS_TP))
                        goto clear;
        }

        dev_err(&dev->dev, "transaction is not cleared; "
                        "proceeding with reset anyway\n");

clear:
        pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
        msleep(100);

        return 0;
}

/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: If set, only check if the device can be reset this way.
 *
 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0. If that's the case and the device is not in a low-power state
 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3_delay field has a different value).
 * Moreover, only devices in D0 can be reset by this function.
 */
static int pci_pm_reset(struct pci_dev *dev, int probe)
{
        u16 csr;

        if (!dev->pm_cap)
                return -ENOTTY;

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
        if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
                return -ENOTTY;

        if (probe)
                return 0;

        if (dev->current_state != PCI_D0)
                return -EINVAL;

        /* D0 -> D3hot ... */
        csr &= ~PCI_PM_CTRL_STATE_MASK;
        csr |= PCI_D3hot;
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
        pci_dev_d3_sleep(dev);

        /* ... and back to D0, which resets the device */
        csr &= ~PCI_PM_CTRL_STATE_MASK;
        csr |= PCI_D0;
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
        pci_dev_d3_sleep(dev);

        return 0;
}

/*
 * Reset @dev by pulsing the secondary bus reset on its parent bridge.
 * Only usable when @dev is the sole device on its bus and is not itself
 * a bridge. @probe only reports whether this method is applicable.
 */
static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
        u16 ctrl;
        struct pci_dev *pdev;

        if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
                return -ENOTTY;

        /* A bus reset hits every device on the bus; require @dev be alone */
        list_for_each_entry(pdev, &dev->bus->devices, bus_list)
                if (pdev != dev)
                        return -ENOTTY;

        if (probe)
                return 0;

        pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
        ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
        pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
        msleep(100);

        ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
        pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
        msleep(100);

        return 0;
}

/* Try each reset method in turn, from most to least specific */
static int __pci_dev_reset(struct pci_dev *dev, int probe)
{
        int rc;

        might_sleep();

        rc = pci_dev_specific_reset(dev, probe);
        if (rc != -ENOTTY)
                goto done;

        rc = pcie_flr(dev, probe);
        if (rc != -ENOTTY)
                goto done;

        rc = pci_af_flr(dev, probe);
        if (rc != -ENOTTY)
                goto done;

        rc = pci_pm_reset(dev, probe);
        if (rc != -ENOTTY)
                goto done;

        rc = pci_parent_bus_reset(dev, probe);
done:
        return rc;
}

/* As __pci_dev_reset(), but takes the config-access and device locks
 * around a real (non-probe) reset. */
static int pci_dev_reset(struct pci_dev *dev, int probe)
{
        int rc;

        if (!probe) {
                pci_cfg_access_lock(dev);
                /* block PM suspend, driver probe, etc. */
                device_lock(&dev->dev);
        }

        rc = __pci_dev_reset(dev, probe);

        if (!probe) {
                device_unlock(&dev->dev);
                pci_cfg_access_unlock(dev);
        }
        return rc;
}
/**
 * __pci_reset_function - reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device. The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int __pci_reset_function(struct pci_dev *dev)
{
        return pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function);

/**
 * __pci_reset_function_locked - reset a PCI device function while holding
 * the @dev mutex lock.
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device. The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused and the caller is holding
 * the device mutex lock when this function is called.
3314 * Resetting the device will make the contents of PCI configuration space 3315 * random, so any caller of this must be prepared to reinitialise the 3316 * device including MSI, bus mastering, BARs, decoding IO and memory spaces, 3317 * etc. 3318 * 3319 * Returns 0 if the device function was successfully reset or negative if the 3320 * device doesn't support resetting a single function. 3321 */ 3322 int __pci_reset_function_locked(struct pci_dev *dev) 3323 { 3324 return __pci_dev_reset(dev, 0); 3325 } 3326 EXPORT_SYMBOL_GPL(__pci_reset_function_locked); 3327 3328 /** 3329 * pci_probe_reset_function - check whether the device can be safely reset 3330 * @dev: PCI device to reset 3331 * 3332 * Some devices allow an individual function to be reset without affecting 3333 * other functions in the same device. The PCI device must be responsive 3334 * to PCI config space in order to use this function. 3335 * 3336 * Returns 0 if the device function can be reset or negative if the 3337 * device doesn't support resetting a single function. 3338 */ 3339 int pci_probe_reset_function(struct pci_dev *dev) 3340 { 3341 return pci_dev_reset(dev, 1); 3342 } 3343 3344 /** 3345 * pci_reset_function - quiesce and reset a PCI device function 3346 * @dev: PCI device to reset 3347 * 3348 * Some devices allow an individual function to be reset without affecting 3349 * other functions in the same device. The PCI device must be responsive 3350 * to PCI config space in order to use this function. 3351 * 3352 * This function does not just reset the PCI portion of a device, but 3353 * clears all the state associated with the device. This function differs 3354 * from __pci_reset_function in that it saves and restores device state 3355 * over the reset. 3356 * 3357 * Returns 0 if the device function was successfully reset or negative if the 3358 * device doesn't support resetting a single function. 
3359 */ 3360 int pci_reset_function(struct pci_dev *dev) 3361 { 3362 int rc; 3363 3364 rc = pci_dev_reset(dev, 1); 3365 if (rc) 3366 return rc; 3367 3368 pci_save_state(dev); 3369 3370 /* 3371 * both INTx and MSI are disabled after the Interrupt Disable bit 3372 * is set and the Bus Master bit is cleared. 3373 */ 3374 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); 3375 3376 rc = pci_dev_reset(dev, 0); 3377 3378 pci_restore_state(dev); 3379 3380 return rc; 3381 } 3382 EXPORT_SYMBOL_GPL(pci_reset_function); 3383 3384 /** 3385 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count 3386 * @dev: PCI device to query 3387 * 3388 * Returns mmrbc: maximum designed memory read count in bytes 3389 * or appropriate error value. 3390 */ 3391 int pcix_get_max_mmrbc(struct pci_dev *dev) 3392 { 3393 int cap; 3394 u32 stat; 3395 3396 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 3397 if (!cap) 3398 return -EINVAL; 3399 3400 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) 3401 return -EINVAL; 3402 3403 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21); 3404 } 3405 EXPORT_SYMBOL(pcix_get_max_mmrbc); 3406 3407 /** 3408 * pcix_get_mmrbc - get PCI-X maximum memory read byte count 3409 * @dev: PCI device to query 3410 * 3411 * Returns mmrbc: maximum memory read count in bytes 3412 * or appropriate error value. 
3413 */ 3414 int pcix_get_mmrbc(struct pci_dev *dev) 3415 { 3416 int cap; 3417 u16 cmd; 3418 3419 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 3420 if (!cap) 3421 return -EINVAL; 3422 3423 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) 3424 return -EINVAL; 3425 3426 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2); 3427 } 3428 EXPORT_SYMBOL(pcix_get_mmrbc); 3429 3430 /** 3431 * pcix_set_mmrbc - set PCI-X maximum memory read byte count 3432 * @dev: PCI device to query 3433 * @mmrbc: maximum memory read count in bytes 3434 * valid values are 512, 1024, 2048, 4096 3435 * 3436 * If possible sets maximum memory read byte count, some bridges have erratas 3437 * that prevent this. 3438 */ 3439 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) 3440 { 3441 int cap; 3442 u32 stat, v, o; 3443 u16 cmd; 3444 3445 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc)) 3446 return -EINVAL; 3447 3448 v = ffs(mmrbc) - 10; 3449 3450 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 3451 if (!cap) 3452 return -EINVAL; 3453 3454 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) 3455 return -EINVAL; 3456 3457 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21) 3458 return -E2BIG; 3459 3460 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) 3461 return -EINVAL; 3462 3463 o = (cmd & PCI_X_CMD_MAX_READ) >> 2; 3464 if (o != v) { 3465 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC)) 3466 return -EIO; 3467 3468 cmd &= ~PCI_X_CMD_MAX_READ; 3469 cmd |= v << 2; 3470 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd)) 3471 return -EIO; 3472 } 3473 return 0; 3474 } 3475 EXPORT_SYMBOL(pcix_set_mmrbc); 3476 3477 /** 3478 * pcie_get_readrq - get PCI Express read request size 3479 * @dev: PCI device to query 3480 * 3481 * Returns maximum memory read request in bytes 3482 * or appropriate error value. 
3483 */ 3484 int pcie_get_readrq(struct pci_dev *dev) 3485 { 3486 u16 ctl; 3487 3488 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); 3489 3490 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); 3491 } 3492 EXPORT_SYMBOL(pcie_get_readrq); 3493 3494 /** 3495 * pcie_set_readrq - set PCI Express maximum memory read request 3496 * @dev: PCI device to query 3497 * @rq: maximum memory read count in bytes 3498 * valid values are 128, 256, 512, 1024, 2048, 4096 3499 * 3500 * If possible sets maximum memory read request in bytes 3501 */ 3502 int pcie_set_readrq(struct pci_dev *dev, int rq) 3503 { 3504 u16 v; 3505 3506 if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) 3507 return -EINVAL; 3508 3509 /* 3510 * If using the "performance" PCIe config, we clamp the 3511 * read rq size to the max packet size to prevent the 3512 * host bridge generating requests larger than we can 3513 * cope with 3514 */ 3515 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { 3516 int mps = pcie_get_mps(dev); 3517 3518 if (mps < 0) 3519 return mps; 3520 if (mps < rq) 3521 rq = mps; 3522 } 3523 3524 v = (ffs(rq) - 8) << 12; 3525 3526 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, 3527 PCI_EXP_DEVCTL_READRQ, v); 3528 } 3529 EXPORT_SYMBOL(pcie_set_readrq); 3530 3531 /** 3532 * pcie_get_mps - get PCI Express maximum payload size 3533 * @dev: PCI device to query 3534 * 3535 * Returns maximum payload size in bytes 3536 * or appropriate error value. 
3537 */ 3538 int pcie_get_mps(struct pci_dev *dev) 3539 { 3540 u16 ctl; 3541 3542 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); 3543 3544 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); 3545 } 3546 3547 /** 3548 * pcie_set_mps - set PCI Express maximum payload size 3549 * @dev: PCI device to query 3550 * @mps: maximum payload size in bytes 3551 * valid values are 128, 256, 512, 1024, 2048, 4096 3552 * 3553 * If possible sets maximum payload size 3554 */ 3555 int pcie_set_mps(struct pci_dev *dev, int mps) 3556 { 3557 u16 v; 3558 3559 if (mps < 128 || mps > 4096 || !is_power_of_2(mps)) 3560 return -EINVAL; 3561 3562 v = ffs(mps) - 8; 3563 if (v > dev->pcie_mpss) 3564 return -EINVAL; 3565 v <<= 5; 3566 3567 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, 3568 PCI_EXP_DEVCTL_PAYLOAD, v); 3569 } 3570 3571 /** 3572 * pci_select_bars - Make BAR mask from the type of resource 3573 * @dev: the PCI device for which BAR mask is made 3574 * @flags: resource type mask to be selected 3575 * 3576 * This helper routine makes bar mask from the type of resource. 3577 */ 3578 int pci_select_bars(struct pci_dev *dev, unsigned long flags) 3579 { 3580 int i, bars = 0; 3581 for (i = 0; i < PCI_NUM_RESOURCES; i++) 3582 if (pci_resource_flags(dev, i) & flags) 3583 bars |= (1 << i); 3584 return bars; 3585 } 3586 3587 /** 3588 * pci_resource_bar - get position of the BAR associated with a resource 3589 * @dev: the PCI device 3590 * @resno: the resource number 3591 * @type: the BAR type to be filled in 3592 * 3593 * Returns BAR position in config space, or 0 if the BAR is invalid. 
3594 */ 3595 int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) 3596 { 3597 int reg; 3598 3599 if (resno < PCI_ROM_RESOURCE) { 3600 *type = pci_bar_unknown; 3601 return PCI_BASE_ADDRESS_0 + 4 * resno; 3602 } else if (resno == PCI_ROM_RESOURCE) { 3603 *type = pci_bar_mem32; 3604 return dev->rom_base_reg; 3605 } else if (resno < PCI_BRIDGE_RESOURCES) { 3606 /* device specific resource */ 3607 reg = pci_iov_resource_bar(dev, resno, type); 3608 if (reg) 3609 return reg; 3610 } 3611 3612 dev_err(&dev->dev, "BAR %d: invalid resource\n", resno); 3613 return 0; 3614 } 3615 3616 /* Some architectures require additional programming to enable VGA */ 3617 static arch_set_vga_state_t arch_set_vga_state; 3618 3619 void __init pci_register_set_vga_state(arch_set_vga_state_t func) 3620 { 3621 arch_set_vga_state = func; /* NULL disables */ 3622 } 3623 3624 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode, 3625 unsigned int command_bits, u32 flags) 3626 { 3627 if (arch_set_vga_state) 3628 return arch_set_vga_state(dev, decode, command_bits, 3629 flags); 3630 return 0; 3631 } 3632 3633 /** 3634 * pci_set_vga_state - set VGA decode state on device and parents if requested 3635 * @dev: the PCI device 3636 * @decode: true = enable decoding, false = disable decoding 3637 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY 3638 * @flags: traverse ancestors and change bridges 3639 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE 3640 */ 3641 int pci_set_vga_state(struct pci_dev *dev, bool decode, 3642 unsigned int command_bits, u32 flags) 3643 { 3644 struct pci_bus *bus; 3645 struct pci_dev *bridge; 3646 u16 cmd; 3647 int rc; 3648 3649 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY))); 3650 3651 /* ARCH specific VGA enables */ 3652 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags); 3653 if (rc) 3654 return rc; 3655 3656 if (flags & PCI_VGA_STATE_CHANGE_DECODES) { 3657 
pci_read_config_word(dev, PCI_COMMAND, &cmd); 3658 if (decode == true) 3659 cmd |= command_bits; 3660 else 3661 cmd &= ~command_bits; 3662 pci_write_config_word(dev, PCI_COMMAND, cmd); 3663 } 3664 3665 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE)) 3666 return 0; 3667 3668 bus = dev->bus; 3669 while (bus) { 3670 bridge = bus->self; 3671 if (bridge) { 3672 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, 3673 &cmd); 3674 if (decode == true) 3675 cmd |= PCI_BRIDGE_CTL_VGA; 3676 else 3677 cmd &= ~PCI_BRIDGE_CTL_VGA; 3678 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, 3679 cmd); 3680 } 3681 bus = bus->parent; 3682 } 3683 return 0; 3684 } 3685 3686 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE 3687 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; 3688 static DEFINE_SPINLOCK(resource_alignment_lock); 3689 3690 /** 3691 * pci_specified_resource_alignment - get resource alignment specified by user. 3692 * @dev: the PCI device to get 3693 * 3694 * RETURNS: Resource alignment if it is specified. 3695 * Zero if it is not specified. 
3696 */ 3697 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev) 3698 { 3699 int seg, bus, slot, func, align_order, count; 3700 resource_size_t align = 0; 3701 char *p; 3702 3703 spin_lock(&resource_alignment_lock); 3704 p = resource_alignment_param; 3705 while (*p) { 3706 count = 0; 3707 if (sscanf(p, "%d%n", &align_order, &count) == 1 && 3708 p[count] == '@') { 3709 p += count + 1; 3710 } else { 3711 align_order = -1; 3712 } 3713 if (sscanf(p, "%x:%x:%x.%x%n", 3714 &seg, &bus, &slot, &func, &count) != 4) { 3715 seg = 0; 3716 if (sscanf(p, "%x:%x.%x%n", 3717 &bus, &slot, &func, &count) != 3) { 3718 /* Invalid format */ 3719 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n", 3720 p); 3721 break; 3722 } 3723 } 3724 p += count; 3725 if (seg == pci_domain_nr(dev->bus) && 3726 bus == dev->bus->number && 3727 slot == PCI_SLOT(dev->devfn) && 3728 func == PCI_FUNC(dev->devfn)) { 3729 if (align_order == -1) { 3730 align = PAGE_SIZE; 3731 } else { 3732 align = 1 << align_order; 3733 } 3734 /* Found */ 3735 break; 3736 } 3737 if (*p != ';' && *p != ',') { 3738 /* End of param or invalid format */ 3739 break; 3740 } 3741 p++; 3742 } 3743 spin_unlock(&resource_alignment_lock); 3744 return align; 3745 } 3746 3747 /* 3748 * This function disables memory decoding and releases memory resources 3749 * of the device specified by kernel's boot parameter 'pci=resource_alignment='. 3750 * It also rounds up size to specified alignment. 3751 * Later on, the kernel will assign page-aligned memory resource back 3752 * to the device. 
3753 */ 3754 void pci_reassigndev_resource_alignment(struct pci_dev *dev) 3755 { 3756 int i; 3757 struct resource *r; 3758 resource_size_t align, size; 3759 u16 command; 3760 3761 /* check if specified PCI is target device to reassign */ 3762 align = pci_specified_resource_alignment(dev); 3763 if (!align) 3764 return; 3765 3766 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL && 3767 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) { 3768 dev_warn(&dev->dev, 3769 "Can't reassign resources to host bridge.\n"); 3770 return; 3771 } 3772 3773 dev_info(&dev->dev, 3774 "Disabling memory decoding and releasing memory resources.\n"); 3775 pci_read_config_word(dev, PCI_COMMAND, &command); 3776 command &= ~PCI_COMMAND_MEMORY; 3777 pci_write_config_word(dev, PCI_COMMAND, command); 3778 3779 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) { 3780 r = &dev->resource[i]; 3781 if (!(r->flags & IORESOURCE_MEM)) 3782 continue; 3783 size = resource_size(r); 3784 if (size < align) { 3785 size = align; 3786 dev_info(&dev->dev, 3787 "Rounding up size of resource #%d to %#llx.\n", 3788 i, (unsigned long long)size); 3789 } 3790 r->end = size - 1; 3791 r->start = 0; 3792 } 3793 /* Need to disable bridge's resource window, 3794 * to enable the kernel to reassign new resource 3795 * window later on. 
3796 */ 3797 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && 3798 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { 3799 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { 3800 r = &dev->resource[i]; 3801 if (!(r->flags & IORESOURCE_MEM)) 3802 continue; 3803 r->end = resource_size(r) - 1; 3804 r->start = 0; 3805 } 3806 pci_disable_bridge_window(dev); 3807 } 3808 } 3809 3810 static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count) 3811 { 3812 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1) 3813 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1; 3814 spin_lock(&resource_alignment_lock); 3815 strncpy(resource_alignment_param, buf, count); 3816 resource_alignment_param[count] = '\0'; 3817 spin_unlock(&resource_alignment_lock); 3818 return count; 3819 } 3820 3821 static ssize_t pci_get_resource_alignment_param(char *buf, size_t size) 3822 { 3823 size_t count; 3824 spin_lock(&resource_alignment_lock); 3825 count = snprintf(buf, size, "%s", resource_alignment_param); 3826 spin_unlock(&resource_alignment_lock); 3827 return count; 3828 } 3829 3830 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf) 3831 { 3832 return pci_get_resource_alignment_param(buf, PAGE_SIZE); 3833 } 3834 3835 static ssize_t pci_resource_alignment_store(struct bus_type *bus, 3836 const char *buf, size_t count) 3837 { 3838 return pci_set_resource_alignment_param(buf, count); 3839 } 3840 3841 BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show, 3842 pci_resource_alignment_store); 3843 3844 static int __init pci_resource_alignment_sysfs_init(void) 3845 { 3846 return bus_create_file(&pci_bus_type, 3847 &bus_attr_resource_alignment); 3848 } 3849 3850 late_initcall(pci_resource_alignment_sysfs_init); 3851 3852 static void pci_no_domains(void) 3853 { 3854 #ifdef CONFIG_PCI_DOMAINS 3855 pci_domains_supported = 0; 3856 #endif 3857 } 3858 3859 /** 3860 * pci_ext_cfg_avail - can we access extended PCI config space? 
3861 * 3862 * Returns 1 if we can access PCI extended config space (offsets 3863 * greater than 0xff). This is the default implementation. Architecture 3864 * implementations can override this. 3865 */ 3866 int __weak pci_ext_cfg_avail(void) 3867 { 3868 return 1; 3869 } 3870 3871 void __weak pci_fixup_cardbus(struct pci_bus *bus) 3872 { 3873 } 3874 EXPORT_SYMBOL(pci_fixup_cardbus); 3875 3876 static int __init pci_setup(char *str) 3877 { 3878 while (str) { 3879 char *k = strchr(str, ','); 3880 if (k) 3881 *k++ = 0; 3882 if (*str && (str = pcibios_setup(str)) && *str) { 3883 if (!strcmp(str, "nomsi")) { 3884 pci_no_msi(); 3885 } else if (!strcmp(str, "noaer")) { 3886 pci_no_aer(); 3887 } else if (!strncmp(str, "realloc=", 8)) { 3888 pci_realloc_get_opt(str + 8); 3889 } else if (!strncmp(str, "realloc", 7)) { 3890 pci_realloc_get_opt("on"); 3891 } else if (!strcmp(str, "nodomains")) { 3892 pci_no_domains(); 3893 } else if (!strncmp(str, "noari", 5)) { 3894 pcie_ari_disabled = true; 3895 } else if (!strncmp(str, "cbiosize=", 9)) { 3896 pci_cardbus_io_size = memparse(str + 9, &str); 3897 } else if (!strncmp(str, "cbmemsize=", 10)) { 3898 pci_cardbus_mem_size = memparse(str + 10, &str); 3899 } else if (!strncmp(str, "resource_alignment=", 19)) { 3900 pci_set_resource_alignment_param(str + 19, 3901 strlen(str + 19)); 3902 } else if (!strncmp(str, "ecrc=", 5)) { 3903 pcie_ecrc_get_policy(str + 5); 3904 } else if (!strncmp(str, "hpiosize=", 9)) { 3905 pci_hotplug_io_size = memparse(str + 9, &str); 3906 } else if (!strncmp(str, "hpmemsize=", 10)) { 3907 pci_hotplug_mem_size = memparse(str + 10, &str); 3908 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) { 3909 pcie_bus_config = PCIE_BUS_TUNE_OFF; 3910 } else if (!strncmp(str, "pcie_bus_safe", 13)) { 3911 pcie_bus_config = PCIE_BUS_SAFE; 3912 } else if (!strncmp(str, "pcie_bus_perf", 13)) { 3913 pcie_bus_config = PCIE_BUS_PERFORMANCE; 3914 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) { 3915 pcie_bus_config = 
PCIE_BUS_PEER2PEER; 3916 } else if (!strncmp(str, "pcie_scan_all", 13)) { 3917 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS); 3918 } else { 3919 printk(KERN_ERR "PCI: Unknown option `%s'\n", 3920 str); 3921 } 3922 } 3923 str = k; 3924 } 3925 return 0; 3926 } 3927 early_param("pci", pci_setup); 3928 3929 EXPORT_SYMBOL(pci_reenable_device); 3930 EXPORT_SYMBOL(pci_enable_device_io); 3931 EXPORT_SYMBOL(pci_enable_device_mem); 3932 EXPORT_SYMBOL(pci_enable_device); 3933 EXPORT_SYMBOL(pcim_enable_device); 3934 EXPORT_SYMBOL(pcim_pin_device); 3935 EXPORT_SYMBOL(pci_disable_device); 3936 EXPORT_SYMBOL(pci_find_capability); 3937 EXPORT_SYMBOL(pci_bus_find_capability); 3938 EXPORT_SYMBOL(pci_release_regions); 3939 EXPORT_SYMBOL(pci_request_regions); 3940 EXPORT_SYMBOL(pci_request_regions_exclusive); 3941 EXPORT_SYMBOL(pci_release_region); 3942 EXPORT_SYMBOL(pci_request_region); 3943 EXPORT_SYMBOL(pci_request_region_exclusive); 3944 EXPORT_SYMBOL(pci_release_selected_regions); 3945 EXPORT_SYMBOL(pci_request_selected_regions); 3946 EXPORT_SYMBOL(pci_request_selected_regions_exclusive); 3947 EXPORT_SYMBOL(pci_set_master); 3948 EXPORT_SYMBOL(pci_clear_master); 3949 EXPORT_SYMBOL(pci_set_mwi); 3950 EXPORT_SYMBOL(pci_try_set_mwi); 3951 EXPORT_SYMBOL(pci_clear_mwi); 3952 EXPORT_SYMBOL_GPL(pci_intx); 3953 EXPORT_SYMBOL(pci_assign_resource); 3954 EXPORT_SYMBOL(pci_find_parent_resource); 3955 EXPORT_SYMBOL(pci_select_bars); 3956 3957 EXPORT_SYMBOL(pci_set_power_state); 3958 EXPORT_SYMBOL(pci_save_state); 3959 EXPORT_SYMBOL(pci_restore_state); 3960 EXPORT_SYMBOL(pci_pme_capable); 3961 EXPORT_SYMBOL(pci_pme_active); 3962 EXPORT_SYMBOL(pci_wake_from_d3); 3963 EXPORT_SYMBOL(pci_target_state); 3964 EXPORT_SYMBOL(pci_prepare_to_sleep); 3965 EXPORT_SYMBOL(pci_back_from_sleep); 3966 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); 3967