1 /* 2 * PCI Bus Services, see include/linux/pci.h for further explanation. 3 * 4 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter, 5 * David Mosberger-Tang 6 * 7 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz> 8 */ 9 10 #include <linux/kernel.h> 11 #include <linux/delay.h> 12 #include <linux/init.h> 13 #include <linux/pci.h> 14 #include <linux/pm.h> 15 #include <linux/slab.h> 16 #include <linux/module.h> 17 #include <linux/spinlock.h> 18 #include <linux/string.h> 19 #include <linux/log2.h> 20 #include <linux/pci-aspm.h> 21 #include <linux/pm_wakeup.h> 22 #include <linux/interrupt.h> 23 #include <linux/device.h> 24 #include <linux/pm_runtime.h> 25 #include <asm-generic/pci-bridge.h> 26 #include <asm/setup.h> 27 #include "pci.h" 28 29 const char *pci_power_names[] = { 30 "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown", 31 }; 32 EXPORT_SYMBOL_GPL(pci_power_names); 33 34 int isa_dma_bridge_buggy; 35 EXPORT_SYMBOL(isa_dma_bridge_buggy); 36 37 int pci_pci_problems; 38 EXPORT_SYMBOL(pci_pci_problems); 39 40 unsigned int pci_pm_d3_delay; 41 42 static void pci_pme_list_scan(struct work_struct *work); 43 44 static LIST_HEAD(pci_pme_list); 45 static DEFINE_MUTEX(pci_pme_list_mutex); 46 static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan); 47 48 struct pci_pme_device { 49 struct list_head list; 50 struct pci_dev *dev; 51 }; 52 53 #define PME_TIMEOUT 1000 /* How long between PME checks */ 54 55 static void pci_dev_d3_sleep(struct pci_dev *dev) 56 { 57 unsigned int delay = dev->d3_delay; 58 59 if (delay < pci_pm_d3_delay) 60 delay = pci_pm_d3_delay; 61 62 msleep(delay); 63 } 64 65 #ifdef CONFIG_PCI_DOMAINS 66 int pci_domains_supported = 1; 67 #endif 68 69 #define DEFAULT_CARDBUS_IO_SIZE (256) 70 #define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024) 71 /* pci=cbmemsize=nnM,cbiosize=nn can override this */ 72 unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE; 73 unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; 74 75 #define DEFAULT_HOTPLUG_IO_SIZE (256) 76 #define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024) 77 /* pci=hpmemsize=nnM,hpiosize=nn can override this */ 78 unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; 79 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; 80 81 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF; 82 83 /* 84 * The default CLS is used if arch didn't set CLS explicitly and not 85 * all pci devices agree on the same value. Arch can override either 86 * the dfl or actual value as it sees fit. Don't forget this is 87 * measured in 32-bit words, not bytes. 88 */ 89 u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2; 90 u8 pci_cache_line_size; 91 92 /* 93 * If we set up a device for bus mastering, we need to check the latency 94 * timer as certain BIOSes forget to set it properly. 95 */ 96 unsigned int pcibios_max_latency = 255; 97 98 /* If set, the PCIe ARI capability will not be used. */ 99 static bool pcie_ari_disabled; 100 101 /** 102 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children 103 * @bus: pointer to PCI bus structure to search 104 * 105 * Given a PCI bus, returns the highest PCI bus number present in the set 106 * including the given PCI bus and its list of child PCI buses. 
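 *
 * Illustrative use (a sketch; "bridge" is a hypothetical caller's device):
 * when sizing a bridge's bus range, the subordinate bus number must cover
 * every child bus, e.g.:
 *
 *	u8 subordinate = pci_bus_max_busnr(bridge->subordinate);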
107 */ 108 unsigned char pci_bus_max_busnr(struct pci_bus* bus) 109 { 110 struct list_head *tmp; 111 unsigned char max, n; 112 113 max = bus->busn_res.end; 114 list_for_each(tmp, &bus->children) { 115 n = pci_bus_max_busnr(pci_bus_b(tmp)); 116 if(n > max) 117 max = n; 118 } 119 return max; 120 } 121 EXPORT_SYMBOL_GPL(pci_bus_max_busnr); 122 123 #ifdef CONFIG_HAS_IOMEM 124 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) 125 { 126 /* 127 * Make sure the BAR is actually a memory resource, not an IO resource 128 */ 129 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { 130 WARN_ON(1); 131 return NULL; 132 } 133 return ioremap_nocache(pci_resource_start(pdev, bar), 134 pci_resource_len(pdev, bar)); 135 } 136 EXPORT_SYMBOL_GPL(pci_ioremap_bar); 137 #endif 138 139 #define PCI_FIND_CAP_TTL 48 140 141 static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn, 142 u8 pos, int cap, int *ttl) 143 { 144 u8 id; 145 146 while ((*ttl)--) { 147 pci_bus_read_config_byte(bus, devfn, pos, &pos); 148 if (pos < 0x40) 149 break; 150 pos &= ~3; 151 pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID, 152 &id); 153 if (id == 0xff) 154 break; 155 if (id == cap) 156 return pos; 157 pos += PCI_CAP_LIST_NEXT; 158 } 159 return 0; 160 } 161 162 static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn, 163 u8 pos, int cap) 164 { 165 int ttl = PCI_FIND_CAP_TTL; 166 167 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); 168 } 169 170 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap) 171 { 172 return __pci_find_next_cap(dev->bus, dev->devfn, 173 pos + PCI_CAP_LIST_NEXT, cap); 174 } 175 EXPORT_SYMBOL_GPL(pci_find_next_capability); 176 177 static int __pci_bus_find_cap_start(struct pci_bus *bus, 178 unsigned int devfn, u8 hdr_type) 179 { 180 u16 status; 181 182 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status); 183 if (!(status & PCI_STATUS_CAP_LIST)) 184 return 0; 185 186 switch (hdr_type) { 187 case PCI_HEADER_TYPE_NORMAL: 188 case PCI_HEADER_TYPE_BRIDGE: 189 return PCI_CAPABILITY_LIST; 190 case PCI_HEADER_TYPE_CARDBUS: 191 return PCI_CB_CAPABILITY_LIST; 192 default: 193 return 0; 194 } 195 196 return 0; 197 } 198 199 /** 200 * pci_find_capability - query for devices' capabilities 201 * @dev: PCI device to query 202 * @cap: capability code 203 * 204 * Tell if a device supports a given PCI capability. 205 * Returns the address of the requested capability structure within the 206 * device's PCI configuration space or 0 in case the device does not 207 * support it. Possible values for @cap: 208 * 209 * %PCI_CAP_ID_PM Power Management 210 * %PCI_CAP_ID_AGP Accelerated Graphics Port 211 * %PCI_CAP_ID_VPD Vital Product Data 212 * %PCI_CAP_ID_SLOTID Slot Identification 213 * %PCI_CAP_ID_MSI Message Signalled Interrupts 214 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap 215 * %PCI_CAP_ID_PCIX PCI-X 216 * %PCI_CAP_ID_EXP PCI Express 217 */ 218 int pci_find_capability(struct pci_dev *dev, int cap) 219 { 220 int pos; 221 222 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type); 223 if (pos) 224 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap); 225 226 return pos; 227 } 228 229 /** 230 * pci_bus_find_capability - query for devices' capabilities 231 * @bus: the PCI bus to query 232 * @devfn: PCI device to query 233 * @cap: capability code 234 * 235 * Like pci_find_capability() but works for pci devices that do not have a 236 * pci_dev structure set up yet. 
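 *
 * Illustrative sketch (hypothetical caller): during early enumeration, before
 * a pci_dev exists, the Power Management capability of a function can be
 * located from its bus and devfn alone:
 *
 *	u16 pmc;
 *	int pm = pci_bus_find_capability(bus, devfn, PCI_CAP_ID_PM);
 *
 *	if (pm)
 *		pci_bus_read_config_word(bus, devfn, pm + PCI_PM_PMC, &pmc);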
237 * 238 * Returns the address of the requested capability structure within the 239 * device's PCI configuration space or 0 in case the device does not 240 * support it. 241 */ 242 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap) 243 { 244 int pos; 245 u8 hdr_type; 246 247 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type); 248 249 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f); 250 if (pos) 251 pos = __pci_find_next_cap(bus, devfn, pos, cap); 252 253 return pos; 254 } 255 256 /** 257 * pci_find_next_ext_capability - Find an extended capability 258 * @dev: PCI device to query 259 * @start: address at which to start looking (0 to start at beginning of list) 260 * @cap: capability code 261 * 262 * Returns the address of the next matching extended capability structure 263 * within the device's PCI configuration space or 0 if the device does 264 * not support it. Some capabilities can occur several times, e.g., the 265 * vendor-specific capability, and this provides a way to find them all. 266 */ 267 int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap) 268 { 269 u32 header; 270 int ttl; 271 int pos = PCI_CFG_SPACE_SIZE; 272 273 /* minimum 8 bytes per capability */ 274 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; 275 276 if (dev->cfg_size <= PCI_CFG_SPACE_SIZE) 277 return 0; 278 279 if (start) 280 pos = start; 281 282 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) 283 return 0; 284 285 /* 286 * If we have no capabilities, this is indicated by cap ID, 287 * cap version and next pointer all being 0. 288 */ 289 if (header == 0) 290 return 0; 291 292 while (ttl-- > 0) { 293 if (PCI_EXT_CAP_ID(header) == cap && pos != start) 294 return pos; 295 296 pos = PCI_EXT_CAP_NEXT(header); 297 if (pos < PCI_CFG_SPACE_SIZE) 298 break; 299 300 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) 301 break; 302 } 303 304 return 0; 305 } 306 EXPORT_SYMBOL_GPL(pci_find_next_ext_capability); 307 308 /** 309 * pci_find_ext_capability - Find an extended capability 310 * @dev: PCI device to query 311 * @cap: capability code 312 * 313 * Returns the address of the requested extended capability structure 314 * within the device's PCI configuration space or 0 if the device does 315 * not support it. 
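 *
 * Illustrative sketch (hypothetical caller): a capability that may occur
 * several times can be walked with pci_find_next_ext_capability() above,
 * e.g. for the vendor-specific capability:
 *
 *	int pos = 0;
 *
 *	while ((pos = pci_find_next_ext_capability(dev, pos,
 *						   PCI_EXT_CAP_ID_VNDR)))
 *		... handle the vendor-specific block at pos ...
 *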
Possible values for @cap: 316 * 317 * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting 318 * %PCI_EXT_CAP_ID_VC Virtual Channel 319 * %PCI_EXT_CAP_ID_DSN Device Serial Number 320 * %PCI_EXT_CAP_ID_PWR Power Budgeting 321 */ 322 int pci_find_ext_capability(struct pci_dev *dev, int cap) 323 { 324 return pci_find_next_ext_capability(dev, 0, cap); 325 } 326 EXPORT_SYMBOL_GPL(pci_find_ext_capability); 327 328 static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap) 329 { 330 int rc, ttl = PCI_FIND_CAP_TTL; 331 u8 cap, mask; 332 333 if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST) 334 mask = HT_3BIT_CAP_MASK; 335 else 336 mask = HT_5BIT_CAP_MASK; 337 338 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos, 339 PCI_CAP_ID_HT, &ttl); 340 while (pos) { 341 rc = pci_read_config_byte(dev, pos + 3, &cap); 342 if (rc != PCIBIOS_SUCCESSFUL) 343 return 0; 344 345 if ((cap & mask) == ht_cap) 346 return pos; 347 348 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, 349 pos + PCI_CAP_LIST_NEXT, 350 PCI_CAP_ID_HT, &ttl); 351 } 352 353 return 0; 354 } 355 /** 356 * pci_find_next_ht_capability - query a device's Hypertransport capabilities 357 * @dev: PCI device to query 358 * @pos: Position from which to continue searching 359 * @ht_cap: Hypertransport capability code 360 * 361 * To be used in conjunction with pci_find_ht_capability() to search for 362 * all capabilities matching @ht_cap. @pos should always be a value returned 363 * from pci_find_ht_capability(). 364 * 365 * NB. To be 100% safe against broken PCI devices, the caller should take 366 * steps to avoid an infinite loop. 367 */ 368 int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap) 369 { 370 return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap); 371 } 372 EXPORT_SYMBOL_GPL(pci_find_next_ht_capability); 373 374 /** 375 * pci_find_ht_capability - query a device's Hypertransport capabilities 376 * @dev: PCI device to query 377 * @ht_cap: Hypertransport capability code 378 * 379 * Tell if a device supports a given Hypertransport capability. 380 * Returns an address within the device's PCI configuration space 381 * or 0 in case the device does not support the request capability. 382 * The address points to the PCI capability, of type PCI_CAP_ID_HT, 383 * which has a Hypertransport capability matching @ht_cap. 384 */ 385 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap) 386 { 387 int pos; 388 389 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type); 390 if (pos) 391 pos = __pci_find_next_ht_cap(dev, pos, ht_cap); 392 393 return pos; 394 } 395 EXPORT_SYMBOL_GPL(pci_find_ht_capability); 396 397 /** 398 * pci_find_parent_resource - return resource region of parent bus of given region 399 * @dev: PCI device structure contains resources to be searched 400 * @res: child resource record for which parent is sought 401 * 402 * For given resource region of given device, return the resource 403 * region of parent bus the given region is contained in or where 404 * it should be allocated from. 
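 *
 * Illustrative sketch (hypothetical caller): to find the bus window a BAR
 * must live in before requesting it:
 *
 *	struct resource *parent;
 *
 *	parent = pci_find_parent_resource(dev, &dev->resource[0]);
 *	if (parent)
 *		request_resource(parent, &dev->resource[0]);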
405 */ 406 struct resource * 407 pci_find_parent_resource(const struct pci_dev *dev, struct resource *res) 408 { 409 const struct pci_bus *bus = dev->bus; 410 int i; 411 struct resource *best = NULL, *r; 412 413 pci_bus_for_each_resource(bus, r, i) { 414 if (!r) 415 continue; 416 if (res->start && !(res->start >= r->start && res->end <= r->end)) 417 continue; /* Not contained */ 418 if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM)) 419 continue; /* Wrong type */ 420 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) 421 return r; /* Exact match */ 422 /* We can't insert a non-prefetch resource inside a prefetchable parent .. */ 423 if (r->flags & IORESOURCE_PREFETCH) 424 continue; 425 /* .. but we can put a prefetchable resource inside a non-prefetchable one */ 426 if (!best) 427 best = r; 428 } 429 return best; 430 } 431 432 /** 433 * pci_restore_bars - restore a devices BAR values (e.g. after wake-up) 434 * @dev: PCI device to have its BARs restored 435 * 436 * Restore the BAR values for a given device, so as to make it 437 * accessible by its driver. 438 */ 439 static void 440 pci_restore_bars(struct pci_dev *dev) 441 { 442 int i; 443 444 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) 445 pci_update_resource(dev, i); 446 } 447 448 static struct pci_platform_pm_ops *pci_platform_pm; 449 450 int pci_set_platform_pm(struct pci_platform_pm_ops *ops) 451 { 452 if (!ops->is_manageable || !ops->set_state || !ops->choose_state 453 || !ops->sleep_wake) 454 return -EINVAL; 455 pci_platform_pm = ops; 456 return 0; 457 } 458 459 static inline bool platform_pci_power_manageable(struct pci_dev *dev) 460 { 461 return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false; 462 } 463 464 static inline int platform_pci_set_power_state(struct pci_dev *dev, 465 pci_power_t t) 466 { 467 return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS; 468 } 469 470 static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev) 471 { 472 return pci_platform_pm ? 473 pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR; 474 } 475 476 static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable) 477 { 478 return pci_platform_pm ? 479 pci_platform_pm->sleep_wake(dev, enable) : -ENODEV; 480 } 481 482 static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable) 483 { 484 return pci_platform_pm ? 485 pci_platform_pm->run_wake(dev, enable) : -ENODEV; 486 } 487 488 /** 489 * pci_raw_set_power_state - Use PCI PM registers to set the power state of 490 * given PCI device 491 * @dev: PCI device to handle. 492 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. 493 * 494 * RETURN VALUE: 495 * -EINVAL if the requested state is invalid. 496 * -EIO if device does not support PCI PM or its PM capabilities register has a 497 * wrong version, or device doesn't support the requested state. 498 * 0 if device already is in the requested state. 499 * 0 if device's power state has been successfully changed. 
500 */ 501 static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) 502 { 503 u16 pmcsr; 504 bool need_restore = false; 505 506 /* Check if we're already there */ 507 if (dev->current_state == state) 508 return 0; 509 510 if (!dev->pm_cap) 511 return -EIO; 512 513 if (state < PCI_D0 || state > PCI_D3hot) 514 return -EINVAL; 515 516 /* Validate current state: 517 * Can enter D0 from any state, but if we can only go deeper 518 * to sleep if we're already in a low power state 519 */ 520 if (state != PCI_D0 && dev->current_state <= PCI_D3cold 521 && dev->current_state > state) { 522 dev_err(&dev->dev, "invalid power transition " 523 "(from state %d to %d)\n", dev->current_state, state); 524 return -EINVAL; 525 } 526 527 /* check if this device supports the desired state */ 528 if ((state == PCI_D1 && !dev->d1_support) 529 || (state == PCI_D2 && !dev->d2_support)) 530 return -EIO; 531 532 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); 533 534 /* If we're (effectively) in D3, force entire word to 0. 535 * This doesn't affect PME_Status, disables PME_En, and 536 * sets PowerState to 0. 537 */ 538 switch (dev->current_state) { 539 case PCI_D0: 540 case PCI_D1: 541 case PCI_D2: 542 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 543 pmcsr |= state; 544 break; 545 case PCI_D3hot: 546 case PCI_D3cold: 547 case PCI_UNKNOWN: /* Boot-up */ 548 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot 549 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) 550 need_restore = true; 551 /* Fall-through: force to D0 */ 552 default: 553 pmcsr = 0; 554 break; 555 } 556 557 /* enter specified state */ 558 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); 559 560 /* Mandatory power management transition delays */ 561 /* see PCI PM 1.1 5.6.1 table 18 */ 562 if (state == PCI_D3hot || dev->current_state == PCI_D3hot) 563 pci_dev_d3_sleep(dev); 564 else if (state == PCI_D2 || dev->current_state == PCI_D2) 565 udelay(PCI_PM_D2_DELAY); 566 567 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); 568 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); 569 if (dev->current_state != state && printk_ratelimit()) 570 dev_info(&dev->dev, "Refused to change power state, " 571 "currently in D%d\n", dev->current_state); 572 573 /* 574 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT 575 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning 576 * from D3hot to D0 _may_ perform an internal reset, thereby 577 * going to "D0 Uninitialized" rather than "D0 Initialized". 578 * For example, at least some versions of the 3c905B and the 579 * 3c556B exhibit this behaviour. 580 * 581 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave 582 * devices in a D3hot state at boot. Consequently, we need to 583 * restore at least the BARs so that the device will be 584 * accessible to its driver. 585 */ 586 if (need_restore) 587 pci_restore_bars(dev); 588 589 if (dev->bus->self) 590 pcie_aspm_pm_state_change(dev->bus->self); 591 592 return 0; 593 } 594 595 /** 596 * pci_update_current_state - Read PCI power state of given device from its 597 * PCI PM registers and cache it 598 * @dev: PCI device to handle. 
599 * @state: State to cache in case the device doesn't have the PM capability 600 */ 601 void pci_update_current_state(struct pci_dev *dev, pci_power_t state) 602 { 603 if (dev->pm_cap) { 604 u16 pmcsr; 605 606 /* 607 * Configuration space is not accessible for device in 608 * D3cold, so just keep or set D3cold for safety 609 */ 610 if (dev->current_state == PCI_D3cold) 611 return; 612 if (state == PCI_D3cold) { 613 dev->current_state = PCI_D3cold; 614 return; 615 } 616 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); 617 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); 618 } else { 619 dev->current_state = state; 620 } 621 } 622 623 /** 624 * pci_power_up - Put the given device into D0 forcibly 625 * @dev: PCI device to power up 626 */ 627 void pci_power_up(struct pci_dev *dev) 628 { 629 if (platform_pci_power_manageable(dev)) 630 platform_pci_set_power_state(dev, PCI_D0); 631 632 pci_raw_set_power_state(dev, PCI_D0); 633 pci_update_current_state(dev, PCI_D0); 634 } 635 636 /** 637 * pci_platform_power_transition - Use platform to change device power state 638 * @dev: PCI device to handle. 639 * @state: State to put the device into. 640 */ 641 static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state) 642 { 643 int error; 644 645 if (platform_pci_power_manageable(dev)) { 646 error = platform_pci_set_power_state(dev, state); 647 if (!error) 648 pci_update_current_state(dev, state); 649 } else 650 error = -ENODEV; 651 652 if (error && !dev->pm_cap) /* Fall back to PCI_D0 */ 653 dev->current_state = PCI_D0; 654 655 return error; 656 } 657 658 /** 659 * __pci_start_power_transition - Start power transition of a PCI device 660 * @dev: PCI device to handle. 661 * @state: State to put the device into. 662 */ 663 static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state) 664 { 665 if (state == PCI_D0) { 666 pci_platform_power_transition(dev, PCI_D0); 667 /* 668 * Mandatory power management transition delays, see 669 * PCI Express Base Specification Revision 2.0 Section 670 * 6.6.1: Conventional Reset. Do not delay for 671 * devices powered on/off by corresponding bridge, 672 * because have already delayed for the bridge. 673 */ 674 if (dev->runtime_d3cold) { 675 msleep(dev->d3cold_delay); 676 /* 677 * When powering on a bridge from D3cold, the 678 * whole hierarchy may be powered on into 679 * D0uninitialized state, resume them to give 680 * them a chance to suspend again 681 */ 682 pci_wakeup_bus(dev->subordinate); 683 } 684 } 685 } 686 687 /** 688 * __pci_dev_set_current_state - Set current state of a PCI device 689 * @dev: Device to handle 690 * @data: pointer to state to be set 691 */ 692 static int __pci_dev_set_current_state(struct pci_dev *dev, void *data) 693 { 694 pci_power_t state = *(pci_power_t *)data; 695 696 dev->current_state = state; 697 return 0; 698 } 699 700 /** 701 * __pci_bus_set_current_state - Walk given bus and set current state of devices 702 * @bus: Top bus of the subtree to walk. 703 * @state: state to be set 704 */ 705 static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state) 706 { 707 if (bus) 708 pci_walk_bus(bus, __pci_dev_set_current_state, &state); 709 } 710 711 /** 712 * __pci_complete_power_transition - Complete power transition of a PCI device 713 * @dev: PCI device to handle. 714 * @state: State to put the device into. 715 * 716 * This function should not be called directly by device drivers. 
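 * Drivers should call pci_set_power_state() instead, which performs this
 * step internally (see below).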
717 */ 718 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state) 719 { 720 int ret; 721 722 if (state <= PCI_D0) 723 return -EINVAL; 724 ret = pci_platform_power_transition(dev, state); 725 /* Power off the bridge may power off the whole hierarchy */ 726 if (!ret && state == PCI_D3cold) 727 __pci_bus_set_current_state(dev->subordinate, PCI_D3cold); 728 return ret; 729 } 730 EXPORT_SYMBOL_GPL(__pci_complete_power_transition); 731 732 /** 733 * pci_set_power_state - Set the power state of a PCI device 734 * @dev: PCI device to handle. 735 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. 736 * 737 * Transition a device to a new power state, using the platform firmware and/or 738 * the device's PCI PM registers. 739 * 740 * RETURN VALUE: 741 * -EINVAL if the requested state is invalid. 742 * -EIO if device does not support PCI PM or its PM capabilities register has a 743 * wrong version, or device doesn't support the requested state. 744 * 0 if device already is in the requested state. 745 * 0 if device's power state has been successfully changed. 746 */ 747 int pci_set_power_state(struct pci_dev *dev, pci_power_t state) 748 { 749 int error; 750 751 /* bound the state we're entering */ 752 if (state > PCI_D3cold) 753 state = PCI_D3cold; 754 else if (state < PCI_D0) 755 state = PCI_D0; 756 else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev)) 757 /* 758 * If the device or the parent bridge do not support PCI PM, 759 * ignore the request if we're doing anything other than putting 760 * it into D0 (which would only happen on boot). 761 */ 762 return 0; 763 764 /* Check if we're already there */ 765 if (dev->current_state == state) 766 return 0; 767 768 __pci_start_power_transition(dev, state); 769 770 /* This device is quirked not to be put into D3, so 771 don't put it in D3 */ 772 if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3)) 773 return 0; 774 775 /* 776 * To put device in D3cold, we put device into D3hot in native 777 * way, then put device into D3cold with platform ops 778 */ 779 error = pci_raw_set_power_state(dev, state > PCI_D3hot ? 780 PCI_D3hot : state); 781 782 if (!__pci_complete_power_transition(dev, state)) 783 error = 0; 784 /* 785 * When aspm_policy is "powersave" this call ensures 786 * that ASPM is configured. 787 */ 788 if (!error && dev->bus->self) 789 pcie_aspm_powersave_config_link(dev->bus->self); 790 791 return error; 792 } 793 794 /** 795 * pci_choose_state - Choose the power state of a PCI device 796 * @dev: PCI device to be suspended 797 * @state: target sleep state for the whole system. This is the value 798 * that is passed to suspend() function. 799 * 800 * Returns PCI power state suitable for given device and given system 801 * message. 
802 */ 803 804 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) 805 { 806 pci_power_t ret; 807 808 if (!dev->pm_cap) 809 return PCI_D0; 810 811 ret = platform_pci_choose_state(dev); 812 if (ret != PCI_POWER_ERROR) 813 return ret; 814 815 switch (state.event) { 816 case PM_EVENT_ON: 817 return PCI_D0; 818 case PM_EVENT_FREEZE: 819 case PM_EVENT_PRETHAW: 820 /* REVISIT both freeze and pre-thaw "should" use D0 */ 821 case PM_EVENT_SUSPEND: 822 case PM_EVENT_HIBERNATE: 823 return PCI_D3hot; 824 default: 825 dev_info(&dev->dev, "unrecognized suspend event %d\n", 826 state.event); 827 BUG(); 828 } 829 return PCI_D0; 830 } 831 832 EXPORT_SYMBOL(pci_choose_state); 833 834 #define PCI_EXP_SAVE_REGS 7 835 836 837 static struct pci_cap_saved_state *pci_find_saved_cap( 838 struct pci_dev *pci_dev, char cap) 839 { 840 struct pci_cap_saved_state *tmp; 841 842 hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) { 843 if (tmp->cap.cap_nr == cap) 844 return tmp; 845 } 846 return NULL; 847 } 848 849 static int pci_save_pcie_state(struct pci_dev *dev) 850 { 851 int i = 0; 852 struct pci_cap_saved_state *save_state; 853 u16 *cap; 854 855 if (!pci_is_pcie(dev)) 856 return 0; 857 858 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); 859 if (!save_state) { 860 dev_err(&dev->dev, "buffer not found in %s\n", __func__); 861 return -ENOMEM; 862 } 863 864 cap = (u16 *)&save_state->cap.data[0]; 865 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]); 866 pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]); 867 pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]); 868 pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]); 869 pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]); 870 pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]); 871 pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]); 872 873 return 0; 874 } 875 876 static void pci_restore_pcie_state(struct pci_dev *dev) 877 { 878 int i = 0; 879 struct pci_cap_saved_state *save_state; 880 u16 *cap; 881 882 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); 883 if (!save_state) 884 return; 885 886 cap = (u16 *)&save_state->cap.data[0]; 887 pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]); 888 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]); 889 pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]); 890 pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]); 891 pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]); 892 pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]); 893 pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]); 894 } 895 896 897 static int pci_save_pcix_state(struct pci_dev *dev) 898 { 899 int pos; 900 struct pci_cap_saved_state *save_state; 901 902 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); 903 if (pos <= 0) 904 return 0; 905 906 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); 907 if (!save_state) { 908 dev_err(&dev->dev, "buffer not found in %s\n", __func__); 909 return -ENOMEM; 910 } 911 912 pci_read_config_word(dev, pos + PCI_X_CMD, 913 (u16 *)save_state->cap.data); 914 915 return 0; 916 } 917 918 static void pci_restore_pcix_state(struct pci_dev *dev) 919 { 920 int i = 0, pos; 921 struct pci_cap_saved_state *save_state; 922 u16 *cap; 923 924 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); 925 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); 926 if (!save_state || pos <= 0) 927 return; 928 cap = (u16 *)&save_state->cap.data[0]; 929 930 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]); 931 } 932 933 934 
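
/*
 * Illustrative sketch (not part of this file): a driver using the legacy PM
 * hooks would typically pair the save/restore helpers below as follows; the
 * "foo" names are hypothetical:
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t mesg)
 *	{
 *		... quiesce the device ...
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		... re-initialize the device ...
 *		return 0;
 *	}
 */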
/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		dev_dbg(&pdev->dev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
			offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express registers must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
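 *
 * Illustrative sketch (hypothetical caller): capture the state once and
 * re-apply it later, e.g. around a reset:
 *
 *	struct pci_saved_state *saved;
 *
 *	pci_save_state(pdev);
 *	saved = pci_store_saved_state(pdev);
 *	... reset happens here ...
 *	pci_load_and_free_saved_state(pdev, &saved);
 *	pci_restore_state(pdev);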
1033 */ 1034 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev) 1035 { 1036 struct pci_saved_state *state; 1037 struct pci_cap_saved_state *tmp; 1038 struct pci_cap_saved_data *cap; 1039 size_t size; 1040 1041 if (!dev->state_saved) 1042 return NULL; 1043 1044 size = sizeof(*state) + sizeof(struct pci_cap_saved_data); 1045 1046 hlist_for_each_entry(tmp, &dev->saved_cap_space, next) 1047 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size; 1048 1049 state = kzalloc(size, GFP_KERNEL); 1050 if (!state) 1051 return NULL; 1052 1053 memcpy(state->config_space, dev->saved_config_space, 1054 sizeof(state->config_space)); 1055 1056 cap = state->cap; 1057 hlist_for_each_entry(tmp, &dev->saved_cap_space, next) { 1058 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size; 1059 memcpy(cap, &tmp->cap, len); 1060 cap = (struct pci_cap_saved_data *)((u8 *)cap + len); 1061 } 1062 /* Empty cap_save terminates list */ 1063 1064 return state; 1065 } 1066 EXPORT_SYMBOL_GPL(pci_store_saved_state); 1067 1068 /** 1069 * pci_load_saved_state - Reload the provided save state into struct pci_dev. 1070 * @dev: PCI device that we're dealing with 1071 * @state: Saved state returned from pci_store_saved_state() 1072 */ 1073 int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state) 1074 { 1075 struct pci_cap_saved_data *cap; 1076 1077 dev->state_saved = false; 1078 1079 if (!state) 1080 return 0; 1081 1082 memcpy(dev->saved_config_space, state->config_space, 1083 sizeof(state->config_space)); 1084 1085 cap = state->cap; 1086 while (cap->size) { 1087 struct pci_cap_saved_state *tmp; 1088 1089 tmp = pci_find_saved_cap(dev, cap->cap_nr); 1090 if (!tmp || tmp->cap.size != cap->size) 1091 return -EINVAL; 1092 1093 memcpy(tmp->cap.data, cap->data, tmp->cap.size); 1094 cap = (struct pci_cap_saved_data *)((u8 *)cap + 1095 sizeof(struct pci_cap_saved_data) + cap->size); 1096 } 1097 1098 dev->state_saved = true; 1099 return 0; 1100 } 1101 EXPORT_SYMBOL_GPL(pci_load_saved_state); 1102 1103 /** 1104 * pci_load_and_free_saved_state - Reload the save state pointed to by state, 1105 * and free the memory allocated for it. 1106 * @dev: PCI device that we're dealing with 1107 * @state: Pointer to saved state returned from pci_store_saved_state() 1108 */ 1109 int pci_load_and_free_saved_state(struct pci_dev *dev, 1110 struct pci_saved_state **state) 1111 { 1112 int ret = pci_load_saved_state(dev, *state); 1113 kfree(*state); 1114 *state = NULL; 1115 return ret; 1116 } 1117 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state); 1118 1119 static int do_pci_enable_device(struct pci_dev *dev, int bars) 1120 { 1121 int err; 1122 1123 err = pci_set_power_state(dev, PCI_D0); 1124 if (err < 0 && err != -EIO) 1125 return err; 1126 err = pcibios_enable_device(dev, bars); 1127 if (err < 0) 1128 return err; 1129 pci_fixup_device(pci_fixup_enable, dev); 1130 1131 return 0; 1132 } 1133 1134 /** 1135 * pci_reenable_device - Resume abandoned device 1136 * @dev: PCI device to be resumed 1137 * 1138 * Note this function is a backend of pci_default_resume and is not supposed 1139 * to be called by normal code, write proper resume handler and use it instead. 
1140 */ 1141 int pci_reenable_device(struct pci_dev *dev) 1142 { 1143 if (pci_is_enabled(dev)) 1144 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1); 1145 return 0; 1146 } 1147 1148 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) 1149 { 1150 int err; 1151 int i, bars = 0; 1152 1153 /* 1154 * Power state could be unknown at this point, either due to a fresh 1155 * boot or a device removal call. So get the current power state 1156 * so that things like MSI message writing will behave as expected 1157 * (e.g. if the device really is in D0 at enable time). 1158 */ 1159 if (dev->pm_cap) { 1160 u16 pmcsr; 1161 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); 1162 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); 1163 } 1164 1165 if (atomic_inc_return(&dev->enable_cnt) > 1) 1166 return 0; /* already enabled */ 1167 1168 /* only skip sriov related */ 1169 for (i = 0; i <= PCI_ROM_RESOURCE; i++) 1170 if (dev->resource[i].flags & flags) 1171 bars |= (1 << i); 1172 for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++) 1173 if (dev->resource[i].flags & flags) 1174 bars |= (1 << i); 1175 1176 err = do_pci_enable_device(dev, bars); 1177 if (err < 0) 1178 atomic_dec(&dev->enable_cnt); 1179 return err; 1180 } 1181 1182 /** 1183 * pci_enable_device_io - Initialize a device for use with IO space 1184 * @dev: PCI device to be initialized 1185 * 1186 * Initialize device before it's used by a driver. Ask low-level code 1187 * to enable I/O resources. Wake up the device if it was suspended. 1188 * Beware, this function can fail. 1189 */ 1190 int pci_enable_device_io(struct pci_dev *dev) 1191 { 1192 return pci_enable_device_flags(dev, IORESOURCE_IO); 1193 } 1194 1195 /** 1196 * pci_enable_device_mem - Initialize a device for use with Memory space 1197 * @dev: PCI device to be initialized 1198 * 1199 * Initialize device before it's used by a driver. Ask low-level code 1200 * to enable Memory resources. Wake up the device if it was suspended. 1201 * Beware, this function can fail. 1202 */ 1203 int pci_enable_device_mem(struct pci_dev *dev) 1204 { 1205 return pci_enable_device_flags(dev, IORESOURCE_MEM); 1206 } 1207 1208 /** 1209 * pci_enable_device - Initialize device before it's used by a driver. 1210 * @dev: PCI device to be initialized 1211 * 1212 * Initialize device before it's used by a driver. Ask low-level code 1213 * to enable I/O and memory. Wake up the device if it was suspended. 1214 * Beware, this function can fail. 1215 * 1216 * Note we don't actually enable the device many times if we call 1217 * this function repeatedly (we just increment the count). 1218 */ 1219 int pci_enable_device(struct pci_dev *dev) 1220 { 1221 return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO); 1222 } 1223 1224 /* 1225 * Managed PCI resources. This manages device on/off, intx/msi/msix 1226 * on/off and BAR regions. pci_dev itself records msi/msix status, so 1227 * there's no need to track it separately. pci_devres is initialized 1228 * when a device is enabled using managed PCI device enable interface. 
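 *
 * Illustrative sketch (hypothetical driver): with the managed interface a
 * probe routine needs no matching cleanup for the enable:
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc = pcim_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *		... the device is disabled automatically on driver detach ...
 *		return 0;
 *	}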
1229 */ 1230 struct pci_devres { 1231 unsigned int enabled:1; 1232 unsigned int pinned:1; 1233 unsigned int orig_intx:1; 1234 unsigned int restore_intx:1; 1235 u32 region_mask; 1236 }; 1237 1238 static void pcim_release(struct device *gendev, void *res) 1239 { 1240 struct pci_dev *dev = container_of(gendev, struct pci_dev, dev); 1241 struct pci_devres *this = res; 1242 int i; 1243 1244 if (dev->msi_enabled) 1245 pci_disable_msi(dev); 1246 if (dev->msix_enabled) 1247 pci_disable_msix(dev); 1248 1249 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) 1250 if (this->region_mask & (1 << i)) 1251 pci_release_region(dev, i); 1252 1253 if (this->restore_intx) 1254 pci_intx(dev, this->orig_intx); 1255 1256 if (this->enabled && !this->pinned) 1257 pci_disable_device(dev); 1258 } 1259 1260 static struct pci_devres * get_pci_dr(struct pci_dev *pdev) 1261 { 1262 struct pci_devres *dr, *new_dr; 1263 1264 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL); 1265 if (dr) 1266 return dr; 1267 1268 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL); 1269 if (!new_dr) 1270 return NULL; 1271 return devres_get(&pdev->dev, new_dr, NULL, NULL); 1272 } 1273 1274 static struct pci_devres * find_pci_dr(struct pci_dev *pdev) 1275 { 1276 if (pci_is_managed(pdev)) 1277 return devres_find(&pdev->dev, pcim_release, NULL, NULL); 1278 return NULL; 1279 } 1280 1281 /** 1282 * pcim_enable_device - Managed pci_enable_device() 1283 * @pdev: PCI device to be initialized 1284 * 1285 * Managed pci_enable_device(). 1286 */ 1287 int pcim_enable_device(struct pci_dev *pdev) 1288 { 1289 struct pci_devres *dr; 1290 int rc; 1291 1292 dr = get_pci_dr(pdev); 1293 if (unlikely(!dr)) 1294 return -ENOMEM; 1295 if (dr->enabled) 1296 return 0; 1297 1298 rc = pci_enable_device(pdev); 1299 if (!rc) { 1300 pdev->is_managed = 1; 1301 dr->enabled = 1; 1302 } 1303 return rc; 1304 } 1305 1306 /** 1307 * pcim_pin_device - Pin managed PCI device 1308 * @pdev: PCI device to pin 1309 * 1310 * Pin managed PCI device @pdev. Pinned device won't be disabled on 1311 * driver detach. @pdev must have been enabled with 1312 * pcim_enable_device(). 1313 */ 1314 void pcim_pin_device(struct pci_dev *pdev) 1315 { 1316 struct pci_devres *dr; 1317 1318 dr = find_pci_dr(pdev); 1319 WARN_ON(!dr || !dr->enabled); 1320 if (dr) 1321 dr->pinned = 1; 1322 } 1323 1324 /* 1325 * pcibios_add_device - provide arch specific hooks when adding device dev 1326 * @dev: the PCI device being added 1327 * 1328 * Permits the platform to provide architecture specific functionality when 1329 * devices are added. This is the default implementation. Architecture 1330 * implementations can override this. 1331 */ 1332 int __weak pcibios_add_device (struct pci_dev *dev) 1333 { 1334 return 0; 1335 } 1336 1337 /** 1338 * pcibios_release_device - provide arch specific hooks when releasing device dev 1339 * @dev: the PCI device being released 1340 * 1341 * Permits the platform to provide architecture specific functionality when 1342 * devices are released. This is the default implementation. Architecture 1343 * implementations can override this. 1344 */ 1345 void __weak pcibios_release_device(struct pci_dev *dev) {} 1346 1347 /** 1348 * pcibios_disable_device - disable arch specific PCI resources for device dev 1349 * @dev: the PCI device to disable 1350 * 1351 * Disables architecture specific PCI resources for the device. This 1352 * is the default implementation. Architecture implementations can 1353 * override this. 
1354 */ 1355 void __weak pcibios_disable_device (struct pci_dev *dev) {} 1356 1357 static void do_pci_disable_device(struct pci_dev *dev) 1358 { 1359 u16 pci_command; 1360 1361 pci_read_config_word(dev, PCI_COMMAND, &pci_command); 1362 if (pci_command & PCI_COMMAND_MASTER) { 1363 pci_command &= ~PCI_COMMAND_MASTER; 1364 pci_write_config_word(dev, PCI_COMMAND, pci_command); 1365 } 1366 1367 pcibios_disable_device(dev); 1368 } 1369 1370 /** 1371 * pci_disable_enabled_device - Disable device without updating enable_cnt 1372 * @dev: PCI device to disable 1373 * 1374 * NOTE: This function is a backend of PCI power management routines and is 1375 * not supposed to be called drivers. 1376 */ 1377 void pci_disable_enabled_device(struct pci_dev *dev) 1378 { 1379 if (pci_is_enabled(dev)) 1380 do_pci_disable_device(dev); 1381 } 1382 1383 /** 1384 * pci_disable_device - Disable PCI device after use 1385 * @dev: PCI device to be disabled 1386 * 1387 * Signal to the system that the PCI device is not in use by the system 1388 * anymore. This only involves disabling PCI bus-mastering, if active. 1389 * 1390 * Note we don't actually disable the device until all callers of 1391 * pci_enable_device() have called pci_disable_device(). 1392 */ 1393 void 1394 pci_disable_device(struct pci_dev *dev) 1395 { 1396 struct pci_devres *dr; 1397 1398 dr = find_pci_dr(dev); 1399 if (dr) 1400 dr->enabled = 0; 1401 1402 dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0, 1403 "disabling already-disabled device"); 1404 1405 if (atomic_dec_return(&dev->enable_cnt) != 0) 1406 return; 1407 1408 do_pci_disable_device(dev); 1409 1410 dev->is_busmaster = 0; 1411 } 1412 1413 /** 1414 * pcibios_set_pcie_reset_state - set reset state for device dev 1415 * @dev: the PCIe device reset 1416 * @state: Reset state to enter into 1417 * 1418 * 1419 * Sets the PCIe reset state for the device. This is the default 1420 * implementation. Architecture implementations can override this. 1421 */ 1422 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev, 1423 enum pcie_reset_state state) 1424 { 1425 return -EINVAL; 1426 } 1427 1428 /** 1429 * pci_set_pcie_reset_state - set reset state for device dev 1430 * @dev: the PCIe device reset 1431 * @state: Reset state to enter into 1432 * 1433 * 1434 * Sets the PCI reset state for the device. 1435 */ 1436 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) 1437 { 1438 return pcibios_set_pcie_reset_state(dev, state); 1439 } 1440 1441 /** 1442 * pci_check_pme_status - Check if given device has generated PME. 1443 * @dev: Device to check. 1444 * 1445 * Check the PME status of the device and if set, clear it and clear PME enable 1446 * (if set). Return 'true' if PME status and PME enable were both set or 1447 * 'false' otherwise. 1448 */ 1449 bool pci_check_pme_status(struct pci_dev *dev) 1450 { 1451 int pmcsr_pos; 1452 u16 pmcsr; 1453 bool ret = false; 1454 1455 if (!dev->pm_cap) 1456 return false; 1457 1458 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL; 1459 pci_read_config_word(dev, pmcsr_pos, &pmcsr); 1460 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS)) 1461 return false; 1462 1463 /* Clear PME status. */ 1464 pmcsr |= PCI_PM_CTRL_PME_STATUS; 1465 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) { 1466 /* Disable PME to avoid interrupt flood. */ 1467 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; 1468 ret = true; 1469 } 1470 1471 pci_write_config_word(dev, pmcsr_pos, pmcsr); 1472 1473 return ret; 1474 } 1475 1476 /** 1477 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. 
1478 * @dev: Device to handle. 1479 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag. 1480 * 1481 * Check if @dev has generated PME and queue a resume request for it in that 1482 * case. 1483 */ 1484 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset) 1485 { 1486 if (pme_poll_reset && dev->pme_poll) 1487 dev->pme_poll = false; 1488 1489 if (pci_check_pme_status(dev)) { 1490 pci_wakeup_event(dev); 1491 pm_request_resume(&dev->dev); 1492 } 1493 return 0; 1494 } 1495 1496 /** 1497 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary. 1498 * @bus: Top bus of the subtree to walk. 1499 */ 1500 void pci_pme_wakeup_bus(struct pci_bus *bus) 1501 { 1502 if (bus) 1503 pci_walk_bus(bus, pci_pme_wakeup, (void *)true); 1504 } 1505 1506 /** 1507 * pci_wakeup - Wake up a PCI device 1508 * @pci_dev: Device to handle. 1509 * @ign: ignored parameter 1510 */ 1511 static int pci_wakeup(struct pci_dev *pci_dev, void *ign) 1512 { 1513 pci_wakeup_event(pci_dev); 1514 pm_request_resume(&pci_dev->dev); 1515 return 0; 1516 } 1517 1518 /** 1519 * pci_wakeup_bus - Walk given bus and wake up devices on it 1520 * @bus: Top bus of the subtree to walk. 1521 */ 1522 void pci_wakeup_bus(struct pci_bus *bus) 1523 { 1524 if (bus) 1525 pci_walk_bus(bus, pci_wakeup, NULL); 1526 } 1527 1528 /** 1529 * pci_pme_capable - check the capability of PCI device to generate PME# 1530 * @dev: PCI device to handle. 1531 * @state: PCI state from which device will issue PME#. 1532 */ 1533 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) 1534 { 1535 if (!dev->pm_cap) 1536 return false; 1537 1538 return !!(dev->pme_support & (1 << state)); 1539 } 1540 1541 static void pci_pme_list_scan(struct work_struct *work) 1542 { 1543 struct pci_pme_device *pme_dev, *n; 1544 1545 mutex_lock(&pci_pme_list_mutex); 1546 if (!list_empty(&pci_pme_list)) { 1547 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) { 1548 if (pme_dev->dev->pme_poll) { 1549 struct pci_dev *bridge; 1550 1551 bridge = pme_dev->dev->bus->self; 1552 /* 1553 * If bridge is in low power state, the 1554 * configuration space of subordinate devices 1555 * may be not accessible 1556 */ 1557 if (bridge && bridge->current_state != PCI_D0) 1558 continue; 1559 pci_pme_wakeup(pme_dev->dev, NULL); 1560 } else { 1561 list_del(&pme_dev->list); 1562 kfree(pme_dev); 1563 } 1564 } 1565 if (!list_empty(&pci_pme_list)) 1566 schedule_delayed_work(&pci_pme_work, 1567 msecs_to_jiffies(PME_TIMEOUT)); 1568 } 1569 mutex_unlock(&pci_pme_list_mutex); 1570 } 1571 1572 /** 1573 * pci_pme_active - enable or disable PCI device's PME# function 1574 * @dev: PCI device to handle. 1575 * @enable: 'true' to enable PME# generation; 'false' to disable it. 1576 * 1577 * The caller must verify that the device is capable of generating PME# before 1578 * calling this function with @enable equal to 'true'. 1579 */ 1580 void pci_pme_active(struct pci_dev *dev, bool enable) 1581 { 1582 u16 pmcsr; 1583 1584 if (!dev->pme_support) 1585 return; 1586 1587 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); 1588 /* Clear PME_Status by writing 1 to it and enable PME# */ 1589 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE; 1590 if (!enable) 1591 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; 1592 1593 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); 1594 1595 /* 1596 * PCI (as opposed to PCIe) PME requires that the device have 1597 * its PME# line hooked up correctly. 
Not all hardware vendors 1598 * do this, so the PME never gets delivered and the device 1599 * remains asleep. The easiest way around this is to 1600 * periodically walk the list of suspended devices and check 1601 * whether any have their PME flag set. The assumption is that 1602 * we'll wake up often enough anyway that this won't be a huge 1603 * hit, and the power savings from the devices will still be a 1604 * win. 1605 * 1606 * Although PCIe uses in-band PME message instead of PME# line 1607 * to report PME, PME does not work for some PCIe devices in 1608 * reality. For example, there are devices that set their PME 1609 * status bits, but don't really bother to send a PME message; 1610 * there are PCI Express Root Ports that don't bother to 1611 * trigger interrupts when they receive PME messages from the 1612 * devices below. So PME poll is used for PCIe devices too. 1613 */ 1614 1615 if (dev->pme_poll) { 1616 struct pci_pme_device *pme_dev; 1617 if (enable) { 1618 pme_dev = kmalloc(sizeof(struct pci_pme_device), 1619 GFP_KERNEL); 1620 if (!pme_dev) 1621 goto out; 1622 pme_dev->dev = dev; 1623 mutex_lock(&pci_pme_list_mutex); 1624 list_add(&pme_dev->list, &pci_pme_list); 1625 if (list_is_singular(&pci_pme_list)) 1626 schedule_delayed_work(&pci_pme_work, 1627 msecs_to_jiffies(PME_TIMEOUT)); 1628 mutex_unlock(&pci_pme_list_mutex); 1629 } else { 1630 mutex_lock(&pci_pme_list_mutex); 1631 list_for_each_entry(pme_dev, &pci_pme_list, list) { 1632 if (pme_dev->dev == dev) { 1633 list_del(&pme_dev->list); 1634 kfree(pme_dev); 1635 break; 1636 } 1637 } 1638 mutex_unlock(&pci_pme_list_mutex); 1639 } 1640 } 1641 1642 out: 1643 dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled"); 1644 } 1645 1646 /** 1647 * __pci_enable_wake - enable PCI device as wakeup event source 1648 * @dev: PCI device affected 1649 * @state: PCI state from which device will issue wakeup events 1650 * @runtime: True if the events are to be generated at run time 1651 * @enable: True to enable event generation; false to disable 1652 * 1653 * This enables the device as a wakeup event source, or disables it. 1654 * When such events involves platform-specific hooks, those hooks are 1655 * called automatically by this routine. 1656 * 1657 * Devices with legacy power management (no standard PCI PM capabilities) 1658 * always require such platform hooks. 1659 * 1660 * RETURN VALUE: 1661 * 0 is returned on success 1662 * -EINVAL is returned if device is not supposed to wake up the system 1663 * Error code depending on the platform is returned if both the platform and 1664 * the native mechanism fail to enable the generation of wake-up events 1665 */ 1666 int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, 1667 bool runtime, bool enable) 1668 { 1669 int ret = 0; 1670 1671 if (enable && !runtime && !device_may_wakeup(&dev->dev)) 1672 return -EINVAL; 1673 1674 /* Don't do the same thing twice in a row for one device. */ 1675 if (!!enable == !!dev->wakeup_prepared) 1676 return 0; 1677 1678 /* 1679 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don 1680 * Anderson we should be doing PME# wake enable followed by ACPI wake 1681 * enable. To disable wake-up we call the platform first, for symmetry. 1682 */ 1683 1684 if (enable) { 1685 int error; 1686 1687 if (pci_pme_capable(dev, state)) 1688 pci_pme_active(dev, true); 1689 else 1690 ret = 1; 1691 error = runtime ? 
platform_pci_run_wake(dev, true) : 1692 platform_pci_sleep_wake(dev, true); 1693 if (ret) 1694 ret = error; 1695 if (!ret) 1696 dev->wakeup_prepared = true; 1697 } else { 1698 if (runtime) 1699 platform_pci_run_wake(dev, false); 1700 else 1701 platform_pci_sleep_wake(dev, false); 1702 pci_pme_active(dev, false); 1703 dev->wakeup_prepared = false; 1704 } 1705 1706 return ret; 1707 } 1708 EXPORT_SYMBOL(__pci_enable_wake); 1709 1710 /** 1711 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold 1712 * @dev: PCI device to prepare 1713 * @enable: True to enable wake-up event generation; false to disable 1714 * 1715 * Many drivers want the device to wake up the system from D3_hot or D3_cold 1716 * and this function allows them to set that up cleanly - pci_enable_wake() 1717 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI 1718 * ordering constraints. 1719 * 1720 * This function only returns error code if the device is not capable of 1721 * generating PME# from both D3_hot and D3_cold, and the platform is unable to 1722 * enable wake-up power for it. 1723 */ 1724 int pci_wake_from_d3(struct pci_dev *dev, bool enable) 1725 { 1726 return pci_pme_capable(dev, PCI_D3cold) ? 1727 pci_enable_wake(dev, PCI_D3cold, enable) : 1728 pci_enable_wake(dev, PCI_D3hot, enable); 1729 } 1730 1731 /** 1732 * pci_target_state - find an appropriate low power state for a given PCI dev 1733 * @dev: PCI device 1734 * 1735 * Use underlying platform code to find a supported low power state for @dev. 1736 * If the platform can't manage @dev, return the deepest state from which it 1737 * can generate wake events, based on any available PME info. 1738 */ 1739 pci_power_t pci_target_state(struct pci_dev *dev) 1740 { 1741 pci_power_t target_state = PCI_D3hot; 1742 1743 if (platform_pci_power_manageable(dev)) { 1744 /* 1745 * Call the platform to choose the target state of the device 1746 * and enable wake-up from this state if supported. 1747 */ 1748 pci_power_t state = platform_pci_choose_state(dev); 1749 1750 switch (state) { 1751 case PCI_POWER_ERROR: 1752 case PCI_UNKNOWN: 1753 break; 1754 case PCI_D1: 1755 case PCI_D2: 1756 if (pci_no_d1d2(dev)) 1757 break; 1758 default: 1759 target_state = state; 1760 } 1761 } else if (!dev->pm_cap) { 1762 target_state = PCI_D0; 1763 } else if (device_may_wakeup(&dev->dev)) { 1764 /* 1765 * Find the deepest state from which the device can generate 1766 * wake-up events, make it the target state and enable device 1767 * to generate PME#. 1768 */ 1769 if (dev->pme_support) { 1770 while (target_state 1771 && !(dev->pme_support & (1 << target_state))) 1772 target_state--; 1773 } 1774 } 1775 1776 return target_state; 1777 } 1778 1779 /** 1780 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state 1781 * @dev: Device to handle. 1782 * 1783 * Choose the power state appropriate for the device depending on whether 1784 * it can wake up the system and/or is power manageable by the platform 1785 * (PCI_D3hot is the default) and put the device into that state. 
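 *
 * Illustrative sketch (simplified): a suspend path would normally save config
 * space first and only then call this, roughly:
 *
 *	pci_save_state(pdev);
 *	error = pci_prepare_to_sleep(pdev);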
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	/* D3cold during system suspend/hibernate is not supported */
	if (target_state > PCI_D3hot)
		target_state = PCI_D3hot;

	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	dev->runtime_d3cold = target_state == PCI_D3cold;

	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error) {
		__pci_enable_wake(dev, target_state, true, false);
		dev->runtime_d3cold = false;
	}

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device supports
 * PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (device_run_wake(&dev->dev))
		return true;

	if (!dev->pme_support)
		return false;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_run_wake(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_run_wake(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);

void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	if (parent)
		pm_runtime_get_sync(parent);
	pm_runtime_get_noresume(dev);
	/*
	 * pdev->current_state is set to PCI_D3cold during suspending,
	 * so wait until suspending completes
	 */
	pm_runtime_barrier(dev);
	/*
	 * Only need to resume devices in D3cold, because config
	 * registers are still accessible for devices suspended but
	 * not in D3cold.
1902 */ 1903 if (pdev->current_state == PCI_D3cold) 1904 pm_runtime_resume(dev); 1905 } 1906 1907 void pci_config_pm_runtime_put(struct pci_dev *pdev) 1908 { 1909 struct device *dev = &pdev->dev; 1910 struct device *parent = dev->parent; 1911 1912 pm_runtime_put(dev); 1913 if (parent) 1914 pm_runtime_put_sync(parent); 1915 } 1916 1917 /** 1918 * pci_pm_init - Initialize PM functions of given PCI device 1919 * @dev: PCI device to handle. 1920 */ 1921 void pci_pm_init(struct pci_dev *dev) 1922 { 1923 int pm; 1924 u16 pmc; 1925 1926 pm_runtime_forbid(&dev->dev); 1927 pm_runtime_set_active(&dev->dev); 1928 pm_runtime_enable(&dev->dev); 1929 device_enable_async_suspend(&dev->dev); 1930 dev->wakeup_prepared = false; 1931 1932 dev->pm_cap = 0; 1933 dev->pme_support = 0; 1934 1935 /* find PCI PM capability in list */ 1936 pm = pci_find_capability(dev, PCI_CAP_ID_PM); 1937 if (!pm) 1938 return; 1939 /* Check device's ability to generate PME# */ 1940 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc); 1941 1942 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { 1943 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n", 1944 pmc & PCI_PM_CAP_VER_MASK); 1945 return; 1946 } 1947 1948 dev->pm_cap = pm; 1949 dev->d3_delay = PCI_PM_D3_WAIT; 1950 dev->d3cold_delay = PCI_PM_D3COLD_WAIT; 1951 dev->d3cold_allowed = true; 1952 1953 dev->d1_support = false; 1954 dev->d2_support = false; 1955 if (!pci_no_d1d2(dev)) { 1956 if (pmc & PCI_PM_CAP_D1) 1957 dev->d1_support = true; 1958 if (pmc & PCI_PM_CAP_D2) 1959 dev->d2_support = true; 1960 1961 if (dev->d1_support || dev->d2_support) 1962 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n", 1963 dev->d1_support ? " D1" : "", 1964 dev->d2_support ? " D2" : ""); 1965 } 1966 1967 pmc &= PCI_PM_CAP_PME_MASK; 1968 if (pmc) { 1969 dev_printk(KERN_DEBUG, &dev->dev, 1970 "PME# supported from%s%s%s%s%s\n", 1971 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", 1972 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", 1973 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", 1974 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", 1975 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); 1976 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; 1977 dev->pme_poll = true; 1978 /* 1979 * Make device's PM flags reflect the wake-up capability, but 1980 * let the user space enable it to wake up the system as needed. 
1981 */ 1982 device_set_wakeup_capable(&dev->dev, true); 1983 /* Disable the PME# generation functionality */ 1984 pci_pme_active(dev, false); 1985 } 1986 } 1987 1988 static void pci_add_saved_cap(struct pci_dev *pci_dev, 1989 struct pci_cap_saved_state *new_cap) 1990 { 1991 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space); 1992 } 1993 1994 /** 1995 * pci_add_save_buffer - allocate buffer for saving given capability registers 1996 * @dev: the PCI device 1997 * @cap: the capability to allocate the buffer for 1998 * @size: requested size of the buffer 1999 */ 2000 static int pci_add_cap_save_buffer( 2001 struct pci_dev *dev, char cap, unsigned int size) 2002 { 2003 int pos; 2004 struct pci_cap_saved_state *save_state; 2005 2006 pos = pci_find_capability(dev, cap); 2007 if (pos <= 0) 2008 return 0; 2009 2010 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL); 2011 if (!save_state) 2012 return -ENOMEM; 2013 2014 save_state->cap.cap_nr = cap; 2015 save_state->cap.size = size; 2016 pci_add_saved_cap(dev, save_state); 2017 2018 return 0; 2019 } 2020 2021 /** 2022 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities 2023 * @dev: the PCI device 2024 */ 2025 void pci_allocate_cap_save_buffers(struct pci_dev *dev) 2026 { 2027 int error; 2028 2029 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 2030 PCI_EXP_SAVE_REGS * sizeof(u16)); 2031 if (error) 2032 dev_err(&dev->dev, 2033 "unable to preallocate PCI Express save buffer\n"); 2034 2035 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16)); 2036 if (error) 2037 dev_err(&dev->dev, 2038 "unable to preallocate PCI-X save buffer\n"); 2039 } 2040 2041 void pci_free_cap_save_buffers(struct pci_dev *dev) 2042 { 2043 struct pci_cap_saved_state *tmp; 2044 struct hlist_node *n; 2045 2046 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next) 2047 kfree(tmp); 2048 } 2049 2050 /** 2051 * pci_configure_ari - enable or disable ARI forwarding 2052 * @dev: the PCI device 2053 * 2054 * If @dev and its upstream bridge both support ARI, enable ARI in the 2055 * bridge. Otherwise, disable ARI in the bridge. 2056 */ 2057 void pci_configure_ari(struct pci_dev *dev) 2058 { 2059 u32 cap; 2060 struct pci_dev *bridge; 2061 2062 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn) 2063 return; 2064 2065 bridge = dev->bus->self; 2066 if (!bridge) 2067 return; 2068 2069 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap); 2070 if (!(cap & PCI_EXP_DEVCAP2_ARI)) 2071 return; 2072 2073 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) { 2074 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, 2075 PCI_EXP_DEVCTL2_ARI); 2076 bridge->ari_enabled = 1; 2077 } else { 2078 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2, 2079 PCI_EXP_DEVCTL2_ARI); 2080 bridge->ari_enabled = 0; 2081 } 2082 } 2083 2084 /** 2085 * pci_enable_ido - enable ID-based Ordering on a device 2086 * @dev: the PCI device 2087 * @type: which types of IDO to enable 2088 * 2089 * Enable ID-based ordering on @dev. @type can contain the bits 2090 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate 2091 * which types of transactions are allowed to be re-ordered. 
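 *
 * Example (illustrative only): a driver whose device tolerates relaxed
 * ordering of both request and completion TLPs might call
 *
 *	pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
 *
 * from its probe routine; "pdev" here stands for the driver's struct pci_dev
 * and is an assumption made for the sketch.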
2092 */ 2093 void pci_enable_ido(struct pci_dev *dev, unsigned long type) 2094 { 2095 u16 ctrl = 0; 2096 2097 if (type & PCI_EXP_IDO_REQUEST) 2098 ctrl |= PCI_EXP_IDO_REQ_EN; 2099 if (type & PCI_EXP_IDO_COMPLETION) 2100 ctrl |= PCI_EXP_IDO_CMP_EN; 2101 if (ctrl) 2102 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl); 2103 } 2104 EXPORT_SYMBOL(pci_enable_ido); 2105 2106 /** 2107 * pci_disable_ido - disable ID-based ordering on a device 2108 * @dev: the PCI device 2109 * @type: which types of IDO to disable 2110 */ 2111 void pci_disable_ido(struct pci_dev *dev, unsigned long type) 2112 { 2113 u16 ctrl = 0; 2114 2115 if (type & PCI_EXP_IDO_REQUEST) 2116 ctrl |= PCI_EXP_IDO_REQ_EN; 2117 if (type & PCI_EXP_IDO_COMPLETION) 2118 ctrl |= PCI_EXP_IDO_CMP_EN; 2119 if (ctrl) 2120 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl); 2121 } 2122 EXPORT_SYMBOL(pci_disable_ido); 2123 2124 /** 2125 * pci_enable_obff - enable optimized buffer flush/fill 2126 * @dev: PCI device 2127 * @type: type of signaling to use 2128 * 2129 * Try to enable @type OBFF signaling on @dev. It will try using WAKE# 2130 * signaling if possible, falling back to message signaling only if 2131 * WAKE# isn't supported. @type should indicate whether the PCIe link 2132 * be brought out of L0s or L1 to send the message. It should be either 2133 * %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_OBFF_SIGNAL_L0. 2134 * 2135 * If your device can benefit from receiving all messages, even at the 2136 * power cost of bringing the link back up from a low power state, use 2137 * %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_OBFF_SIGNAL_L0 (the 2138 * preferred type). 2139 * 2140 * RETURNS: 2141 * Zero on success, appropriate error number on failure. 2142 */ 2143 int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type) 2144 { 2145 u32 cap; 2146 u16 ctrl; 2147 int ret; 2148 2149 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap); 2150 if (!(cap & PCI_EXP_OBFF_MASK)) 2151 return -ENOTSUPP; /* no OBFF support at all */ 2152 2153 /* Make sure the topology supports OBFF as well */ 2154 if (dev->bus->self) { 2155 ret = pci_enable_obff(dev->bus->self, type); 2156 if (ret) 2157 return ret; 2158 } 2159 2160 pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl); 2161 if (cap & PCI_EXP_OBFF_WAKE) 2162 ctrl |= PCI_EXP_OBFF_WAKE_EN; 2163 else { 2164 switch (type) { 2165 case PCI_EXP_OBFF_SIGNAL_L0: 2166 if (!(ctrl & PCI_EXP_OBFF_WAKE_EN)) 2167 ctrl |= PCI_EXP_OBFF_MSGA_EN; 2168 break; 2169 case PCI_EXP_OBFF_SIGNAL_ALWAYS: 2170 ctrl &= ~PCI_EXP_OBFF_WAKE_EN; 2171 ctrl |= PCI_EXP_OBFF_MSGB_EN; 2172 break; 2173 default: 2174 WARN(1, "bad OBFF signal type\n"); 2175 return -ENOTSUPP; 2176 } 2177 } 2178 pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, ctrl); 2179 2180 return 0; 2181 } 2182 EXPORT_SYMBOL(pci_enable_obff); 2183 2184 /** 2185 * pci_disable_obff - disable optimized buffer flush/fill 2186 * @dev: PCI device 2187 * 2188 * Disable OBFF on @dev. 2189 */ 2190 void pci_disable_obff(struct pci_dev *dev) 2191 { 2192 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_OBFF_WAKE_EN); 2193 } 2194 EXPORT_SYMBOL(pci_disable_obff); 2195 2196 /** 2197 * pci_ltr_supported - check whether a device supports LTR 2198 * @dev: PCI device 2199 * 2200 * RETURNS: 2201 * True if @dev supports latency tolerance reporting, false otherwise. 
2202 */ 2203 static bool pci_ltr_supported(struct pci_dev *dev) 2204 { 2205 u32 cap; 2206 2207 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap); 2208 2209 return cap & PCI_EXP_DEVCAP2_LTR; 2210 } 2211 2212 /** 2213 * pci_enable_ltr - enable latency tolerance reporting 2214 * @dev: PCI device 2215 * 2216 * Enable LTR on @dev if possible, which means enabling it first on 2217 * upstream ports. 2218 * 2219 * RETURNS: 2220 * Zero on success, errno on failure. 2221 */ 2222 int pci_enable_ltr(struct pci_dev *dev) 2223 { 2224 int ret; 2225 2226 /* Only primary function can enable/disable LTR */ 2227 if (PCI_FUNC(dev->devfn) != 0) 2228 return -EINVAL; 2229 2230 if (!pci_ltr_supported(dev)) 2231 return -ENOTSUPP; 2232 2233 /* Enable upstream ports first */ 2234 if (dev->bus->self) { 2235 ret = pci_enable_ltr(dev->bus->self); 2236 if (ret) 2237 return ret; 2238 } 2239 2240 return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN); 2241 } 2242 EXPORT_SYMBOL(pci_enable_ltr); 2243 2244 /** 2245 * pci_disable_ltr - disable latency tolerance reporting 2246 * @dev: PCI device 2247 */ 2248 void pci_disable_ltr(struct pci_dev *dev) 2249 { 2250 /* Only primary function can enable/disable LTR */ 2251 if (PCI_FUNC(dev->devfn) != 0) 2252 return; 2253 2254 if (!pci_ltr_supported(dev)) 2255 return; 2256 2257 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN); 2258 } 2259 EXPORT_SYMBOL(pci_disable_ltr); 2260 2261 static int __pci_ltr_scale(int *val) 2262 { 2263 int scale = 0; 2264 2265 while (*val > 1023) { 2266 *val = (*val + 31) / 32; 2267 scale++; 2268 } 2269 return scale; 2270 } 2271 2272 /** 2273 * pci_set_ltr - set LTR latency values 2274 * @dev: PCI device 2275 * @snoop_lat_ns: snoop latency in nanoseconds 2276 * @nosnoop_lat_ns: nosnoop latency in nanoseconds 2277 * 2278 * Figure out the scale and set the LTR values accordingly. 
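 *
 * Example (illustrative sketch): a device that can tolerate roughly 3 us of
 * snoop latency and 30 us of no-snoop latency could be programmed with
 *
 *	if (!pci_enable_ltr(pdev))
 *		pci_set_ltr(pdev, 3000, 30000);
 *
 * The latencies are passed as plain nanosecond values; the scale encoding is
 * worked out here via __pci_ltr_scale(). "pdev" is a hypothetical device
 * pointer used only for the example.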
2279 */
2280 int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2281 {
2282 	int pos, ret, snoop_scale, nosnoop_scale;
2283 	u16 val;
2284 
2285 	if (!pci_ltr_supported(dev))
2286 		return -ENOTSUPP;
2287 
2288 	snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2289 	nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2290 
2291 	if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2292 	    nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2293 		return -EINVAL;
2294 
2295 	if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2296 	    (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2297 		return -EINVAL;
2298 
2299 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2300 	if (!pos)
2301 		return -ENOTSUPP;
2302 
2303 	val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2304 	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2305 	if (ret)
2306 		return -EIO;
2307 
2308 	val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2309 	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2310 	if (ret)
2311 		return -EIO;
2312 
2313 	return 0;
2314 }
2315 EXPORT_SYMBOL(pci_set_ltr);
2316 
2317 static int pci_acs_enable;
2318 
2319 /**
2320  * pci_request_acs - ask for ACS to be enabled if supported
2321  */
2322 void pci_request_acs(void)
2323 {
2324 	pci_acs_enable = 1;
2325 }
2326 
2327 /**
2328  * pci_enable_acs - enable ACS if hardware supports it
2329  * @dev: the PCI device
2330  */
2331 void pci_enable_acs(struct pci_dev *dev)
2332 {
2333 	int pos;
2334 	u16 cap;
2335 	u16 ctrl;
2336 
2337 	if (!pci_acs_enable)
2338 		return;
2339 
2340 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2341 	if (!pos)
2342 		return;
2343 
2344 	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2345 	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2346 
2347 	/* Source Validation */
2348 	ctrl |= (cap & PCI_ACS_SV);
2349 
2350 	/* P2P Request Redirect */
2351 	ctrl |= (cap & PCI_ACS_RR);
2352 
2353 	/* P2P Completion Redirect */
2354 	ctrl |= (cap & PCI_ACS_CR);
2355 
2356 	/* Upstream Forwarding */
2357 	ctrl |= (cap & PCI_ACS_UF);
2358 
2359 	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2360 }
2361 
2362 /**
2363  * pci_acs_enabled - test ACS against required flags for a given device
2364  * @pdev: device to test
2365  * @acs_flags: required PCI ACS flags
2366  *
2367  * Return true if the device supports the provided flags. Automatically
2368  * filters out flags that are not implemented on multifunction devices.
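 *
 * Example (illustrative sketch only, not taken from this file): IOMMU
 * grouping code that wants full peer-to-peer isolation below a switch
 * might test a device with
 *
 *	isolated = pci_acs_enabled(pdev, PCI_ACS_SV | PCI_ACS_RR |
 *				   PCI_ACS_CR | PCI_ACS_UF);
 *
 * where "isolated" and "pdev" are hypothetical locals used only for this
 * sketch.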
2369 */
2370 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2371 {
2372 	int pos, ret;
2373 	u16 ctrl;
2374 
2375 	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2376 	if (ret >= 0)
2377 		return ret > 0;
2378 
2379 	if (!pci_is_pcie(pdev))
2380 		return false;
2381 
2382 	/* Filter out flags not applicable to multifunction */
2383 	if (pdev->multifunction)
2384 		acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
2385 			      PCI_ACS_EC | PCI_ACS_DT);
2386 
2387 	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM ||
2388 	    pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
2389 	    pdev->multifunction) {
2390 		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2391 		if (!pos)
2392 			return false;
2393 
2394 		pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2395 		if ((ctrl & acs_flags) != acs_flags)
2396 			return false;
2397 	}
2398 
2399 	return true;
2400 }
2401 
2402 /**
2403  * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
2404  * @start: starting downstream device
2405  * @end: ending upstream device or NULL to search to the root bus
2406  * @acs_flags: required flags
2407  *
2408  * Walk up a device tree from @start to @end testing PCI ACS support. If
2409  * any step along the way does not support the required flags, return false.
2410  */
2411 bool pci_acs_path_enabled(struct pci_dev *start,
2412 			  struct pci_dev *end, u16 acs_flags)
2413 {
2414 	struct pci_dev *pdev, *parent = start;
2415 
2416 	do {
2417 		pdev = parent;
2418 
2419 		if (!pci_acs_enabled(pdev, acs_flags))
2420 			return false;
2421 
2422 		if (pci_is_root_bus(pdev->bus))
2423 			return (end == NULL);
2424 
2425 		parent = pdev->bus->self;
2426 	} while (pdev != end);
2427 
2428 	return true;
2429 }
2430 
2431 /**
2432  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2433  * @dev: the PCI device
2434  * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2435  *
2436  * Perform INTx swizzling for a device behind one level of bridge. This is
2437  * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2438  * behind bridges on add-in cards. For devices with ARI enabled, the slot
2439  * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2440  * the PCI Express Base Specification, Revision 2.1).
2441  */
2442 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
2443 {
2444 	int slot;
2445 
2446 	if (pci_ari_enabled(dev->bus))
2447 		slot = 0;
2448 	else
2449 		slot = PCI_SLOT(dev->devfn);
2450 
2451 	return (((pin - 1) + slot) % 4) + 1;
2452 }
2453 
2454 int
2455 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2456 {
2457 	u8 pin;
2458 
2459 	pin = dev->pin;
2460 	if (!pin)
2461 		return -1;
2462 
2463 	while (!pci_is_root_bus(dev->bus)) {
2464 		pin = pci_swizzle_interrupt_pin(dev, pin);
2465 		dev = dev->bus->self;
2466 	}
2467 	*bridge = dev;
2468 	return pin;
2469 }
2470 
2471 /**
2472  * pci_common_swizzle - swizzle INTx all the way to root bridge
2473  * @dev: the PCI device
2474  * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2475  *
2476  * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
2477  * bridges all the way up to a PCI root bus.
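 *
 * Worked example (illustrative): a device in slot 3 behind a single bridge
 * that reports INTB (pin 2) swizzles, per pci_swizzle_interrupt_pin(), to
 * (((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA at the bridge; the function then
 * returns PCI_SLOT() of the device finally reached on the root bus.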
2478 */ 2479 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp) 2480 { 2481 u8 pin = *pinp; 2482 2483 while (!pci_is_root_bus(dev->bus)) { 2484 pin = pci_swizzle_interrupt_pin(dev, pin); 2485 dev = dev->bus->self; 2486 } 2487 *pinp = pin; 2488 return PCI_SLOT(dev->devfn); 2489 } 2490 2491 /** 2492 * pci_release_region - Release a PCI bar 2493 * @pdev: PCI device whose resources were previously reserved by pci_request_region 2494 * @bar: BAR to release 2495 * 2496 * Releases the PCI I/O and memory resources previously reserved by a 2497 * successful call to pci_request_region. Call this function only 2498 * after all use of the PCI regions has ceased. 2499 */ 2500 void pci_release_region(struct pci_dev *pdev, int bar) 2501 { 2502 struct pci_devres *dr; 2503 2504 if (pci_resource_len(pdev, bar) == 0) 2505 return; 2506 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) 2507 release_region(pci_resource_start(pdev, bar), 2508 pci_resource_len(pdev, bar)); 2509 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) 2510 release_mem_region(pci_resource_start(pdev, bar), 2511 pci_resource_len(pdev, bar)); 2512 2513 dr = find_pci_dr(pdev); 2514 if (dr) 2515 dr->region_mask &= ~(1 << bar); 2516 } 2517 2518 /** 2519 * __pci_request_region - Reserved PCI I/O and memory resource 2520 * @pdev: PCI device whose resources are to be reserved 2521 * @bar: BAR to be reserved 2522 * @res_name: Name to be associated with resource. 2523 * @exclusive: whether the region access is exclusive or not 2524 * 2525 * Mark the PCI region associated with PCI device @pdev BR @bar as 2526 * being reserved by owner @res_name. Do not access any 2527 * address inside the PCI regions unless this call returns 2528 * successfully. 2529 * 2530 * If @exclusive is set, then the region is marked so that userspace 2531 * is explicitly not allowed to map the resource via /dev/mem or 2532 * sysfs MMIO access. 2533 * 2534 * Returns 0 on success, or %EBUSY on error. A warning 2535 * message is also printed on failure. 2536 */ 2537 static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name, 2538 int exclusive) 2539 { 2540 struct pci_devres *dr; 2541 2542 if (pci_resource_len(pdev, bar) == 0) 2543 return 0; 2544 2545 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) { 2546 if (!request_region(pci_resource_start(pdev, bar), 2547 pci_resource_len(pdev, bar), res_name)) 2548 goto err_out; 2549 } 2550 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { 2551 if (!__request_mem_region(pci_resource_start(pdev, bar), 2552 pci_resource_len(pdev, bar), res_name, 2553 exclusive)) 2554 goto err_out; 2555 } 2556 2557 dr = find_pci_dr(pdev); 2558 if (dr) 2559 dr->region_mask |= 1 << bar; 2560 2561 return 0; 2562 2563 err_out: 2564 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar, 2565 &pdev->resource[bar]); 2566 return -EBUSY; 2567 } 2568 2569 /** 2570 * pci_request_region - Reserve PCI I/O and memory resource 2571 * @pdev: PCI device whose resources are to be reserved 2572 * @bar: BAR to be reserved 2573 * @res_name: Name to be associated with resource 2574 * 2575 * Mark the PCI region associated with PCI device @pdev BAR @bar as 2576 * being reserved by owner @res_name. Do not access any 2577 * address inside the PCI regions unless this call returns 2578 * successfully. 2579 * 2580 * Returns 0 on success, or %EBUSY on error. A warning 2581 * message is also printed on failure. 
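 *
 * Example (an illustrative probe-time pattern; the names are invented for
 * the sketch):
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	err = pci_request_region(pdev, 0, "foo_driver");
 *	if (err)
 *		return err;
 *	regs = pci_ioremap_bar(pdev, 0);
 *
 * Each successful request should be paired with pci_release_region() on the
 * error and removal paths.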
2582 */
2583 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2584 {
2585 	return __pci_request_region(pdev, bar, res_name, 0);
2586 }
2587 
2588 /**
2589  * pci_request_region_exclusive - Reserve PCI I/O and memory resource
2590  * @pdev: PCI device whose resources are to be reserved
2591  * @bar: BAR to be reserved
2592  * @res_name: Name to be associated with resource.
2593  *
2594  * Mark the PCI region associated with PCI device @pdev BAR @bar as
2595  * being reserved by owner @res_name. Do not access any
2596  * address inside the PCI regions unless this call returns
2597  * successfully.
2598  *
2599  * Returns 0 on success, or %EBUSY on error. A warning
2600  * message is also printed on failure.
2601  *
2602  * The key difference with _exclusive is that userspace is
2603  * explicitly not allowed to map the resource via /dev/mem or
2604  * sysfs.
2605  */
2606 int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2607 {
2608 	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2609 }
2610 /**
2611  * pci_release_selected_regions - Release selected PCI I/O and memory resources
2612  * @pdev: PCI device whose resources were previously reserved
2613  * @bars: Bitmask of BARs to be released
2614  *
2615  * Release selected PCI I/O and memory resources previously reserved.
2616  * Call this function only after all use of the PCI regions has ceased.
2617  */
2618 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2619 {
2620 	int i;
2621 
2622 	for (i = 0; i < 6; i++)
2623 		if (bars & (1 << i))
2624 			pci_release_region(pdev, i);
2625 }
2626 
2627 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2628 					  const char *res_name, int excl)
2629 {
2630 	int i;
2631 
2632 	for (i = 0; i < 6; i++)
2633 		if (bars & (1 << i))
2634 			if (__pci_request_region(pdev, i, res_name, excl))
2635 				goto err_out;
2636 	return 0;
2637 
2638 err_out:
2639 	while (--i >= 0)
2640 		if (bars & (1 << i))
2641 			pci_release_region(pdev, i);
2642 
2643 	return -EBUSY;
2644 }
2645 
2646 
2647 /**
2648  * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2649  * @pdev: PCI device whose resources are to be reserved
2650  * @bars: Bitmask of BARs to be requested
2651  * @res_name: Name to be associated with resource
2652  */
2653 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2654 				 const char *res_name)
2655 {
2656 	return __pci_request_selected_regions(pdev, bars, res_name, 0);
2657 }
2658 
2659 int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2660 					   int bars, const char *res_name)
2661 {
2662 	return __pci_request_selected_regions(pdev, bars, res_name,
2663 					      IORESOURCE_EXCLUSIVE);
2664 }
2665 
2666 /**
2667  * pci_release_regions - Release reserved PCI I/O and memory resources
2668  * @pdev: PCI device whose resources were previously reserved by pci_request_regions
2669  *
2670  * Releases all PCI I/O and memory resources previously reserved by a
2671  * successful call to pci_request_regions. Call this function only
2672  * after all use of the PCI regions has ceased.
2673  */
2674 
2675 void pci_release_regions(struct pci_dev *pdev)
2676 {
2677 	pci_release_selected_regions(pdev, (1 << 6) - 1);
2678 }
2679 
2680 /**
2681  * pci_request_regions - Reserve PCI I/O and memory resources
2682  * @pdev: PCI device whose resources are to be reserved
2683  * @res_name: Name to be associated with resource.
2684  *
2685  * Mark all PCI regions associated with PCI device @pdev as
2686  * being reserved by owner @res_name.
Do not access any 2687 * address inside the PCI regions unless this call returns 2688 * successfully. 2689 * 2690 * Returns 0 on success, or %EBUSY on error. A warning 2691 * message is also printed on failure. 2692 */ 2693 int pci_request_regions(struct pci_dev *pdev, const char *res_name) 2694 { 2695 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name); 2696 } 2697 2698 /** 2699 * pci_request_regions_exclusive - Reserved PCI I/O and memory resources 2700 * @pdev: PCI device whose resources are to be reserved 2701 * @res_name: Name to be associated with resource. 2702 * 2703 * Mark all PCI regions associated with PCI device @pdev as 2704 * being reserved by owner @res_name. Do not access any 2705 * address inside the PCI regions unless this call returns 2706 * successfully. 2707 * 2708 * pci_request_regions_exclusive() will mark the region so that 2709 * /dev/mem and the sysfs MMIO access will not be allowed. 2710 * 2711 * Returns 0 on success, or %EBUSY on error. A warning 2712 * message is also printed on failure. 2713 */ 2714 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name) 2715 { 2716 return pci_request_selected_regions_exclusive(pdev, 2717 ((1 << 6) - 1), res_name); 2718 } 2719 2720 static void __pci_set_master(struct pci_dev *dev, bool enable) 2721 { 2722 u16 old_cmd, cmd; 2723 2724 pci_read_config_word(dev, PCI_COMMAND, &old_cmd); 2725 if (enable) 2726 cmd = old_cmd | PCI_COMMAND_MASTER; 2727 else 2728 cmd = old_cmd & ~PCI_COMMAND_MASTER; 2729 if (cmd != old_cmd) { 2730 dev_dbg(&dev->dev, "%s bus mastering\n", 2731 enable ? "enabling" : "disabling"); 2732 pci_write_config_word(dev, PCI_COMMAND, cmd); 2733 } 2734 dev->is_busmaster = enable; 2735 } 2736 2737 /** 2738 * pcibios_setup - process "pci=" kernel boot arguments 2739 * @str: string used to pass in "pci=" kernel boot arguments 2740 * 2741 * Process kernel boot arguments. This is the default implementation. 2742 * Architecture specific implementations can override this as necessary. 2743 */ 2744 char * __weak __init pcibios_setup(char *str) 2745 { 2746 return str; 2747 } 2748 2749 /** 2750 * pcibios_set_master - enable PCI bus-mastering for device dev 2751 * @dev: the PCI device to enable 2752 * 2753 * Enables PCI bus-mastering for the device. This is the default 2754 * implementation. Architecture specific implementations can override 2755 * this if necessary. 2756 */ 2757 void __weak pcibios_set_master(struct pci_dev *dev) 2758 { 2759 u8 lat; 2760 2761 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */ 2762 if (pci_is_pcie(dev)) 2763 return; 2764 2765 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); 2766 if (lat < 16) 2767 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency; 2768 else if (lat > pcibios_max_latency) 2769 lat = pcibios_max_latency; 2770 else 2771 return; 2772 dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat); 2773 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); 2774 } 2775 2776 /** 2777 * pci_set_master - enables bus-mastering for device dev 2778 * @dev: the PCI device to enable 2779 * 2780 * Enables bus-mastering on the device and calls pcibios_set_master() 2781 * to do the needed arch specific settings. 
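 *
 * Example (illustrative): a driver that performs DMA typically enables the
 * device and then turns on bus mastering early in probe, e.g.
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);
 *
 * "pdev" and "err" are hypothetical locals used only for this sketch.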
2782 */ 2783 void pci_set_master(struct pci_dev *dev) 2784 { 2785 __pci_set_master(dev, true); 2786 pcibios_set_master(dev); 2787 } 2788 2789 /** 2790 * pci_clear_master - disables bus-mastering for device dev 2791 * @dev: the PCI device to disable 2792 */ 2793 void pci_clear_master(struct pci_dev *dev) 2794 { 2795 __pci_set_master(dev, false); 2796 } 2797 2798 /** 2799 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed 2800 * @dev: the PCI device for which MWI is to be enabled 2801 * 2802 * Helper function for pci_set_mwi. 2803 * Originally copied from drivers/net/acenic.c. 2804 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. 2805 * 2806 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 2807 */ 2808 int pci_set_cacheline_size(struct pci_dev *dev) 2809 { 2810 u8 cacheline_size; 2811 2812 if (!pci_cache_line_size) 2813 return -EINVAL; 2814 2815 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be 2816 equal to or multiple of the right value. */ 2817 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size); 2818 if (cacheline_size >= pci_cache_line_size && 2819 (cacheline_size % pci_cache_line_size) == 0) 2820 return 0; 2821 2822 /* Write the correct value. */ 2823 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size); 2824 /* Read it back. */ 2825 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size); 2826 if (cacheline_size == pci_cache_line_size) 2827 return 0; 2828 2829 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not " 2830 "supported\n", pci_cache_line_size << 2); 2831 2832 return -EINVAL; 2833 } 2834 EXPORT_SYMBOL_GPL(pci_set_cacheline_size); 2835 2836 #ifdef PCI_DISABLE_MWI 2837 int pci_set_mwi(struct pci_dev *dev) 2838 { 2839 return 0; 2840 } 2841 2842 int pci_try_set_mwi(struct pci_dev *dev) 2843 { 2844 return 0; 2845 } 2846 2847 void pci_clear_mwi(struct pci_dev *dev) 2848 { 2849 } 2850 2851 #else 2852 2853 /** 2854 * pci_set_mwi - enables memory-write-invalidate PCI transaction 2855 * @dev: the PCI device for which MWI is enabled 2856 * 2857 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND. 2858 * 2859 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 2860 */ 2861 int 2862 pci_set_mwi(struct pci_dev *dev) 2863 { 2864 int rc; 2865 u16 cmd; 2866 2867 rc = pci_set_cacheline_size(dev); 2868 if (rc) 2869 return rc; 2870 2871 pci_read_config_word(dev, PCI_COMMAND, &cmd); 2872 if (! (cmd & PCI_COMMAND_INVALIDATE)) { 2873 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n"); 2874 cmd |= PCI_COMMAND_INVALIDATE; 2875 pci_write_config_word(dev, PCI_COMMAND, cmd); 2876 } 2877 2878 return 0; 2879 } 2880 2881 /** 2882 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction 2883 * @dev: the PCI device for which MWI is enabled 2884 * 2885 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND. 2886 * Callers are not required to check the return value. 2887 * 2888 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 
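 *
 * Example (illustrative): because the result need not be checked, a probe
 * routine that treats MWI purely as an optional optimization can simply call
 *
 *	pci_try_set_mwi(pdev);
 *
 * and continue regardless of the outcome.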
2889 */ 2890 int pci_try_set_mwi(struct pci_dev *dev) 2891 { 2892 int rc = pci_set_mwi(dev); 2893 return rc; 2894 } 2895 2896 /** 2897 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev 2898 * @dev: the PCI device to disable 2899 * 2900 * Disables PCI Memory-Write-Invalidate transaction on the device 2901 */ 2902 void 2903 pci_clear_mwi(struct pci_dev *dev) 2904 { 2905 u16 cmd; 2906 2907 pci_read_config_word(dev, PCI_COMMAND, &cmd); 2908 if (cmd & PCI_COMMAND_INVALIDATE) { 2909 cmd &= ~PCI_COMMAND_INVALIDATE; 2910 pci_write_config_word(dev, PCI_COMMAND, cmd); 2911 } 2912 } 2913 #endif /* ! PCI_DISABLE_MWI */ 2914 2915 /** 2916 * pci_intx - enables/disables PCI INTx for device dev 2917 * @pdev: the PCI device to operate on 2918 * @enable: boolean: whether to enable or disable PCI INTx 2919 * 2920 * Enables/disables PCI INTx for device dev 2921 */ 2922 void 2923 pci_intx(struct pci_dev *pdev, int enable) 2924 { 2925 u16 pci_command, new; 2926 2927 pci_read_config_word(pdev, PCI_COMMAND, &pci_command); 2928 2929 if (enable) { 2930 new = pci_command & ~PCI_COMMAND_INTX_DISABLE; 2931 } else { 2932 new = pci_command | PCI_COMMAND_INTX_DISABLE; 2933 } 2934 2935 if (new != pci_command) { 2936 struct pci_devres *dr; 2937 2938 pci_write_config_word(pdev, PCI_COMMAND, new); 2939 2940 dr = find_pci_dr(pdev); 2941 if (dr && !dr->restore_intx) { 2942 dr->restore_intx = 1; 2943 dr->orig_intx = !enable; 2944 } 2945 } 2946 } 2947 2948 /** 2949 * pci_intx_mask_supported - probe for INTx masking support 2950 * @dev: the PCI device to operate on 2951 * 2952 * Check if the device dev support INTx masking via the config space 2953 * command word. 2954 */ 2955 bool pci_intx_mask_supported(struct pci_dev *dev) 2956 { 2957 bool mask_supported = false; 2958 u16 orig, new; 2959 2960 if (dev->broken_intx_masking) 2961 return false; 2962 2963 pci_cfg_access_lock(dev); 2964 2965 pci_read_config_word(dev, PCI_COMMAND, &orig); 2966 pci_write_config_word(dev, PCI_COMMAND, 2967 orig ^ PCI_COMMAND_INTX_DISABLE); 2968 pci_read_config_word(dev, PCI_COMMAND, &new); 2969 2970 /* 2971 * There's no way to protect against hardware bugs or detect them 2972 * reliably, but as long as we know what the value should be, let's 2973 * go ahead and check it. 2974 */ 2975 if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) { 2976 dev_err(&dev->dev, "Command register changed from " 2977 "0x%x to 0x%x: driver or hardware bug?\n", orig, new); 2978 } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) { 2979 mask_supported = true; 2980 pci_write_config_word(dev, PCI_COMMAND, orig); 2981 } 2982 2983 pci_cfg_access_unlock(dev); 2984 return mask_supported; 2985 } 2986 EXPORT_SYMBOL_GPL(pci_intx_mask_supported); 2987 2988 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask) 2989 { 2990 struct pci_bus *bus = dev->bus; 2991 bool mask_updated = true; 2992 u32 cmd_status_dword; 2993 u16 origcmd, newcmd; 2994 unsigned long flags; 2995 bool irq_pending; 2996 2997 /* 2998 * We do a single dword read to retrieve both command and status. 2999 * Document assumptions that make this possible. 
3000 	 */
3001 	BUILD_BUG_ON(PCI_COMMAND % 4);
3002 	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
3003 
3004 	raw_spin_lock_irqsave(&pci_lock, flags);
3005 
3006 	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
3007 
3008 	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
3009 
3010 	/*
3011 	 * Check interrupt status register to see whether our device
3012 	 * triggered the interrupt (when masking) or the next IRQ is
3013 	 * already pending (when unmasking).
3014 	 */
3015 	if (mask != irq_pending) {
3016 		mask_updated = false;
3017 		goto done;
3018 	}
3019 
3020 	origcmd = cmd_status_dword;
3021 	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
3022 	if (mask)
3023 		newcmd |= PCI_COMMAND_INTX_DISABLE;
3024 	if (newcmd != origcmd)
3025 		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
3026 
3027 done:
3028 	raw_spin_unlock_irqrestore(&pci_lock, flags);
3029 
3030 	return mask_updated;
3031 }
3032 
3033 /**
3034  * pci_check_and_mask_intx - mask INTx on pending interrupt
3035  * @dev: the PCI device to operate on
3036  *
3037  * Check if the device @dev has its INTx line asserted, mask it and
3038  * return true in that case. False is returned if no interrupt was
3039  * pending.
3040  */
3041 bool pci_check_and_mask_intx(struct pci_dev *dev)
3042 {
3043 	return pci_check_and_set_intx_mask(dev, true);
3044 }
3045 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3046 
3047 /**
3048  * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
3049  * @dev: the PCI device to operate on
3050  *
3051  * Check if the device @dev has its INTx line asserted, unmask it if not
3052  * and return true. False is returned and the mask remains active if
3053  * there was still an interrupt pending.
3054  */
3055 bool pci_check_and_unmask_intx(struct pci_dev *dev)
3056 {
3057 	return pci_check_and_set_intx_mask(dev, false);
3058 }
3059 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
3060 
3061 /**
3062  * pci_msi_off - disables any MSI or MSI-X capabilities
3063  * @dev: the PCI device to operate on
3064  *
3065  * If you want to use MSI, see pci_enable_msi() and friends.
3066  * This is a lower-level primitive that allows us to disable
3067  * MSI operation at the device level.
3068 */ 3069 void pci_msi_off(struct pci_dev *dev) 3070 { 3071 int pos; 3072 u16 control; 3073 3074 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 3075 if (pos) { 3076 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); 3077 control &= ~PCI_MSI_FLAGS_ENABLE; 3078 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); 3079 } 3080 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 3081 if (pos) { 3082 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); 3083 control &= ~PCI_MSIX_FLAGS_ENABLE; 3084 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); 3085 } 3086 } 3087 EXPORT_SYMBOL_GPL(pci_msi_off); 3088 3089 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size) 3090 { 3091 return dma_set_max_seg_size(&dev->dev, size); 3092 } 3093 EXPORT_SYMBOL(pci_set_dma_max_seg_size); 3094 3095 int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask) 3096 { 3097 return dma_set_seg_boundary(&dev->dev, mask); 3098 } 3099 EXPORT_SYMBOL(pci_set_dma_seg_boundary); 3100 3101 static int pcie_flr(struct pci_dev *dev, int probe) 3102 { 3103 int i; 3104 u32 cap; 3105 u16 status; 3106 3107 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap); 3108 if (!(cap & PCI_EXP_DEVCAP_FLR)) 3109 return -ENOTTY; 3110 3111 if (probe) 3112 return 0; 3113 3114 /* Wait for Transaction Pending bit clean */ 3115 for (i = 0; i < 4; i++) { 3116 if (i) 3117 msleep((1 << (i - 1)) * 100); 3118 3119 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); 3120 if (!(status & PCI_EXP_DEVSTA_TRPND)) 3121 goto clear; 3122 } 3123 3124 dev_err(&dev->dev, "transaction is not cleared; " 3125 "proceeding with reset anyway\n"); 3126 3127 clear: 3128 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); 3129 3130 msleep(100); 3131 3132 return 0; 3133 } 3134 3135 static int pci_af_flr(struct pci_dev *dev, int probe) 3136 { 3137 int i; 3138 int pos; 3139 u8 cap; 3140 u8 status; 3141 3142 pos = pci_find_capability(dev, PCI_CAP_ID_AF); 3143 if (!pos) 3144 return -ENOTTY; 3145 3146 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap); 3147 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR)) 3148 return -ENOTTY; 3149 3150 if (probe) 3151 return 0; 3152 3153 /* Wait for Transaction Pending bit clean */ 3154 for (i = 0; i < 4; i++) { 3155 if (i) 3156 msleep((1 << (i - 1)) * 100); 3157 3158 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status); 3159 if (!(status & PCI_AF_STATUS_TP)) 3160 goto clear; 3161 } 3162 3163 dev_err(&dev->dev, "transaction is not cleared; " 3164 "proceeding with reset anyway\n"); 3165 3166 clear: 3167 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR); 3168 msleep(100); 3169 3170 return 0; 3171 } 3172 3173 /** 3174 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0. 3175 * @dev: Device to reset. 3176 * @probe: If set, only check if the device can be reset this way. 3177 * 3178 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is 3179 * unset, it will be reinitialized internally when going from PCI_D3hot to 3180 * PCI_D0. If that's the case and the device is not in a low-power state 3181 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset. 3182 * 3183 * NOTE: This causes the caller to sleep for twice the device power transition 3184 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms 3185 * by devault (i.e. unless the @dev's d3_delay field has a different value). 3186 * Moreover, only devices in D0 can be reset by this function. 
3187 */ 3188 static int pci_pm_reset(struct pci_dev *dev, int probe) 3189 { 3190 u16 csr; 3191 3192 if (!dev->pm_cap) 3193 return -ENOTTY; 3194 3195 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr); 3196 if (csr & PCI_PM_CTRL_NO_SOFT_RESET) 3197 return -ENOTTY; 3198 3199 if (probe) 3200 return 0; 3201 3202 if (dev->current_state != PCI_D0) 3203 return -EINVAL; 3204 3205 csr &= ~PCI_PM_CTRL_STATE_MASK; 3206 csr |= PCI_D3hot; 3207 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); 3208 pci_dev_d3_sleep(dev); 3209 3210 csr &= ~PCI_PM_CTRL_STATE_MASK; 3211 csr |= PCI_D0; 3212 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); 3213 pci_dev_d3_sleep(dev); 3214 3215 return 0; 3216 } 3217 3218 static int pci_parent_bus_reset(struct pci_dev *dev, int probe) 3219 { 3220 u16 ctrl; 3221 struct pci_dev *pdev; 3222 3223 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self) 3224 return -ENOTTY; 3225 3226 list_for_each_entry(pdev, &dev->bus->devices, bus_list) 3227 if (pdev != dev) 3228 return -ENOTTY; 3229 3230 if (probe) 3231 return 0; 3232 3233 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl); 3234 ctrl |= PCI_BRIDGE_CTL_BUS_RESET; 3235 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl); 3236 msleep(100); 3237 3238 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; 3239 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl); 3240 msleep(100); 3241 3242 return 0; 3243 } 3244 3245 static int __pci_dev_reset(struct pci_dev *dev, int probe) 3246 { 3247 int rc; 3248 3249 might_sleep(); 3250 3251 rc = pci_dev_specific_reset(dev, probe); 3252 if (rc != -ENOTTY) 3253 goto done; 3254 3255 rc = pcie_flr(dev, probe); 3256 if (rc != -ENOTTY) 3257 goto done; 3258 3259 rc = pci_af_flr(dev, probe); 3260 if (rc != -ENOTTY) 3261 goto done; 3262 3263 rc = pci_pm_reset(dev, probe); 3264 if (rc != -ENOTTY) 3265 goto done; 3266 3267 rc = pci_parent_bus_reset(dev, probe); 3268 done: 3269 return rc; 3270 } 3271 3272 static int pci_dev_reset(struct pci_dev *dev, int probe) 3273 { 3274 int rc; 3275 3276 if (!probe) { 3277 pci_cfg_access_lock(dev); 3278 /* block PM suspend, driver probe, etc. */ 3279 device_lock(&dev->dev); 3280 } 3281 3282 rc = __pci_dev_reset(dev, probe); 3283 3284 if (!probe) { 3285 device_unlock(&dev->dev); 3286 pci_cfg_access_unlock(dev); 3287 } 3288 return rc; 3289 } 3290 /** 3291 * __pci_reset_function - reset a PCI device function 3292 * @dev: PCI device to reset 3293 * 3294 * Some devices allow an individual function to be reset without affecting 3295 * other functions in the same device. The PCI device must be responsive 3296 * to PCI config space in order to use this function. 3297 * 3298 * The device function is presumed to be unused when this function is called. 3299 * Resetting the device will make the contents of PCI configuration space 3300 * random, so any caller of this must be prepared to reinitialise the 3301 * device including MSI, bus mastering, BARs, decoding IO and memory spaces, 3302 * etc. 3303 * 3304 * Returns 0 if the device function was successfully reset or negative if the 3305 * device doesn't support resetting a single function. 3306 */ 3307 int __pci_reset_function(struct pci_dev *dev) 3308 { 3309 return pci_dev_reset(dev, 0); 3310 } 3311 EXPORT_SYMBOL_GPL(__pci_reset_function); 3312 3313 /** 3314 * __pci_reset_function_locked - reset a PCI device function while holding 3315 * the @dev mutex lock. 
3316 * @dev: PCI device to reset 3317 * 3318 * Some devices allow an individual function to be reset without affecting 3319 * other functions in the same device. The PCI device must be responsive 3320 * to PCI config space in order to use this function. 3321 * 3322 * The device function is presumed to be unused and the caller is holding 3323 * the device mutex lock when this function is called. 3324 * Resetting the device will make the contents of PCI configuration space 3325 * random, so any caller of this must be prepared to reinitialise the 3326 * device including MSI, bus mastering, BARs, decoding IO and memory spaces, 3327 * etc. 3328 * 3329 * Returns 0 if the device function was successfully reset or negative if the 3330 * device doesn't support resetting a single function. 3331 */ 3332 int __pci_reset_function_locked(struct pci_dev *dev) 3333 { 3334 return __pci_dev_reset(dev, 0); 3335 } 3336 EXPORT_SYMBOL_GPL(__pci_reset_function_locked); 3337 3338 /** 3339 * pci_probe_reset_function - check whether the device can be safely reset 3340 * @dev: PCI device to reset 3341 * 3342 * Some devices allow an individual function to be reset without affecting 3343 * other functions in the same device. The PCI device must be responsive 3344 * to PCI config space in order to use this function. 3345 * 3346 * Returns 0 if the device function can be reset or negative if the 3347 * device doesn't support resetting a single function. 3348 */ 3349 int pci_probe_reset_function(struct pci_dev *dev) 3350 { 3351 return pci_dev_reset(dev, 1); 3352 } 3353 3354 /** 3355 * pci_reset_function - quiesce and reset a PCI device function 3356 * @dev: PCI device to reset 3357 * 3358 * Some devices allow an individual function to be reset without affecting 3359 * other functions in the same device. The PCI device must be responsive 3360 * to PCI config space in order to use this function. 3361 * 3362 * This function does not just reset the PCI portion of a device, but 3363 * clears all the state associated with the device. This function differs 3364 * from __pci_reset_function in that it saves and restores device state 3365 * over the reset. 3366 * 3367 * Returns 0 if the device function was successfully reset or negative if the 3368 * device doesn't support resetting a single function. 3369 */ 3370 int pci_reset_function(struct pci_dev *dev) 3371 { 3372 int rc; 3373 3374 rc = pci_dev_reset(dev, 1); 3375 if (rc) 3376 return rc; 3377 3378 pci_save_state(dev); 3379 3380 /* 3381 * both INTx and MSI are disabled after the Interrupt Disable bit 3382 * is set and the Bus Master bit is cleared. 3383 */ 3384 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); 3385 3386 rc = pci_dev_reset(dev, 0); 3387 3388 pci_restore_state(dev); 3389 3390 return rc; 3391 } 3392 EXPORT_SYMBOL_GPL(pci_reset_function); 3393 3394 /** 3395 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count 3396 * @dev: PCI device to query 3397 * 3398 * Returns mmrbc: maximum designed memory read count in bytes 3399 * or appropriate error value. 
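 *
 * Worked example (illustrative): an encoded maximum-read field of 2 in
 * PCI_X_STATUS corresponds to 512 << 2 = 2048 bytes.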
3400 */ 3401 int pcix_get_max_mmrbc(struct pci_dev *dev) 3402 { 3403 int cap; 3404 u32 stat; 3405 3406 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 3407 if (!cap) 3408 return -EINVAL; 3409 3410 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) 3411 return -EINVAL; 3412 3413 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21); 3414 } 3415 EXPORT_SYMBOL(pcix_get_max_mmrbc); 3416 3417 /** 3418 * pcix_get_mmrbc - get PCI-X maximum memory read byte count 3419 * @dev: PCI device to query 3420 * 3421 * Returns mmrbc: maximum memory read count in bytes 3422 * or appropriate error value. 3423 */ 3424 int pcix_get_mmrbc(struct pci_dev *dev) 3425 { 3426 int cap; 3427 u16 cmd; 3428 3429 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 3430 if (!cap) 3431 return -EINVAL; 3432 3433 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) 3434 return -EINVAL; 3435 3436 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2); 3437 } 3438 EXPORT_SYMBOL(pcix_get_mmrbc); 3439 3440 /** 3441 * pcix_set_mmrbc - set PCI-X maximum memory read byte count 3442 * @dev: PCI device to query 3443 * @mmrbc: maximum memory read count in bytes 3444 * valid values are 512, 1024, 2048, 4096 3445 * 3446 * If possible sets maximum memory read byte count, some bridges have erratas 3447 * that prevent this. 3448 */ 3449 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) 3450 { 3451 int cap; 3452 u32 stat, v, o; 3453 u16 cmd; 3454 3455 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc)) 3456 return -EINVAL; 3457 3458 v = ffs(mmrbc) - 10; 3459 3460 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 3461 if (!cap) 3462 return -EINVAL; 3463 3464 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) 3465 return -EINVAL; 3466 3467 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21) 3468 return -E2BIG; 3469 3470 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) 3471 return -EINVAL; 3472 3473 o = (cmd & PCI_X_CMD_MAX_READ) >> 2; 3474 if (o != v) { 3475 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC)) 3476 return -EIO; 3477 3478 cmd &= ~PCI_X_CMD_MAX_READ; 3479 cmd |= v << 2; 3480 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd)) 3481 return -EIO; 3482 } 3483 return 0; 3484 } 3485 EXPORT_SYMBOL(pcix_set_mmrbc); 3486 3487 /** 3488 * pcie_get_readrq - get PCI Express read request size 3489 * @dev: PCI device to query 3490 * 3491 * Returns maximum memory read request in bytes 3492 * or appropriate error value. 
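 *
 * Worked example (illustrative): an encoded READRQ field value of 2 in the
 * Device Control register corresponds to 128 << 2 = 512 bytes.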
3493 */ 3494 int pcie_get_readrq(struct pci_dev *dev) 3495 { 3496 u16 ctl; 3497 3498 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); 3499 3500 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); 3501 } 3502 EXPORT_SYMBOL(pcie_get_readrq); 3503 3504 /** 3505 * pcie_set_readrq - set PCI Express maximum memory read request 3506 * @dev: PCI device to query 3507 * @rq: maximum memory read count in bytes 3508 * valid values are 128, 256, 512, 1024, 2048, 4096 3509 * 3510 * If possible sets maximum memory read request in bytes 3511 */ 3512 int pcie_set_readrq(struct pci_dev *dev, int rq) 3513 { 3514 u16 v; 3515 3516 if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) 3517 return -EINVAL; 3518 3519 /* 3520 * If using the "performance" PCIe config, we clamp the 3521 * read rq size to the max packet size to prevent the 3522 * host bridge generating requests larger than we can 3523 * cope with 3524 */ 3525 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { 3526 int mps = pcie_get_mps(dev); 3527 3528 if (mps < 0) 3529 return mps; 3530 if (mps < rq) 3531 rq = mps; 3532 } 3533 3534 v = (ffs(rq) - 8) << 12; 3535 3536 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, 3537 PCI_EXP_DEVCTL_READRQ, v); 3538 } 3539 EXPORT_SYMBOL(pcie_set_readrq); 3540 3541 /** 3542 * pcie_get_mps - get PCI Express maximum payload size 3543 * @dev: PCI device to query 3544 * 3545 * Returns maximum payload size in bytes 3546 * or appropriate error value. 3547 */ 3548 int pcie_get_mps(struct pci_dev *dev) 3549 { 3550 u16 ctl; 3551 3552 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); 3553 3554 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); 3555 } 3556 3557 /** 3558 * pcie_set_mps - set PCI Express maximum payload size 3559 * @dev: PCI device to query 3560 * @mps: maximum payload size in bytes 3561 * valid values are 128, 256, 512, 1024, 2048, 4096 3562 * 3563 * If possible sets maximum payload size 3564 */ 3565 int pcie_set_mps(struct pci_dev *dev, int mps) 3566 { 3567 u16 v; 3568 3569 if (mps < 128 || mps > 4096 || !is_power_of_2(mps)) 3570 return -EINVAL; 3571 3572 v = ffs(mps) - 8; 3573 if (v > dev->pcie_mpss) 3574 return -EINVAL; 3575 v <<= 5; 3576 3577 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, 3578 PCI_EXP_DEVCTL_PAYLOAD, v); 3579 } 3580 3581 /** 3582 * pci_select_bars - Make BAR mask from the type of resource 3583 * @dev: the PCI device for which BAR mask is made 3584 * @flags: resource type mask to be selected 3585 * 3586 * This helper routine makes bar mask from the type of resource. 3587 */ 3588 int pci_select_bars(struct pci_dev *dev, unsigned long flags) 3589 { 3590 int i, bars = 0; 3591 for (i = 0; i < PCI_NUM_RESOURCES; i++) 3592 if (pci_resource_flags(dev, i) & flags) 3593 bars |= (1 << i); 3594 return bars; 3595 } 3596 3597 /** 3598 * pci_resource_bar - get position of the BAR associated with a resource 3599 * @dev: the PCI device 3600 * @resno: the resource number 3601 * @type: the BAR type to be filled in 3602 * 3603 * Returns BAR position in config space, or 0 if the BAR is invalid. 
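 *
 * Worked example (illustrative): for a standard BAR resource with @resno == 2
 * the function sets @type to pci_bar_unknown and returns
 * PCI_BASE_ADDRESS_0 + 4 * 2, i.e. config space offset 0x18.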
3604 */
3605 int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3606 {
3607 	int reg;
3608 
3609 	if (resno < PCI_ROM_RESOURCE) {
3610 		*type = pci_bar_unknown;
3611 		return PCI_BASE_ADDRESS_0 + 4 * resno;
3612 	} else if (resno == PCI_ROM_RESOURCE) {
3613 		*type = pci_bar_mem32;
3614 		return dev->rom_base_reg;
3615 	} else if (resno < PCI_BRIDGE_RESOURCES) {
3616 		/* device specific resource */
3617 		reg = pci_iov_resource_bar(dev, resno, type);
3618 		if (reg)
3619 			return reg;
3620 	}
3621 
3622 	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
3623 	return 0;
3624 }
3625 
3626 /* Some architectures require additional programming to enable VGA */
3627 static arch_set_vga_state_t arch_set_vga_state;
3628 
3629 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3630 {
3631 	arch_set_vga_state = func;	/* NULL disables */
3632 }
3633 
3634 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3635 				  unsigned int command_bits, u32 flags)
3636 {
3637 	if (arch_set_vga_state)
3638 		return arch_set_vga_state(dev, decode, command_bits,
3639 					  flags);
3640 	return 0;
3641 }
3642 
3643 /**
3644  * pci_set_vga_state - set VGA decode state on device and parents if requested
3645  * @dev: the PCI device
3646  * @decode: true = enable decoding, false = disable decoding
3647  * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3648  * @flags: traverse ancestors and change bridges
3649  * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
3650  */
3651 int pci_set_vga_state(struct pci_dev *dev, bool decode,
3652 		      unsigned int command_bits, u32 flags)
3653 {
3654 	struct pci_bus *bus;
3655 	struct pci_dev *bridge;
3656 	u16 cmd;
3657 	int rc;
3658 
3659 	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
3660 
3661 	/* ARCH specific VGA enables */
3662 	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3663 	if (rc)
3664 		return rc;
3665 
3666 	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3667 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
3668 		if (decode)
3669 			cmd |= command_bits;
3670 		else
3671 			cmd &= ~command_bits;
3672 		pci_write_config_word(dev, PCI_COMMAND, cmd);
3673 	}
3674 
3675 	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3676 		return 0;
3677 
3678 	bus = dev->bus;
3679 	while (bus) {
3680 		bridge = bus->self;
3681 		if (bridge) {
3682 			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3683 					     &cmd);
3684 			if (decode)
3685 				cmd |= PCI_BRIDGE_CTL_VGA;
3686 			else
3687 				cmd &= ~PCI_BRIDGE_CTL_VGA;
3688 			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3689 					      cmd);
3690 		}
3691 		bus = bus->parent;
3692 	}
3693 	return 0;
3694 }
3695 
3696 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3697 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
3698 static DEFINE_SPINLOCK(resource_alignment_lock);
3699 
3700 /**
3701  * pci_specified_resource_alignment - get resource alignment specified by user.
3702  * @dev: the PCI device to check
3703  *
3704  * RETURNS: Resource alignment if it is specified.
3705  * Zero if it is not specified.
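 *
 * Example (illustrative): booting with
 *
 *	pci=resource_alignment=20@0000:00:1f.0
 *
 * requests 2^20 (1 MiB) alignment for the device at domain 0000, bus 00,
 * slot 1f, function 0. If the "order@" prefix is omitted, the code below
 * falls back to PAGE_SIZE alignment. The address used here is only an
 * example.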
3706 */ 3707 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev) 3708 { 3709 int seg, bus, slot, func, align_order, count; 3710 resource_size_t align = 0; 3711 char *p; 3712 3713 spin_lock(&resource_alignment_lock); 3714 p = resource_alignment_param; 3715 while (*p) { 3716 count = 0; 3717 if (sscanf(p, "%d%n", &align_order, &count) == 1 && 3718 p[count] == '@') { 3719 p += count + 1; 3720 } else { 3721 align_order = -1; 3722 } 3723 if (sscanf(p, "%x:%x:%x.%x%n", 3724 &seg, &bus, &slot, &func, &count) != 4) { 3725 seg = 0; 3726 if (sscanf(p, "%x:%x.%x%n", 3727 &bus, &slot, &func, &count) != 3) { 3728 /* Invalid format */ 3729 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n", 3730 p); 3731 break; 3732 } 3733 } 3734 p += count; 3735 if (seg == pci_domain_nr(dev->bus) && 3736 bus == dev->bus->number && 3737 slot == PCI_SLOT(dev->devfn) && 3738 func == PCI_FUNC(dev->devfn)) { 3739 if (align_order == -1) { 3740 align = PAGE_SIZE; 3741 } else { 3742 align = 1 << align_order; 3743 } 3744 /* Found */ 3745 break; 3746 } 3747 if (*p != ';' && *p != ',') { 3748 /* End of param or invalid format */ 3749 break; 3750 } 3751 p++; 3752 } 3753 spin_unlock(&resource_alignment_lock); 3754 return align; 3755 } 3756 3757 /* 3758 * This function disables memory decoding and releases memory resources 3759 * of the device specified by kernel's boot parameter 'pci=resource_alignment='. 3760 * It also rounds up size to specified alignment. 3761 * Later on, the kernel will assign page-aligned memory resource back 3762 * to the device. 3763 */ 3764 void pci_reassigndev_resource_alignment(struct pci_dev *dev) 3765 { 3766 int i; 3767 struct resource *r; 3768 resource_size_t align, size; 3769 u16 command; 3770 3771 /* check if specified PCI is target device to reassign */ 3772 align = pci_specified_resource_alignment(dev); 3773 if (!align) 3774 return; 3775 3776 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL && 3777 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) { 3778 dev_warn(&dev->dev, 3779 "Can't reassign resources to host bridge.\n"); 3780 return; 3781 } 3782 3783 dev_info(&dev->dev, 3784 "Disabling memory decoding and releasing memory resources.\n"); 3785 pci_read_config_word(dev, PCI_COMMAND, &command); 3786 command &= ~PCI_COMMAND_MEMORY; 3787 pci_write_config_word(dev, PCI_COMMAND, command); 3788 3789 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) { 3790 r = &dev->resource[i]; 3791 if (!(r->flags & IORESOURCE_MEM)) 3792 continue; 3793 size = resource_size(r); 3794 if (size < align) { 3795 size = align; 3796 dev_info(&dev->dev, 3797 "Rounding up size of resource #%d to %#llx.\n", 3798 i, (unsigned long long)size); 3799 } 3800 r->end = size - 1; 3801 r->start = 0; 3802 } 3803 /* Need to disable bridge's resource window, 3804 * to enable the kernel to reassign new resource 3805 * window later on. 
3806 */ 3807 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && 3808 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { 3809 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { 3810 r = &dev->resource[i]; 3811 if (!(r->flags & IORESOURCE_MEM)) 3812 continue; 3813 r->end = resource_size(r) - 1; 3814 r->start = 0; 3815 } 3816 pci_disable_bridge_window(dev); 3817 } 3818 } 3819 3820 static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count) 3821 { 3822 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1) 3823 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1; 3824 spin_lock(&resource_alignment_lock); 3825 strncpy(resource_alignment_param, buf, count); 3826 resource_alignment_param[count] = '\0'; 3827 spin_unlock(&resource_alignment_lock); 3828 return count; 3829 } 3830 3831 static ssize_t pci_get_resource_alignment_param(char *buf, size_t size) 3832 { 3833 size_t count; 3834 spin_lock(&resource_alignment_lock); 3835 count = snprintf(buf, size, "%s", resource_alignment_param); 3836 spin_unlock(&resource_alignment_lock); 3837 return count; 3838 } 3839 3840 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf) 3841 { 3842 return pci_get_resource_alignment_param(buf, PAGE_SIZE); 3843 } 3844 3845 static ssize_t pci_resource_alignment_store(struct bus_type *bus, 3846 const char *buf, size_t count) 3847 { 3848 return pci_set_resource_alignment_param(buf, count); 3849 } 3850 3851 BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show, 3852 pci_resource_alignment_store); 3853 3854 static int __init pci_resource_alignment_sysfs_init(void) 3855 { 3856 return bus_create_file(&pci_bus_type, 3857 &bus_attr_resource_alignment); 3858 } 3859 3860 late_initcall(pci_resource_alignment_sysfs_init); 3861 3862 static void pci_no_domains(void) 3863 { 3864 #ifdef CONFIG_PCI_DOMAINS 3865 pci_domains_supported = 0; 3866 #endif 3867 } 3868 3869 /** 3870 * pci_ext_cfg_avail - can we access extended PCI config space? 3871 * 3872 * Returns 1 if we can access PCI extended config space (offsets 3873 * greater than 0xff). This is the default implementation. Architecture 3874 * implementations can override this. 
static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff).  This is the default implementation.  Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
							strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
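/*
 * Illustration (editor's note, not part of the original source): the
 * options handled above are all sub-options of the "pci=" kernel
 * parameter and are combined with commas, for example:
 *
 *	pci=nomsi,hpmemsize=8M,resource_alignment=20@0000:00:02.0
 *
 * which disables MSI, reserves 8 MB of memory space per hotplug bridge
 * and requests 1 MiB alignment for the resources of device 0000:00:02.0.
 */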
EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
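/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a minimal driver probe/remove pair built on interfaces exported above.
 * The "example_*" names are hypothetical; the block is guarded with #if 0
 * so it is never compiled, and it assumes only <linux/pci.h>.
 */
#if 0
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	rc = pci_enable_device(pdev);		/* wake up and enable the device */
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, "example");	/* claim all BARs */
	if (rc)
		goto err_disable;

	pci_set_master(pdev);			/* allow the device to do DMA */
	return 0;

err_disable:
	pci_disable_device(pdev);
	return rc;
}

static void example_remove(struct pci_dev *pdev)
{
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
#endif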