1 /* 2 * PCI Bus Services, see include/linux/pci.h for further explanation. 3 * 4 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter, 5 * David Mosberger-Tang 6 * 7 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz> 8 */ 9 10 #include <linux/kernel.h> 11 #include <linux/delay.h> 12 #include <linux/init.h> 13 #include <linux/pci.h> 14 #include <linux/pm.h> 15 #include <linux/module.h> 16 #include <linux/spinlock.h> 17 #include <linux/string.h> 18 #include <linux/log2.h> 19 #include <linux/pci-aspm.h> 20 #include <linux/pm_wakeup.h> 21 #include <linux/interrupt.h> 22 #include <asm/dma.h> /* isa_dma_bridge_buggy */ 23 #include <linux/device.h> 24 #include <asm/setup.h> 25 #include "pci.h" 26 27 const char *pci_power_names[] = { 28 "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown", 29 }; 30 EXPORT_SYMBOL_GPL(pci_power_names); 31 32 unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT; 33 34 #ifdef CONFIG_PCI_DOMAINS 35 int pci_domains_supported = 1; 36 #endif 37 38 #define DEFAULT_CARDBUS_IO_SIZE (256) 39 #define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024) 40 /* pci=cbmemsize=nnM,cbiosize=nn can override this */ 41 unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE; 42 unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; 43 44 #define DEFAULT_HOTPLUG_IO_SIZE (256) 45 #define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024) 46 /* pci=hpmemsize=nnM,hpiosize=nn can override this */ 47 unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; 48 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; 49 50 /** 51 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children 52 * @bus: pointer to PCI bus structure to search 53 * 54 * Given a PCI bus, returns the highest PCI bus number present in the set 55 * including the given PCI bus and its list of child PCI buses. 56 */ 57 unsigned char pci_bus_max_busnr(struct pci_bus* bus) 58 { 59 struct list_head *tmp; 60 unsigned char max, n; 61 62 max = bus->subordinate; 63 list_for_each(tmp, &bus->children) { 64 n = pci_bus_max_busnr(pci_bus_b(tmp)); 65 if(n > max) 66 max = n; 67 } 68 return max; 69 } 70 EXPORT_SYMBOL_GPL(pci_bus_max_busnr); 71 72 #ifdef CONFIG_HAS_IOMEM 73 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) 74 { 75 /* 76 * Make sure the BAR is actually a memory resource, not an IO resource 77 */ 78 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { 79 WARN_ON(1); 80 return NULL; 81 } 82 return ioremap_nocache(pci_resource_start(pdev, bar), 83 pci_resource_len(pdev, bar)); 84 } 85 EXPORT_SYMBOL_GPL(pci_ioremap_bar); 86 #endif 87 88 #if 0 89 /** 90 * pci_max_busnr - returns maximum PCI bus number 91 * 92 * Returns the highest PCI bus number present in the system global list of 93 * PCI buses. 94 */ 95 unsigned char __devinit 96 pci_max_busnr(void) 97 { 98 struct pci_bus *bus = NULL; 99 unsigned char max, n; 100 101 max = 0; 102 while ((bus = pci_find_next_bus(bus)) != NULL) { 103 n = pci_bus_max_busnr(bus); 104 if(n > max) 105 max = n; 106 } 107 return max; 108 } 109 110 #endif /* 0 */ 111 112 #define PCI_FIND_CAP_TTL 48 113 114 static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn, 115 u8 pos, int cap, int *ttl) 116 { 117 u8 id; 118 119 while ((*ttl)--) { 120 pci_bus_read_config_byte(bus, devfn, pos, &pos); 121 if (pos < 0x40) 122 break; 123 pos &= ~3; 124 pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID, 125 &id); 126 if (id == 0xff) 127 break; 128 if (id == cap) 129 return pos; 130 pos += PCI_CAP_LIST_NEXT; 131 } 132 return 0; 133 } 134 135 static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn, 136 u8 pos, int cap) 137 { 138 int ttl = PCI_FIND_CAP_TTL; 139 140 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); 141 } 142 143 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap) 144 { 145 return __pci_find_next_cap(dev->bus, dev->devfn, 146 pos + PCI_CAP_LIST_NEXT, cap); 147 } 148 EXPORT_SYMBOL_GPL(pci_find_next_capability); 149 150 static int __pci_bus_find_cap_start(struct pci_bus *bus, 151 unsigned int devfn, u8 hdr_type) 152 { 153 u16 status; 154 155 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status); 156 if (!(status & PCI_STATUS_CAP_LIST)) 157 return 0; 158 159 switch (hdr_type) { 160 case PCI_HEADER_TYPE_NORMAL: 161 case PCI_HEADER_TYPE_BRIDGE: 162 return PCI_CAPABILITY_LIST; 163 case PCI_HEADER_TYPE_CARDBUS: 164 return PCI_CB_CAPABILITY_LIST; 165 default: 166 return 0; 167 } 168 169 return 0; 170 } 171 172 /** 173 * pci_find_capability - query for devices' capabilities 174 * @dev: PCI device to query 175 * @cap: capability code 176 * 177 * Tell if a device supports a given PCI capability. 178 * Returns the address of the requested capability structure within the 179 * device's PCI configuration space or 0 in case the device does not 180 * support it. Possible values for @cap: 181 * 182 * %PCI_CAP_ID_PM Power Management 183 * %PCI_CAP_ID_AGP Accelerated Graphics Port 184 * %PCI_CAP_ID_VPD Vital Product Data 185 * %PCI_CAP_ID_SLOTID Slot Identification 186 * %PCI_CAP_ID_MSI Message Signalled Interrupts 187 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap 188 * %PCI_CAP_ID_PCIX PCI-X 189 * %PCI_CAP_ID_EXP PCI Express 190 */ 191 int pci_find_capability(struct pci_dev *dev, int cap) 192 { 193 int pos; 194 195 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type); 196 if (pos) 197 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap); 198 199 return pos; 200 } 201 202 /** 203 * pci_bus_find_capability - query for devices' capabilities 204 * @bus: the PCI bus to query 205 * @devfn: PCI device to query 206 * @cap: capability code 207 * 208 * Like pci_find_capability() but works for pci devices that do not have a 209 * pci_dev structure set up yet. 210 * 211 * Returns the address of the requested capability structure within the 212 * device's PCI configuration space or 0 in case the device does not 213 * support it. 214 */ 215 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap) 216 { 217 int pos; 218 u8 hdr_type; 219 220 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type); 221 222 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f); 223 if (pos) 224 pos = __pci_find_next_cap(bus, devfn, pos, cap); 225 226 return pos; 227 } 228 229 /** 230 * pci_find_ext_capability - Find an extended capability 231 * @dev: PCI device to query 232 * @cap: capability code 233 * 234 * Returns the address of the requested extended capability structure 235 * within the device's PCI configuration space or 0 if the device does 236 * not support it. Possible values for @cap: 237 * 238 * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting 239 * %PCI_EXT_CAP_ID_VC Virtual Channel 240 * %PCI_EXT_CAP_ID_DSN Device Serial Number 241 * %PCI_EXT_CAP_ID_PWR Power Budgeting 242 */ 243 int pci_find_ext_capability(struct pci_dev *dev, int cap) 244 { 245 u32 header; 246 int ttl; 247 int pos = PCI_CFG_SPACE_SIZE; 248 249 /* minimum 8 bytes per capability */ 250 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; 251 252 if (dev->cfg_size <= PCI_CFG_SPACE_SIZE) 253 return 0; 254 255 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) 256 return 0; 257 258 /* 259 * If we have no capabilities, this is indicated by cap ID, 260 * cap version and next pointer all being 0. 261 */ 262 if (header == 0) 263 return 0; 264 265 while (ttl-- > 0) { 266 if (PCI_EXT_CAP_ID(header) == cap) 267 return pos; 268 269 pos = PCI_EXT_CAP_NEXT(header); 270 if (pos < PCI_CFG_SPACE_SIZE) 271 break; 272 273 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) 274 break; 275 } 276 277 return 0; 278 } 279 EXPORT_SYMBOL_GPL(pci_find_ext_capability); 280 281 static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap) 282 { 283 int rc, ttl = PCI_FIND_CAP_TTL; 284 u8 cap, mask; 285 286 if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST) 287 mask = HT_3BIT_CAP_MASK; 288 else 289 mask = HT_5BIT_CAP_MASK; 290 291 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos, 292 PCI_CAP_ID_HT, &ttl); 293 while (pos) { 294 rc = pci_read_config_byte(dev, pos + 3, &cap); 295 if (rc != PCIBIOS_SUCCESSFUL) 296 return 0; 297 298 if ((cap & mask) == ht_cap) 299 return pos; 300 301 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, 302 pos + PCI_CAP_LIST_NEXT, 303 PCI_CAP_ID_HT, &ttl); 304 } 305 306 return 0; 307 } 308 /** 309 * pci_find_next_ht_capability - query a device's Hypertransport capabilities 310 * @dev: PCI device to query 311 * @pos: Position from which to continue searching 312 * @ht_cap: Hypertransport capability code 313 * 314 * To be used in conjunction with pci_find_ht_capability() to search for 315 * all capabilities matching @ht_cap. @pos should always be a value returned 316 * from pci_find_ht_capability(). 317 * 318 * NB. To be 100% safe against broken PCI devices, the caller should take 319 * steps to avoid an infinite loop. 320 */ 321 int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap) 322 { 323 return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap); 324 } 325 EXPORT_SYMBOL_GPL(pci_find_next_ht_capability); 326 327 /** 328 * pci_find_ht_capability - query a device's Hypertransport capabilities 329 * @dev: PCI device to query 330 * @ht_cap: Hypertransport capability code 331 * 332 * Tell if a device supports a given Hypertransport capability. 333 * Returns an address within the device's PCI configuration space 334 * or 0 in case the device does not support the request capability. 335 * The address points to the PCI capability, of type PCI_CAP_ID_HT, 336 * which has a Hypertransport capability matching @ht_cap. 337 */ 338 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap) 339 { 340 int pos; 341 342 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type); 343 if (pos) 344 pos = __pci_find_next_ht_cap(dev, pos, ht_cap); 345 346 return pos; 347 } 348 EXPORT_SYMBOL_GPL(pci_find_ht_capability); 349 350 /** 351 * pci_find_parent_resource - return resource region of parent bus of given region 352 * @dev: PCI device structure contains resources to be searched 353 * @res: child resource record for which parent is sought 354 * 355 * For given resource region of given device, return the resource 356 * region of parent bus the given region is contained in or where 357 * it should be allocated from. 358 */ 359 struct resource * 360 pci_find_parent_resource(const struct pci_dev *dev, struct resource *res) 361 { 362 const struct pci_bus *bus = dev->bus; 363 int i; 364 struct resource *best = NULL; 365 366 for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { 367 struct resource *r = bus->resource[i]; 368 if (!r) 369 continue; 370 if (res->start && !(res->start >= r->start && res->end <= r->end)) 371 continue; /* Not contained */ 372 if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM)) 373 continue; /* Wrong type */ 374 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) 375 return r; /* Exact match */ 376 if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH)) 377 best = r; /* Approximating prefetchable by non-prefetchable */ 378 } 379 return best; 380 } 381 382 /** 383 * pci_restore_bars - restore a devices BAR values (e.g. after wake-up) 384 * @dev: PCI device to have its BARs restored 385 * 386 * Restore the BAR values for a given device, so as to make it 387 * accessible by its driver. 388 */ 389 static void 390 pci_restore_bars(struct pci_dev *dev) 391 { 392 int i; 393 394 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) 395 pci_update_resource(dev, i); 396 } 397 398 static struct pci_platform_pm_ops *pci_platform_pm; 399 400 int pci_set_platform_pm(struct pci_platform_pm_ops *ops) 401 { 402 if (!ops->is_manageable || !ops->set_state || !ops->choose_state 403 || !ops->sleep_wake || !ops->can_wakeup) 404 return -EINVAL; 405 pci_platform_pm = ops; 406 return 0; 407 } 408 409 static inline bool platform_pci_power_manageable(struct pci_dev *dev) 410 { 411 return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false; 412 } 413 414 static inline int platform_pci_set_power_state(struct pci_dev *dev, 415 pci_power_t t) 416 { 417 return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS; 418 } 419 420 static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev) 421 { 422 return pci_platform_pm ? 423 pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR; 424 } 425 426 static inline bool platform_pci_can_wakeup(struct pci_dev *dev) 427 { 428 return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false; 429 } 430 431 static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable) 432 { 433 return pci_platform_pm ? 434 pci_platform_pm->sleep_wake(dev, enable) : -ENODEV; 435 } 436 437 /** 438 * pci_raw_set_power_state - Use PCI PM registers to set the power state of 439 * given PCI device 440 * @dev: PCI device to handle. 441 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. 442 * 443 * RETURN VALUE: 444 * -EINVAL if the requested state is invalid. 445 * -EIO if device does not support PCI PM or its PM capabilities register has a 446 * wrong version, or device doesn't support the requested state. 447 * 0 if device already is in the requested state. 448 * 0 if device's power state has been successfully changed. 449 */ 450 static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) 451 { 452 u16 pmcsr; 453 bool need_restore = false; 454 455 /* Check if we're already there */ 456 if (dev->current_state == state) 457 return 0; 458 459 if (!dev->pm_cap) 460 return -EIO; 461 462 if (state < PCI_D0 || state > PCI_D3hot) 463 return -EINVAL; 464 465 /* Validate current state: 466 * Can enter D0 from any state, but if we can only go deeper 467 * to sleep if we're already in a low power state 468 */ 469 if (state != PCI_D0 && dev->current_state <= PCI_D3cold 470 && dev->current_state > state) { 471 dev_err(&dev->dev, "invalid power transition " 472 "(from state %d to %d)\n", dev->current_state, state); 473 return -EINVAL; 474 } 475 476 /* check if this device supports the desired state */ 477 if ((state == PCI_D1 && !dev->d1_support) 478 || (state == PCI_D2 && !dev->d2_support)) 479 return -EIO; 480 481 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); 482 483 /* If we're (effectively) in D3, force entire word to 0. 484 * This doesn't affect PME_Status, disables PME_En, and 485 * sets PowerState to 0. 486 */ 487 switch (dev->current_state) { 488 case PCI_D0: 489 case PCI_D1: 490 case PCI_D2: 491 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 492 pmcsr |= state; 493 break; 494 case PCI_D3hot: 495 case PCI_D3cold: 496 case PCI_UNKNOWN: /* Boot-up */ 497 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot 498 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) 499 need_restore = true; 500 /* Fall-through: force to D0 */ 501 default: 502 pmcsr = 0; 503 break; 504 } 505 506 /* enter specified state */ 507 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); 508 509 /* Mandatory power management transition delays */ 510 /* see PCI PM 1.1 5.6.1 table 18 */ 511 if (state == PCI_D3hot || dev->current_state == PCI_D3hot) 512 msleep(pci_pm_d3_delay); 513 else if (state == PCI_D2 || dev->current_state == PCI_D2) 514 udelay(PCI_PM_D2_DELAY); 515 516 dev->current_state = state; 517 518 /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT 519 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning 520 * from D3hot to D0 _may_ perform an internal reset, thereby 521 * going to "D0 Uninitialized" rather than "D0 Initialized". 522 * For example, at least some versions of the 3c905B and the 523 * 3c556B exhibit this behaviour. 524 * 525 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave 526 * devices in a D3hot state at boot. Consequently, we need to 527 * restore at least the BARs so that the device will be 528 * accessible to its driver. 529 */ 530 if (need_restore) 531 pci_restore_bars(dev); 532 533 if (dev->bus->self) 534 pcie_aspm_pm_state_change(dev->bus->self); 535 536 return 0; 537 } 538 539 /** 540 * pci_update_current_state - Read PCI power state of given device from its 541 * PCI PM registers and cache it 542 * @dev: PCI device to handle. 543 * @state: State to cache in case the device doesn't have the PM capability 544 */ 545 void pci_update_current_state(struct pci_dev *dev, pci_power_t state) 546 { 547 if (dev->pm_cap) { 548 u16 pmcsr; 549 550 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); 551 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); 552 } else { 553 dev->current_state = state; 554 } 555 } 556 557 /** 558 * pci_platform_power_transition - Use platform to change device power state 559 * @dev: PCI device to handle. 560 * @state: State to put the device into. 561 */ 562 static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state) 563 { 564 int error; 565 566 if (platform_pci_power_manageable(dev)) { 567 error = platform_pci_set_power_state(dev, state); 568 if (!error) 569 pci_update_current_state(dev, state); 570 } else { 571 error = -ENODEV; 572 /* Fall back to PCI_D0 if native PM is not supported */ 573 if (!dev->pm_cap) 574 dev->current_state = PCI_D0; 575 } 576 577 return error; 578 } 579 580 /** 581 * __pci_start_power_transition - Start power transition of a PCI device 582 * @dev: PCI device to handle. 583 * @state: State to put the device into. 584 */ 585 static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state) 586 { 587 if (state == PCI_D0) 588 pci_platform_power_transition(dev, PCI_D0); 589 } 590 591 /** 592 * __pci_complete_power_transition - Complete power transition of a PCI device 593 * @dev: PCI device to handle. 594 * @state: State to put the device into. 595 * 596 * This function should not be called directly by device drivers. 597 */ 598 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state) 599 { 600 return state > PCI_D0 ? 601 pci_platform_power_transition(dev, state) : -EINVAL; 602 } 603 EXPORT_SYMBOL_GPL(__pci_complete_power_transition); 604 605 /** 606 * pci_set_power_state - Set the power state of a PCI device 607 * @dev: PCI device to handle. 608 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. 609 * 610 * Transition a device to a new power state, using the platform firmware and/or 611 * the device's PCI PM registers. 612 * 613 * RETURN VALUE: 614 * -EINVAL if the requested state is invalid. 615 * -EIO if device does not support PCI PM or its PM capabilities register has a 616 * wrong version, or device doesn't support the requested state. 617 * 0 if device already is in the requested state. 618 * 0 if device's power state has been successfully changed. 619 */ 620 int pci_set_power_state(struct pci_dev *dev, pci_power_t state) 621 { 622 int error; 623 624 /* bound the state we're entering */ 625 if (state > PCI_D3hot) 626 state = PCI_D3hot; 627 else if (state < PCI_D0) 628 state = PCI_D0; 629 else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev)) 630 /* 631 * If the device or the parent bridge do not support PCI PM, 632 * ignore the request if we're doing anything other than putting 633 * it into D0 (which would only happen on boot). 634 */ 635 return 0; 636 637 /* Check if we're already there */ 638 if (dev->current_state == state) 639 return 0; 640 641 __pci_start_power_transition(dev, state); 642 643 /* This device is quirked not to be put into D3, so 644 don't put it in D3 */ 645 if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3)) 646 return 0; 647 648 error = pci_raw_set_power_state(dev, state); 649 650 if (!__pci_complete_power_transition(dev, state)) 651 error = 0; 652 653 return error; 654 } 655 656 /** 657 * pci_choose_state - Choose the power state of a PCI device 658 * @dev: PCI device to be suspended 659 * @state: target sleep state for the whole system. This is the value 660 * that is passed to suspend() function. 661 * 662 * Returns PCI power state suitable for given device and given system 663 * message. 664 */ 665 666 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) 667 { 668 pci_power_t ret; 669 670 if (!pci_find_capability(dev, PCI_CAP_ID_PM)) 671 return PCI_D0; 672 673 ret = platform_pci_choose_state(dev); 674 if (ret != PCI_POWER_ERROR) 675 return ret; 676 677 switch (state.event) { 678 case PM_EVENT_ON: 679 return PCI_D0; 680 case PM_EVENT_FREEZE: 681 case PM_EVENT_PRETHAW: 682 /* REVISIT both freeze and pre-thaw "should" use D0 */ 683 case PM_EVENT_SUSPEND: 684 case PM_EVENT_HIBERNATE: 685 return PCI_D3hot; 686 default: 687 dev_info(&dev->dev, "unrecognized suspend event %d\n", 688 state.event); 689 BUG(); 690 } 691 return PCI_D0; 692 } 693 694 EXPORT_SYMBOL(pci_choose_state); 695 696 #define PCI_EXP_SAVE_REGS 7 697 698 #define pcie_cap_has_devctl(type, flags) 1 699 #define pcie_cap_has_lnkctl(type, flags) \ 700 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \ 701 (type == PCI_EXP_TYPE_ROOT_PORT || \ 702 type == PCI_EXP_TYPE_ENDPOINT || \ 703 type == PCI_EXP_TYPE_LEG_END)) 704 #define pcie_cap_has_sltctl(type, flags) \ 705 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \ 706 ((type == PCI_EXP_TYPE_ROOT_PORT) || \ 707 (type == PCI_EXP_TYPE_DOWNSTREAM && \ 708 (flags & PCI_EXP_FLAGS_SLOT)))) 709 #define pcie_cap_has_rtctl(type, flags) \ 710 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \ 711 (type == PCI_EXP_TYPE_ROOT_PORT || \ 712 type == PCI_EXP_TYPE_RC_EC)) 713 #define pcie_cap_has_devctl2(type, flags) \ 714 ((flags & PCI_EXP_FLAGS_VERS) > 1) 715 #define pcie_cap_has_lnkctl2(type, flags) \ 716 ((flags & PCI_EXP_FLAGS_VERS) > 1) 717 #define pcie_cap_has_sltctl2(type, flags) \ 718 ((flags & PCI_EXP_FLAGS_VERS) > 1) 719 720 static int pci_save_pcie_state(struct pci_dev *dev) 721 { 722 int pos, i = 0; 723 struct pci_cap_saved_state *save_state; 724 u16 *cap; 725 u16 flags; 726 727 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 728 if (pos <= 0) 729 return 0; 730 731 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); 732 if (!save_state) { 733 dev_err(&dev->dev, "buffer not found in %s\n", __func__); 734 return -ENOMEM; 735 } 736 cap = (u16 *)&save_state->data[0]; 737 738 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags); 739 740 if (pcie_cap_has_devctl(dev->pcie_type, flags)) 741 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]); 742 if (pcie_cap_has_lnkctl(dev->pcie_type, flags)) 743 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]); 744 if (pcie_cap_has_sltctl(dev->pcie_type, flags)) 745 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]); 746 if (pcie_cap_has_rtctl(dev->pcie_type, flags)) 747 pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]); 748 if (pcie_cap_has_devctl2(dev->pcie_type, flags)) 749 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]); 750 if (pcie_cap_has_lnkctl2(dev->pcie_type, flags)) 751 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]); 752 if (pcie_cap_has_sltctl2(dev->pcie_type, flags)) 753 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]); 754 755 return 0; 756 } 757 758 static void pci_restore_pcie_state(struct pci_dev *dev) 759 { 760 int i = 0, pos; 761 struct pci_cap_saved_state *save_state; 762 u16 *cap; 763 u16 flags; 764 765 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); 766 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 767 if (!save_state || pos <= 0) 768 return; 769 cap = (u16 *)&save_state->data[0]; 770 771 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags); 772 773 if (pcie_cap_has_devctl(dev->pcie_type, flags)) 774 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]); 775 if (pcie_cap_has_lnkctl(dev->pcie_type, flags)) 776 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]); 777 if (pcie_cap_has_sltctl(dev->pcie_type, flags)) 778 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]); 779 if (pcie_cap_has_rtctl(dev->pcie_type, flags)) 780 pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]); 781 if (pcie_cap_has_devctl2(dev->pcie_type, flags)) 782 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]); 783 if (pcie_cap_has_lnkctl2(dev->pcie_type, flags)) 784 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]); 785 if (pcie_cap_has_sltctl2(dev->pcie_type, flags)) 786 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]); 787 } 788 789 790 static int pci_save_pcix_state(struct pci_dev *dev) 791 { 792 int pos; 793 struct pci_cap_saved_state *save_state; 794 795 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); 796 if (pos <= 0) 797 return 0; 798 799 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); 800 if (!save_state) { 801 dev_err(&dev->dev, "buffer not found in %s\n", __func__); 802 return -ENOMEM; 803 } 804 805 pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->data); 806 807 return 0; 808 } 809 810 static void pci_restore_pcix_state(struct pci_dev *dev) 811 { 812 int i = 0, pos; 813 struct pci_cap_saved_state *save_state; 814 u16 *cap; 815 816 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); 817 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); 818 if (!save_state || pos <= 0) 819 return; 820 cap = (u16 *)&save_state->data[0]; 821 822 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]); 823 } 824 825 826 /** 827 * pci_save_state - save the PCI configuration space of a device before suspending 828 * @dev: - PCI device that we're dealing with 829 */ 830 int 831 pci_save_state(struct pci_dev *dev) 832 { 833 int i; 834 /* XXX: 100% dword access ok here? */ 835 for (i = 0; i < 16; i++) 836 pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]); 837 dev->state_saved = true; 838 if ((i = pci_save_pcie_state(dev)) != 0) 839 return i; 840 if ((i = pci_save_pcix_state(dev)) != 0) 841 return i; 842 return 0; 843 } 844 845 /** 846 * pci_restore_state - Restore the saved state of a PCI device 847 * @dev: - PCI device that we're dealing with 848 */ 849 int 850 pci_restore_state(struct pci_dev *dev) 851 { 852 int i; 853 u32 val; 854 855 if (!dev->state_saved) 856 return 0; 857 858 /* PCI Express register must be restored first */ 859 pci_restore_pcie_state(dev); 860 861 /* 862 * The Base Address register should be programmed before the command 863 * register(s) 864 */ 865 for (i = 15; i >= 0; i--) { 866 pci_read_config_dword(dev, i * 4, &val); 867 if (val != dev->saved_config_space[i]) { 868 dev_printk(KERN_DEBUG, &dev->dev, "restoring config " 869 "space at offset %#x (was %#x, writing %#x)\n", 870 i, val, (int)dev->saved_config_space[i]); 871 pci_write_config_dword(dev,i * 4, 872 dev->saved_config_space[i]); 873 } 874 } 875 pci_restore_pcix_state(dev); 876 pci_restore_msi_state(dev); 877 pci_restore_iov_state(dev); 878 879 dev->state_saved = false; 880 881 return 0; 882 } 883 884 static int do_pci_enable_device(struct pci_dev *dev, int bars) 885 { 886 int err; 887 888 err = pci_set_power_state(dev, PCI_D0); 889 if (err < 0 && err != -EIO) 890 return err; 891 err = pcibios_enable_device(dev, bars); 892 if (err < 0) 893 return err; 894 pci_fixup_device(pci_fixup_enable, dev); 895 896 return 0; 897 } 898 899 /** 900 * pci_reenable_device - Resume abandoned device 901 * @dev: PCI device to be resumed 902 * 903 * Note this function is a backend of pci_default_resume and is not supposed 904 * to be called by normal code, write proper resume handler and use it instead. 905 */ 906 int pci_reenable_device(struct pci_dev *dev) 907 { 908 if (pci_is_enabled(dev)) 909 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1); 910 return 0; 911 } 912 913 static int __pci_enable_device_flags(struct pci_dev *dev, 914 resource_size_t flags) 915 { 916 int err; 917 int i, bars = 0; 918 919 if (atomic_add_return(1, &dev->enable_cnt) > 1) 920 return 0; /* already enabled */ 921 922 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) 923 if (dev->resource[i].flags & flags) 924 bars |= (1 << i); 925 926 err = do_pci_enable_device(dev, bars); 927 if (err < 0) 928 atomic_dec(&dev->enable_cnt); 929 return err; 930 } 931 932 /** 933 * pci_enable_device_io - Initialize a device for use with IO space 934 * @dev: PCI device to be initialized 935 * 936 * Initialize device before it's used by a driver. Ask low-level code 937 * to enable I/O resources. Wake up the device if it was suspended. 938 * Beware, this function can fail. 939 */ 940 int pci_enable_device_io(struct pci_dev *dev) 941 { 942 return __pci_enable_device_flags(dev, IORESOURCE_IO); 943 } 944 945 /** 946 * pci_enable_device_mem - Initialize a device for use with Memory space 947 * @dev: PCI device to be initialized 948 * 949 * Initialize device before it's used by a driver. Ask low-level code 950 * to enable Memory resources. Wake up the device if it was suspended. 951 * Beware, this function can fail. 952 */ 953 int pci_enable_device_mem(struct pci_dev *dev) 954 { 955 return __pci_enable_device_flags(dev, IORESOURCE_MEM); 956 } 957 958 /** 959 * pci_enable_device - Initialize device before it's used by a driver. 960 * @dev: PCI device to be initialized 961 * 962 * Initialize device before it's used by a driver. Ask low-level code 963 * to enable I/O and memory. Wake up the device if it was suspended. 964 * Beware, this function can fail. 965 * 966 * Note we don't actually enable the device many times if we call 967 * this function repeatedly (we just increment the count). 968 */ 969 int pci_enable_device(struct pci_dev *dev) 970 { 971 return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO); 972 } 973 974 /* 975 * Managed PCI resources. This manages device on/off, intx/msi/msix 976 * on/off and BAR regions. pci_dev itself records msi/msix status, so 977 * there's no need to track it separately. pci_devres is initialized 978 * when a device is enabled using managed PCI device enable interface. 979 */ 980 struct pci_devres { 981 unsigned int enabled:1; 982 unsigned int pinned:1; 983 unsigned int orig_intx:1; 984 unsigned int restore_intx:1; 985 u32 region_mask; 986 }; 987 988 static void pcim_release(struct device *gendev, void *res) 989 { 990 struct pci_dev *dev = container_of(gendev, struct pci_dev, dev); 991 struct pci_devres *this = res; 992 int i; 993 994 if (dev->msi_enabled) 995 pci_disable_msi(dev); 996 if (dev->msix_enabled) 997 pci_disable_msix(dev); 998 999 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) 1000 if (this->region_mask & (1 << i)) 1001 pci_release_region(dev, i); 1002 1003 if (this->restore_intx) 1004 pci_intx(dev, this->orig_intx); 1005 1006 if (this->enabled && !this->pinned) 1007 pci_disable_device(dev); 1008 } 1009 1010 static struct pci_devres * get_pci_dr(struct pci_dev *pdev) 1011 { 1012 struct pci_devres *dr, *new_dr; 1013 1014 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL); 1015 if (dr) 1016 return dr; 1017 1018 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL); 1019 if (!new_dr) 1020 return NULL; 1021 return devres_get(&pdev->dev, new_dr, NULL, NULL); 1022 } 1023 1024 static struct pci_devres * find_pci_dr(struct pci_dev *pdev) 1025 { 1026 if (pci_is_managed(pdev)) 1027 return devres_find(&pdev->dev, pcim_release, NULL, NULL); 1028 return NULL; 1029 } 1030 1031 /** 1032 * pcim_enable_device - Managed pci_enable_device() 1033 * @pdev: PCI device to be initialized 1034 * 1035 * Managed pci_enable_device(). 1036 */ 1037 int pcim_enable_device(struct pci_dev *pdev) 1038 { 1039 struct pci_devres *dr; 1040 int rc; 1041 1042 dr = get_pci_dr(pdev); 1043 if (unlikely(!dr)) 1044 return -ENOMEM; 1045 if (dr->enabled) 1046 return 0; 1047 1048 rc = pci_enable_device(pdev); 1049 if (!rc) { 1050 pdev->is_managed = 1; 1051 dr->enabled = 1; 1052 } 1053 return rc; 1054 } 1055 1056 /** 1057 * pcim_pin_device - Pin managed PCI device 1058 * @pdev: PCI device to pin 1059 * 1060 * Pin managed PCI device @pdev. Pinned device won't be disabled on 1061 * driver detach. @pdev must have been enabled with 1062 * pcim_enable_device(). 1063 */ 1064 void pcim_pin_device(struct pci_dev *pdev) 1065 { 1066 struct pci_devres *dr; 1067 1068 dr = find_pci_dr(pdev); 1069 WARN_ON(!dr || !dr->enabled); 1070 if (dr) 1071 dr->pinned = 1; 1072 } 1073 1074 /** 1075 * pcibios_disable_device - disable arch specific PCI resources for device dev 1076 * @dev: the PCI device to disable 1077 * 1078 * Disables architecture specific PCI resources for the device. This 1079 * is the default implementation. Architecture implementations can 1080 * override this. 1081 */ 1082 void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {} 1083 1084 static void do_pci_disable_device(struct pci_dev *dev) 1085 { 1086 u16 pci_command; 1087 1088 pci_read_config_word(dev, PCI_COMMAND, &pci_command); 1089 if (pci_command & PCI_COMMAND_MASTER) { 1090 pci_command &= ~PCI_COMMAND_MASTER; 1091 pci_write_config_word(dev, PCI_COMMAND, pci_command); 1092 } 1093 1094 pcibios_disable_device(dev); 1095 } 1096 1097 /** 1098 * pci_disable_enabled_device - Disable device without updating enable_cnt 1099 * @dev: PCI device to disable 1100 * 1101 * NOTE: This function is a backend of PCI power management routines and is 1102 * not supposed to be called drivers. 1103 */ 1104 void pci_disable_enabled_device(struct pci_dev *dev) 1105 { 1106 if (pci_is_enabled(dev)) 1107 do_pci_disable_device(dev); 1108 } 1109 1110 /** 1111 * pci_disable_device - Disable PCI device after use 1112 * @dev: PCI device to be disabled 1113 * 1114 * Signal to the system that the PCI device is not in use by the system 1115 * anymore. This only involves disabling PCI bus-mastering, if active. 1116 * 1117 * Note we don't actually disable the device until all callers of 1118 * pci_device_enable() have called pci_device_disable(). 1119 */ 1120 void 1121 pci_disable_device(struct pci_dev *dev) 1122 { 1123 struct pci_devres *dr; 1124 1125 dr = find_pci_dr(dev); 1126 if (dr) 1127 dr->enabled = 0; 1128 1129 if (atomic_sub_return(1, &dev->enable_cnt) != 0) 1130 return; 1131 1132 do_pci_disable_device(dev); 1133 1134 dev->is_busmaster = 0; 1135 } 1136 1137 /** 1138 * pcibios_set_pcie_reset_state - set reset state for device dev 1139 * @dev: the PCI-E device reset 1140 * @state: Reset state to enter into 1141 * 1142 * 1143 * Sets the PCI-E reset state for the device. This is the default 1144 * implementation. Architecture implementations can override this. 1145 */ 1146 int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev, 1147 enum pcie_reset_state state) 1148 { 1149 return -EINVAL; 1150 } 1151 1152 /** 1153 * pci_set_pcie_reset_state - set reset state for device dev 1154 * @dev: the PCI-E device reset 1155 * @state: Reset state to enter into 1156 * 1157 * 1158 * Sets the PCI reset state for the device. 1159 */ 1160 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) 1161 { 1162 return pcibios_set_pcie_reset_state(dev, state); 1163 } 1164 1165 /** 1166 * pci_pme_capable - check the capability of PCI device to generate PME# 1167 * @dev: PCI device to handle. 1168 * @state: PCI state from which device will issue PME#. 1169 */ 1170 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) 1171 { 1172 if (!dev->pm_cap) 1173 return false; 1174 1175 return !!(dev->pme_support & (1 << state)); 1176 } 1177 1178 /** 1179 * pci_pme_active - enable or disable PCI device's PME# function 1180 * @dev: PCI device to handle. 1181 * @enable: 'true' to enable PME# generation; 'false' to disable it. 1182 * 1183 * The caller must verify that the device is capable of generating PME# before 1184 * calling this function with @enable equal to 'true'. 1185 */ 1186 void pci_pme_active(struct pci_dev *dev, bool enable) 1187 { 1188 u16 pmcsr; 1189 1190 if (!dev->pm_cap) 1191 return; 1192 1193 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); 1194 /* Clear PME_Status by writing 1 to it and enable PME# */ 1195 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE; 1196 if (!enable) 1197 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; 1198 1199 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); 1200 1201 dev_printk(KERN_INFO, &dev->dev, "PME# %s\n", 1202 enable ? "enabled" : "disabled"); 1203 } 1204 1205 /** 1206 * pci_enable_wake - enable PCI device as wakeup event source 1207 * @dev: PCI device affected 1208 * @state: PCI state from which device will issue wakeup events 1209 * @enable: True to enable event generation; false to disable 1210 * 1211 * This enables the device as a wakeup event source, or disables it. 1212 * When such events involves platform-specific hooks, those hooks are 1213 * called automatically by this routine. 1214 * 1215 * Devices with legacy power management (no standard PCI PM capabilities) 1216 * always require such platform hooks. 1217 * 1218 * RETURN VALUE: 1219 * 0 is returned on success 1220 * -EINVAL is returned if device is not supposed to wake up the system 1221 * Error code depending on the platform is returned if both the platform and 1222 * the native mechanism fail to enable the generation of wake-up events 1223 */ 1224 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) 1225 { 1226 int ret = 0; 1227 1228 if (enable && !device_may_wakeup(&dev->dev)) 1229 return -EINVAL; 1230 1231 /* Don't do the same thing twice in a row for one device. */ 1232 if (!!enable == !!dev->wakeup_prepared) 1233 return 0; 1234 1235 /* 1236 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don 1237 * Anderson we should be doing PME# wake enable followed by ACPI wake 1238 * enable. To disable wake-up we call the platform first, for symmetry. 1239 */ 1240 1241 if (enable) { 1242 int error; 1243 1244 if (pci_pme_capable(dev, state)) 1245 pci_pme_active(dev, true); 1246 else 1247 ret = 1; 1248 error = platform_pci_sleep_wake(dev, true); 1249 if (ret) 1250 ret = error; 1251 if (!ret) 1252 dev->wakeup_prepared = true; 1253 } else { 1254 platform_pci_sleep_wake(dev, false); 1255 pci_pme_active(dev, false); 1256 dev->wakeup_prepared = false; 1257 } 1258 1259 return ret; 1260 } 1261 1262 /** 1263 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold 1264 * @dev: PCI device to prepare 1265 * @enable: True to enable wake-up event generation; false to disable 1266 * 1267 * Many drivers want the device to wake up the system from D3_hot or D3_cold 1268 * and this function allows them to set that up cleanly - pci_enable_wake() 1269 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI 1270 * ordering constraints. 1271 * 1272 * This function only returns error code if the device is not capable of 1273 * generating PME# from both D3_hot and D3_cold, and the platform is unable to 1274 * enable wake-up power for it. 1275 */ 1276 int pci_wake_from_d3(struct pci_dev *dev, bool enable) 1277 { 1278 return pci_pme_capable(dev, PCI_D3cold) ? 1279 pci_enable_wake(dev, PCI_D3cold, enable) : 1280 pci_enable_wake(dev, PCI_D3hot, enable); 1281 } 1282 1283 /** 1284 * pci_target_state - find an appropriate low power state for a given PCI dev 1285 * @dev: PCI device 1286 * 1287 * Use underlying platform code to find a supported low power state for @dev. 1288 * If the platform can't manage @dev, return the deepest state from which it 1289 * can generate wake events, based on any available PME info. 1290 */ 1291 pci_power_t pci_target_state(struct pci_dev *dev) 1292 { 1293 pci_power_t target_state = PCI_D3hot; 1294 1295 if (platform_pci_power_manageable(dev)) { 1296 /* 1297 * Call the platform to choose the target state of the device 1298 * and enable wake-up from this state if supported. 1299 */ 1300 pci_power_t state = platform_pci_choose_state(dev); 1301 1302 switch (state) { 1303 case PCI_POWER_ERROR: 1304 case PCI_UNKNOWN: 1305 break; 1306 case PCI_D1: 1307 case PCI_D2: 1308 if (pci_no_d1d2(dev)) 1309 break; 1310 default: 1311 target_state = state; 1312 } 1313 } else if (!dev->pm_cap) { 1314 target_state = PCI_D0; 1315 } else if (device_may_wakeup(&dev->dev)) { 1316 /* 1317 * Find the deepest state from which the device can generate 1318 * wake-up events, make it the target state and enable device 1319 * to generate PME#. 1320 */ 1321 if (dev->pme_support) { 1322 while (target_state 1323 && !(dev->pme_support & (1 << target_state))) 1324 target_state--; 1325 } 1326 } 1327 1328 return target_state; 1329 } 1330 1331 /** 1332 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state 1333 * @dev: Device to handle. 1334 * 1335 * Choose the power state appropriate for the device depending on whether 1336 * it can wake up the system and/or is power manageable by the platform 1337 * (PCI_D3hot is the default) and put the device into that state. 1338 */ 1339 int pci_prepare_to_sleep(struct pci_dev *dev) 1340 { 1341 pci_power_t target_state = pci_target_state(dev); 1342 int error; 1343 1344 if (target_state == PCI_POWER_ERROR) 1345 return -EIO; 1346 1347 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev)); 1348 1349 error = pci_set_power_state(dev, target_state); 1350 1351 if (error) 1352 pci_enable_wake(dev, target_state, false); 1353 1354 return error; 1355 } 1356 1357 /** 1358 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state 1359 * @dev: Device to handle. 1360 * 1361 * Disable device's sytem wake-up capability and put it into D0. 1362 */ 1363 int pci_back_from_sleep(struct pci_dev *dev) 1364 { 1365 pci_enable_wake(dev, PCI_D0, false); 1366 return pci_set_power_state(dev, PCI_D0); 1367 } 1368 1369 /** 1370 * pci_pm_init - Initialize PM functions of given PCI device 1371 * @dev: PCI device to handle. 1372 */ 1373 void pci_pm_init(struct pci_dev *dev) 1374 { 1375 int pm; 1376 u16 pmc; 1377 1378 dev->wakeup_prepared = false; 1379 dev->pm_cap = 0; 1380 1381 /* find PCI PM capability in list */ 1382 pm = pci_find_capability(dev, PCI_CAP_ID_PM); 1383 if (!pm) 1384 return; 1385 /* Check device's ability to generate PME# */ 1386 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc); 1387 1388 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { 1389 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n", 1390 pmc & PCI_PM_CAP_VER_MASK); 1391 return; 1392 } 1393 1394 dev->pm_cap = pm; 1395 1396 dev->d1_support = false; 1397 dev->d2_support = false; 1398 if (!pci_no_d1d2(dev)) { 1399 if (pmc & PCI_PM_CAP_D1) 1400 dev->d1_support = true; 1401 if (pmc & PCI_PM_CAP_D2) 1402 dev->d2_support = true; 1403 1404 if (dev->d1_support || dev->d2_support) 1405 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n", 1406 dev->d1_support ? " D1" : "", 1407 dev->d2_support ? " D2" : ""); 1408 } 1409 1410 pmc &= PCI_PM_CAP_PME_MASK; 1411 if (pmc) { 1412 dev_info(&dev->dev, "PME# supported from%s%s%s%s%s\n", 1413 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", 1414 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", 1415 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", 1416 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", 1417 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); 1418 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; 1419 /* 1420 * Make device's PM flags reflect the wake-up capability, but 1421 * let the user space enable it to wake up the system as needed. 1422 */ 1423 device_set_wakeup_capable(&dev->dev, true); 1424 device_set_wakeup_enable(&dev->dev, false); 1425 /* Disable the PME# generation functionality */ 1426 pci_pme_active(dev, false); 1427 } else { 1428 dev->pme_support = 0; 1429 } 1430 } 1431 1432 /** 1433 * platform_pci_wakeup_init - init platform wakeup if present 1434 * @dev: PCI device 1435 * 1436 * Some devices don't have PCI PM caps but can still generate wakeup 1437 * events through platform methods (like ACPI events). If @dev supports 1438 * platform wakeup events, set the device flag to indicate as much. This 1439 * may be redundant if the device also supports PCI PM caps, but double 1440 * initialization should be safe in that case. 1441 */ 1442 void platform_pci_wakeup_init(struct pci_dev *dev) 1443 { 1444 if (!platform_pci_can_wakeup(dev)) 1445 return; 1446 1447 device_set_wakeup_capable(&dev->dev, true); 1448 device_set_wakeup_enable(&dev->dev, false); 1449 platform_pci_sleep_wake(dev, false); 1450 } 1451 1452 /** 1453 * pci_add_save_buffer - allocate buffer for saving given capability registers 1454 * @dev: the PCI device 1455 * @cap: the capability to allocate the buffer for 1456 * @size: requested size of the buffer 1457 */ 1458 static int pci_add_cap_save_buffer( 1459 struct pci_dev *dev, char cap, unsigned int size) 1460 { 1461 int pos; 1462 struct pci_cap_saved_state *save_state; 1463 1464 pos = pci_find_capability(dev, cap); 1465 if (pos <= 0) 1466 return 0; 1467 1468 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL); 1469 if (!save_state) 1470 return -ENOMEM; 1471 1472 save_state->cap_nr = cap; 1473 pci_add_saved_cap(dev, save_state); 1474 1475 return 0; 1476 } 1477 1478 /** 1479 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities 1480 * @dev: the PCI device 1481 */ 1482 void pci_allocate_cap_save_buffers(struct pci_dev *dev) 1483 { 1484 int error; 1485 1486 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 1487 PCI_EXP_SAVE_REGS * sizeof(u16)); 1488 if (error) 1489 dev_err(&dev->dev, 1490 "unable to preallocate PCI Express save buffer\n"); 1491 1492 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16)); 1493 if (error) 1494 dev_err(&dev->dev, 1495 "unable to preallocate PCI-X save buffer\n"); 1496 } 1497 1498 /** 1499 * pci_enable_ari - enable ARI forwarding if hardware support it 1500 * @dev: the PCI device 1501 */ 1502 void pci_enable_ari(struct pci_dev *dev) 1503 { 1504 int pos; 1505 u32 cap; 1506 u16 ctrl; 1507 struct pci_dev *bridge; 1508 1509 if (!dev->is_pcie || dev->devfn) 1510 return; 1511 1512 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); 1513 if (!pos) 1514 return; 1515 1516 bridge = dev->bus->self; 1517 if (!bridge || !bridge->is_pcie) 1518 return; 1519 1520 pos = pci_find_capability(bridge, PCI_CAP_ID_EXP); 1521 if (!pos) 1522 return; 1523 1524 pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap); 1525 if (!(cap & PCI_EXP_DEVCAP2_ARI)) 1526 return; 1527 1528 pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl); 1529 ctrl |= PCI_EXP_DEVCTL2_ARI; 1530 pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl); 1531 1532 bridge->ari_enabled = 1; 1533 } 1534 1535 /** 1536 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge 1537 * @dev: the PCI device 1538 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTD, 4=INTD) 1539 * 1540 * Perform INTx swizzling for a device behind one level of bridge. This is 1541 * required by section 9.1 of the PCI-to-PCI bridge specification for devices 1542 * behind bridges on add-in cards. For devices with ARI enabled, the slot 1543 * number is always 0 (see the Implementation Note in section 2.2.8.1 of 1544 * the PCI Express Base Specification, Revision 2.1) 1545 */ 1546 u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin) 1547 { 1548 int slot; 1549 1550 if (pci_ari_enabled(dev->bus)) 1551 slot = 0; 1552 else 1553 slot = PCI_SLOT(dev->devfn); 1554 1555 return (((pin - 1) + slot) % 4) + 1; 1556 } 1557 1558 int 1559 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) 1560 { 1561 u8 pin; 1562 1563 pin = dev->pin; 1564 if (!pin) 1565 return -1; 1566 1567 while (!pci_is_root_bus(dev->bus)) { 1568 pin = pci_swizzle_interrupt_pin(dev, pin); 1569 dev = dev->bus->self; 1570 } 1571 *bridge = dev; 1572 return pin; 1573 } 1574 1575 /** 1576 * pci_common_swizzle - swizzle INTx all the way to root bridge 1577 * @dev: the PCI device 1578 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTD, 4=INTD) 1579 * 1580 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI 1581 * bridges all the way up to a PCI root bus. 1582 */ 1583 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp) 1584 { 1585 u8 pin = *pinp; 1586 1587 while (!pci_is_root_bus(dev->bus)) { 1588 pin = pci_swizzle_interrupt_pin(dev, pin); 1589 dev = dev->bus->self; 1590 } 1591 *pinp = pin; 1592 return PCI_SLOT(dev->devfn); 1593 } 1594 1595 /** 1596 * pci_release_region - Release a PCI bar 1597 * @pdev: PCI device whose resources were previously reserved by pci_request_region 1598 * @bar: BAR to release 1599 * 1600 * Releases the PCI I/O and memory resources previously reserved by a 1601 * successful call to pci_request_region. Call this function only 1602 * after all use of the PCI regions has ceased. 1603 */ 1604 void pci_release_region(struct pci_dev *pdev, int bar) 1605 { 1606 struct pci_devres *dr; 1607 1608 if (pci_resource_len(pdev, bar) == 0) 1609 return; 1610 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) 1611 release_region(pci_resource_start(pdev, bar), 1612 pci_resource_len(pdev, bar)); 1613 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) 1614 release_mem_region(pci_resource_start(pdev, bar), 1615 pci_resource_len(pdev, bar)); 1616 1617 dr = find_pci_dr(pdev); 1618 if (dr) 1619 dr->region_mask &= ~(1 << bar); 1620 } 1621 1622 /** 1623 * __pci_request_region - Reserved PCI I/O and memory resource 1624 * @pdev: PCI device whose resources are to be reserved 1625 * @bar: BAR to be reserved 1626 * @res_name: Name to be associated with resource. 1627 * @exclusive: whether the region access is exclusive or not 1628 * 1629 * Mark the PCI region associated with PCI device @pdev BR @bar as 1630 * being reserved by owner @res_name. Do not access any 1631 * address inside the PCI regions unless this call returns 1632 * successfully. 1633 * 1634 * If @exclusive is set, then the region is marked so that userspace 1635 * is explicitly not allowed to map the resource via /dev/mem or 1636 * sysfs MMIO access. 1637 * 1638 * Returns 0 on success, or %EBUSY on error. A warning 1639 * message is also printed on failure. 1640 */ 1641 static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name, 1642 int exclusive) 1643 { 1644 struct pci_devres *dr; 1645 1646 if (pci_resource_len(pdev, bar) == 0) 1647 return 0; 1648 1649 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) { 1650 if (!request_region(pci_resource_start(pdev, bar), 1651 pci_resource_len(pdev, bar), res_name)) 1652 goto err_out; 1653 } 1654 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { 1655 if (!__request_mem_region(pci_resource_start(pdev, bar), 1656 pci_resource_len(pdev, bar), res_name, 1657 exclusive)) 1658 goto err_out; 1659 } 1660 1661 dr = find_pci_dr(pdev); 1662 if (dr) 1663 dr->region_mask |= 1 << bar; 1664 1665 return 0; 1666 1667 err_out: 1668 dev_warn(&pdev->dev, "BAR %d: can't reserve %s region %pR\n", 1669 bar, 1670 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem", 1671 &pdev->resource[bar]); 1672 return -EBUSY; 1673 } 1674 1675 /** 1676 * pci_request_region - Reserve PCI I/O and memory resource 1677 * @pdev: PCI device whose resources are to be reserved 1678 * @bar: BAR to be reserved 1679 * @res_name: Name to be associated with resource 1680 * 1681 * Mark the PCI region associated with PCI device @pdev BAR @bar as 1682 * being reserved by owner @res_name. Do not access any 1683 * address inside the PCI regions unless this call returns 1684 * successfully. 1685 * 1686 * Returns 0 on success, or %EBUSY on error. A warning 1687 * message is also printed on failure. 1688 */ 1689 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) 1690 { 1691 return __pci_request_region(pdev, bar, res_name, 0); 1692 } 1693 1694 /** 1695 * pci_request_region_exclusive - Reserved PCI I/O and memory resource 1696 * @pdev: PCI device whose resources are to be reserved 1697 * @bar: BAR to be reserved 1698 * @res_name: Name to be associated with resource. 1699 * 1700 * Mark the PCI region associated with PCI device @pdev BR @bar as 1701 * being reserved by owner @res_name. Do not access any 1702 * address inside the PCI regions unless this call returns 1703 * successfully. 1704 * 1705 * Returns 0 on success, or %EBUSY on error. A warning 1706 * message is also printed on failure. 1707 * 1708 * The key difference that _exclusive makes it that userspace is 1709 * explicitly not allowed to map the resource via /dev/mem or 1710 * sysfs. 1711 */ 1712 int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name) 1713 { 1714 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE); 1715 } 1716 /** 1717 * pci_release_selected_regions - Release selected PCI I/O and memory resources 1718 * @pdev: PCI device whose resources were previously reserved 1719 * @bars: Bitmask of BARs to be released 1720 * 1721 * Release selected PCI I/O and memory resources previously reserved. 1722 * Call this function only after all use of the PCI regions has ceased. 1723 */ 1724 void pci_release_selected_regions(struct pci_dev *pdev, int bars) 1725 { 1726 int i; 1727 1728 for (i = 0; i < 6; i++) 1729 if (bars & (1 << i)) 1730 pci_release_region(pdev, i); 1731 } 1732 1733 int __pci_request_selected_regions(struct pci_dev *pdev, int bars, 1734 const char *res_name, int excl) 1735 { 1736 int i; 1737 1738 for (i = 0; i < 6; i++) 1739 if (bars & (1 << i)) 1740 if (__pci_request_region(pdev, i, res_name, excl)) 1741 goto err_out; 1742 return 0; 1743 1744 err_out: 1745 while(--i >= 0) 1746 if (bars & (1 << i)) 1747 pci_release_region(pdev, i); 1748 1749 return -EBUSY; 1750 } 1751 1752 1753 /** 1754 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources 1755 * @pdev: PCI device whose resources are to be reserved 1756 * @bars: Bitmask of BARs to be requested 1757 * @res_name: Name to be associated with resource 1758 */ 1759 int pci_request_selected_regions(struct pci_dev *pdev, int bars, 1760 const char *res_name) 1761 { 1762 return __pci_request_selected_regions(pdev, bars, res_name, 0); 1763 } 1764 1765 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, 1766 int bars, const char *res_name) 1767 { 1768 return __pci_request_selected_regions(pdev, bars, res_name, 1769 IORESOURCE_EXCLUSIVE); 1770 } 1771 1772 /** 1773 * pci_release_regions - Release reserved PCI I/O and memory resources 1774 * @pdev: PCI device whose resources were previously reserved by pci_request_regions 1775 * 1776 * Releases all PCI I/O and memory resources previously reserved by a 1777 * successful call to pci_request_regions. Call this function only 1778 * after all use of the PCI regions has ceased. 1779 */ 1780 1781 void pci_release_regions(struct pci_dev *pdev) 1782 { 1783 pci_release_selected_regions(pdev, (1 << 6) - 1); 1784 } 1785 1786 /** 1787 * pci_request_regions - Reserved PCI I/O and memory resources 1788 * @pdev: PCI device whose resources are to be reserved 1789 * @res_name: Name to be associated with resource. 1790 * 1791 * Mark all PCI regions associated with PCI device @pdev as 1792 * being reserved by owner @res_name. Do not access any 1793 * address inside the PCI regions unless this call returns 1794 * successfully. 1795 * 1796 * Returns 0 on success, or %EBUSY on error. A warning 1797 * message is also printed on failure. 1798 */ 1799 int pci_request_regions(struct pci_dev *pdev, const char *res_name) 1800 { 1801 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name); 1802 } 1803 1804 /** 1805 * pci_request_regions_exclusive - Reserved PCI I/O and memory resources 1806 * @pdev: PCI device whose resources are to be reserved 1807 * @res_name: Name to be associated with resource. 1808 * 1809 * Mark all PCI regions associated with PCI device @pdev as 1810 * being reserved by owner @res_name. Do not access any 1811 * address inside the PCI regions unless this call returns 1812 * successfully. 1813 * 1814 * pci_request_regions_exclusive() will mark the region so that 1815 * /dev/mem and the sysfs MMIO access will not be allowed. 1816 * 1817 * Returns 0 on success, or %EBUSY on error. A warning 1818 * message is also printed on failure. 1819 */ 1820 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name) 1821 { 1822 return pci_request_selected_regions_exclusive(pdev, 1823 ((1 << 6) - 1), res_name); 1824 } 1825 1826 static void __pci_set_master(struct pci_dev *dev, bool enable) 1827 { 1828 u16 old_cmd, cmd; 1829 1830 pci_read_config_word(dev, PCI_COMMAND, &old_cmd); 1831 if (enable) 1832 cmd = old_cmd | PCI_COMMAND_MASTER; 1833 else 1834 cmd = old_cmd & ~PCI_COMMAND_MASTER; 1835 if (cmd != old_cmd) { 1836 dev_dbg(&dev->dev, "%s bus mastering\n", 1837 enable ? "enabling" : "disabling"); 1838 pci_write_config_word(dev, PCI_COMMAND, cmd); 1839 } 1840 dev->is_busmaster = enable; 1841 } 1842 1843 /** 1844 * pci_set_master - enables bus-mastering for device dev 1845 * @dev: the PCI device to enable 1846 * 1847 * Enables bus-mastering on the device and calls pcibios_set_master() 1848 * to do the needed arch specific settings. 1849 */ 1850 void pci_set_master(struct pci_dev *dev) 1851 { 1852 __pci_set_master(dev, true); 1853 pcibios_set_master(dev); 1854 } 1855 1856 /** 1857 * pci_clear_master - disables bus-mastering for device dev 1858 * @dev: the PCI device to disable 1859 */ 1860 void pci_clear_master(struct pci_dev *dev) 1861 { 1862 __pci_set_master(dev, false); 1863 } 1864 1865 #ifdef PCI_DISABLE_MWI 1866 int pci_set_mwi(struct pci_dev *dev) 1867 { 1868 return 0; 1869 } 1870 1871 int pci_try_set_mwi(struct pci_dev *dev) 1872 { 1873 return 0; 1874 } 1875 1876 void pci_clear_mwi(struct pci_dev *dev) 1877 { 1878 } 1879 1880 #else 1881 1882 #ifndef PCI_CACHE_LINE_BYTES 1883 #define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES 1884 #endif 1885 1886 /* This can be overridden by arch code. */ 1887 /* Don't forget this is measured in 32-bit words, not bytes */ 1888 u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4; 1889 1890 /** 1891 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed 1892 * @dev: the PCI device for which MWI is to be enabled 1893 * 1894 * Helper function for pci_set_mwi. 1895 * Originally copied from drivers/net/acenic.c. 1896 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. 1897 * 1898 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 1899 */ 1900 static int 1901 pci_set_cacheline_size(struct pci_dev *dev) 1902 { 1903 u8 cacheline_size; 1904 1905 if (!pci_cache_line_size) 1906 return -EINVAL; /* The system doesn't support MWI. */ 1907 1908 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be 1909 equal to or multiple of the right value. */ 1910 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size); 1911 if (cacheline_size >= pci_cache_line_size && 1912 (cacheline_size % pci_cache_line_size) == 0) 1913 return 0; 1914 1915 /* Write the correct value. */ 1916 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size); 1917 /* Read it back. */ 1918 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size); 1919 if (cacheline_size == pci_cache_line_size) 1920 return 0; 1921 1922 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not " 1923 "supported\n", pci_cache_line_size << 2); 1924 1925 return -EINVAL; 1926 } 1927 1928 /** 1929 * pci_set_mwi - enables memory-write-invalidate PCI transaction 1930 * @dev: the PCI device for which MWI is enabled 1931 * 1932 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND. 1933 * 1934 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 1935 */ 1936 int 1937 pci_set_mwi(struct pci_dev *dev) 1938 { 1939 int rc; 1940 u16 cmd; 1941 1942 rc = pci_set_cacheline_size(dev); 1943 if (rc) 1944 return rc; 1945 1946 pci_read_config_word(dev, PCI_COMMAND, &cmd); 1947 if (! (cmd & PCI_COMMAND_INVALIDATE)) { 1948 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n"); 1949 cmd |= PCI_COMMAND_INVALIDATE; 1950 pci_write_config_word(dev, PCI_COMMAND, cmd); 1951 } 1952 1953 return 0; 1954 } 1955 1956 /** 1957 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction 1958 * @dev: the PCI device for which MWI is enabled 1959 * 1960 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND. 1961 * Callers are not required to check the return value. 1962 * 1963 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 1964 */ 1965 int pci_try_set_mwi(struct pci_dev *dev) 1966 { 1967 int rc = pci_set_mwi(dev); 1968 return rc; 1969 } 1970 1971 /** 1972 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev 1973 * @dev: the PCI device to disable 1974 * 1975 * Disables PCI Memory-Write-Invalidate transaction on the device 1976 */ 1977 void 1978 pci_clear_mwi(struct pci_dev *dev) 1979 { 1980 u16 cmd; 1981 1982 pci_read_config_word(dev, PCI_COMMAND, &cmd); 1983 if (cmd & PCI_COMMAND_INVALIDATE) { 1984 cmd &= ~PCI_COMMAND_INVALIDATE; 1985 pci_write_config_word(dev, PCI_COMMAND, cmd); 1986 } 1987 } 1988 #endif /* ! PCI_DISABLE_MWI */ 1989 1990 /** 1991 * pci_intx - enables/disables PCI INTx for device dev 1992 * @pdev: the PCI device to operate on 1993 * @enable: boolean: whether to enable or disable PCI INTx 1994 * 1995 * Enables/disables PCI INTx for device dev 1996 */ 1997 void 1998 pci_intx(struct pci_dev *pdev, int enable) 1999 { 2000 u16 pci_command, new; 2001 2002 pci_read_config_word(pdev, PCI_COMMAND, &pci_command); 2003 2004 if (enable) { 2005 new = pci_command & ~PCI_COMMAND_INTX_DISABLE; 2006 } else { 2007 new = pci_command | PCI_COMMAND_INTX_DISABLE; 2008 } 2009 2010 if (new != pci_command) { 2011 struct pci_devres *dr; 2012 2013 pci_write_config_word(pdev, PCI_COMMAND, new); 2014 2015 dr = find_pci_dr(pdev); 2016 if (dr && !dr->restore_intx) { 2017 dr->restore_intx = 1; 2018 dr->orig_intx = !enable; 2019 } 2020 } 2021 } 2022 2023 /** 2024 * pci_msi_off - disables any msi or msix capabilities 2025 * @dev: the PCI device to operate on 2026 * 2027 * If you want to use msi see pci_enable_msi and friends. 2028 * This is a lower level primitive that allows us to disable 2029 * msi operation at the device level. 2030 */ 2031 void pci_msi_off(struct pci_dev *dev) 2032 { 2033 int pos; 2034 u16 control; 2035 2036 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 2037 if (pos) { 2038 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); 2039 control &= ~PCI_MSI_FLAGS_ENABLE; 2040 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); 2041 } 2042 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 2043 if (pos) { 2044 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); 2045 control &= ~PCI_MSIX_FLAGS_ENABLE; 2046 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); 2047 } 2048 } 2049 2050 #ifndef HAVE_ARCH_PCI_SET_DMA_MASK 2051 /* 2052 * These can be overridden by arch-specific implementations 2053 */ 2054 int 2055 pci_set_dma_mask(struct pci_dev *dev, u64 mask) 2056 { 2057 if (!pci_dma_supported(dev, mask)) 2058 return -EIO; 2059 2060 dev->dma_mask = mask; 2061 2062 return 0; 2063 } 2064 2065 int 2066 pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) 2067 { 2068 if (!pci_dma_supported(dev, mask)) 2069 return -EIO; 2070 2071 dev->dev.coherent_dma_mask = mask; 2072 2073 return 0; 2074 } 2075 #endif 2076 2077 #ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE 2078 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size) 2079 { 2080 return dma_set_max_seg_size(&dev->dev, size); 2081 } 2082 EXPORT_SYMBOL(pci_set_dma_max_seg_size); 2083 #endif 2084 2085 #ifndef HAVE_ARCH_PCI_SET_DMA_SEGMENT_BOUNDARY 2086 int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask) 2087 { 2088 return dma_set_seg_boundary(&dev->dev, mask); 2089 } 2090 EXPORT_SYMBOL(pci_set_dma_seg_boundary); 2091 #endif 2092 2093 static int pcie_flr(struct pci_dev *dev, int probe) 2094 { 2095 int i; 2096 int pos; 2097 u32 cap; 2098 u16 status; 2099 2100 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 2101 if (!pos) 2102 return -ENOTTY; 2103 2104 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap); 2105 if (!(cap & PCI_EXP_DEVCAP_FLR)) 2106 return -ENOTTY; 2107 2108 if (probe) 2109 return 0; 2110 2111 /* Wait for Transaction Pending bit clean */ 2112 for (i = 0; i < 4; i++) { 2113 if (i) 2114 msleep((1 << (i - 1)) * 100); 2115 2116 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status); 2117 if (!(status & PCI_EXP_DEVSTA_TRPND)) 2118 goto clear; 2119 } 2120 2121 dev_err(&dev->dev, "transaction is not cleared; " 2122 "proceeding with reset anyway\n"); 2123 2124 clear: 2125 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, 2126 PCI_EXP_DEVCTL_BCR_FLR); 2127 msleep(100); 2128 2129 return 0; 2130 } 2131 2132 static int pci_af_flr(struct pci_dev *dev, int probe) 2133 { 2134 int i; 2135 int pos; 2136 u8 cap; 2137 u8 status; 2138 2139 pos = pci_find_capability(dev, PCI_CAP_ID_AF); 2140 if (!pos) 2141 return -ENOTTY; 2142 2143 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap); 2144 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR)) 2145 return -ENOTTY; 2146 2147 if (probe) 2148 return 0; 2149 2150 /* Wait for Transaction Pending bit clean */ 2151 for (i = 0; i < 4; i++) { 2152 if (i) 2153 msleep((1 << (i - 1)) * 100); 2154 2155 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status); 2156 if (!(status & PCI_AF_STATUS_TP)) 2157 goto clear; 2158 } 2159 2160 dev_err(&dev->dev, "transaction is not cleared; " 2161 "proceeding with reset anyway\n"); 2162 2163 clear: 2164 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR); 2165 msleep(100); 2166 2167 return 0; 2168 } 2169 2170 static int pci_pm_reset(struct pci_dev *dev, int probe) 2171 { 2172 u16 csr; 2173 2174 if (!dev->pm_cap) 2175 return -ENOTTY; 2176 2177 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr); 2178 if (csr & PCI_PM_CTRL_NO_SOFT_RESET) 2179 return -ENOTTY; 2180 2181 if (probe) 2182 return 0; 2183 2184 if (dev->current_state != PCI_D0) 2185 return -EINVAL; 2186 2187 csr &= ~PCI_PM_CTRL_STATE_MASK; 2188 csr |= PCI_D3hot; 2189 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); 2190 msleep(pci_pm_d3_delay); 2191 2192 csr &= ~PCI_PM_CTRL_STATE_MASK; 2193 csr |= PCI_D0; 2194 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); 2195 msleep(pci_pm_d3_delay); 2196 2197 return 0; 2198 } 2199 2200 static int pci_parent_bus_reset(struct pci_dev *dev, int probe) 2201 { 2202 u16 ctrl; 2203 struct pci_dev *pdev; 2204 2205 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self) 2206 return -ENOTTY; 2207 2208 list_for_each_entry(pdev, &dev->bus->devices, bus_list) 2209 if (pdev != dev) 2210 return -ENOTTY; 2211 2212 if (probe) 2213 return 0; 2214 2215 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl); 2216 ctrl |= PCI_BRIDGE_CTL_BUS_RESET; 2217 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl); 2218 msleep(100); 2219 2220 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; 2221 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl); 2222 msleep(100); 2223 2224 return 0; 2225 } 2226 2227 static int pci_dev_reset(struct pci_dev *dev, int probe) 2228 { 2229 int rc; 2230 2231 might_sleep(); 2232 2233 if (!probe) { 2234 pci_block_user_cfg_access(dev); 2235 /* block PM suspend, driver probe, etc. */ 2236 down(&dev->dev.sem); 2237 } 2238 2239 rc = pcie_flr(dev, probe); 2240 if (rc != -ENOTTY) 2241 goto done; 2242 2243 rc = pci_af_flr(dev, probe); 2244 if (rc != -ENOTTY) 2245 goto done; 2246 2247 rc = pci_pm_reset(dev, probe); 2248 if (rc != -ENOTTY) 2249 goto done; 2250 2251 rc = pci_parent_bus_reset(dev, probe); 2252 done: 2253 if (!probe) { 2254 up(&dev->dev.sem); 2255 pci_unblock_user_cfg_access(dev); 2256 } 2257 2258 return rc; 2259 } 2260 2261 /** 2262 * __pci_reset_function - reset a PCI device function 2263 * @dev: PCI device to reset 2264 * 2265 * Some devices allow an individual function to be reset without affecting 2266 * other functions in the same device. The PCI device must be responsive 2267 * to PCI config space in order to use this function. 2268 * 2269 * The device function is presumed to be unused when this function is called. 2270 * Resetting the device will make the contents of PCI configuration space 2271 * random, so any caller of this must be prepared to reinitialise the 2272 * device including MSI, bus mastering, BARs, decoding IO and memory spaces, 2273 * etc. 2274 * 2275 * Returns 0 if the device function was successfully reset or negative if the 2276 * device doesn't support resetting a single function. 2277 */ 2278 int __pci_reset_function(struct pci_dev *dev) 2279 { 2280 return pci_dev_reset(dev, 0); 2281 } 2282 EXPORT_SYMBOL_GPL(__pci_reset_function); 2283 2284 /** 2285 * pci_probe_reset_function - check whether the device can be safely reset 2286 * @dev: PCI device to reset 2287 * 2288 * Some devices allow an individual function to be reset without affecting 2289 * other functions in the same device. The PCI device must be responsive 2290 * to PCI config space in order to use this function. 2291 * 2292 * Returns 0 if the device function can be reset or negative if the 2293 * device doesn't support resetting a single function. 2294 */ 2295 int pci_probe_reset_function(struct pci_dev *dev) 2296 { 2297 return pci_dev_reset(dev, 1); 2298 } 2299 2300 /** 2301 * pci_reset_function - quiesce and reset a PCI device function 2302 * @dev: PCI device to reset 2303 * 2304 * Some devices allow an individual function to be reset without affecting 2305 * other functions in the same device. The PCI device must be responsive 2306 * to PCI config space in order to use this function. 2307 * 2308 * This function does not just reset the PCI portion of a device, but 2309 * clears all the state associated with the device. This function differs 2310 * from __pci_reset_function in that it saves and restores device state 2311 * over the reset. 2312 * 2313 * Returns 0 if the device function was successfully reset or negative if the 2314 * device doesn't support resetting a single function. 2315 */ 2316 int pci_reset_function(struct pci_dev *dev) 2317 { 2318 int rc; 2319 2320 rc = pci_dev_reset(dev, 1); 2321 if (rc) 2322 return rc; 2323 2324 pci_save_state(dev); 2325 2326 /* 2327 * both INTx and MSI are disabled after the Interrupt Disable bit 2328 * is set and the Bus Master bit is cleared. 2329 */ 2330 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); 2331 2332 rc = pci_dev_reset(dev, 0); 2333 2334 pci_restore_state(dev); 2335 2336 return rc; 2337 } 2338 EXPORT_SYMBOL_GPL(pci_reset_function); 2339 2340 /** 2341 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count 2342 * @dev: PCI device to query 2343 * 2344 * Returns mmrbc: maximum designed memory read count in bytes 2345 * or appropriate error value. 2346 */ 2347 int pcix_get_max_mmrbc(struct pci_dev *dev) 2348 { 2349 int err, cap; 2350 u32 stat; 2351 2352 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 2353 if (!cap) 2354 return -EINVAL; 2355 2356 err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat); 2357 if (err) 2358 return -EINVAL; 2359 2360 return (stat & PCI_X_STATUS_MAX_READ) >> 12; 2361 } 2362 EXPORT_SYMBOL(pcix_get_max_mmrbc); 2363 2364 /** 2365 * pcix_get_mmrbc - get PCI-X maximum memory read byte count 2366 * @dev: PCI device to query 2367 * 2368 * Returns mmrbc: maximum memory read count in bytes 2369 * or appropriate error value. 2370 */ 2371 int pcix_get_mmrbc(struct pci_dev *dev) 2372 { 2373 int ret, cap; 2374 u32 cmd; 2375 2376 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 2377 if (!cap) 2378 return -EINVAL; 2379 2380 ret = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd); 2381 if (!ret) 2382 ret = 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2); 2383 2384 return ret; 2385 } 2386 EXPORT_SYMBOL(pcix_get_mmrbc); 2387 2388 /** 2389 * pcix_set_mmrbc - set PCI-X maximum memory read byte count 2390 * @dev: PCI device to query 2391 * @mmrbc: maximum memory read count in bytes 2392 * valid values are 512, 1024, 2048, 4096 2393 * 2394 * If possible sets maximum memory read byte count, some bridges have erratas 2395 * that prevent this. 2396 */ 2397 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) 2398 { 2399 int cap, err = -EINVAL; 2400 u32 stat, cmd, v, o; 2401 2402 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc)) 2403 goto out; 2404 2405 v = ffs(mmrbc) - 10; 2406 2407 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 2408 if (!cap) 2409 goto out; 2410 2411 err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat); 2412 if (err) 2413 goto out; 2414 2415 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21) 2416 return -E2BIG; 2417 2418 err = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd); 2419 if (err) 2420 goto out; 2421 2422 o = (cmd & PCI_X_CMD_MAX_READ) >> 2; 2423 if (o != v) { 2424 if (v > o && dev->bus && 2425 (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC)) 2426 return -EIO; 2427 2428 cmd &= ~PCI_X_CMD_MAX_READ; 2429 cmd |= v << 2; 2430 err = pci_write_config_dword(dev, cap + PCI_X_CMD, cmd); 2431 } 2432 out: 2433 return err; 2434 } 2435 EXPORT_SYMBOL(pcix_set_mmrbc); 2436 2437 /** 2438 * pcie_get_readrq - get PCI Express read request size 2439 * @dev: PCI device to query 2440 * 2441 * Returns maximum memory read request in bytes 2442 * or appropriate error value. 2443 */ 2444 int pcie_get_readrq(struct pci_dev *dev) 2445 { 2446 int ret, cap; 2447 u16 ctl; 2448 2449 cap = pci_find_capability(dev, PCI_CAP_ID_EXP); 2450 if (!cap) 2451 return -EINVAL; 2452 2453 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); 2454 if (!ret) 2455 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); 2456 2457 return ret; 2458 } 2459 EXPORT_SYMBOL(pcie_get_readrq); 2460 2461 /** 2462 * pcie_set_readrq - set PCI Express maximum memory read request 2463 * @dev: PCI device to query 2464 * @rq: maximum memory read count in bytes 2465 * valid values are 128, 256, 512, 1024, 2048, 4096 2466 * 2467 * If possible sets maximum read byte count 2468 */ 2469 int pcie_set_readrq(struct pci_dev *dev, int rq) 2470 { 2471 int cap, err = -EINVAL; 2472 u16 ctl, v; 2473 2474 if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) 2475 goto out; 2476 2477 v = (ffs(rq) - 8) << 12; 2478 2479 cap = pci_find_capability(dev, PCI_CAP_ID_EXP); 2480 if (!cap) 2481 goto out; 2482 2483 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); 2484 if (err) 2485 goto out; 2486 2487 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) { 2488 ctl &= ~PCI_EXP_DEVCTL_READRQ; 2489 ctl |= v; 2490 err = pci_write_config_dword(dev, cap + PCI_EXP_DEVCTL, ctl); 2491 } 2492 2493 out: 2494 return err; 2495 } 2496 EXPORT_SYMBOL(pcie_set_readrq); 2497 2498 /** 2499 * pci_select_bars - Make BAR mask from the type of resource 2500 * @dev: the PCI device for which BAR mask is made 2501 * @flags: resource type mask to be selected 2502 * 2503 * This helper routine makes bar mask from the type of resource. 2504 */ 2505 int pci_select_bars(struct pci_dev *dev, unsigned long flags) 2506 { 2507 int i, bars = 0; 2508 for (i = 0; i < PCI_NUM_RESOURCES; i++) 2509 if (pci_resource_flags(dev, i) & flags) 2510 bars |= (1 << i); 2511 return bars; 2512 } 2513 2514 /** 2515 * pci_resource_bar - get position of the BAR associated with a resource 2516 * @dev: the PCI device 2517 * @resno: the resource number 2518 * @type: the BAR type to be filled in 2519 * 2520 * Returns BAR position in config space, or 0 if the BAR is invalid. 2521 */ 2522 int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) 2523 { 2524 int reg; 2525 2526 if (resno < PCI_ROM_RESOURCE) { 2527 *type = pci_bar_unknown; 2528 return PCI_BASE_ADDRESS_0 + 4 * resno; 2529 } else if (resno == PCI_ROM_RESOURCE) { 2530 *type = pci_bar_mem32; 2531 return dev->rom_base_reg; 2532 } else if (resno < PCI_BRIDGE_RESOURCES) { 2533 /* device specific resource */ 2534 reg = pci_iov_resource_bar(dev, resno, type); 2535 if (reg) 2536 return reg; 2537 } 2538 2539 dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno); 2540 return 0; 2541 } 2542 2543 /** 2544 * pci_set_vga_state - set VGA decode state on device and parents if requested 2545 * @dev the PCI device 2546 * @decode - true = enable decoding, false = disable decoding 2547 * @command_bits PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY 2548 * @change_bridge - traverse ancestors and change bridges 2549 */ 2550 int pci_set_vga_state(struct pci_dev *dev, bool decode, 2551 unsigned int command_bits, bool change_bridge) 2552 { 2553 struct pci_bus *bus; 2554 struct pci_dev *bridge; 2555 u16 cmd; 2556 2557 WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)); 2558 2559 pci_read_config_word(dev, PCI_COMMAND, &cmd); 2560 if (decode == true) 2561 cmd |= command_bits; 2562 else 2563 cmd &= ~command_bits; 2564 pci_write_config_word(dev, PCI_COMMAND, cmd); 2565 2566 if (change_bridge == false) 2567 return 0; 2568 2569 bus = dev->bus; 2570 while (bus) { 2571 bridge = bus->self; 2572 if (bridge) { 2573 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, 2574 &cmd); 2575 if (decode == true) 2576 cmd |= PCI_BRIDGE_CTL_VGA; 2577 else 2578 cmd &= ~PCI_BRIDGE_CTL_VGA; 2579 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, 2580 cmd); 2581 } 2582 bus = bus->parent; 2583 } 2584 return 0; 2585 } 2586 2587 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE 2588 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; 2589 spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED; 2590 2591 /** 2592 * pci_specified_resource_alignment - get resource alignment specified by user. 2593 * @dev: the PCI device to get 2594 * 2595 * RETURNS: Resource alignment if it is specified. 2596 * Zero if it is not specified. 2597 */ 2598 resource_size_t pci_specified_resource_alignment(struct pci_dev *dev) 2599 { 2600 int seg, bus, slot, func, align_order, count; 2601 resource_size_t align = 0; 2602 char *p; 2603 2604 spin_lock(&resource_alignment_lock); 2605 p = resource_alignment_param; 2606 while (*p) { 2607 count = 0; 2608 if (sscanf(p, "%d%n", &align_order, &count) == 1 && 2609 p[count] == '@') { 2610 p += count + 1; 2611 } else { 2612 align_order = -1; 2613 } 2614 if (sscanf(p, "%x:%x:%x.%x%n", 2615 &seg, &bus, &slot, &func, &count) != 4) { 2616 seg = 0; 2617 if (sscanf(p, "%x:%x.%x%n", 2618 &bus, &slot, &func, &count) != 3) { 2619 /* Invalid format */ 2620 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n", 2621 p); 2622 break; 2623 } 2624 } 2625 p += count; 2626 if (seg == pci_domain_nr(dev->bus) && 2627 bus == dev->bus->number && 2628 slot == PCI_SLOT(dev->devfn) && 2629 func == PCI_FUNC(dev->devfn)) { 2630 if (align_order == -1) { 2631 align = PAGE_SIZE; 2632 } else { 2633 align = 1 << align_order; 2634 } 2635 /* Found */ 2636 break; 2637 } 2638 if (*p != ';' && *p != ',') { 2639 /* End of param or invalid format */ 2640 break; 2641 } 2642 p++; 2643 } 2644 spin_unlock(&resource_alignment_lock); 2645 return align; 2646 } 2647 2648 /** 2649 * pci_is_reassigndev - check if specified PCI is target device to reassign 2650 * @dev: the PCI device to check 2651 * 2652 * RETURNS: non-zero for PCI device is a target device to reassign, 2653 * or zero is not. 2654 */ 2655 int pci_is_reassigndev(struct pci_dev *dev) 2656 { 2657 return (pci_specified_resource_alignment(dev) != 0); 2658 } 2659 2660 ssize_t pci_set_resource_alignment_param(const char *buf, size_t count) 2661 { 2662 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1) 2663 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1; 2664 spin_lock(&resource_alignment_lock); 2665 strncpy(resource_alignment_param, buf, count); 2666 resource_alignment_param[count] = '\0'; 2667 spin_unlock(&resource_alignment_lock); 2668 return count; 2669 } 2670 2671 ssize_t pci_get_resource_alignment_param(char *buf, size_t size) 2672 { 2673 size_t count; 2674 spin_lock(&resource_alignment_lock); 2675 count = snprintf(buf, size, "%s", resource_alignment_param); 2676 spin_unlock(&resource_alignment_lock); 2677 return count; 2678 } 2679 2680 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf) 2681 { 2682 return pci_get_resource_alignment_param(buf, PAGE_SIZE); 2683 } 2684 2685 static ssize_t pci_resource_alignment_store(struct bus_type *bus, 2686 const char *buf, size_t count) 2687 { 2688 return pci_set_resource_alignment_param(buf, count); 2689 } 2690 2691 BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show, 2692 pci_resource_alignment_store); 2693 2694 static int __init pci_resource_alignment_sysfs_init(void) 2695 { 2696 return bus_create_file(&pci_bus_type, 2697 &bus_attr_resource_alignment); 2698 } 2699 2700 late_initcall(pci_resource_alignment_sysfs_init); 2701 2702 static void __devinit pci_no_domains(void) 2703 { 2704 #ifdef CONFIG_PCI_DOMAINS 2705 pci_domains_supported = 0; 2706 #endif 2707 } 2708 2709 /** 2710 * pci_ext_cfg_enabled - can we access extended PCI config space? 2711 * @dev: The PCI device of the root bridge. 2712 * 2713 * Returns 1 if we can access PCI extended config space (offsets 2714 * greater than 0xff). This is the default implementation. Architecture 2715 * implementations can override this. 2716 */ 2717 int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev) 2718 { 2719 return 1; 2720 } 2721 2722 static int __devinit pci_init(void) 2723 { 2724 struct pci_dev *dev = NULL; 2725 2726 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 2727 pci_fixup_device(pci_fixup_final, dev); 2728 } 2729 2730 return 0; 2731 } 2732 2733 static int __init pci_setup(char *str) 2734 { 2735 while (str) { 2736 char *k = strchr(str, ','); 2737 if (k) 2738 *k++ = 0; 2739 if (*str && (str = pcibios_setup(str)) && *str) { 2740 if (!strcmp(str, "nomsi")) { 2741 pci_no_msi(); 2742 } else if (!strcmp(str, "noaer")) { 2743 pci_no_aer(); 2744 } else if (!strcmp(str, "nodomains")) { 2745 pci_no_domains(); 2746 } else if (!strncmp(str, "cbiosize=", 9)) { 2747 pci_cardbus_io_size = memparse(str + 9, &str); 2748 } else if (!strncmp(str, "cbmemsize=", 10)) { 2749 pci_cardbus_mem_size = memparse(str + 10, &str); 2750 } else if (!strncmp(str, "resource_alignment=", 19)) { 2751 pci_set_resource_alignment_param(str + 19, 2752 strlen(str + 19)); 2753 } else if (!strncmp(str, "ecrc=", 5)) { 2754 pcie_ecrc_get_policy(str + 5); 2755 } else if (!strncmp(str, "hpiosize=", 9)) { 2756 pci_hotplug_io_size = memparse(str + 9, &str); 2757 } else if (!strncmp(str, "hpmemsize=", 10)) { 2758 pci_hotplug_mem_size = memparse(str + 10, &str); 2759 } else { 2760 printk(KERN_ERR "PCI: Unknown option `%s'\n", 2761 str); 2762 } 2763 } 2764 str = k; 2765 } 2766 return 0; 2767 } 2768 early_param("pci", pci_setup); 2769 2770 device_initcall(pci_init); 2771 2772 EXPORT_SYMBOL(pci_reenable_device); 2773 EXPORT_SYMBOL(pci_enable_device_io); 2774 EXPORT_SYMBOL(pci_enable_device_mem); 2775 EXPORT_SYMBOL(pci_enable_device); 2776 EXPORT_SYMBOL(pcim_enable_device); 2777 EXPORT_SYMBOL(pcim_pin_device); 2778 EXPORT_SYMBOL(pci_disable_device); 2779 EXPORT_SYMBOL(pci_find_capability); 2780 EXPORT_SYMBOL(pci_bus_find_capability); 2781 EXPORT_SYMBOL(pci_release_regions); 2782 EXPORT_SYMBOL(pci_request_regions); 2783 EXPORT_SYMBOL(pci_request_regions_exclusive); 2784 EXPORT_SYMBOL(pci_release_region); 2785 EXPORT_SYMBOL(pci_request_region); 2786 EXPORT_SYMBOL(pci_request_region_exclusive); 2787 EXPORT_SYMBOL(pci_release_selected_regions); 2788 EXPORT_SYMBOL(pci_request_selected_regions); 2789 EXPORT_SYMBOL(pci_request_selected_regions_exclusive); 2790 EXPORT_SYMBOL(pci_set_master); 2791 EXPORT_SYMBOL(pci_clear_master); 2792 EXPORT_SYMBOL(pci_set_mwi); 2793 EXPORT_SYMBOL(pci_try_set_mwi); 2794 EXPORT_SYMBOL(pci_clear_mwi); 2795 EXPORT_SYMBOL_GPL(pci_intx); 2796 EXPORT_SYMBOL(pci_set_dma_mask); 2797 EXPORT_SYMBOL(pci_set_consistent_dma_mask); 2798 EXPORT_SYMBOL(pci_assign_resource); 2799 EXPORT_SYMBOL(pci_find_parent_resource); 2800 EXPORT_SYMBOL(pci_select_bars); 2801 2802 EXPORT_SYMBOL(pci_set_power_state); 2803 EXPORT_SYMBOL(pci_save_state); 2804 EXPORT_SYMBOL(pci_restore_state); 2805 EXPORT_SYMBOL(pci_pme_capable); 2806 EXPORT_SYMBOL(pci_pme_active); 2807 EXPORT_SYMBOL(pci_enable_wake); 2808 EXPORT_SYMBOL(pci_wake_from_d3); 2809 EXPORT_SYMBOL(pci_target_state); 2810 EXPORT_SYMBOL(pci_prepare_to_sleep); 2811 EXPORT_SYMBOL(pci_back_from_sleep); 2812 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); 2813 2814