/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <asm/dma.h>	/* isa_dma_bridge_buggy */
#include <linux/device.h>
#include <asm/setup.h>
#include "pci.h"

unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus* bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if(n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_nocache(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif
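/*
 * Illustrative sketch (not part of this file): a typical driver maps a
 * memory BAR with pci_ioremap_bar() from its probe routine.  The BAR
 * index below is hypothetical.
 *
 *	void __iomem *regs;
 *
 *	regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 */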
#if 0
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if(n > max)
			max = n;
	}
	return max;
}

#endif  /*  0  */

#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM		Power Management
 *  %PCI_CAP_ID_AGP		Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD		Vital Product Data
 *  %PCI_CAP_ID_SLOTID		Slot Identification
 *  %PCI_CAP_ID_MSI		Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP		CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX		PCI-X
 *  %PCI_CAP_ID_EXP		PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
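/*
 * Illustrative sketch (not part of this file): a caller can locate a
 * capability and then read its registers relative to the returned
 * offset.  The PM capability lookup below is only an example.
 *
 *	u16 pmc;
 *	int pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *
 *	if (pos)
 *		pci_read_config_word(pdev, pos + PCI_PM_PMC, &pmc);
 */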
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
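/*
 * Illustrative sketch (not part of this file): extended capabilities
 * live above the first 256 bytes of config space; a caller might probe
 * for AER support like this.
 *
 *	int aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *
 *	if (aer)
 *		dev_info(&pdev->dev, "AER capability at %#x\n", aer);
 */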
/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure containing resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource
 * region of parent bus the given region is contained in or where
 * it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL;

	for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
		struct resource *r = bus->resource[i];
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
			best = r;	/* Approximating prefetchable by non-prefetchable */
	}
	return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}
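/*
 * Illustrative sketch (not part of this file): pci_set_platform_pm()
 * rejects an ops structure with any missing callback, so a platform
 * layer (e.g. ACPI glue) must supply all five.  The acpi_* names below
 * are hypothetical.
 *
 *	static struct pci_platform_pm_ops acpi_pci_platform_pm = {
 *		.is_manageable	= acpi_pci_power_manageable,
 *		.set_state	= acpi_pci_set_power_state,
 *		.choose_state	= acpi_pci_choose_state,
 *		.can_wakeup	= acpi_pci_can_wakeup,
 *		.sleep_wake	= acpi_pci_sleep_wake,
 *	};
 *
 *	pci_set_platform_pm(&acpi_pci_platform_pm);
 */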
static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
						pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * we can enter D0 from any state, but we can only go deeper
	 * into a low-power state if we're already in one
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		msleep(pci_pm_d3_delay);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	dev->current_state = state;

	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}
/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else {
		error = -ENODEV;
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	}

	return error;
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0)
		pci_platform_power_transition(dev, PCI_D0);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	return state > PCI_D0 ?
			pci_platform_power_transition(dev, state) : -EINVAL;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	return error;
}

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);
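/*
 * Illustrative sketch (not part of this file): a legacy suspend handler
 * typically combines pci_choose_state() with pci_set_power_state().
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t mesg)
 *	{
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
 *		return 0;
 *	}
 */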
#define PCI_EXP_SAVE_REGS	7

#define pcie_cap_has_devctl(type, flags)	1
#define pcie_cap_has_lnkctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_ENDPOINT ||	\
		  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
		   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags)			\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_RC_EC))
#define pcie_cap_has_devctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_lnkctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_sltctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}


static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}
/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: - PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 */
int
pci_restore_state(struct pci_dev *dev)
{
	int i;
	u32 val;

	/* PCI Express registers must be restored first */
	pci_restore_pcie_state(dev);

	/*
	 * The Base Address registers should be programmed before the command
	 * register(s)
	 */
	for (i = 15; i >= 0; i--) {
		pci_read_config_dword(dev, i * 4, &val);
		if (val != dev->saved_config_space[i]) {
			dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
				"space at offset %#x (was %#x, writing %#x)\n",
				i, val, (int)dev->saved_config_space[i]);
			pci_write_config_dword(dev, i * 4,
				dev->saved_config_space[i]);
		}
	}
	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	return 0;
}
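/*
 * Illustrative sketch (not part of this file): the usual pairing is
 * pci_save_state() on the way into suspend and pci_restore_state() on
 * the way out, before the device is touched again.
 *
 *	static int my_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return pci_enable_device(pdev);
 *	}
 */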
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume and is not supposed
 * to be called by normal code; write a proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}

static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}

/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
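/*
 * Illustrative sketch (not part of this file): with the managed variant,
 * a probe routine can skip explicit cleanup; the device is disabled
 * automatically on driver detach unless it has been pinned.
 *
 *	static int my_probe(struct pci_dev *pdev,
 *			    const struct pci_device_id *id)
 *	{
 *		int rc = pcim_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *		return 0;
 *	}
 *
 * No matching pci_disable_device() is needed in the remove() path.
 */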
/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCI-E device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCI-E reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
							enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCI-E device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	dev_printk(KERN_INFO, &dev->dev, "PME# %s\n",
			enable ? "enabled" : "disabled");
}

/**
 * pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
{
	int error = 0;
	bool pme_done = false;

	if (enable && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */

	if (!enable && platform_pci_can_wakeup(dev))
		error = platform_pci_sleep_wake(dev, false);

	if (!enable || pci_pme_capable(dev, state)) {
		pci_pme_active(dev, enable);
		pme_done = true;
	}

	if (enable && platform_pci_can_wakeup(dev))
		error = platform_pci_sleep_wake(dev, true);

	return pme_done ? 0 : error;
}

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
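/*
 * Illustrative sketch (not part of this file): a network driver that
 * supports Wake-on-LAN might arm wake-up in its suspend path.  The
 * my_wol_enabled flag is a hypothetical driver state bit.
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t mesg)
 *	{
 *		pci_save_state(pdev);
 *		pci_wake_from_d3(pdev, my_wol_enabled);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
 *		return 0;
 *	}
 */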
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
		default:
			target_state = state;
		}
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (!dev->pm_cap)
			return PCI_POWER_ERROR;

		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
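/*
 * Illustrative sketch (not part of this file): new-style PM callbacks
 * can defer the state choice entirely to the PCI core.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		pci_save_state(pdev);
 *		return pci_prepare_to_sleep(pdev);
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		pci_back_from_sleep(pdev);
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */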
" D3hot" : "", 1389 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); 1390 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; 1391 /* 1392 * Make device's PM flags reflect the wake-up capability, but 1393 * let the user space enable it to wake up the system as needed. 1394 */ 1395 device_set_wakeup_capable(&dev->dev, true); 1396 device_set_wakeup_enable(&dev->dev, false); 1397 /* Disable the PME# generation functionality */ 1398 pci_pme_active(dev, false); 1399 } else { 1400 dev->pme_support = 0; 1401 } 1402 } 1403 1404 /** 1405 * platform_pci_wakeup_init - init platform wakeup if present 1406 * @dev: PCI device 1407 * 1408 * Some devices don't have PCI PM caps but can still generate wakeup 1409 * events through platform methods (like ACPI events). If @dev supports 1410 * platform wakeup events, set the device flag to indicate as much. This 1411 * may be redundant if the device also supports PCI PM caps, but double 1412 * initialization should be safe in that case. 1413 */ 1414 void platform_pci_wakeup_init(struct pci_dev *dev) 1415 { 1416 if (!platform_pci_can_wakeup(dev)) 1417 return; 1418 1419 device_set_wakeup_capable(&dev->dev, true); 1420 device_set_wakeup_enable(&dev->dev, false); 1421 platform_pci_sleep_wake(dev, false); 1422 } 1423 1424 /** 1425 * pci_add_save_buffer - allocate buffer for saving given capability registers 1426 * @dev: the PCI device 1427 * @cap: the capability to allocate the buffer for 1428 * @size: requested size of the buffer 1429 */ 1430 static int pci_add_cap_save_buffer( 1431 struct pci_dev *dev, char cap, unsigned int size) 1432 { 1433 int pos; 1434 struct pci_cap_saved_state *save_state; 1435 1436 pos = pci_find_capability(dev, cap); 1437 if (pos <= 0) 1438 return 0; 1439 1440 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL); 1441 if (!save_state) 1442 return -ENOMEM; 1443 1444 save_state->cap_nr = cap; 1445 pci_add_saved_cap(dev, save_state); 1446 1447 return 0; 1448 } 1449 1450 /** 1451 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities 1452 * @dev: the PCI device 1453 */ 1454 void pci_allocate_cap_save_buffers(struct pci_dev *dev) 1455 { 1456 int error; 1457 1458 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 1459 PCI_EXP_SAVE_REGS * sizeof(u16)); 1460 if (error) 1461 dev_err(&dev->dev, 1462 "unable to preallocate PCI Express save buffer\n"); 1463 1464 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16)); 1465 if (error) 1466 dev_err(&dev->dev, 1467 "unable to preallocate PCI-X save buffer\n"); 1468 } 1469 1470 /** 1471 * pci_enable_ari - enable ARI forwarding if hardware support it 1472 * @dev: the PCI device 1473 */ 1474 void pci_enable_ari(struct pci_dev *dev) 1475 { 1476 int pos; 1477 u32 cap; 1478 u16 ctrl; 1479 struct pci_dev *bridge; 1480 1481 if (!dev->is_pcie || dev->devfn) 1482 return; 1483 1484 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); 1485 if (!pos) 1486 return; 1487 1488 bridge = dev->bus->self; 1489 if (!bridge || !bridge->is_pcie) 1490 return; 1491 1492 pos = pci_find_capability(bridge, PCI_CAP_ID_EXP); 1493 if (!pos) 1494 return; 1495 1496 pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap); 1497 if (!(cap & PCI_EXP_DEVCAP2_ARI)) 1498 return; 1499 1500 pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl); 1501 ctrl |= PCI_EXP_DEVCTL2_ARI; 1502 pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl); 1503 1504 bridge->ari_enabled = 1; 1505 } 1506 1507 /** 1508 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge 1509 * @dev: the PCI device 
/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.
 */
u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
{
	return (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;
}

int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;

	while (dev->bus->parent) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}

/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.
 */
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
	u8 pin = *pinp;

	while (dev->bus->parent) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*pinp = pin;
	return PCI_SLOT(dev->devfn);
}
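/*
 * Illustrative worked example (not part of this file): for a device in
 * slot 2 behind a bridge, INTB (pin 2) swizzles to
 * (((2 - 1) + 2) % 4) + 1 = 4, i.e. INTD at the bridge.  Crossing a
 * further bridge in slot 0 leaves the pin unchanged, since adding a
 * slot number of 0 does not move it.
 */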
/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));

	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask &= ~(1 << bar);
}

/**
 * __pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
								int exclusive)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!__request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name,
					exclusive))
			goto err_out;
	}

	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask |= 1 << bar;

	return 0;

err_out:
	dev_warn(&pdev->dev, "BAR %d: can't reserve %s region %pR\n",
		 bar,
		 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
		 &pdev->resource[bar]);
	return -EBUSY;
}

/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, 0);
}

/**
 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 *
 * The key difference that _exclusive makes is that userspace is
 * explicitly not allowed to map the resource via /dev/mem or
 * sysfs.
 */
int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}
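/*
 * Illustrative sketch (not part of this file): a driver typically claims
 * a BAR before mapping it, and releases it in the teardown path.
 *
 *	if (pci_request_region(pdev, 0, "my_driver"))
 *		return -EBUSY;
 *	regs = pci_ioremap_bar(pdev, 0);
 *	...
 *	iounmap(regs);
 *	pci_release_region(pdev, 0);
 */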
/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			pci_release_region(pdev, i);
}

int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name, int excl)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			if (__pci_request_region(pdev, i, res_name, excl))
				goto err_out;
	return 0;

err_out:
	while(--i >= 0)
		if (bars & (1 << i))
			pci_release_region(pdev, i);

	return -EBUSY;
}


/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name, 0);
}

int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
				 int bars, const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name,
			IORESOURCE_EXCLUSIVE);
}

/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */

void pci_release_regions(struct pci_dev *pdev)
{
	pci_release_selected_regions(pdev, (1 << 6) - 1);
}

/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}
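/*
 * Illustrative sketch (not part of this file): @bars is a bitmask of BAR
 * indices, so a driver that only touches BARs 0 and 2 can claim just
 * those.
 *
 *	int bars = (1 << 0) | (1 << 2);
 *
 *	if (pci_request_selected_regions(pdev, bars, "my_driver"))
 *		return -EBUSY;
 *	...
 *	pci_release_selected_regions(pdev, bars);
 */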
/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * pci_request_regions_exclusive() will mark the region so that
 * /dev/mem and the sysfs MMIO access will not be allowed.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions_exclusive(pdev,
					((1 << 6) - 1), res_name);
}

static void __pci_set_master(struct pci_dev *dev, bool enable)
{
	u16 old_cmd, cmd;

	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
	if (enable)
		cmd = old_cmd | PCI_COMMAND_MASTER;
	else
		cmd = old_cmd & ~PCI_COMMAND_MASTER;
	if (cmd != old_cmd) {
		dev_dbg(&dev->dev, "%s bus mastering\n",
			enable ? "enabling" : "disabling");
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	dev->is_busmaster = enable;
}

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void pci_set_master(struct pci_dev *dev)
{
	__pci_set_master(dev, true);
	pcibios_set_master(dev);
}

/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
void pci_clear_master(struct pci_dev *dev)
{
	__pci_set_master(dev, false);
}

#ifdef PCI_DISABLE_MWI
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}

int pci_try_set_mwi(struct pci_dev *dev)
{
	return 0;
}

void pci_clear_mwi(struct pci_dev *dev)
{
}

#else

#ifndef PCI_CACHE_LINE_BYTES
#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
#endif

/* This can be overridden by arch code. */
/* Don't forget this is measured in 32-bit words, not bytes */
u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;

/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
static int
pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;		/* The system doesn't support MWI. */

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or a multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
		   "supported\n", pci_cache_line_size << 2);

	return -EINVAL;
}
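/*
 * Illustrative sketch (not part of this file): DMA-capable drivers
 * enable bus mastering after enabling the device, usually from probe().
 *
 *	rc = pci_enable_device(pdev);
 *	if (rc)
 *		return rc;
 *	pci_set_master(pdev);
 */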
#ifdef PCI_DISABLE_MWI
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}

int pci_try_set_mwi(struct pci_dev *dev)
{
	return 0;
}

void pci_clear_mwi(struct pci_dev *dev)
{
}

#else

#ifndef PCI_CACHE_LINE_BYTES
#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
#endif

/* This can be overridden by arch code. */
/* Don't forget this is measured in 32-bit words, not bytes */
u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;

/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
static int
pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;		/* The system doesn't support MWI. */

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
		   "supported\n", pci_cache_line_size << 2);

	return -EINVAL;
}

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

	rc = pci_set_cacheline_size(dev);
	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}

/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_try_set_mwi(struct pci_dev *dev)
{
	int rc = pci_set_mwi(dev);
	return rc;
}

/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}
#endif /* ! PCI_DISABLE_MWI */

/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void
pci_intx(struct pci_dev *pdev, int enable)
{
	u16 pci_command, new;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

	if (enable)
		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
	else
		new = pci_command | PCI_COMMAND_INTX_DISABLE;

	if (new != pci_command) {
		struct pci_devres *dr;

		pci_write_config_word(pdev, PCI_COMMAND, new);

		dr = find_pci_dr(pdev);
		if (dr && !dr->restore_intx) {
			dr->restore_intx = 1;
			dr->orig_intx = !enable;
		}
	}
}
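/*
 * Usage sketch (illustrative only): pci_try_set_mwi() is the form most
 * drivers want, since MWI is purely an optimization; pci_intx() can mask
 * the device's legacy interrupt line, e.g. while MSI is being set up.
 */
#if 0
	pci_try_set_mwi(pdev);	/* best effort; failure can be ignored */
	pci_intx(pdev, 0);	/* disable INTx assertion */
	pci_intx(pdev, 1);	/* re-enable INTx assertion */
#endif /* 0 */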
/**
 * pci_msi_off - disables any msi or msix capabilities
 * @dev: the PCI device to operate on
 *
 * If you want to use msi see pci_enable_msi and friends.
 * This is a lower level primitive that allows us to disable
 * msi operation at the device level.
 */
void pci_msi_off(struct pci_dev *dev)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}

#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
/*
 * These can be overridden by arch-specific implementations
 */
int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;

	return 0;
}

int
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dev.coherent_dma_mask = mask;

	return 0;
}
#endif

#ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
	return dma_set_max_seg_size(&dev->dev, size);
}
EXPORT_SYMBOL(pci_set_dma_max_seg_size);
#endif

#ifndef HAVE_ARCH_PCI_SET_DMA_SEGMENT_BOUNDARY
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
{
	return dma_set_seg_boundary(&dev->dev, mask);
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
#endif
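/*
 * Usage sketch (illustrative only): a driver that prefers 64-bit DMA but
 * can fall back to 32-bit typically tries the masks in order, keeping the
 * streaming and coherent masks consistent.  "err" and "err_out" belong to
 * a hypothetical probe routine.
 */
#if 0
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err)
			goto err_out;	/* no usable DMA addressing */
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}
#endif /* 0 */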
static int __pcie_flr(struct pci_dev *dev, int probe)
{
	u16 status;
	u32 cap;
	int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);

	if (!exppos)
		return -ENOTTY;
	pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
	if (!(cap & PCI_EXP_DEVCAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	pci_block_user_cfg_access(dev);

	/* Wait for the Transaction Pending bit to clear */
	pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
	if (!(status & PCI_EXP_DEVSTA_TRPND))
		goto transaction_done;

	msleep(100);
	pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
	if (!(status & PCI_EXP_DEVSTA_TRPND))
		goto transaction_done;

	dev_info(&dev->dev, "Busy after 100ms while trying to reset; "
			"sleeping for 1 second\n");
	ssleep(1);
	pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
	if (status & PCI_EXP_DEVSTA_TRPND)
		dev_info(&dev->dev, "Still busy after 1s; "
				"proceeding with reset anyway\n");

transaction_done:
	pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL,
				PCI_EXP_DEVCTL_BCR_FLR);
	mdelay(100);

	pci_unblock_user_cfg_access(dev);
	return 0;
}

static int __pci_af_flr(struct pci_dev *dev, int probe)
{
	int cappos = pci_find_capability(dev, PCI_CAP_ID_AF);
	u8 status;
	u8 cap;

	if (!cappos)
		return -ENOTTY;
	pci_read_config_byte(dev, cappos + PCI_AF_CAP, &cap);
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	pci_block_user_cfg_access(dev);

	/* Wait for the Transaction Pending bit to clear */
	pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
	if (!(status & PCI_AF_STATUS_TP))
		goto transaction_done;

	msleep(100);
	pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
	if (!(status & PCI_AF_STATUS_TP))
		goto transaction_done;

	dev_info(&dev->dev, "Busy after 100ms while trying to"
			" reset; sleeping for 1 second\n");
	ssleep(1);
	pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
	if (status & PCI_AF_STATUS_TP)
		dev_info(&dev->dev, "Still busy after 1s; "
				"proceeding with reset anyway\n");

transaction_done:
	pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
	mdelay(100);

	pci_unblock_user_cfg_access(dev);
	return 0;
}

static int __pci_reset_function(struct pci_dev *pdev, int probe)
{
	int res;

	res = __pcie_flr(pdev, probe);
	if (res != -ENOTTY)
		return res;

	res = __pci_af_flr(pdev, probe);
	if (res != -ENOTTY)
		return res;

	return res;
}

/**
 * pci_execute_reset_function() - Reset a PCI device function
 * @dev: Device function to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device. The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or -ENOTTY if the
 * device doesn't support resetting a single function.
 */
int pci_execute_reset_function(struct pci_dev *dev)
{
	return __pci_reset_function(dev, 0);
}
EXPORT_SYMBOL_GPL(pci_execute_reset_function);

/**
 * pci_reset_function() - quiesce and reset a PCI device function
 * @dev: Device function to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device. The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device. This function differs
 * from pci_execute_reset_function() in that it saves and restores device
 * state over the reset.
 *
 * Returns 0 if the device function was successfully reset or -ENOTTY if the
 * device doesn't support resetting a single function.
 */
int pci_reset_function(struct pci_dev *dev)
{
	int r = __pci_reset_function(dev, 1);

	if (r < 0)
		return r;

	if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
		disable_irq(dev->irq);
	pci_save_state(dev);

	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	r = pci_execute_reset_function(dev);

	pci_restore_state(dev);
	if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
		enable_irq(dev->irq);

	return r;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
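/*
 * Usage sketch (illustrative only): a driver that detects a wedged device
 * can attempt a function-level reset.  pci_reset_function() saves and
 * restores config space around the reset, so the caller only has to
 * reprogram device-specific state afterwards.
 */
#if 0
	if (pci_reset_function(pdev) == 0) {
		/* ... reinitialize device-specific registers here ... */
	} else {
		/* device does not support a single-function reset */
	}
#endif /* 0 */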
/**
 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum designed memory read count in bytes
 * or appropriate error value.
 */
int pcix_get_max_mmrbc(struct pci_dev *dev)
{
	int cap;
	u32 stat;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	/* The designed maximum read count field occupies bits 21-22 of
	   PCI_X_STATUS and encodes 512 << n bytes. */
	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
}
EXPORT_SYMBOL(pcix_get_max_mmrbc);

/**
 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum memory read count in bytes
 * or appropriate error value.
 */
int pcix_get_mmrbc(struct pci_dev *dev)
{
	int cap;
	u16 cmd;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	/* PCI_X_CMD is a 16-bit register; use a word access. */
	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
}
EXPORT_SYMBOL(pcix_get_mmrbc);

/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count, some bridges have
 * errata that prevent this.
 */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
	int cap;
	u32 stat, v, o;
	u16 cmd;

	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
		return -EINVAL;

	v = ffs(mmrbc) - 10;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
		return -E2BIG;

	/* PCI_X_CMD is a 16-bit register; use word accesses. */
	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
	if (o != v) {
		if (v > o && dev->bus &&
		    (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
			return -EIO;

		cmd &= ~PCI_X_CMD_MAX_READ;
		cmd |= v << 2;
		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);
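/*
 * Usage sketch (illustrative only): raise the PCI-X read byte count to the
 * device's designed maximum, tolerating bridges whose errata forbid it.
 */
#if 0
	int max = pcix_get_max_mmrbc(pdev);

	if (max > 0 && pcix_set_mmrbc(pdev, max) < 0)
		dev_warn(&pdev->dev, "could not set MMRBC to %d\n", max);
#endif /* 0 */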
/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes
 * or appropriate error value.
 */
int pcie_get_readrq(struct pci_dev *dev)
{
	int ret, cap;
	u16 ctl;

	cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!cap)
		return -EINVAL;

	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
	if (!ret)
		ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);

	return ret;
}
EXPORT_SYMBOL(pcie_get_readrq);

/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum read byte count
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
	int cap, err = -EINVAL;
	u16 ctl, v;

	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
		goto out;

	v = (ffs(rq) - 8) << 12;

	cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!cap)
		goto out;

	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
	if (err)
		goto out;

	if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
		ctl &= ~PCI_EXP_DEVCTL_READRQ;
		ctl |= v;
		/* PCI_EXP_DEVCTL is a 16-bit register, so write a word,
		   not a dword. */
		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
	}

out:
	return err;
}
EXPORT_SYMBOL(pcie_set_readrq);
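/*
 * Usage sketch (illustrative only): query and clamp the PCI Express read
 * request size.  Some drivers cap it to work around chipset limits; the
 * 512-byte value here is just an example.
 */
#if 0
	if (pcie_get_readrq(pdev) > 512)
		pcie_set_readrq(pdev, 512);
#endif /* 0 */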
/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes a BAR mask from the type of resource.
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;

	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);
	return bars;
}

/**
 * pci_resource_bar - get position of the BAR associated with a resource
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns BAR position in config space, or 0 if the BAR is invalid.
 */
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
{
	int reg;

	if (resno < PCI_ROM_RESOURCE) {
		*type = pci_bar_unknown;
		return PCI_BASE_ADDRESS_0 + 4 * resno;
	} else if (resno == PCI_ROM_RESOURCE) {
		*type = pci_bar_mem32;
		return dev->rom_base_reg;
	} else if (resno < PCI_BRIDGE_RESOURCES) {
		/* device specific resource */
		reg = pci_iov_resource_bar(dev, resno, type);
		if (reg)
			return reg;
	}

	dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno);
	return 0;
}

#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
/* SPIN_LOCK_UNLOCKED is deprecated; use the DEFINE_SPINLOCK() initializer. */
DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to check
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
	int seg, bus, slot, func, align_order, count;
	resource_size_t align = 0;
	char *p;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}
		if (sscanf(p, "%x:%x:%x.%x%n",
			&seg, &bus, &slot, &func, &count) != 4) {
			seg = 0;
			if (sscanf(p, "%x:%x.%x%n",
					&bus, &slot, &func, &count) != 3) {
				/* Invalid format */
				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
					p);
				break;
			}
		}
		p += count;
		if (seg == pci_domain_nr(dev->bus) &&
			bus == dev->bus->number &&
			slot == PCI_SLOT(dev->devfn) &&
			func == PCI_FUNC(dev->devfn)) {
			if (align_order == -1)
				align = PAGE_SIZE;
			else
				align = 1 << align_order;
			/* Found */
			break;
		}
		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
	spin_unlock(&resource_alignment_lock);
	return align;
}

/**
 * pci_is_reassigndev - check if specified PCI is target device to reassign
 * @dev: the PCI device to check
 *
 * RETURNS: non-zero if the PCI device is a target device to reassign,
 *          zero otherwise.
 */
int pci_is_reassigndev(struct pci_dev *dev)
{
	return (pci_specified_resource_alignment(dev) != 0);
}

ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
{
	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
	spin_lock(&resource_alignment_lock);
	strncpy(resource_alignment_param, buf, count);
	resource_alignment_param[count] = '\0';
	spin_unlock(&resource_alignment_lock);
	return count;
}

ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
{
	size_t count;

	spin_lock(&resource_alignment_lock);
	count = snprintf(buf, size, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);
	return count;
}
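/*
 * Note on the accepted format (illustrative examples): the parser above
 * takes a semicolon- or comma-separated list of devices, each optionally
 * prefixed with an alignment order:
 *
 *	[<order>@][<domain>:]<bus>:<slot>.<func>[; ...]
 *
 * e.g. "20@00:1f.0" requests 1 MB (2^20) alignment for device 00:1f.0,
 * while "0000:00:1f.0" alone requests the default PAGE_SIZE alignment.
 */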
static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}

static ssize_t pci_resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	return pci_set_resource_alignment_param(buf, count);
}

BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
					pci_resource_alignment_store);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
					&bus_attr_resource_alignment);
}

late_initcall(pci_resource_alignment_sysfs_init);

static void __devinit pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 * @dev: The PCI device of the root bridge.
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
{
	return 1;
}

static int __devinit pci_init(void)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL)
		pci_fixup_device(pci_fixup_final, dev);

	return 0;
}

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
					strlen(str + 19));
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);

device_initcall(pci_init);

EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_enable_wake);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
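/*
 * Illustrative kernel command line examples for the "pci=" early parameter
 * handled by pci_setup() above (values are examples only):
 *
 *	pci=nomsi,nodomains
 *	pci=cbiosize=4K,cbmemsize=32M
 *	pci=resource_alignment=20@00:1f.0
 */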