#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include "pci.h"

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */

DEFINE_RAW_SPINLOCK(pci_lock);

/*
 * Wrappers for all PCI configuration access functions. They just check
 * alignment, do locking and call the low-level functions pointed to
 * by pci_dev->ops.
 */

/*
 * Natural-alignment checks for each access width; 'pos' is the config
 * space register offset. Byte accesses can never be misaligned.
 */
#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

/*
 * Generate pci_bus_read_config_{byte,word,dword}(): read 'len' bytes at
 * 'pos' via bus->ops->read() under pci_lock (IRQs disabled), then narrow
 * the 32-bit result to 'type'. Returns a PCIBIOS_* status code.
 */
#define PCI_OP_READ(size, type, len) \
int pci_bus_read_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type *value)	\
{									\
	int res;							\
	unsigned long flags;						\
	u32 data = 0;							\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	raw_spin_lock_irqsave(&pci_lock, flags);			\
	res = bus->ops->read(bus, devfn, pos, len, &data);		\
	*value = (type)data;						\
	raw_spin_unlock_irqrestore(&pci_lock, flags);			\
	return res;							\
}

/*
 * Generate pci_bus_write_config_{byte,word,dword}(): same alignment and
 * locking rules as PCI_OP_READ(), forwarding 'value' to bus->ops->write().
 */
#define PCI_OP_WRITE(size, type, len) \
int pci_bus_write_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type value)	\
{									\
	int res;							\
	unsigned long flags;						\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	raw_spin_lock_irqsave(&pci_lock, flags);			\
	res = bus->ops->write(bus, devfn, pos, len, value);		\
	raw_spin_unlock_irqrestore(&pci_lock, flags);			\
	return res;							\
}

PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
69 70 int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn, 71 int where, int size, u32 *val) 72 { 73 void __iomem *addr; 74 75 addr = bus->ops->map_bus(bus, devfn, where); 76 if (!addr) { 77 *val = ~0; 78 return PCIBIOS_DEVICE_NOT_FOUND; 79 } 80 81 if (size == 1) 82 *val = readb(addr); 83 else if (size == 2) 84 *val = readw(addr); 85 else 86 *val = readl(addr); 87 88 return PCIBIOS_SUCCESSFUL; 89 } 90 EXPORT_SYMBOL_GPL(pci_generic_config_read); 91 92 int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn, 93 int where, int size, u32 val) 94 { 95 void __iomem *addr; 96 97 addr = bus->ops->map_bus(bus, devfn, where); 98 if (!addr) 99 return PCIBIOS_DEVICE_NOT_FOUND; 100 101 if (size == 1) 102 writeb(val, addr); 103 else if (size == 2) 104 writew(val, addr); 105 else 106 writel(val, addr); 107 108 return PCIBIOS_SUCCESSFUL; 109 } 110 EXPORT_SYMBOL_GPL(pci_generic_config_write); 111 112 int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn, 113 int where, int size, u32 *val) 114 { 115 void __iomem *addr; 116 117 addr = bus->ops->map_bus(bus, devfn, where & ~0x3); 118 if (!addr) { 119 *val = ~0; 120 return PCIBIOS_DEVICE_NOT_FOUND; 121 } 122 123 *val = readl(addr); 124 125 if (size <= 2) 126 *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); 127 128 return PCIBIOS_SUCCESSFUL; 129 } 130 EXPORT_SYMBOL_GPL(pci_generic_config_read32); 131 132 int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn, 133 int where, int size, u32 val) 134 { 135 void __iomem *addr; 136 u32 mask, tmp; 137 138 addr = bus->ops->map_bus(bus, devfn, where & ~0x3); 139 if (!addr) 140 return PCIBIOS_DEVICE_NOT_FOUND; 141 142 if (size == 4) { 143 writel(val, addr); 144 return PCIBIOS_SUCCESSFUL; 145 } else { 146 mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); 147 } 148 149 tmp = readl(addr) & mask; 150 tmp |= val << ((where & 0x3) * 8); 151 writel(tmp, addr); 152 153 return PCIBIOS_SUCCESSFUL; 154 } 155 
EXPORT_SYMBOL_GPL(pci_generic_config_write32);

/**
 * pci_bus_set_ops - Set raw operations of pci bus
 * @bus: pci bus struct
 * @ops: new raw operations
 *
 * Return previous raw operations
 */
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
{
	struct pci_ops *old_ops;
	unsigned long flags;

	/* Swap under pci_lock so no in-flight config access sees a torn update */
	raw_spin_lock_irqsave(&pci_lock, flags);
	old_ops = bus->ops;
	bus->ops = ops;
	raw_spin_unlock_irqrestore(&pci_lock, flags);
	return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);

/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so. Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

/*
 * Sleep until dev->block_cfg_access clears. Must be entered with pci_lock
 * held and IRQs disabled; the lock is dropped around schedule() and
 * re-acquired before the flag is re-checked, so the caller resumes its
 * critical section with the lock held again.
 */
static noinline void pci_wait_cfg(struct pci_dev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&pci_cfg_wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		raw_spin_unlock_irq(&pci_lock);
		schedule();
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
	__remove_wait_queue(&pci_cfg_wait, &wait);
}

/* Returns 0 on success, negative values indicate error.
*/
/*
 * Generate pci_user_read_config_{byte,word,dword}(): like the bus
 * accessors above, but honors per-device config blocking — if
 * dev->block_cfg_access is set, the caller sleeps in pci_wait_cfg()
 * until access is allowed. PCIBIOS_* status is converted to -errno.
 */
#define PCI_USER_READ_CONFIG(size, type)	\
int pci_user_read_config_##size						\
	(struct pci_dev *dev, int pos, type *val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	u32 data = -1;							\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->read(dev->bus, dev->devfn,			\
					pos, sizeof(type), &data);	\
	raw_spin_unlock_irq(&pci_lock);					\
	*val = (type)data;						\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size, type)	\
int pci_user_write_config_##size					\
	(struct pci_dev *dev, int pos, type val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->write(dev->bus, dev->devfn,		\
					pos, sizeof(type), val);	\
	raw_spin_unlock_irq(&pci_lock);					\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);

PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)

/* VPD access through PCI 2.2+ VPD capability */

/**
 * pci_read_vpd - Read one entry from Vital Product Data
 * @dev: pci device struct
 * @pos: offset in vpd space
 * @count: number of bytes to read
 * @buf: pointer to where to store result
 */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->read(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_read_vpd);

/**
 *
pci_write_vpd - Write entry to Vital Product Data
 * @dev: pci device struct
 * @pos: offset in vpd space
 * @count: number of bytes to write
 * @buf: buffer containing write data
 */
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->write(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_write_vpd);

/* Upper bound on VPD size: one past the highest addressable VPD offset */
#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)

/**
 * pci_vpd_size - determine actual size of Vital Product Data
 * @dev: pci device struct
 * @old_size: current assumed size, also maximum allowed size
 *
 * Walks the VPD resource list tag by tag until the End tag; returns the
 * offset just past the End tag, or 0 if the list is malformed or no End
 * tag is found within @old_size bytes.
 */
static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
{
	size_t off = 0;
	unsigned char header[1+2];	/* 1 byte tag, 2 bytes length */

	while (off < old_size &&
	       pci_read_vpd(dev, off, 1, header) == 1) {
		unsigned char tag;

		if (header[0] & PCI_VPD_LRDT) {
			/* Large Resource Data Type Tag */
			tag = pci_vpd_lrdt_tag(header);
			/* Only read length from known tag items */
			if ((tag == PCI_VPD_LTIN_ID_STRING) ||
			    (tag == PCI_VPD_LTIN_RO_DATA) ||
			    (tag == PCI_VPD_LTIN_RW_DATA)) {
				if (pci_read_vpd(dev, off+1, 2,
						 &header[1]) != 2) {
					dev_warn(&dev->dev,
						 "invalid large VPD tag %02x size at offset %zu",
						 tag, off + 1);
					return 0;
				}
				off += PCI_VPD_LRDT_TAG_SIZE +
					pci_vpd_lrdt_size(header);
			}
		} else {
			/* Short Resource Data Type Tag */
			off += PCI_VPD_SRDT_TAG_SIZE +
				pci_vpd_srdt_size(header);
			tag = pci_vpd_srdt_tag(header);
		}

		if (tag == PCI_VPD_STIN_END)	/* End tag descriptor */
			return off;

		/* Unknown large tags fall through here with 'off' unadvanced */
		if ((tag != PCI_VPD_LTIN_ID_STRING) &&
		    (tag != PCI_VPD_LTIN_RO_DATA) &&
		    (tag != PCI_VPD_LTIN_RW_DATA)) {
			dev_warn(&dev->dev,
				 "invalid %s VPD tag %02x at offset %zu",
				 (header[0] & PCI_VPD_LRDT) ? "large" : "short",
				 tag, off);
			return 0;
		}
	}
	return 0;
}

/*
 * Wait for last operation to complete.
 * This code has to spin since there is no other notification from the PCI
 * hardware. Since the VPD is often implemented by serial attachment to an
 * EEPROM, it may take many milliseconds to complete.
 *
 * Returns 0 on success, negative values indicate error.
 */
static int pci_vpd_wait(struct pci_dev *dev)
{
	struct pci_vpd *vpd = dev->vpd;
	unsigned long timeout = jiffies + msecs_to_jiffies(50);
	unsigned long max_sleep = 16;
	u16 status;
	int ret;

	if (!vpd->busy)
		return 0;

	while (time_before(jiffies, timeout)) {
		ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						&status);
		if (ret < 0)
			return ret;

		/*
		 * The F bit toggles toward vpd->flag when the device has
		 * finished the pending read (flag=PCI_VPD_ADDR_F) or
		 * write (flag=0) operation.
		 */
		if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
			vpd->busy = 0;
			return 0;
		}

		if (fatal_signal_pending(current))
			return -EINTR;

		/* Exponential backoff, capped at ~1ms per poll */
		usleep_range(10, max_sleep);
		if (max_sleep < 1024)
			max_sleep *= 2;
	}

	dev_warn(&dev->dev, "VPD access failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
	return -ETIMEDOUT;
}

/*
 * Read @count bytes of VPD starting at @pos into @arg. Reads are done
 * dword-at-a-time through the ADDR/DATA register pair; leading bytes of
 * the first dword are skipped to honor an unaligned @pos.
 */
static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
			    void *arg)
{
	struct pci_vpd *vpd = dev->vpd;
	int ret;
	loff_t end = pos + count;
	u8 *buf = arg;

	if (pos < 0)
		return -EINVAL;

	/* Lazily size the VPD on first access */
	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->len = pci_vpd_size(dev, vpd->len);
	}

	if (vpd->len == 0)
		return -EIO;

	if (pos > vpd->len)
		return 0;

	/* Clamp the read to the detected VPD size */
	if (end > vpd->len) {
		end = vpd->len;
		count = end - pos;
	}

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;
		unsigned int i, skip;

		/* Writing ADDR with F clear starts a read of that dword */
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos & ~3);
		if (ret < 0)
			break;
		vpd->busy = 1;
		vpd->flag = PCI_VPD_ADDR_F;
		ret = pci_vpd_wait(dev);
		if (ret < 0)
			break;

		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
		if (ret < 0)
			break;

		/* Copy out bytes of 'val', skipping those before 'pos' */
		skip = pos & 3;
		for (i = 0; i < sizeof(u32); i++) {
			if (i >= skip) {
				*buf++ = val;
				if (++pos == end)
					break;
			}
			val >>= 8;
		}
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

/*
 * Write @count bytes from @arg to VPD at @pos. The hardware interface is
 * dword-only, so both @pos and @count must be dword aligned.
 */
static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
			     const void *arg)
{
	struct pci_vpd *vpd = dev->vpd;
	const u8 *buf = arg;
	loff_t end = pos + count;
	int ret = 0;

	if (pos < 0 || (pos & 3) || (count & 3))
		return -EINVAL;

	/* Lazily size the VPD on first access */
	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->len = pci_vpd_size(dev, vpd->len);
	}

	if (vpd->len == 0)
		return -EIO;

	if (end > vpd->len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;

		/* Assemble the next little-endian dword from the buffer */
		val = *buf++;
		val |= *buf++ << 8;
		val |= *buf++ << 16;
		val |= *buf++ << 24;

		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
		if (ret < 0)
			break;
		/* Writing ADDR with F set starts the write of DATA */
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos | PCI_VPD_ADDR_F);
		if (ret < 0)
			break;

		vpd->busy = 1;
		vpd->flag = 0;
		ret = pci_vpd_wait(dev);
		if (ret < 0)
			break;

		pos += sizeof(u32);
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static const struct pci_vpd_ops pci_vpd_ops = {
	.read = pci_vpd_read,
	.write = pci_vpd_write,
};

/*
 * Quirked devices (PCI_DEV_FLAGS_VPD_REF_F0) share VPD with function 0
 * of the same slot; redirect accesses there.
 */
static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
			       void *arg)
{
	struct pci_dev *tdev = pci_get_slot(dev->bus,
					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
	ssize_t ret;

	if (!tdev)
		return -ENODEV;

	ret = pci_read_vpd(tdev, pos, count, arg);
	pci_dev_put(tdev);
	return ret;
}

static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
				const void *arg)
{
	struct pci_dev *tdev = pci_get_slot(dev->bus,
					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
	ssize_t ret;

	if (!tdev)
		return -ENODEV;

	ret = pci_write_vpd(tdev, pos, count, arg);
	pci_dev_put(tdev);
	return ret;
}

static const struct pci_vpd_ops pci_vpd_f0_ops = {
	.read = pci_vpd_f0_read,
	.write = pci_vpd_f0_write,
};

/*
 * Probe for the VPD capability and, if present, allocate and attach the
 * per-device VPD state. Returns 0 on success, -ENODEV if the device has
 * no VPD capability, -ENOMEM on allocation failure.
 */
int pci_vpd_init(struct pci_dev *dev)
{
	struct pci_vpd *vpd;
	u8 cap;

	cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
	if (!cap)
		return -ENODEV;

	/* NOTE(review): GFP_ATOMIC presumably for atomic-context callers — confirm */
	vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
	if (!vpd)
		return -ENOMEM;

	/* Start with the maximum; pci_vpd_size() trims on first access */
	vpd->len = PCI_VPD_MAX_SIZE;
	if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
		vpd->ops = &pci_vpd_f0_ops;
	else
		vpd->ops = &pci_vpd_ops;
	mutex_init(&vpd->lock);
	vpd->cap = cap;
	vpd->busy = 0;
	vpd->valid = 0;
	dev->vpd = vpd;
	return 0;
}

void pci_vpd_release(struct pci_dev *dev)
{
	kfree(dev->vpd);
}

/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev: pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
	might_sleep();

	raw_spin_lock_irq(&pci_lock);
	/* If someone else holds the block, sleep until they release it */
	if (dev->block_cfg_access)
		pci_wait_cfg(dev);
	dev->block_cfg_access = 1;
	raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);

/**
 * pci_cfg_access_trylock - try to lock PCI config reads/writes
 * @dev: pci device struct
 *
 * Same as pci_cfg_access_lock, but will return 0 if access is
 * already locked, 1 otherwise. This function can be used from
 * atomic contexts.
 */
bool pci_cfg_access_trylock(struct pci_dev *dev)
{
	unsigned long flags;
	bool locked = true;

	raw_spin_lock_irqsave(&pci_lock, flags);
	if (dev->block_cfg_access)
		locked = false;
	else
		dev->block_cfg_access = 1;
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return locked;
}
EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);

/**
 * pci_cfg_access_unlock - Unlock PCI config reads/writes
 * @dev: pci device struct
 *
 * This function allows PCI config accesses to resume.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);

	/* This indicates a problem in the caller, but we don't need
	 * to kill them, unlike a double-block above.
	 */
	WARN_ON(!dev->block_cfg_access);

	dev->block_cfg_access = 0;
	/* Wake every sleeper; each re-checks its own device's flag */
	wake_up_all(&pci_cfg_wait);
	raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);

/* PCIe Capability structure version field (PCI_EXP_FLAGS_VERS) */
static inline int pcie_cap_version(const struct pci_dev *dev)
{
	return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}

static bool pcie_downstream_port(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_DOWNSTREAM;
}

/* Device types that implement the Link Capability/Control/Status registers */
bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ENDPOINT ||
	       type == PCI_EXP_TYPE_LEG_END ||
	       type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_UPSTREAM ||
	       type == PCI_EXP_TYPE_DOWNSTREAM ||
	       type == PCI_EXP_TYPE_PCI_BRIDGE ||
	       type == PCI_EXP_TYPE_PCIE_BRIDGE;
}

/* Slot registers exist only on downstream ports that report a slot */
static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
	return pcie_downstream_port(dev) &&
	       pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}

/* Root registers exist on Root Ports and Root Complex Event Collectors */
static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_RC_EC;
}

/*
 * Whether register 'pos' of the PCI Express Capability is implemented on
 * this device, based on its device type and capability version.
 */
static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
	if (!pci_is_pcie(dev))
		return false;

	switch (pos) {
	case PCI_EXP_FLAGS:
		return true;
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVSTA:
		return true;
	case PCI_EXP_LNKCAP:
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKSTA:
		return pcie_cap_has_lnkctl(dev);
	case PCI_EXP_SLTCAP:
	case PCI_EXP_SLTCTL:
	case PCI_EXP_SLTSTA:
		return pcie_cap_has_sltctl(dev);
	case PCI_EXP_RTCTL:
	case PCI_EXP_RTCAP:
	case PCI_EXP_RTSTA:
		return pcie_cap_has_rtctl(dev);
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_LNKSTA2:
		/* The "2" register block only exists in capability v2+ */
		return pcie_cap_version(dev) > 1;
	default:
		return false;
	}
}

/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
	int ret;

	*val = 0;
	if (pos & 1)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_word() fails, it may
		 * have been written as 0xFFFF if hardware error happens
		 * during pci_read_config_word().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/*
	 * For Functions that do not implement the Slot Capabilities,
	 * Slot Status, and Slot Control registers, these spaces must
	 * be hardwired to 0b, with the exception of the Presence Detect
	 * State bit in the Slot Status register of Downstream Ports,
	 * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
	 */
	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);

int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
	int ret;

	*val = 0;
	if (pos & 3)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_dword() fails, it may
		 * have been written as 0xFFFFFFFF if hardware error happens
		 * during pci_read_config_dword().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/* Same Presence Detect State emulation as pcie_capability_read_word() */
	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);

int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
	if (pos & 1)
		return -EINVAL;

	/* Writes to unimplemented registers are silently dropped */
	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);

int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
	if (pos & 3)
		return -EINVAL;

	/* Writes to unimplemented registers are silently dropped */
	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);

/* Read-modify-write of a 16-bit PCIe capability register */
int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
				       u16 clear, u16 set)
{
	int ret;
	u16 val;

	ret = pcie_capability_read_word(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_word(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_word);

/* Read-modify-write of a 32-bit PCIe capability register */
int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
					u32 clear, u32 set)
{
	int ret;
	u32 val;

	ret = pcie_capability_read_dword(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_dword(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);