#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include "pci.h"

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */

DEFINE_RAW_SPINLOCK(pci_lock);

/*
 * Wrappers for all PCI configuration access functions. They just check
 * alignment, do locking and call the low-level functions pointed to
 * by pci_bus->ops.
 */

#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#define PCI_OP_READ(size,type,len) \
int pci_bus_read_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type *value)	\
{									\
	int res;							\
	unsigned long flags;						\
	u32 data = 0;							\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	raw_spin_lock_irqsave(&pci_lock, flags);			\
	res = bus->ops->read(bus, devfn, pos, len, &data);		\
	*value = (type)data;						\
	raw_spin_unlock_irqrestore(&pci_lock, flags);			\
	return res;							\
}

#define PCI_OP_WRITE(size,type,len) \
int pci_bus_write_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type value)	\
{									\
	int res;							\
	unsigned long flags;						\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	raw_spin_lock_irqsave(&pci_lock, flags);			\
	res = bus->ops->write(bus, devfn, pos, len, value);		\
	raw_spin_unlock_irqrestore(&pci_lock, flags);			\
	return res;							\
}

PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);

/**
 * pci_bus_set_ops - Set raw operations of pci bus
 * @bus: pci bus struct
 * @ops: new raw operations
 *
 * Return previous raw operations
 */
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
{
	struct pci_ops *old_ops;
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);
	old_ops = bus->ops;
	bus->ops = ops;
	raw_spin_unlock_irqrestore(&pci_lock, flags);
	return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);

/**
 * pci_read_vpd - Read one entry from Vital Product Data
 * @dev: pci device struct
 * @pos: offset in vpd space
 * @count: number of bytes to read
 * @buf: pointer to where to store result
 */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->read(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_read_vpd);

/**
 * pci_write_vpd - Write entry to Vital Product Data
 * @dev: pci device struct
 * @pos: offset in vpd space
 * @count: number of bytes to write
 * @buf: buffer containing write data
 */
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->write(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_write_vpd);
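
/*
 * Illustrative sketch of a caller of the VPD accessors above; "pdev" and
 * the buffer are hypothetical and not taken from this file.  The call may
 * sleep and returns the byte count on success or a negative errno.
 *
 *	u8 buf[64];
 *	ssize_t n = pci_read_vpd(pdev, 0, sizeof(buf), buf);
 *
 *	if (n < 0)
 *		dev_warn(&pdev->dev, "VPD read failed: %zd\n", n);
 */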

/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so.  Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

/*
 * Sleep until the device's config accesses are unblocked.  Called with
 * pci_lock held and interrupts disabled; the lock is dropped across the
 * sleep and re-taken before returning.
 */
static noinline void pci_wait_cfg(struct pci_dev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&pci_cfg_wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		raw_spin_unlock_irq(&pci_lock);
		schedule();
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
	__remove_wait_queue(&pci_cfg_wait, &wait);
}

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_READ_CONFIG(size,type)					\
int pci_user_read_config_##size						\
	(struct pci_dev *dev, int pos, type *val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	u32 data = -1;							\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->read(dev->bus, dev->devfn,			\
					pos, sizeof(type), &data);	\
	raw_spin_unlock_irq(&pci_lock);					\
	*val = (type)data;						\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size,type)				\
int pci_user_write_config_##size					\
	(struct pci_dev *dev, int pos, type val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->write(dev->bus, dev->devfn,		\
					pos, sizeof(type), val);	\
	raw_spin_unlock_irq(&pci_lock);					\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);

PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)

/* VPD access through PCI 2.2+ VPD capability */

#define PCI_VPD_PCI22_SIZE (PCI_VPD_ADDR_MASK + 1)

struct pci_vpd_pci22 {
	struct pci_vpd base;
	struct mutex lock;
	u16	flag;
	bool	busy;
	u8	cap;
};

/*
 * Wait for last operation to complete.
 * This code has to spin since there is no other notification from the PCI
 * hardware. Since the VPD is often implemented by serial attachment to an
 * EEPROM, it may take many milliseconds to complete.
 *
 * Returns 0 on success, negative values indicate error.
 */
static int pci_vpd_pci22_wait(struct pci_dev *dev)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	unsigned long timeout = jiffies + HZ/20 + 2;
	u16 status;
	int ret;

	if (!vpd->busy)
		return 0;

	for (;;) {
		ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						&status);
		if (ret < 0)
			return ret;

		if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
			vpd->busy = false;
			return 0;
		}

		if (time_after(jiffies, timeout)) {
			dev_printk(KERN_DEBUG, &dev->dev,
				   "vpd r/w failed. This is likely a firmware "
				   "bug on this device. Contact the card "
				   "vendor for a firmware update.");
			return -ETIMEDOUT;
		}
		if (fatal_signal_pending(current))
			return -EINTR;
		if (!cond_resched())
			udelay(10);
	}
}

static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count,
				  void *arg)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	int ret;
	loff_t end = pos + count;
	u8 *buf = arg;

	if (pos < 0 || pos > vpd->base.len || end > vpd->base.len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_pci22_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;
		unsigned int i, skip;

		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos & ~3);
		if (ret < 0)
			break;
		vpd->busy = true;
		vpd->flag = PCI_VPD_ADDR_F;
		ret = pci_vpd_pci22_wait(dev);
		if (ret < 0)
			break;

		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
		if (ret < 0)
			break;

		skip = pos & 3;
		for (i = 0; i < sizeof(u32); i++) {
			if (i >= skip) {
				*buf++ = val;
				if (++pos == end)
					break;
			}
			val >>= 8;
		}
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count,
				   const void *arg)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	const u8 *buf = arg;
	loff_t end = pos + count;
	int ret = 0;

	if (pos < 0 || (pos & 3) || (count & 3) || end > vpd->base.len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_pci22_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;

		val = *buf++;
		val |= *buf++ << 8;
		val |= *buf++ << 16;
		val |= *buf++ << 24;

		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
		if (ret < 0)
			break;
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos | PCI_VPD_ADDR_F);
		if (ret < 0)
			break;

		vpd->busy = true;
		vpd->flag = 0;
		ret = pci_vpd_pci22_wait(dev);
		if (ret < 0)
			break;

		pos += sizeof(u32);
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static void pci_vpd_pci22_release(struct pci_dev *dev)
{
	kfree(container_of(dev->vpd, struct pci_vpd_pci22, base));
}

static const struct pci_vpd_ops pci_vpd_pci22_ops = {
	.read = pci_vpd_pci22_read,
	.write = pci_vpd_pci22_write,
	.release = pci_vpd_pci22_release,
};

int pci_vpd_pci22_init(struct pci_dev *dev)
{
	struct pci_vpd_pci22 *vpd;
	u8 cap;

	cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
	if (!cap)
		return -ENODEV;
	vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
	if (!vpd)
		return -ENOMEM;

	vpd->base.len = PCI_VPD_PCI22_SIZE;
	vpd->base.ops = &pci_vpd_pci22_ops;
	mutex_init(&vpd->lock);
	vpd->cap = cap;
	vpd->busy = false;
	dev->vpd = &vpd->base;
	return 0;
}

/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev: pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
	might_sleep();

	raw_spin_lock_irq(&pci_lock);
	if (dev->block_cfg_access)
		pci_wait_cfg(dev);
	dev->block_cfg_access = 1;
	raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);

/**
 * pci_cfg_access_trylock - try to lock PCI config reads/writes
 * @dev: pci device struct
 *
 * Same as pci_cfg_access_lock, but will return 0 if access is
 * already locked, 1 otherwise. This function can be used from
 * atomic contexts.
 */
bool pci_cfg_access_trylock(struct pci_dev *dev)
{
	unsigned long flags;
	bool locked = true;

	raw_spin_lock_irqsave(&pci_lock, flags);
	if (dev->block_cfg_access)
		locked = false;
	else
		dev->block_cfg_access = 1;
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return locked;
}
EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);

/**
 * pci_cfg_access_unlock - Unlock PCI config reads/writes
 * @dev: pci device struct
 *
 * This function allows PCI config accesses to resume.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);

	/* This indicates a problem in the caller, but we don't need
	 * to kill them, unlike a double-block above. */
	WARN_ON(!dev->block_cfg_access);

	dev->block_cfg_access = 0;
	wake_up_all(&pci_cfg_wait);
	raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
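
/*
 * Illustrative pairing of the locking helpers above ("pdev" is a
 * hypothetical device pointer).  Code that must keep user config accesses
 * away while it runs BIST or performs a D-state transition brackets that
 * work with the lock/unlock calls, or uses pci_cfg_access_trylock() when
 * it cannot sleep:
 *
 *	pci_cfg_access_lock(pdev);
 *	... run BIST or change power state ...
 *	pci_cfg_access_unlock(pdev);
 */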

static inline int pcie_cap_version(const struct pci_dev *dev)
{
	return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}

static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ENDPOINT ||
	       type == PCI_EXP_TYPE_LEG_END ||
	       type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_UPSTREAM ||
	       type == PCI_EXP_TYPE_DOWNSTREAM ||
	       type == PCI_EXP_TYPE_PCI_BRIDGE ||
	       type == PCI_EXP_TYPE_PCIE_BRIDGE;
}

static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return (type == PCI_EXP_TYPE_ROOT_PORT ||
		type == PCI_EXP_TYPE_DOWNSTREAM) &&
	       pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}

static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_RC_EC;
}

static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
	if (!pci_is_pcie(dev))
		return false;

	switch (pos) {
	case PCI_EXP_FLAGS:
		return true;
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVSTA:
		return true;
	case PCI_EXP_LNKCAP:
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKSTA:
		return pcie_cap_has_lnkctl(dev);
	case PCI_EXP_SLTCAP:
	case PCI_EXP_SLTCTL:
	case PCI_EXP_SLTSTA:
		return pcie_cap_has_sltctl(dev);
	case PCI_EXP_RTCTL:
	case PCI_EXP_RTCAP:
	case PCI_EXP_RTSTA:
		return pcie_cap_has_rtctl(dev);
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_LNKSTA2:
		return pcie_cap_version(dev) > 1;
	default:
		return false;
	}
}

/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8).  They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
	int ret;

	*val = 0;
	if (pos & 1)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_word() fails; it may
		 * have been written as 0xFFFF if a hardware error happened
		 * during pci_read_config_word().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/*
	 * For Functions that do not implement the Slot Capabilities,
	 * Slot Status, and Slot Control registers, these spaces must
	 * be hardwired to 0b, with the exception of the Presence Detect
	 * State bit in the Slot Status register of Downstream Ports,
	 * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
	 */
	if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
	    pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
		*val = PCI_EXP_SLTSTA_PDS;
	}

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);

int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
	int ret;

	*val = 0;
	if (pos & 3)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_dword() fails; it may
		 * have been written as 0xFFFFFFFF if a hardware error happened
		 * during pci_read_config_dword().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL &&
	    pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
		*val = PCI_EXP_SLTSTA_PDS;
	}

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);

int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
	if (pos & 1)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);

int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
	if (pos & 3)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);

int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
				       u16 clear, u16 set)
{
	int ret;
	u16 val;

	ret = pcie_capability_read_word(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_word(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_word);

int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
					u32 clear, u32 set)
{
	int ret;
	u32 val;

	ret = pcie_capability_read_dword(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_dword(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
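
/*
 * Illustrative use of the read-modify-write helpers above; the register
 * and bit chosen here are only an example.  "pos" is an offset within the
 * PCI Express Capability, so setting the Common Clock Configuration bit
 * in Link Control would look like:
 *
 *	pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
 *					   0, PCI_EXP_LNKCTL_CCC);
 *
 * Clearing a bit instead passes it in the "clear" mask.
 */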