// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include "pci.h"

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */

DEFINE_RAW_SPINLOCK(pci_lock);

/*
 * Wrappers for all PCI configuration access functions.  They just check
 * alignment, do locking and call the low-level functions pointed to
 * by pci_bus->ops.
 */

#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#ifdef CONFIG_PCI_LOCKLESS_CONFIG
# define pci_lock_config(f)     do { (void)(f); } while (0)
# define pci_unlock_config(f)   do { (void)(f); } while (0)
#else
# define pci_lock_config(f)     raw_spin_lock_irqsave(&pci_lock, f)
# define pci_unlock_config(f)   raw_spin_unlock_irqrestore(&pci_lock, f)
#endif

#define PCI_OP_READ(size, type, len) \
int noinline pci_bus_read_config_##size \
        (struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
{ \
        int res; \
        unsigned long flags; \
        u32 data = 0; \
        if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
        pci_lock_config(flags); \
        res = bus->ops->read(bus, devfn, pos, len, &data); \
        if (res) \
                PCI_SET_ERROR_RESPONSE(value); \
        else \
                *value = (type)data; \
        pci_unlock_config(flags); \
        return res; \
}

#define PCI_OP_WRITE(size, type, len) \
int noinline pci_bus_write_config_##size \
        (struct pci_bus *bus, unsigned int devfn, int pos, type value) \
{ \
        int res; \
        unsigned long flags; \
        if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
        pci_lock_config(flags); \
        res = bus->ops->write(bus, devfn, pos, len, value); \
        pci_unlock_config(flags); \
        return res; \
}

PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
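
/*
 * Usage sketch (illustrative only; "bus", "devfn" and the error handling
 * are assumptions, not part of this file): a caller that only has a
 * struct pci_bus, e.g. during early enumeration, can read the
 * Vendor/Device ID dword through the wrapper generated above:
 *
 *      u32 id;
 *
 *      if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &id))
 *              return;         // PCIBIOS_* error; *id was set to all-ones
 *      if (id == 0xffffffff)
 *              return;         // no device responded at this devfn
 *
 * Callers that already hold a struct pci_dev should normally prefer the
 * pci_read_config_*() helpers at the end of this file.
 */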

int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
                            int where, int size, u32 *val)
{
        void __iomem *addr;

        addr = bus->ops->map_bus(bus, devfn, where);
        if (!addr)
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (size == 1)
                *val = readb(addr);
        else if (size == 2)
                *val = readw(addr);
        else
                *val = readl(addr);

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read);

int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
                             int where, int size, u32 val)
{
        void __iomem *addr;

        addr = bus->ops->map_bus(bus, devfn, where);
        if (!addr)
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (size == 1)
                writeb(val, addr);
        else if (size == 2)
                writew(val, addr);
        else
                writel(val, addr);

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write);

int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
                              int where, int size, u32 *val)
{
        void __iomem *addr;

        addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
        if (!addr)
                return PCIBIOS_DEVICE_NOT_FOUND;

        *val = readl(addr);

        if (size <= 2)
                *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read32);

int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
                               int where, int size, u32 val)
{
        void __iomem *addr;
        u32 mask, tmp;

        addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
        if (!addr)
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (size == 4) {
                writel(val, addr);
                return PCIBIOS_SUCCESSFUL;
        }

        /*
         * In general, hardware that supports only 32-bit writes on PCI is
         * not spec-compliant.  For example, software may perform a 16-bit
         * write.  If the hardware only supports 32-bit accesses, we must
         * do a 32-bit read, merge in the 16 bits we intend to write,
         * followed by a 32-bit write.  If the 16 bits we *don't* intend to
         * write happen to have any RW1C (write-one-to-clear) bits set, we
         * just inadvertently cleared something we shouldn't have.
         */
        if (!bus->unsafe_warn) {
                dev_warn(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
                         size, pci_domain_nr(bus), bus->number,
                         PCI_SLOT(devfn), PCI_FUNC(devfn), where);
                bus->unsafe_warn = 1;
        }

        mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
        tmp = readl(addr) & mask;
        tmp |= val << ((where & 0x3) * 8);
        writel(tmp, addr);

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write32);
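
/*
 * Worked example (illustrative only, not part of the API): for a
 * hypothetical 16-bit write of 0xABCD to offset 0x06, the code above maps
 * the aligned dword at 0x04 and merges as follows:
 *
 *      where & 0x3  = 0x2                  (starting byte within the dword)
 *      mask         = ~(0xFFFF << 16)      = 0x0000FFFF
 *      tmp          = readl(addr) & 0x0000FFFF
 *      tmp         |= 0xABCD << 16
 *      writel(tmp, addr)                   (bytes 0x04-0x05 rewritten as read)
 *
 * Any RW1C bit that happened to read back as 1 in bytes 0x04-0x05 is
 * written back as 1 and therefore cleared, which is exactly the hazard the
 * warning above describes.  pci_generic_config_read32() performs the
 * inverse shift-and-mask on the read path.
 */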

/**
 * pci_bus_set_ops - Set raw operations of pci bus
 * @bus: pci bus struct
 * @ops: new raw operations
 *
 * Return previous raw operations
 */
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
{
        struct pci_ops *old_ops;
        unsigned long flags;

        raw_spin_lock_irqsave(&pci_lock, flags);
        old_ops = bus->ops;
        bus->ops = ops;
        raw_spin_unlock_irqrestore(&pci_lock, flags);
        return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);

/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so.  Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

static noinline void pci_wait_cfg(struct pci_dev *dev)
        __must_hold(&pci_lock)
{
        do {
                raw_spin_unlock_irq(&pci_lock);
                wait_event(pci_cfg_wait, !dev->block_cfg_access);
                raw_spin_lock_irq(&pci_lock);
        } while (dev->block_cfg_access);
}

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_READ_CONFIG(size, type) \
int pci_user_read_config_##size \
        (struct pci_dev *dev, int pos, type *val) \
{ \
        int ret = PCIBIOS_SUCCESSFUL; \
        u32 data = -1; \
        if (PCI_##size##_BAD) \
                return -EINVAL; \
        raw_spin_lock_irq(&pci_lock); \
        if (unlikely(dev->block_cfg_access)) \
                pci_wait_cfg(dev); \
        ret = dev->bus->ops->read(dev->bus, dev->devfn, \
                                  pos, sizeof(type), &data); \
        raw_spin_unlock_irq(&pci_lock); \
        if (ret) \
                PCI_SET_ERROR_RESPONSE(val); \
        else \
                *val = (type)data; \
        return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size, type) \
int pci_user_write_config_##size \
        (struct pci_dev *dev, int pos, type val) \
{ \
        int ret = PCIBIOS_SUCCESSFUL; \
        if (PCI_##size##_BAD) \
                return -EINVAL; \
        raw_spin_lock_irq(&pci_lock); \
        if (unlikely(dev->block_cfg_access)) \
                pci_wait_cfg(dev); \
        ret = dev->bus->ops->write(dev->bus, dev->devfn, \
                                   pos, sizeof(type), val); \
        raw_spin_unlock_irq(&pci_lock); \
        return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);

PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)
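
/*
 * Usage sketch (illustrative only; "pdev" and the error handling are
 * assumptions): the pci_user_*() variants above are meant for paths that
 * act on behalf of userspace (e.g. the sysfs "config" attribute or the
 * /proc/bus/pci interface) and therefore honour pci_cfg_access_lock():
 *
 *      u16 vendor;
 *      int ret = pci_user_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
 *
 *      if (ret)
 *              return ret;     // already a negative errno, not PCIBIOS_*
 *
 * In-kernel drivers should normally use pci_read_config_*() instead, which
 * does not sleep waiting for a blocked device.
 */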

/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev: pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
        might_sleep();

        raw_spin_lock_irq(&pci_lock);
        if (dev->block_cfg_access)
                pci_wait_cfg(dev);
        dev->block_cfg_access = 1;
        raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);

/**
 * pci_cfg_access_trylock - try to lock PCI config reads/writes
 * @dev: pci device struct
 *
 * Same as pci_cfg_access_lock(), but will return false if access is
 * already locked, true otherwise.  This function can be used from
 * atomic contexts.
 */
bool pci_cfg_access_trylock(struct pci_dev *dev)
{
        unsigned long flags;
        bool locked = true;

        raw_spin_lock_irqsave(&pci_lock, flags);
        if (dev->block_cfg_access)
                locked = false;
        else
                dev->block_cfg_access = 1;
        raw_spin_unlock_irqrestore(&pci_lock, flags);

        return locked;
}
EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);

/**
 * pci_cfg_access_unlock - Unlock PCI config reads/writes
 * @dev: pci device struct
 *
 * This function allows PCI config accesses to resume.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&pci_lock, flags);

        /*
         * This indicates a problem in the caller, but we don't need
         * to kill them, unlike a double-block above.
         */
        WARN_ON(!dev->block_cfg_access);

        dev->block_cfg_access = 0;
        raw_spin_unlock_irqrestore(&pci_lock, flags);

        wake_up_all(&pci_cfg_wait);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
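
/*
 * Usage sketch (illustrative only; "dev" and the reset step are
 * assumptions): a typical caller brackets an operation during which
 * config space must not be touched on the device's behalf, such as a
 * function reset, with the lock/unlock pair above:
 *
 *      pci_cfg_access_lock(dev);       // may sleep if already blocked
 *      ... issue reset, poll for completion ...
 *      pci_cfg_access_unlock(dev);     // wakes sleepers on pci_cfg_wait
 *
 * pci_cfg_access_trylock() offers the same protection for atomic
 * contexts, returning false when the device is already blocked.
 */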

static inline int pcie_cap_version(const struct pci_dev *dev)
{
        return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}

bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
        int type = pci_pcie_type(dev);

        return type == PCI_EXP_TYPE_ENDPOINT ||
               type == PCI_EXP_TYPE_LEG_END ||
               type == PCI_EXP_TYPE_ROOT_PORT ||
               type == PCI_EXP_TYPE_UPSTREAM ||
               type == PCI_EXP_TYPE_DOWNSTREAM ||
               type == PCI_EXP_TYPE_PCI_BRIDGE ||
               type == PCI_EXP_TYPE_PCIE_BRIDGE;
}

bool pcie_cap_has_lnkctl2(const struct pci_dev *dev)
{
        return pcie_cap_has_lnkctl(dev) && pcie_cap_version(dev) > 1;
}

static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
        return pcie_downstream_port(dev) &&
               pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}

bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
        int type = pci_pcie_type(dev);

        return type == PCI_EXP_TYPE_ROOT_PORT ||
               type == PCI_EXP_TYPE_RC_EC;
}

static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
        if (!pci_is_pcie(dev))
                return false;

        switch (pos) {
        case PCI_EXP_FLAGS:
                return true;
        case PCI_EXP_DEVCAP:
        case PCI_EXP_DEVCTL:
        case PCI_EXP_DEVSTA:
                return true;
        case PCI_EXP_LNKCAP:
        case PCI_EXP_LNKCTL:
        case PCI_EXP_LNKSTA:
                return pcie_cap_has_lnkctl(dev);
        case PCI_EXP_SLTCAP:
        case PCI_EXP_SLTCTL:
        case PCI_EXP_SLTSTA:
                return pcie_cap_has_sltctl(dev);
        case PCI_EXP_RTCTL:
        case PCI_EXP_RTCAP:
        case PCI_EXP_RTSTA:
                return pcie_cap_has_rtctl(dev);
        case PCI_EXP_DEVCAP2:
        case PCI_EXP_DEVCTL2:
                return pcie_cap_version(dev) > 1;
        case PCI_EXP_LNKCAP2:
        case PCI_EXP_LNKCTL2:
        case PCI_EXP_LNKSTA2:
                return pcie_cap_has_lnkctl2(dev);
        default:
                return false;
        }
}

/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8).  They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
        int ret;

        *val = 0;
        if (pos & 1)
                return PCIBIOS_BAD_REGISTER_NUMBER;

        if (pcie_capability_reg_implemented(dev, pos)) {
                ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
                /*
                 * Reset *val to 0 if pci_read_config_word() fails; it may
                 * have been written as 0xFFFF (PCI_ERROR_RESPONSE) if the
                 * config read failed on PCI.
                 */
                if (ret)
                        *val = 0;
                return ret;
        }

        /*
         * For Functions that do not implement the Slot Capabilities,
         * Slot Status, and Slot Control registers, these spaces must
         * be hardwired to 0b, with the exception of the Presence Detect
         * State bit in the Slot Status register of Downstream Ports,
         * which must be hardwired to 1b.  (PCIe Base Spec 3.0, sec 7.8)
         */
        if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
            pos == PCI_EXP_SLTSTA)
                *val = PCI_EXP_SLTSTA_PDS;

        return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);

int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
        int ret;

        *val = 0;
        if (pos & 3)
                return PCIBIOS_BAD_REGISTER_NUMBER;

        if (pcie_capability_reg_implemented(dev, pos)) {
                ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
                /*
                 * Reset *val to 0 if pci_read_config_dword() fails; it may
                 * have been written as 0xFFFFFFFF (PCI_ERROR_RESPONSE) if
                 * the config read failed on PCI.
                 */
                if (ret)
                        *val = 0;
                return ret;
        }

        if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
            pos == PCI_EXP_SLTSTA)
                *val = PCI_EXP_SLTSTA_PDS;

        return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);

int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
        if (pos & 1)
                return PCIBIOS_BAD_REGISTER_NUMBER;

        if (!pcie_capability_reg_implemented(dev, pos))
                return 0;

        return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);

int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
        if (pos & 3)
                return PCIBIOS_BAD_REGISTER_NUMBER;

        if (!pcie_capability_reg_implemented(dev, pos))
                return 0;

        return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);

int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
                                                u16 clear, u16 set)
{
        int ret;
        u16 val;

        ret = pcie_capability_read_word(dev, pos, &val);
        if (ret)
                return ret;

        val &= ~clear;
        val |= set;
        return pcie_capability_write_word(dev, pos, val);
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_word_unlocked);

int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
                                              u16 clear, u16 set)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dev->pcie_cap_lock, flags);
        ret = pcie_capability_clear_and_set_word_unlocked(dev, pos, clear, set);
        spin_unlock_irqrestore(&dev->pcie_cap_lock, flags);

        return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_word_locked);

int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
                                        u32 clear, u32 set)
{
        int ret;
        u32 val;

        ret = pcie_capability_read_dword(dev, pos, &val);
        if (ret)
                return ret;

        val &= ~clear;
        val |= set;
        return pcie_capability_write_dword(dev, pos, val);
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
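
/*
 * Usage sketch (illustrative only; "dev", the register choice and the
 * PCI_EXP_* constants from <uapi/linux/pci_regs.h> are assumptions, not
 * part of this file): the accessors above take offsets relative to the
 * PCI Express Capability, so callers never add pci_pcie_cap() themselves.
 * For example, reading the negotiated link speed and then doing a
 * read-modify-write of a Device Control field:
 *
 *      u16 lnksta, speed;
 *
 *      if (!pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta))
 *              speed = lnksta & PCI_EXP_LNKSTA_CLS;
 *
 *      pcie_capability_clear_and_set_word_unlocked(dev, PCI_EXP_DEVCTL,
 *                                                  PCI_EXP_DEVCTL_READRQ,
 *                                                  PCI_EXP_DEVCTL_READRQ_256B);
 *
 * The *_locked variant takes dev->pcie_cap_lock so that registers which
 * may be updated concurrently by several subsystems (e.g. LNKCTL, touched
 * by hotplug and ASPM code) get an atomic read-modify-write.
 */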

int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
{
        if (pci_dev_is_disconnected(dev)) {
                PCI_SET_ERROR_RESPONSE(val);
                return PCIBIOS_DEVICE_NOT_FOUND;
        }
        return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_read_config_byte);

int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
{
        if (pci_dev_is_disconnected(dev)) {
                PCI_SET_ERROR_RESPONSE(val);
                return PCIBIOS_DEVICE_NOT_FOUND;
        }
        return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_read_config_word);

int pci_read_config_dword(const struct pci_dev *dev, int where,
                          u32 *val)
{
        if (pci_dev_is_disconnected(dev)) {
                PCI_SET_ERROR_RESPONSE(val);
                return PCIBIOS_DEVICE_NOT_FOUND;
        }
        return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_read_config_dword);

int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
{
        if (pci_dev_is_disconnected(dev))
                return PCIBIOS_DEVICE_NOT_FOUND;
        return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_byte);

int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
{
        if (pci_dev_is_disconnected(dev))
                return PCIBIOS_DEVICE_NOT_FOUND;
        return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_word);

int pci_write_config_dword(const struct pci_dev *dev, int where,
                           u32 val)
{
        if (pci_dev_is_disconnected(dev))
                return PCIBIOS_DEVICE_NOT_FOUND;
        return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_dword);