#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include "pci.h"

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */

DEFINE_RAW_SPINLOCK(pci_lock);

/*
 * Wrappers for all PCI configuration access functions. They just check
 * alignment, do locking and call the low-level functions pointed to
 * by pci_dev->ops.
 */

#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#define PCI_OP_READ(size,type,len) \
int pci_bus_read_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
{ \
	int res; \
	unsigned long flags; \
	u32 data = 0; \
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
	raw_spin_lock_irqsave(&pci_lock, flags); \
	res = bus->ops->read(bus, devfn, pos, len, &data); \
	*value = (type)data; \
	raw_spin_unlock_irqrestore(&pci_lock, flags); \
	return res; \
}

#define PCI_OP_WRITE(size,type,len) \
int pci_bus_write_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type value) \
{ \
	int res; \
	unsigned long flags; \
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
	raw_spin_lock_irqsave(&pci_lock, flags); \
	res = bus->ops->write(bus, devfn, pos, len, value); \
	raw_spin_unlock_irqrestore(&pci_lock, flags); \
	return res; \
}

PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);

/**
 * pci_bus_set_ops - Set raw operations of pci bus
 * @bus: pci bus struct
 * @ops: new raw operations
 *
 * Return previous raw operations
 */
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
{
	struct pci_ops *old_ops;
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);
	old_ops = bus->ops;
	bus->ops = ops;
	raw_spin_unlock_irqrestore(&pci_lock, flags);
	return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);

/**
 * pci_read_vpd - Read one entry from Vital Product Data
 * @dev: pci device struct
 * @pos: offset in vpd space
 * @count: number of bytes to read
 * @buf: pointer to where to store result
 */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->read(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_read_vpd);

/**
 * pci_write_vpd - Write entry to Vital Product Data
 * @dev: pci device struct
 * @pos: offset in vpd space
 * @count: number of bytes to write
 * @buf: buffer containing write data
 */
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->write(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_write_vpd);
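
/*
 * Illustrative usage sketch (not part of this file's API surface): drivers
 * usually go through the pci_read_config_*() wrappers, which resolve the
 * bus/devfn pair and end up in the accessors exported above.  The device
 * pointer and buffer names below are hypothetical.
 *
 *	u16 vendor;
 *	u8 vpd_buf[64];
 *
 *	pci_bus_read_config_word(pdev->bus, pdev->devfn, PCI_VENDOR_ID,
 *				 &vendor);
 *	pci_read_vpd(pdev, 0, sizeof(vpd_buf), vpd_buf);
 */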

/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so.  Some devices require this during BIST
 * and we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

static noinline void pci_wait_cfg(struct pci_dev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&pci_cfg_wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		raw_spin_unlock_irq(&pci_lock);
		schedule();
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
	__remove_wait_queue(&pci_cfg_wait, &wait);
}

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_READ_CONFIG(size,type) \
int pci_user_read_config_##size \
	(struct pci_dev *dev, int pos, type *val) \
{ \
	int ret = 0; \
	u32 data = -1; \
	if (PCI_##size##_BAD) \
		return -EINVAL; \
	raw_spin_lock_irq(&pci_lock); \
	if (unlikely(dev->block_cfg_access)) \
		pci_wait_cfg(dev); \
	ret = dev->bus->ops->read(dev->bus, dev->devfn, \
				  pos, sizeof(type), &data); \
	raw_spin_unlock_irq(&pci_lock); \
	*val = (type)data; \
	if (ret > 0) \
		ret = -EINVAL; \
	return ret; \
}

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size,type) \
int pci_user_write_config_##size \
	(struct pci_dev *dev, int pos, type val) \
{ \
	int ret = -EIO; \
	if (PCI_##size##_BAD) \
		return -EINVAL; \
	raw_spin_lock_irq(&pci_lock); \
	if (unlikely(dev->block_cfg_access)) \
		pci_wait_cfg(dev); \
	ret = dev->bus->ops->write(dev->bus, dev->devfn, \
				   pos, sizeof(type), val); \
	raw_spin_unlock_irq(&pci_lock); \
	if (ret > 0) \
		ret = -EINVAL; \
	return ret; \
}

PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)
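
/*
 * For reference, PCI_USER_READ_CONFIG(word, u16) above expands to roughly
 * the following (a sketch of the generated function, not literal compiler
 * output):
 *
 *	int pci_user_read_config_word(struct pci_dev *dev, int pos, u16 *val)
 *	{
 *		int ret = 0;
 *		u32 data = -1;
 *
 *		if (pos & 1)
 *			return -EINVAL;
 *		raw_spin_lock_irq(&pci_lock);
 *		if (unlikely(dev->block_cfg_access))
 *			pci_wait_cfg(dev);
 *		ret = dev->bus->ops->read(dev->bus, dev->devfn, pos,
 *					  sizeof(u16), &data);
 *		raw_spin_unlock_irq(&pci_lock);
 *		*val = (u16)data;
 *		if (ret > 0)
 *			ret = -EINVAL;
 *		return ret;
 *	}
 */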

/* VPD access through PCI 2.2+ VPD capability */

#define PCI_VPD_PCI22_SIZE (PCI_VPD_ADDR_MASK + 1)

struct pci_vpd_pci22 {
	struct pci_vpd base;
	struct mutex lock;
	u16 flag;
	bool busy;
	u8 cap;
};

/*
 * Wait for last operation to complete.
 * This code has to spin since there is no other notification from the PCI
 * hardware. Since the VPD is often implemented by serial attachment to an
 * EEPROM, it may take many milliseconds to complete.
 *
 * Returns 0 on success, negative values indicate error.
 */
static int pci_vpd_pci22_wait(struct pci_dev *dev)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	unsigned long timeout = jiffies + HZ/20 + 2;
	u16 status;
	int ret;

	if (!vpd->busy)
		return 0;

	for (;;) {
		ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						&status);
		if (ret < 0)
			return ret;

		if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
			vpd->busy = false;
			return 0;
		}

		if (time_after(jiffies, timeout)) {
			dev_printk(KERN_DEBUG, &dev->dev,
				   "vpd r/w failed. This is likely a firmware "
				   "bug on this device. Contact the card "
				   "vendor for a firmware update.");
			return -ETIMEDOUT;
		}
		if (fatal_signal_pending(current))
			return -EINTR;
		if (!cond_resched())
			udelay(10);
	}
}

static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count,
				  void *arg)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	int ret;
	loff_t end = pos + count;
	u8 *buf = arg;

	if (pos < 0 || pos > vpd->base.len || end > vpd->base.len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_pci22_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;
		unsigned int i, skip;

		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos & ~3);
		if (ret < 0)
			break;
		vpd->busy = true;
		vpd->flag = PCI_VPD_ADDR_F;
		ret = pci_vpd_pci22_wait(dev);
		if (ret < 0)
			break;

		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
		if (ret < 0)
			break;

		skip = pos & 3;
		for (i = 0; i < sizeof(u32); i++) {
			if (i >= skip) {
				*buf++ = val;
				if (++pos == end)
					break;
			}
			val >>= 8;
		}
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count,
				   const void *arg)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	const u8 *buf = arg;
	loff_t end = pos + count;
	int ret = 0;

	if (pos < 0 || (pos & 3) || (count & 3) || end > vpd->base.len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_pci22_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;

		val = *buf++;
		val |= *buf++ << 8;
		val |= *buf++ << 16;
		val |= *buf++ << 24;

		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
		if (ret < 0)
			break;
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos | PCI_VPD_ADDR_F);
		if (ret < 0)
			break;

		vpd->busy = true;
		vpd->flag = 0;
		ret = pci_vpd_pci22_wait(dev);
		if (ret < 0)
			break;

		pos += sizeof(u32);
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static void pci_vpd_pci22_release(struct pci_dev *dev)
{
	kfree(container_of(dev->vpd, struct pci_vpd_pci22, base));
}

static const struct pci_vpd_ops pci_vpd_pci22_ops = {
	.read = pci_vpd_pci22_read,
	.write = pci_vpd_pci22_write,
	.release = pci_vpd_pci22_release,
};

int pci_vpd_pci22_init(struct pci_dev *dev)
{
	struct pci_vpd_pci22 *vpd;
	u8 cap;

	cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
	if (!cap)
		return -ENODEV;
	vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
	if (!vpd)
		return -ENOMEM;

	vpd->base.len = PCI_VPD_PCI22_SIZE;
	vpd->base.ops = &pci_vpd_pci22_ops;
	mutex_init(&vpd->lock);
	vpd->cap = cap;
	vpd->busy = false;
	dev->vpd = &vpd->base;
	return 0;
}
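
/*
 * Summary of the PCI 2.2 VPD handshake implemented above (the flag bit is
 * PCI_VPD_ADDR_F in the VPD address register):
 *
 *	read:	write the dword-aligned offset with F cleared, wait for the
 *		hardware to set F, then read 4 bytes from PCI_VPD_DATA;
 *	write:	write 4 bytes to PCI_VPD_DATA, write the offset with F set,
 *		then wait for the hardware to clear F.
 *
 * pci_vpd_pci22_wait() polls for the expected flag state and gives up after
 * roughly 50ms in case a buggy device never completes the cycle.
 */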

/**
 * pci_vpd_truncate - Set available Vital Product Data size
 * @dev: pci device struct
 * @size: available memory in bytes
 *
 * Adjust size of available VPD area.
 */
int pci_vpd_truncate(struct pci_dev *dev, size_t size)
{
	if (!dev->vpd)
		return -EINVAL;

	/* limited by the access method */
	if (size > dev->vpd->len)
		return -EINVAL;

	dev->vpd->len = size;
	if (dev->vpd->attr)
		dev->vpd->attr->size = size;

	return 0;
}
EXPORT_SYMBOL(pci_vpd_truncate);

/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev: pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
	might_sleep();

	raw_spin_lock_irq(&pci_lock);
	if (dev->block_cfg_access)
		pci_wait_cfg(dev);
	dev->block_cfg_access = 1;
	raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);

/**
 * pci_cfg_access_trylock - try to lock PCI config reads/writes
 * @dev: pci device struct
 *
 * Same as pci_cfg_access_lock, but will return 0 if access is
 * already locked, 1 otherwise. This function can be used from
 * atomic contexts.
 */
bool pci_cfg_access_trylock(struct pci_dev *dev)
{
	unsigned long flags;
	bool locked = true;

	raw_spin_lock_irqsave(&pci_lock, flags);
	if (dev->block_cfg_access)
		locked = false;
	else
		dev->block_cfg_access = 1;
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return locked;
}
EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);

/**
 * pci_cfg_access_unlock - Unlock PCI config reads/writes
 * @dev: pci device struct
 *
 * This function allows PCI config accesses to resume.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);

	/*
	 * This indicates a problem in the caller, but we don't need
	 * to kill them, unlike a double-block above.
	 */
	WARN_ON(!dev->block_cfg_access);

	dev->block_cfg_access = 0;
	wake_up_all(&pci_cfg_wait);
	raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
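
/*
 * Illustrative usage sketch (not part of this file): a driver that needs
 * user config space access quiesced around a device-specific reset or BIST
 * can bracket that operation with the lock/unlock pair above; the reset
 * helper named here is hypothetical.  pci_cfg_access_trylock() is the
 * non-sleeping variant for atomic context.
 *
 *	pci_cfg_access_lock(pdev);
 *	my_device_reset(pdev);
 *	pci_cfg_access_unlock(pdev);
 */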