xref: /openbmc/linux/drivers/pci/access.c (revision bc05aa6e)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/delay.h>
3 #include <linux/pci.h>
4 #include <linux/module.h>
5 #include <linux/sched/signal.h>
6 #include <linux/slab.h>
7 #include <linux/ioport.h>
8 #include <linux/wait.h>
9 
10 #include "pci.h"
11 
/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */

DEFINE_RAW_SPINLOCK(pci_lock);

/*
 *  Wrappers for all PCI configuration access functions.  They just check
 *  alignment, do locking and call the low-level functions pointed to
 *  by pci_dev->ops.
 */

/* Nonzero when "pos" is misaligned for the given access width */
#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#ifdef CONFIG_PCI_LOCKLESS_CONFIG
/* Lockless config: no serialization; touch "f" only to avoid unused warnings */
# define pci_lock_config(f)	do { (void)(f); } while (0)
# define pci_unlock_config(f)	do { (void)(f); } while (0)
#else
# define pci_lock_config(f)	raw_spin_lock_irqsave(&pci_lock, f)
# define pci_unlock_config(f)	raw_spin_unlock_irqrestore(&pci_lock, f)
#endif
36 
/*
 * PCI_OP_READ() generates pci_bus_read_config_{byte,word,dword}(): check
 * alignment of "pos", take pci_lock (no-op when CONFIG_PCI_LOCKLESS_CONFIG),
 * and forward to the bus's low-level ->read() accessor.  "len" is the access
 * width in bytes.  Returns the PCIBIOS_* status from the accessor, or
 * PCIBIOS_BAD_REGISTER_NUMBER for a misaligned "pos".
 */
#define PCI_OP_READ(size, type, len) \
int pci_bus_read_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type *value)	\
{									\
	int res;							\
	unsigned long flags;						\
	u32 data = 0;							\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	pci_lock_config(flags);						\
	res = bus->ops->read(bus, devfn, pos, len, &data);		\
	*value = (type)data;						\
	pci_unlock_config(flags);					\
	return res;							\
}

/* Write-side counterpart of PCI_OP_READ() */
#define PCI_OP_WRITE(size, type, len) \
int pci_bus_write_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type value)	\
{									\
	int res;							\
	unsigned long flags;						\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	pci_lock_config(flags);						\
	res = bus->ops->write(bus, devfn, pos, len, value);		\
	pci_unlock_config(flags);					\
	return res;							\
}

/* Instantiate the accessors for 1-, 2- and 4-byte config accesses */
PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
78 
79 int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
80 			    int where, int size, u32 *val)
81 {
82 	void __iomem *addr;
83 
84 	addr = bus->ops->map_bus(bus, devfn, where);
85 	if (!addr) {
86 		*val = ~0;
87 		return PCIBIOS_DEVICE_NOT_FOUND;
88 	}
89 
90 	if (size == 1)
91 		*val = readb(addr);
92 	else if (size == 2)
93 		*val = readw(addr);
94 	else
95 		*val = readl(addr);
96 
97 	return PCIBIOS_SUCCESSFUL;
98 }
99 EXPORT_SYMBOL_GPL(pci_generic_config_read);
100 
101 int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
102 			     int where, int size, u32 val)
103 {
104 	void __iomem *addr;
105 
106 	addr = bus->ops->map_bus(bus, devfn, where);
107 	if (!addr)
108 		return PCIBIOS_DEVICE_NOT_FOUND;
109 
110 	if (size == 1)
111 		writeb(val, addr);
112 	else if (size == 2)
113 		writew(val, addr);
114 	else
115 		writel(val, addr);
116 
117 	return PCIBIOS_SUCCESSFUL;
118 }
119 EXPORT_SYMBOL_GPL(pci_generic_config_write);
120 
121 int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
122 			      int where, int size, u32 *val)
123 {
124 	void __iomem *addr;
125 
126 	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
127 	if (!addr) {
128 		*val = ~0;
129 		return PCIBIOS_DEVICE_NOT_FOUND;
130 	}
131 
132 	*val = readl(addr);
133 
134 	if (size <= 2)
135 		*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
136 
137 	return PCIBIOS_SUCCESSFUL;
138 }
139 EXPORT_SYMBOL_GPL(pci_generic_config_read32);
140 
141 int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
142 			       int where, int size, u32 val)
143 {
144 	void __iomem *addr;
145 	u32 mask, tmp;
146 
147 	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
148 	if (!addr)
149 		return PCIBIOS_DEVICE_NOT_FOUND;
150 
151 	if (size == 4) {
152 		writel(val, addr);
153 		return PCIBIOS_SUCCESSFUL;
154 	}
155 
156 	/*
157 	 * In general, hardware that supports only 32-bit writes on PCI is
158 	 * not spec-compliant.  For example, software may perform a 16-bit
159 	 * write.  If the hardware only supports 32-bit accesses, we must
160 	 * do a 32-bit read, merge in the 16 bits we intend to write,
161 	 * followed by a 32-bit write.  If the 16 bits we *don't* intend to
162 	 * write happen to have any RW1C (write-one-to-clear) bits set, we
163 	 * just inadvertently cleared something we shouldn't have.
164 	 */
165 	dev_warn_ratelimited(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
166 			     size, pci_domain_nr(bus), bus->number,
167 			     PCI_SLOT(devfn), PCI_FUNC(devfn), where);
168 
169 	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
170 	tmp = readl(addr) & mask;
171 	tmp |= val << ((where & 0x3) * 8);
172 	writel(tmp, addr);
173 
174 	return PCIBIOS_SUCCESSFUL;
175 }
176 EXPORT_SYMBOL_GPL(pci_generic_config_write32);
177 
178 /**
179  * pci_bus_set_ops - Set raw operations of pci bus
180  * @bus:	pci bus struct
181  * @ops:	new raw operations
182  *
183  * Return previous raw operations
184  */
185 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
186 {
187 	struct pci_ops *old_ops;
188 	unsigned long flags;
189 
190 	raw_spin_lock_irqsave(&pci_lock, flags);
191 	old_ops = bus->ops;
192 	bus->ops = ops;
193 	raw_spin_unlock_irqrestore(&pci_lock, flags);
194 	return old_ops;
195 }
196 EXPORT_SYMBOL(pci_bus_set_ops);
197 
/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so.  Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

/*
 * Sleep until dev->block_cfg_access clears.  Must be called with pci_lock
 * held and irqs disabled; the lock is dropped and re-taken around each
 * schedule() so the unblocker can make progress.  Sleeps in
 * TASK_UNINTERRUPTIBLE — config access cannot be aborted by signals.
 */
static noinline void pci_wait_cfg(struct pci_dev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&pci_cfg_wait, &wait);
	do {
		/* Mark sleeping before dropping the lock to avoid a lost wakeup */
		set_current_state(TASK_UNINTERRUPTIBLE);
		raw_spin_unlock_irq(&pci_lock);
		schedule();
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
	__remove_wait_queue(&pci_cfg_wait, &wait);
}
221 
/*
 * PCI_USER_READ_CONFIG() generates pci_user_read_config_{byte,word,dword}():
 * like the pci_bus_* accessors, but honors the per-device
 * block_cfg_access flag (sleeping in pci_wait_cfg() until unblocked) and
 * converts the PCIBIOS_* status to a -errno value.
 */
/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_READ_CONFIG(size, type)					\
int pci_user_read_config_##size						\
	(struct pci_dev *dev, int pos, type *val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	u32 data = -1;							\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);				\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->read(dev->bus, dev->devfn,			\
					pos, sizeof(type), &data);	\
	raw_spin_unlock_irq(&pci_lock);				\
	*val = (type)data;						\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);

/* Write-side counterpart of PCI_USER_READ_CONFIG() */
/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size, type)				\
int pci_user_write_config_##size					\
	(struct pci_dev *dev, int pos, type val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);				\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->write(dev->bus, dev->devfn,		\
					pos, sizeof(type), val);	\
	raw_spin_unlock_irq(&pci_lock);				\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);

/* Instantiate the user-space config accessors for all three widths */
PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)
266 
267 /* VPD access through PCI 2.2+ VPD capability */
268 
269 /**
270  * pci_read_vpd - Read one entry from Vital Product Data
271  * @dev:	pci device struct
272  * @pos:	offset in vpd space
273  * @count:	number of bytes to read
274  * @buf:	pointer to where to store result
275  */
276 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
277 {
278 	if (!dev->vpd || !dev->vpd->ops)
279 		return -ENODEV;
280 	return dev->vpd->ops->read(dev, pos, count, buf);
281 }
282 EXPORT_SYMBOL(pci_read_vpd);
283 
284 /**
285  * pci_write_vpd - Write entry to Vital Product Data
286  * @dev:	pci device struct
287  * @pos:	offset in vpd space
288  * @count:	number of bytes to write
289  * @buf:	buffer containing write data
290  */
291 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
292 {
293 	if (!dev->vpd || !dev->vpd->ops)
294 		return -ENODEV;
295 	return dev->vpd->ops->write(dev, pos, count, buf);
296 }
297 EXPORT_SYMBOL(pci_write_vpd);
298 
299 /**
300  * pci_set_vpd_size - Set size of Vital Product Data space
301  * @dev:	pci device struct
302  * @len:	size of vpd space
303  */
304 int pci_set_vpd_size(struct pci_dev *dev, size_t len)
305 {
306 	if (!dev->vpd || !dev->vpd->ops)
307 		return -ENODEV;
308 	return dev->vpd->ops->set_size(dev, len);
309 }
310 EXPORT_SYMBOL(pci_set_vpd_size);
311 
/* Addressable VPD space is bounded by the 15-bit address field */
#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)

/**
 * pci_vpd_size - determine actual size of Vital Product Data
 * @dev:	pci device struct
 * @old_size:	current assumed size, also maximum allowed size
 *
 * Walks the VPD resource tag list (ID string / read-only / read-write
 * items) until the End tag.  Returns the offset just past the End tag,
 * or 0 if the VPD is unreadable or contains an unexpected tag.
 */
static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
{
	size_t off = 0;
	unsigned char header[1+2];	/* 1 byte tag, 2 bytes length */

	while (off < old_size &&
	       pci_read_vpd(dev, off, 1, header) == 1) {
		unsigned char tag;

		if (header[0] & PCI_VPD_LRDT) {
			/* Large Resource Data Type Tag */
			tag = pci_vpd_lrdt_tag(header);
			/* Only read length from known tag items */
			if ((tag == PCI_VPD_LTIN_ID_STRING) ||
			    (tag == PCI_VPD_LTIN_RO_DATA) ||
			    (tag == PCI_VPD_LTIN_RW_DATA)) {
				if (pci_read_vpd(dev, off+1, 2,
						 &header[1]) != 2) {
					pci_warn(dev, "invalid large VPD tag %02x size at offset %zu",
						 tag, off + 1);
					return 0;
				}
				/* Skip the 3-byte header plus the item payload */
				off += PCI_VPD_LRDT_TAG_SIZE +
					pci_vpd_lrdt_size(header);
			}
		} else {
			/* Short Resource Data Type Tag: length is in the tag byte */
			off += PCI_VPD_SRDT_TAG_SIZE +
				pci_vpd_srdt_size(header);
			tag = pci_vpd_srdt_tag(header);
		}

		if (tag == PCI_VPD_STIN_END)	/* End tag descriptor */
			return off;

		/* Anything other than ID string / RO / RW data is malformed */
		if ((tag != PCI_VPD_LTIN_ID_STRING) &&
		    (tag != PCI_VPD_LTIN_RO_DATA) &&
		    (tag != PCI_VPD_LTIN_RW_DATA)) {
			pci_warn(dev, "invalid %s VPD tag %02x at offset %zu",
				 (header[0] & PCI_VPD_LRDT) ? "large" : "short",
				 tag, off);
			return 0;
		}
	}
	/* Ran past old_size or read failed without seeing an End tag */
	return 0;
}
365 
/*
 * Wait for last operation to complete.
 * This code has to spin since there is no other notification from the PCI
 * hardware. Since the VPD is often implemented by serial attachment to an
 * EEPROM, it may take many milliseconds to complete.
 *
 * Completion is signalled by the PCI_VPD_ADDR_F bit in the VPD address
 * register matching vpd->flag (set for a finished read, clear for a
 * finished write).  Polls with exponential backoff (10us up to ~1ms)
 * for at most 125ms.
 *
 * Returns 0 on success, negative values indicate error.
 */
static int pci_vpd_wait(struct pci_dev *dev)
{
	struct pci_vpd *vpd = dev->vpd;
	unsigned long timeout = jiffies + msecs_to_jiffies(125);
	unsigned long max_sleep = 16;
	u16 status;
	int ret;

	/* No operation in flight: nothing to wait for */
	if (!vpd->busy)
		return 0;

	while (time_before(jiffies, timeout)) {
		ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						&status);
		if (ret < 0)
			return ret;

		if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
			vpd->busy = 0;
			return 0;
		}

		/* Allow the task to be killed while the device is stuck */
		if (fatal_signal_pending(current))
			return -EINTR;

		/* Back off exponentially, capped at ~1ms per poll */
		usleep_range(10, max_sleep);
		if (max_sleep < 1024)
			max_sleep *= 2;
	}

	pci_warn(dev, "VPD access failed.  This is likely a firmware bug on this device.  Contact the card vendor for a firmware update\n");
	return -ETIMEDOUT;
}
407 
/*
 * Read @count bytes of VPD starting at @pos into @arg via the VPD
 * address/data register pair: write the dword-aligned address (with
 * PCI_VPD_ADDR_F clear), wait for the flag to be set by hardware, then
 * read the 32-bit data register and extract the wanted bytes
 * least-significant-byte first.  Reads are clamped to the discovered
 * VPD length.  Returns bytes read, or a negative errno.
 */
static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
			    void *arg)
{
	struct pci_vpd *vpd = dev->vpd;
	int ret;
	loff_t end = pos + count;
	u8 *buf = arg;

	if (pos < 0)
		return -EINVAL;

	/* Lazily determine the real VPD size on first access */
	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->len = pci_vpd_size(dev, vpd->len);
	}

	if (vpd->len == 0)
		return -EIO;

	if (pos > vpd->len)
		return 0;

	/* Clamp the request to the end of VPD space */
	if (end > vpd->len) {
		end = vpd->len;
		count = end - pos;
	}

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	/* Make sure any previous operation has finished */
	ret = pci_vpd_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;
		unsigned int i, skip;

		/* Kick off a read of the aligned dword containing "pos" */
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos & ~3);
		if (ret < 0)
			break;
		vpd->busy = 1;
		vpd->flag = PCI_VPD_ADDR_F;	/* read completes when F is set */
		ret = pci_vpd_wait(dev);
		if (ret < 0)
			break;

		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
		if (ret < 0)
			break;

		/* Copy out byte-by-byte, skipping bytes before the unaligned start */
		skip = pos & 3;
		for (i = 0;  i < sizeof(u32); i++) {
			if (i >= skip) {
				*buf++ = val;
				if (++pos == end)
					break;
			}
			val >>= 8;
		}
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}
474 
/*
 * Write @count bytes from @arg to VPD starting at @pos.  The hardware
 * interface is dword-based, so both @pos and @count must be 4-byte
 * aligned.  For each dword: load the data register (little-endian byte
 * assembly), then write the address with PCI_VPD_ADDR_F set and wait
 * for hardware to clear it.  Returns bytes written, or a negative errno.
 */
static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
			     const void *arg)
{
	struct pci_vpd *vpd = dev->vpd;
	const u8 *buf = arg;
	loff_t end = pos + count;
	int ret = 0;

	/* Hardware only supports aligned dword writes */
	if (pos < 0 || (pos & 3) || (count & 3))
		return -EINVAL;

	/* Lazily determine the real VPD size on first access */
	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->len = pci_vpd_size(dev, vpd->len);
	}

	if (vpd->len == 0)
		return -EIO;

	/* Unlike reads, writes past the end are rejected rather than clamped */
	if (end > vpd->len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	/* Make sure any previous operation has finished */
	ret = pci_vpd_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;

		/* Assemble the next dword, least-significant byte first */
		val = *buf++;
		val |= *buf++ << 8;
		val |= *buf++ << 16;
		val |= *buf++ << 24;

		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
		if (ret < 0)
			break;
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos | PCI_VPD_ADDR_F);
		if (ret < 0)
			break;

		vpd->busy = 1;
		vpd->flag = 0;	/* write completes when hardware clears F */
		ret = pci_vpd_wait(dev);
		if (ret < 0)
			break;

		pos += sizeof(u32);
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}
532 
533 static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
534 {
535 	struct pci_vpd *vpd = dev->vpd;
536 
537 	if (len == 0 || len > PCI_VPD_MAX_SIZE)
538 		return -EIO;
539 
540 	vpd->valid = 1;
541 	vpd->len = len;
542 
543 	return 0;
544 }
545 
546 static const struct pci_vpd_ops pci_vpd_ops = {
547 	.read = pci_vpd_read,
548 	.write = pci_vpd_write,
549 	.set_size = pci_vpd_set_size,
550 };
551 
552 static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
553 			       void *arg)
554 {
555 	struct pci_dev *tdev = pci_get_slot(dev->bus,
556 					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
557 	ssize_t ret;
558 
559 	if (!tdev)
560 		return -ENODEV;
561 
562 	ret = pci_read_vpd(tdev, pos, count, arg);
563 	pci_dev_put(tdev);
564 	return ret;
565 }
566 
567 static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
568 				const void *arg)
569 {
570 	struct pci_dev *tdev = pci_get_slot(dev->bus,
571 					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
572 	ssize_t ret;
573 
574 	if (!tdev)
575 		return -ENODEV;
576 
577 	ret = pci_write_vpd(tdev, pos, count, arg);
578 	pci_dev_put(tdev);
579 	return ret;
580 }
581 
582 static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
583 {
584 	struct pci_dev *tdev = pci_get_slot(dev->bus,
585 					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
586 	int ret;
587 
588 	if (!tdev)
589 		return -ENODEV;
590 
591 	ret = pci_set_vpd_size(tdev, len);
592 	pci_dev_put(tdev);
593 	return ret;
594 }
595 
596 static const struct pci_vpd_ops pci_vpd_f0_ops = {
597 	.read = pci_vpd_f0_read,
598 	.write = pci_vpd_f0_write,
599 	.set_size = pci_vpd_f0_set_size,
600 };
601 
602 int pci_vpd_init(struct pci_dev *dev)
603 {
604 	struct pci_vpd *vpd;
605 	u8 cap;
606 
607 	cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
608 	if (!cap)
609 		return -ENODEV;
610 
611 	vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
612 	if (!vpd)
613 		return -ENOMEM;
614 
615 	vpd->len = PCI_VPD_MAX_SIZE;
616 	if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
617 		vpd->ops = &pci_vpd_f0_ops;
618 	else
619 		vpd->ops = &pci_vpd_ops;
620 	mutex_init(&vpd->lock);
621 	vpd->cap = cap;
622 	vpd->busy = 0;
623 	vpd->valid = 0;
624 	dev->vpd = vpd;
625 	return 0;
626 }
627 
628 void pci_vpd_release(struct pci_dev *dev)
629 {
630 	kfree(dev->vpd);
631 }
632 
/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev:	pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 *
 * May sleep (waits for a previous holder); must not be called from
 * atomic context — use pci_cfg_access_trylock() there.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
	might_sleep();

	raw_spin_lock_irq(&pci_lock);
	/* If another caller already blocked access, queue behind it */
	if (dev->block_cfg_access)
		pci_wait_cfg(dev);
	dev->block_cfg_access = 1;
	raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);
652 
653 /**
654  * pci_cfg_access_trylock - try to lock PCI config reads/writes
655  * @dev:	pci device struct
656  *
657  * Same as pci_cfg_access_lock, but will return 0 if access is
658  * already locked, 1 otherwise. This function can be used from
659  * atomic contexts.
660  */
661 bool pci_cfg_access_trylock(struct pci_dev *dev)
662 {
663 	unsigned long flags;
664 	bool locked = true;
665 
666 	raw_spin_lock_irqsave(&pci_lock, flags);
667 	if (dev->block_cfg_access)
668 		locked = false;
669 	else
670 		dev->block_cfg_access = 1;
671 	raw_spin_unlock_irqrestore(&pci_lock, flags);
672 
673 	return locked;
674 }
675 EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);
676 
/**
 * pci_cfg_access_unlock - Unlock PCI config reads/writes
 * @dev:	pci device struct
 *
 * This function allows PCI config accesses to resume.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);

	/*
	 * This indicates a problem in the caller, but we don't need
	 * to kill them, unlike a double-block above.
	 */
	WARN_ON(!dev->block_cfg_access);

	dev->block_cfg_access = 0;
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	/* Wake anyone sleeping in pci_wait_cfg() */
	wake_up_all(&pci_cfg_wait);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
699 
700 static inline int pcie_cap_version(const struct pci_dev *dev)
701 {
702 	return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
703 }
704 
705 static bool pcie_downstream_port(const struct pci_dev *dev)
706 {
707 	int type = pci_pcie_type(dev);
708 
709 	return type == PCI_EXP_TYPE_ROOT_PORT ||
710 	       type == PCI_EXP_TYPE_DOWNSTREAM ||
711 	       type == PCI_EXP_TYPE_PCIE_BRIDGE;
712 }
713 
714 bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
715 {
716 	int type = pci_pcie_type(dev);
717 
718 	return type == PCI_EXP_TYPE_ENDPOINT ||
719 	       type == PCI_EXP_TYPE_LEG_END ||
720 	       type == PCI_EXP_TYPE_ROOT_PORT ||
721 	       type == PCI_EXP_TYPE_UPSTREAM ||
722 	       type == PCI_EXP_TYPE_DOWNSTREAM ||
723 	       type == PCI_EXP_TYPE_PCI_BRIDGE ||
724 	       type == PCI_EXP_TYPE_PCIE_BRIDGE;
725 }
726 
727 static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
728 {
729 	return pcie_downstream_port(dev) &&
730 	       pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
731 }
732 
733 static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
734 {
735 	int type = pci_pcie_type(dev);
736 
737 	return type == PCI_EXP_TYPE_ROOT_PORT ||
738 	       type == PCI_EXP_TYPE_RC_EC;
739 }
740 
/*
 * Whether register @pos of the PCI Express Capability is implemented by
 * @dev, based on the device/port type and capability version.  False for
 * non-PCIe devices and unknown offsets.
 */
static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
	if (!pci_is_pcie(dev))
		return false;

	switch (pos) {
	case PCI_EXP_FLAGS:
		return true;
	/* Device registers exist on every PCIe function */
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVSTA:
		return true;
	case PCI_EXP_LNKCAP:
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKSTA:
		return pcie_cap_has_lnkctl(dev);
	case PCI_EXP_SLTCAP:
	case PCI_EXP_SLTCTL:
	case PCI_EXP_SLTSTA:
		return pcie_cap_has_sltctl(dev);
	case PCI_EXP_RTCTL:
	case PCI_EXP_RTCAP:
	case PCI_EXP_RTSTA:
		return pcie_cap_has_rtctl(dev);
	/* The "2" register set only exists from capability version 2 on */
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_LNKSTA2:
		return pcie_cap_version(dev) > 1;
	default:
		return false;
	}
}
775 
/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8).  They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */

/*
 * Read a 16-bit register of the PCI Express Capability.  Unimplemented
 * registers read as 0 (with the Presence Detect State exception below)
 * and return success.  -EINVAL for a misaligned @pos.
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
	int ret;

	*val = 0;
	if (pos & 1)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_word() fails, it may
		 * have been written as 0xFFFF if hardware error happens
		 * during pci_read_config_word().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/*
	 * For Functions that do not implement the Slot Capabilities,
	 * Slot Status, and Slot Control registers, these spaces must
	 * be hardwired to 0b, with the exception of the Presence Detect
	 * State bit in the Slot Status register of Downstream Ports,
	 * which must be hardwired to 1b.  (PCIe Base Spec 3.0, sec 7.8)
	 */
	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);
815 
/*
 * 32-bit counterpart of pcie_capability_read_word(); same emulation of
 * unimplemented registers, -EINVAL for a misaligned @pos.
 */
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
	int ret;

	*val = 0;
	if (pos & 3)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_dword() fails, it may
		 * have been written as 0xFFFFFFFF if hardware error happens
		 * during pci_read_config_dword().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/* See the Presence Detect State note in pcie_capability_read_word() */
	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);
843 
844 int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
845 {
846 	if (pos & 1)
847 		return -EINVAL;
848 
849 	if (!pcie_capability_reg_implemented(dev, pos))
850 		return 0;
851 
852 	return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
853 }
854 EXPORT_SYMBOL(pcie_capability_write_word);
855 
856 int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
857 {
858 	if (pos & 3)
859 		return -EINVAL;
860 
861 	if (!pcie_capability_reg_implemented(dev, pos))
862 		return 0;
863 
864 	return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
865 }
866 EXPORT_SYMBOL(pcie_capability_write_dword);
867 
868 int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
869 				       u16 clear, u16 set)
870 {
871 	int ret;
872 	u16 val;
873 
874 	ret = pcie_capability_read_word(dev, pos, &val);
875 	if (!ret) {
876 		val &= ~clear;
877 		val |= set;
878 		ret = pcie_capability_write_word(dev, pos, val);
879 	}
880 
881 	return ret;
882 }
883 EXPORT_SYMBOL(pcie_capability_clear_and_set_word);
884 
885 int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
886 					u32 clear, u32 set)
887 {
888 	int ret;
889 	u32 val;
890 
891 	ret = pcie_capability_read_dword(dev, pos, &val);
892 	if (!ret) {
893 		val &= ~clear;
894 		val |= set;
895 		ret = pcie_capability_write_dword(dev, pos, val);
896 	}
897 
898 	return ret;
899 }
900 EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
901 
902 int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
903 {
904 	if (pci_dev_is_disconnected(dev)) {
905 		*val = ~0;
906 		return PCIBIOS_DEVICE_NOT_FOUND;
907 	}
908 	return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
909 }
910 EXPORT_SYMBOL(pci_read_config_byte);
911 
912 int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
913 {
914 	if (pci_dev_is_disconnected(dev)) {
915 		*val = ~0;
916 		return PCIBIOS_DEVICE_NOT_FOUND;
917 	}
918 	return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
919 }
920 EXPORT_SYMBOL(pci_read_config_word);
921 
922 int pci_read_config_dword(const struct pci_dev *dev, int where,
923 					u32 *val)
924 {
925 	if (pci_dev_is_disconnected(dev)) {
926 		*val = ~0;
927 		return PCIBIOS_DEVICE_NOT_FOUND;
928 	}
929 	return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
930 }
931 EXPORT_SYMBOL(pci_read_config_dword);
932 
933 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
934 {
935 	if (pci_dev_is_disconnected(dev))
936 		return PCIBIOS_DEVICE_NOT_FOUND;
937 	return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
938 }
939 EXPORT_SYMBOL(pci_write_config_byte);
940 
941 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
942 {
943 	if (pci_dev_is_disconnected(dev))
944 		return PCIBIOS_DEVICE_NOT_FOUND;
945 	return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
946 }
947 EXPORT_SYMBOL(pci_write_config_word);
948 
949 int pci_write_config_dword(const struct pci_dev *dev, int where,
950 					 u32 val)
951 {
952 	if (pci_dev_is_disconnected(dev))
953 		return PCIBIOS_DEVICE_NOT_FOUND;
954 	return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
955 }
956 EXPORT_SYMBOL(pci_write_config_dword);
957