xref: /openbmc/linux/drivers/pci/pci.c (revision 5f32c314)
1 /*
2  *	PCI Bus Services, see include/linux/pci.h for further explanation.
3  *
4  *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
5  *	David Mosberger-Tang
6  *
7  *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/delay.h>
12 #include <linux/init.h>
13 #include <linux/pci.h>
14 #include <linux/pm.h>
15 #include <linux/slab.h>
16 #include <linux/module.h>
17 #include <linux/spinlock.h>
18 #include <linux/string.h>
19 #include <linux/log2.h>
20 #include <linux/pci-aspm.h>
21 #include <linux/pm_wakeup.h>
22 #include <linux/interrupt.h>
23 #include <linux/device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pci_hotplug.h>
26 #include <asm-generic/pci-bridge.h>
27 #include <asm/setup.h>
28 #include "pci.h"
29 
30 const char *pci_power_names[] = {
31 	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
32 };
33 EXPORT_SYMBOL_GPL(pci_power_names);
34 
35 int isa_dma_bridge_buggy;
36 EXPORT_SYMBOL(isa_dma_bridge_buggy);
37 
38 int pci_pci_problems;
39 EXPORT_SYMBOL(pci_pci_problems);
40 
41 unsigned int pci_pm_d3_delay;
42 
43 static void pci_pme_list_scan(struct work_struct *work);
44 
45 static LIST_HEAD(pci_pme_list);
46 static DEFINE_MUTEX(pci_pme_list_mutex);
47 static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
48 
49 struct pci_pme_device {
50 	struct list_head list;
51 	struct pci_dev *dev;
52 };
53 
54 #define PME_TIMEOUT 1000 /* How long between PME checks */
55 
56 static void pci_dev_d3_sleep(struct pci_dev *dev)
57 {
58 	unsigned int delay = dev->d3_delay;
59 
60 	if (delay < pci_pm_d3_delay)
61 		delay = pci_pm_d3_delay;
62 
63 	msleep(delay);
64 }
65 
66 #ifdef CONFIG_PCI_DOMAINS
67 int pci_domains_supported = 1;
68 #endif
69 
70 #define DEFAULT_CARDBUS_IO_SIZE		(256)
71 #define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
72 /* pci=cbmemsize=nnM,cbiosize=nn can override this */
73 unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
74 unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
75 
76 #define DEFAULT_HOTPLUG_IO_SIZE		(256)
77 #define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
78 /* pci=hpmemsize=nnM,hpiosize=nn can override this */
79 unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
80 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
81 
82 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
83 
84 /*
85  * The default CLS is used if arch didn't set CLS explicitly and not
86  * all pci devices agree on the same value.  Arch can override either
87  * the dfl or actual value as it sees fit.  Don't forget this is
88  * measured in 32-bit words, not bytes.
89  */
90 u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
91 u8 pci_cache_line_size;
92 
93 /*
94  * If we set up a device for bus mastering, we need to check the latency
95  * timer as certain BIOSes forget to set it properly.
96  */
97 unsigned int pcibios_max_latency = 255;
98 
99 /* If set, the PCIe ARI capability will not be used. */
100 static bool pcie_ari_disabled;
101 
102 /**
103  * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
104  * @bus: pointer to PCI bus structure to search
105  *
106  * Given a PCI bus, returns the highest PCI bus number present in the set
107  * including the given PCI bus and its list of child PCI buses.
108  */
109 unsigned char pci_bus_max_busnr(struct pci_bus* bus)
110 {
111 	struct list_head *tmp;
112 	unsigned char max, n;
113 
114 	max = bus->busn_res.end;
115 	list_for_each(tmp, &bus->children) {
116 		n = pci_bus_max_busnr(pci_bus_b(tmp));
117 		if(n > max)
118 			max = n;
119 	}
120 	return max;
121 }
122 EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
123 
124 #ifdef CONFIG_HAS_IOMEM
125 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
126 {
127 	/*
128 	 * Make sure the BAR is actually a memory resource, not an IO resource
129 	 */
130 	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
131 		WARN_ON(1);
132 		return NULL;
133 	}
134 	return ioremap_nocache(pci_resource_start(pdev, bar),
135 				     pci_resource_len(pdev, bar));
136 }
137 EXPORT_SYMBOL_GPL(pci_ioremap_bar);
138 #endif
139 
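/*
 * Example (not from the original file): a minimal sketch of how a driver
 * probe path might map a memory BAR with pci_ioremap_bar().  The function
 * name example_map_bar0() and the choice of BAR 0 are made up for
 * illustration.
 */
static int __maybe_unused example_map_bar0(struct pci_dev *pdev)
{
	void __iomem *regs;

	regs = pci_ioremap_bar(pdev, 0);	/* NULL on failure */
	if (!regs)
		return -ENOMEM;

	/* ... access the device with readl()/writel() on regs ... */

	iounmap(regs);
	return 0;
}
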
140 #define PCI_FIND_CAP_TTL	48
141 
142 static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
143 				   u8 pos, int cap, int *ttl)
144 {
145 	u8 id;
146 
147 	while ((*ttl)--) {
148 		pci_bus_read_config_byte(bus, devfn, pos, &pos);
149 		if (pos < 0x40)
150 			break;
151 		pos &= ~3;
152 		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
153 					 &id);
154 		if (id == 0xff)
155 			break;
156 		if (id == cap)
157 			return pos;
158 		pos += PCI_CAP_LIST_NEXT;
159 	}
160 	return 0;
161 }
162 
163 static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
164 			       u8 pos, int cap)
165 {
166 	int ttl = PCI_FIND_CAP_TTL;
167 
168 	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
169 }
170 
171 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
172 {
173 	return __pci_find_next_cap(dev->bus, dev->devfn,
174 				   pos + PCI_CAP_LIST_NEXT, cap);
175 }
176 EXPORT_SYMBOL_GPL(pci_find_next_capability);
177 
178 static int __pci_bus_find_cap_start(struct pci_bus *bus,
179 				    unsigned int devfn, u8 hdr_type)
180 {
181 	u16 status;
182 
183 	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
184 	if (!(status & PCI_STATUS_CAP_LIST))
185 		return 0;
186 
187 	switch (hdr_type) {
188 	case PCI_HEADER_TYPE_NORMAL:
189 	case PCI_HEADER_TYPE_BRIDGE:
190 		return PCI_CAPABILITY_LIST;
191 	case PCI_HEADER_TYPE_CARDBUS:
192 		return PCI_CB_CAPABILITY_LIST;
193 	default:
194 		return 0;
195 	}
196 
197 	return 0;
198 }
199 
200 /**
201  * pci_find_capability - query for devices' capabilities
202  * @dev: PCI device to query
203  * @cap: capability code
204  *
205  * Tell if a device supports a given PCI capability.
206  * Returns the address of the requested capability structure within the
207  * device's PCI configuration space or 0 in case the device does not
208  * support it.  Possible values for @cap:
209  *
210  *  %PCI_CAP_ID_PM           Power Management
211  *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
212  *  %PCI_CAP_ID_VPD          Vital Product Data
213  *  %PCI_CAP_ID_SLOTID       Slot Identification
214  *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
215  *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
216  *  %PCI_CAP_ID_PCIX         PCI-X
217  *  %PCI_CAP_ID_EXP          PCI Express
218  */
219 int pci_find_capability(struct pci_dev *dev, int cap)
220 {
221 	int pos;
222 
223 	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
224 	if (pos)
225 		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
226 
227 	return pos;
228 }
229 
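/*
 * Example (not from the original file): looking up a conventional capability
 * with pci_find_capability().  A return value of 0 means the device does not
 * implement the capability; otherwise it is the config space offset of the
 * capability structure.  The function name is made up for illustration.
 */
static int __maybe_unused example_find_pm_offset(struct pci_dev *pdev)
{
	int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);

	if (!pm)
		return -ENODEV;	/* no Power Management capability */

	return pm;		/* offset usable with pci_read_config_*() */
}
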
230 /**
231  * pci_bus_find_capability - query for devices' capabilities
232  * @bus:   the PCI bus to query
233  * @devfn: PCI device to query
234  * @cap:   capability code
235  *
236  * Like pci_find_capability() but works for pci devices that do not have a
237  * pci_dev structure set up yet.
238  *
239  * Returns the address of the requested capability structure within the
240  * device's PCI configuration space or 0 in case the device does not
241  * support it.
242  */
243 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
244 {
245 	int pos;
246 	u8 hdr_type;
247 
248 	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
249 
250 	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
251 	if (pos)
252 		pos = __pci_find_next_cap(bus, devfn, pos, cap);
253 
254 	return pos;
255 }
256 
257 /**
258  * pci_find_next_ext_capability - Find an extended capability
259  * @dev: PCI device to query
260  * @start: address at which to start looking (0 to start at beginning of list)
261  * @cap: capability code
262  *
263  * Returns the address of the next matching extended capability structure
264  * within the device's PCI configuration space or 0 if the device does
265  * not support it.  Some capabilities can occur several times, e.g., the
266  * vendor-specific capability, and this provides a way to find them all.
267  */
268 int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
269 {
270 	u32 header;
271 	int ttl;
272 	int pos = PCI_CFG_SPACE_SIZE;
273 
274 	/* minimum 8 bytes per capability */
275 	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
276 
277 	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
278 		return 0;
279 
280 	if (start)
281 		pos = start;
282 
283 	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
284 		return 0;
285 
286 	/*
287 	 * If we have no capabilities, this is indicated by cap ID,
288 	 * cap version and next pointer all being 0.
289 	 */
290 	if (header == 0)
291 		return 0;
292 
293 	while (ttl-- > 0) {
294 		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
295 			return pos;
296 
297 		pos = PCI_EXT_CAP_NEXT(header);
298 		if (pos < PCI_CFG_SPACE_SIZE)
299 			break;
300 
301 		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
302 			break;
303 	}
304 
305 	return 0;
306 }
307 EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
308 
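/*
 * Example (not from the original file): walking every instance of a
 * repeatable extended capability (here the vendor-specific one) with
 * pci_find_next_ext_capability(), as the kernel-doc above suggests.  The
 * function name is made up for illustration.
 */
static void __maybe_unused example_walk_vsec(struct pci_dev *pdev)
{
	int pos = 0;

	while ((pos = pci_find_next_ext_capability(pdev, pos,
						   PCI_EXT_CAP_ID_VNDR)))
		dev_info(&pdev->dev,
			 "vendor-specific ext cap at offset %#x\n", pos);
}
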
309 /**
310  * pci_find_ext_capability - Find an extended capability
311  * @dev: PCI device to query
312  * @cap: capability code
313  *
314  * Returns the address of the requested extended capability structure
315  * within the device's PCI configuration space or 0 if the device does
316  * not support it.  Possible values for @cap:
317  *
318  *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
319  *  %PCI_EXT_CAP_ID_VC		Virtual Channel
320  *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
321  *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
322  */
323 int pci_find_ext_capability(struct pci_dev *dev, int cap)
324 {
325 	return pci_find_next_ext_capability(dev, 0, cap);
326 }
327 EXPORT_SYMBOL_GPL(pci_find_ext_capability);
328 
329 static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
330 {
331 	int rc, ttl = PCI_FIND_CAP_TTL;
332 	u8 cap, mask;
333 
334 	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
335 		mask = HT_3BIT_CAP_MASK;
336 	else
337 		mask = HT_5BIT_CAP_MASK;
338 
339 	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
340 				      PCI_CAP_ID_HT, &ttl);
341 	while (pos) {
342 		rc = pci_read_config_byte(dev, pos + 3, &cap);
343 		if (rc != PCIBIOS_SUCCESSFUL)
344 			return 0;
345 
346 		if ((cap & mask) == ht_cap)
347 			return pos;
348 
349 		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
350 					      pos + PCI_CAP_LIST_NEXT,
351 					      PCI_CAP_ID_HT, &ttl);
352 	}
353 
354 	return 0;
355 }
356 /**
357  * pci_find_next_ht_capability - query a device's Hypertransport capabilities
358  * @dev: PCI device to query
359  * @pos: Position from which to continue searching
360  * @ht_cap: Hypertransport capability code
361  *
362  * To be used in conjunction with pci_find_ht_capability() to search for
363  * all capabilities matching @ht_cap. @pos should always be a value returned
364  * from pci_find_ht_capability().
365  *
366  * NB. To be 100% safe against broken PCI devices, the caller should take
367  * steps to avoid an infinite loop.
368  */
369 int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
370 {
371 	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
372 }
373 EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
374 
375 /**
376  * pci_find_ht_capability - query a device's Hypertransport capabilities
377  * @dev: PCI device to query
378  * @ht_cap: Hypertransport capability code
379  *
380  * Tell if a device supports a given Hypertransport capability.
381  * Returns an address within the device's PCI configuration space
382  * or 0 in case the device does not support the requested capability.
383  * The address points to the PCI capability, of type PCI_CAP_ID_HT,
384  * which has a Hypertransport capability matching @ht_cap.
385  */
386 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
387 {
388 	int pos;
389 
390 	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
391 	if (pos)
392 		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
393 
394 	return pos;
395 }
396 EXPORT_SYMBOL_GPL(pci_find_ht_capability);
397 
398 /**
399  * pci_find_parent_resource - return resource region of parent bus of given region
400  * @dev: PCI device structure contains resources to be searched
401  * @res: child resource record for which parent is sought
402  *
403  *  For given resource region of given device, return the resource
404  *  region of parent bus the given region is contained in or where
405  *  it should be allocated from.
406  */
407 struct resource *
408 pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
409 {
410 	const struct pci_bus *bus = dev->bus;
411 	int i;
412 	struct resource *best = NULL, *r;
413 
414 	pci_bus_for_each_resource(bus, r, i) {
415 		if (!r)
416 			continue;
417 		if (res->start && !(res->start >= r->start && res->end <= r->end))
418 			continue;	/* Not contained */
419 		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
420 			continue;	/* Wrong type */
421 		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
422 			return r;	/* Exact match */
423 		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
424 		if (r->flags & IORESOURCE_PREFETCH)
425 			continue;
426 		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
427 		if (!best)
428 			best = r;
429 	}
430 	return best;
431 }
432 
433 /**
434  * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
435  * @dev: the PCI device to operate on
436  * @pos: config space offset of status word
437  * @mask: mask of bit(s) to care about in status word
438  *
439  * Return 1 when mask bit(s) in status word clear, 0 otherwise.
440  */
441 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
442 {
443 	int i;
444 
445 	/* Wait for Transaction Pending bit to clear */
446 	for (i = 0; i < 4; i++) {
447 		u16 status;
448 		if (i)
449 			msleep((1 << (i - 1)) * 100);
450 
451 		pci_read_config_word(dev, pos, &status);
452 		if (!(status & mask))
453 			return 1;
454 	}
455 
456 	return 0;
457 }
458 
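/*
 * Example (not from the original file): a sketch of using
 * pci_wait_for_pending() to wait for the PCIe Transaction Pending bit to
 * clear, e.g. before issuing a function level reset.  The function name is
 * made up for illustration; the offset/mask pair is what a PCIe caller
 * would typically pass.
 */
static bool __maybe_unused example_wait_transaction_idle(struct pci_dev *pdev)
{
	if (!pci_is_pcie(pdev))
		return true;

	return pci_wait_for_pending(pdev, pci_pcie_cap(pdev) + PCI_EXP_DEVSTA,
				    PCI_EXP_DEVSTA_TRPND);
}
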
459 /**
460  * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
461  * @dev: PCI device to have its BARs restored
462  *
463  * Restore the BAR values for a given device, so as to make it
464  * accessible by its driver.
465  */
466 static void
467 pci_restore_bars(struct pci_dev *dev)
468 {
469 	int i;
470 
471 	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
472 		pci_update_resource(dev, i);
473 }
474 
475 static struct pci_platform_pm_ops *pci_platform_pm;
476 
477 int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
478 {
479 	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
480 	    || !ops->sleep_wake)
481 		return -EINVAL;
482 	pci_platform_pm = ops;
483 	return 0;
484 }
485 
486 static inline bool platform_pci_power_manageable(struct pci_dev *dev)
487 {
488 	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
489 }
490 
491 static inline int platform_pci_set_power_state(struct pci_dev *dev,
492                                                 pci_power_t t)
493 {
494 	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
495 }
496 
497 static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
498 {
499 	return pci_platform_pm ?
500 			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
501 }
502 
503 static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
504 {
505 	return pci_platform_pm ?
506 			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
507 }
508 
509 static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
510 {
511 	return pci_platform_pm ?
512 			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
513 }
514 
515 /**
516  * pci_raw_set_power_state - Use PCI PM registers to set the power state of
517  *                           given PCI device
518  * @dev: PCI device to handle.
519  * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
520  *
521  * RETURN VALUE:
522  * -EINVAL if the requested state is invalid.
523  * -EIO if device does not support PCI PM or its PM capabilities register has a
524  * wrong version, or device doesn't support the requested state.
525  * 0 if device already is in the requested state.
526  * 0 if device's power state has been successfully changed.
527  */
528 static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
529 {
530 	u16 pmcsr;
531 	bool need_restore = false;
532 
533 	/* Check if we're already there */
534 	if (dev->current_state == state)
535 		return 0;
536 
537 	if (!dev->pm_cap)
538 		return -EIO;
539 
540 	if (state < PCI_D0 || state > PCI_D3hot)
541 		return -EINVAL;
542 
543 	/* Validate current state:
544 	 * Can enter D0 from any state, but we can only go deeper
545 	 * to sleep if we're already in a low power state
546 	 */
547 	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
548 	    && dev->current_state > state) {
549 		dev_err(&dev->dev, "invalid power transition "
550 			"(from state %d to %d)\n", dev->current_state, state);
551 		return -EINVAL;
552 	}
553 
554 	/* check if this device supports the desired state */
555 	if ((state == PCI_D1 && !dev->d1_support)
556 	   || (state == PCI_D2 && !dev->d2_support))
557 		return -EIO;
558 
559 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
560 
561 	/* If we're (effectively) in D3, force entire word to 0.
562 	 * This doesn't affect PME_Status, disables PME_En, and
563 	 * sets PowerState to 0.
564 	 */
565 	switch (dev->current_state) {
566 	case PCI_D0:
567 	case PCI_D1:
568 	case PCI_D2:
569 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
570 		pmcsr |= state;
571 		break;
572 	case PCI_D3hot:
573 	case PCI_D3cold:
574 	case PCI_UNKNOWN: /* Boot-up */
575 		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
576 		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
577 			need_restore = true;
578 		/* Fall-through: force to D0 */
579 	default:
580 		pmcsr = 0;
581 		break;
582 	}
583 
584 	/* enter specified state */
585 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
586 
587 	/* Mandatory power management transition delays */
588 	/* see PCI PM 1.1 5.6.1 table 18 */
589 	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
590 		pci_dev_d3_sleep(dev);
591 	else if (state == PCI_D2 || dev->current_state == PCI_D2)
592 		udelay(PCI_PM_D2_DELAY);
593 
594 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
595 	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
596 	if (dev->current_state != state && printk_ratelimit())
597 		dev_info(&dev->dev, "Refused to change power state, "
598 			"currently in D%d\n", dev->current_state);
599 
600 	/*
601 	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
602 	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
603 	 * from D3hot to D0 _may_ perform an internal reset, thereby
604 	 * going to "D0 Uninitialized" rather than "D0 Initialized".
605 	 * For example, at least some versions of the 3c905B and the
606 	 * 3c556B exhibit this behaviour.
607 	 *
608 	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
609 	 * devices in a D3hot state at boot.  Consequently, we need to
610 	 * restore at least the BARs so that the device will be
611 	 * accessible to its driver.
612 	 */
613 	if (need_restore)
614 		pci_restore_bars(dev);
615 
616 	if (dev->bus->self)
617 		pcie_aspm_pm_state_change(dev->bus->self);
618 
619 	return 0;
620 }
621 
622 /**
623  * pci_update_current_state - Read PCI power state of given device from its
624  *                            PCI PM registers and cache it
625  * @dev: PCI device to handle.
626  * @state: State to cache in case the device doesn't have the PM capability
627  */
628 void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
629 {
630 	if (dev->pm_cap) {
631 		u16 pmcsr;
632 
633 		/*
634 		 * Configuration space is not accessible for device in
635 		 * D3cold, so just keep or set D3cold for safety
636 		 */
637 		if (dev->current_state == PCI_D3cold)
638 			return;
639 		if (state == PCI_D3cold) {
640 			dev->current_state = PCI_D3cold;
641 			return;
642 		}
643 		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
644 		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
645 	} else {
646 		dev->current_state = state;
647 	}
648 }
649 
650 /**
651  * pci_power_up - Put the given device into D0 forcibly
652  * @dev: PCI device to power up
653  */
654 void pci_power_up(struct pci_dev *dev)
655 {
656 	if (platform_pci_power_manageable(dev))
657 		platform_pci_set_power_state(dev, PCI_D0);
658 
659 	pci_raw_set_power_state(dev, PCI_D0);
660 	pci_update_current_state(dev, PCI_D0);
661 }
662 
663 /**
664  * pci_platform_power_transition - Use platform to change device power state
665  * @dev: PCI device to handle.
666  * @state: State to put the device into.
667  */
668 static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
669 {
670 	int error;
671 
672 	if (platform_pci_power_manageable(dev)) {
673 		error = platform_pci_set_power_state(dev, state);
674 		if (!error)
675 			pci_update_current_state(dev, state);
676 	} else
677 		error = -ENODEV;
678 
679 	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
680 		dev->current_state = PCI_D0;
681 
682 	return error;
683 }
684 
685 /**
686  * pci_wakeup - Wake up a PCI device
687  * @pci_dev: Device to handle.
688  * @ign: ignored parameter
689  */
690 static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
691 {
692 	pci_wakeup_event(pci_dev);
693 	pm_request_resume(&pci_dev->dev);
694 	return 0;
695 }
696 
697 /**
698  * pci_wakeup_bus - Walk given bus and wake up devices on it
699  * @bus: Top bus of the subtree to walk.
700  */
701 static void pci_wakeup_bus(struct pci_bus *bus)
702 {
703 	if (bus)
704 		pci_walk_bus(bus, pci_wakeup, NULL);
705 }
706 
707 /**
708  * __pci_start_power_transition - Start power transition of a PCI device
709  * @dev: PCI device to handle.
710  * @state: State to put the device into.
711  */
712 static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
713 {
714 	if (state == PCI_D0) {
715 		pci_platform_power_transition(dev, PCI_D0);
716 		/*
717 		 * Mandatory power management transition delays, see
718 		 * PCI Express Base Specification Revision 2.0 Section
719 		 * 6.6.1: Conventional Reset.  Do not delay for
720 		 * devices powered on/off by corresponding bridge,
721 		 * because we have already delayed for the bridge.
722 		 */
723 		if (dev->runtime_d3cold) {
724 			msleep(dev->d3cold_delay);
725 			/*
726 			 * When powering on a bridge from D3cold, the
727 			 * whole hierarchy may be powered on into
728 			 * D0uninitialized state, resume them to give
729 			 * them a chance to suspend again
730 			 */
731 			pci_wakeup_bus(dev->subordinate);
732 		}
733 	}
734 }
735 
736 /**
737  * __pci_dev_set_current_state - Set current state of a PCI device
738  * @dev: Device to handle
739  * @data: pointer to state to be set
740  */
741 static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
742 {
743 	pci_power_t state = *(pci_power_t *)data;
744 
745 	dev->current_state = state;
746 	return 0;
747 }
748 
749 /**
750  * __pci_bus_set_current_state - Walk given bus and set current state of devices
751  * @bus: Top bus of the subtree to walk.
752  * @state: state to be set
753  */
754 static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
755 {
756 	if (bus)
757 		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
758 }
759 
760 /**
761  * __pci_complete_power_transition - Complete power transition of a PCI device
762  * @dev: PCI device to handle.
763  * @state: State to put the device into.
764  *
765  * This function should not be called directly by device drivers.
766  */
767 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
768 {
769 	int ret;
770 
771 	if (state <= PCI_D0)
772 		return -EINVAL;
773 	ret = pci_platform_power_transition(dev, state);
774 	/* Powering off the bridge may power off the whole hierarchy */
775 	if (!ret && state == PCI_D3cold)
776 		__pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
777 	return ret;
778 }
779 EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
780 
781 /**
782  * pci_set_power_state - Set the power state of a PCI device
783  * @dev: PCI device to handle.
784  * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
785  *
786  * Transition a device to a new power state, using the platform firmware and/or
787  * the device's PCI PM registers.
788  *
789  * RETURN VALUE:
790  * -EINVAL if the requested state is invalid.
791  * -EIO if device does not support PCI PM or its PM capabilities register has a
792  * wrong version, or device doesn't support the requested state.
793  * 0 if device already is in the requested state.
794  * 0 if device's power state has been successfully changed.
795  */
796 int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
797 {
798 	int error;
799 
800 	/* bound the state we're entering */
801 	if (state > PCI_D3cold)
802 		state = PCI_D3cold;
803 	else if (state < PCI_D0)
804 		state = PCI_D0;
805 	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
806 		/*
807 		 * If the device or the parent bridge does not support PCI PM,
808 		 * ignore the request if we're doing anything other than putting
809 		 * it into D0 (which would only happen on boot).
810 		 */
811 		return 0;
812 
813 	/* Check if we're already there */
814 	if (dev->current_state == state)
815 		return 0;
816 
817 	__pci_start_power_transition(dev, state);
818 
819 	/* This device is quirked not to be put into D3, so
820 	   don't put it in D3 */
821 	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
822 		return 0;
823 
824 	/*
825 	 * To put the device in D3cold, we put it into D3hot in the native
826 	 * way, then put it into D3cold with the platform ops
827 	 */
828 	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
829 					PCI_D3hot : state);
830 
831 	if (!__pci_complete_power_transition(dev, state))
832 		error = 0;
833 	/*
834 	 * When aspm_policy is "powersave" this call ensures
835 	 * that ASPM is configured.
836 	 */
837 	if (!error && dev->bus->self)
838 		pcie_aspm_powersave_config_link(dev->bus->self);
839 
840 	return error;
841 }
842 
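/*
 * Example (not from the original file): a driver putting its device into a
 * low power state and bringing it back with pci_set_power_state().  The
 * function name is made up for illustration.
 */
static int __maybe_unused example_power_cycle_d3hot(struct pci_dev *pdev)
{
	int err;

	err = pci_set_power_state(pdev, PCI_D3hot);
	if (err)
		return err;

	/* ... device is (at most) in D3hot here ... */

	return pci_set_power_state(pdev, PCI_D0);
}
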
843 /**
844  * pci_choose_state - Choose the power state of a PCI device
845  * @dev: PCI device to be suspended
846  * @state: target sleep state for the whole system. This is the value
847  *	that is passed to suspend() function.
848  *
849  * Returns PCI power state suitable for given device and given system
850  * message.
851  */
852 
853 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
854 {
855 	pci_power_t ret;
856 
857 	if (!dev->pm_cap)
858 		return PCI_D0;
859 
860 	ret = platform_pci_choose_state(dev);
861 	if (ret != PCI_POWER_ERROR)
862 		return ret;
863 
864 	switch (state.event) {
865 	case PM_EVENT_ON:
866 		return PCI_D0;
867 	case PM_EVENT_FREEZE:
868 	case PM_EVENT_PRETHAW:
869 		/* REVISIT both freeze and pre-thaw "should" use D0 */
870 	case PM_EVENT_SUSPEND:
871 	case PM_EVENT_HIBERNATE:
872 		return PCI_D3hot;
873 	default:
874 		dev_info(&dev->dev, "unrecognized suspend event %d\n",
875 			 state.event);
876 		BUG();
877 	}
878 	return PCI_D0;
879 }
880 
881 EXPORT_SYMBOL(pci_choose_state);
882 
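/*
 * Example (not from the original file): how a legacy .suspend() hook might
 * combine pci_save_state(), pci_disable_device(), pci_choose_state() and
 * pci_set_power_state().  The function name is made up for illustration.
 */
static int __maybe_unused example_legacy_suspend(struct pci_dev *pdev,
						 pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	return pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
}
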
883 #define PCI_EXP_SAVE_REGS	7
884 
885 
886 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
887 						       u16 cap, bool extended)
888 {
889 	struct pci_cap_saved_state *tmp;
890 
891 	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
892 		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
893 			return tmp;
894 	}
895 	return NULL;
896 }
897 
898 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
899 {
900 	return _pci_find_saved_cap(dev, cap, false);
901 }
902 
903 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
904 {
905 	return _pci_find_saved_cap(dev, cap, true);
906 }
907 
908 static int pci_save_pcie_state(struct pci_dev *dev)
909 {
910 	int i = 0;
911 	struct pci_cap_saved_state *save_state;
912 	u16 *cap;
913 
914 	if (!pci_is_pcie(dev))
915 		return 0;
916 
917 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
918 	if (!save_state) {
919 		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
920 		return -ENOMEM;
921 	}
922 
923 	cap = (u16 *)&save_state->cap.data[0];
924 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
925 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
926 	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
927 	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
928 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
929 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
930 	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
931 
932 	return 0;
933 }
934 
935 static void pci_restore_pcie_state(struct pci_dev *dev)
936 {
937 	int i = 0;
938 	struct pci_cap_saved_state *save_state;
939 	u16 *cap;
940 
941 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
942 	if (!save_state)
943 		return;
944 
945 	cap = (u16 *)&save_state->cap.data[0];
946 	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
947 	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
948 	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
949 	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
950 	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
951 	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
952 	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
953 }
954 
955 
956 static int pci_save_pcix_state(struct pci_dev *dev)
957 {
958 	int pos;
959 	struct pci_cap_saved_state *save_state;
960 
961 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
962 	if (pos <= 0)
963 		return 0;
964 
965 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
966 	if (!save_state) {
967 		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
968 		return -ENOMEM;
969 	}
970 
971 	pci_read_config_word(dev, pos + PCI_X_CMD,
972 			     (u16 *)save_state->cap.data);
973 
974 	return 0;
975 }
976 
977 static void pci_restore_pcix_state(struct pci_dev *dev)
978 {
979 	int i = 0, pos;
980 	struct pci_cap_saved_state *save_state;
981 	u16 *cap;
982 
983 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
984 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
985 	if (!save_state || pos <= 0)
986 		return;
987 	cap = (u16 *)&save_state->cap.data[0];
988 
989 	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
990 }
991 
992 
993 /**
994  * pci_save_state - save the PCI configuration space of a device before suspending
995  * @dev: - PCI device that we're dealing with
996  */
997 int
998 pci_save_state(struct pci_dev *dev)
999 {
1000 	int i;
1001 	/* XXX: 100% dword access ok here? */
1002 	for (i = 0; i < 16; i++)
1003 		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1004 	dev->state_saved = true;
1005 	if ((i = pci_save_pcie_state(dev)) != 0)
1006 		return i;
1007 	if ((i = pci_save_pcix_state(dev)) != 0)
1008 		return i;
1009 	if ((i = pci_save_vc_state(dev)) != 0)
1010 		return i;
1011 	return 0;
1012 }
1013 
1014 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1015 				     u32 saved_val, int retry)
1016 {
1017 	u32 val;
1018 
1019 	pci_read_config_dword(pdev, offset, &val);
1020 	if (val == saved_val)
1021 		return;
1022 
1023 	for (;;) {
1024 		dev_dbg(&pdev->dev, "restoring config space at offset "
1025 			"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
1026 		pci_write_config_dword(pdev, offset, saved_val);
1027 		if (retry-- <= 0)
1028 			return;
1029 
1030 		pci_read_config_dword(pdev, offset, &val);
1031 		if (val == saved_val)
1032 			return;
1033 
1034 		mdelay(1);
1035 	}
1036 }
1037 
1038 static void pci_restore_config_space_range(struct pci_dev *pdev,
1039 					   int start, int end, int retry)
1040 {
1041 	int index;
1042 
1043 	for (index = end; index >= start; index--)
1044 		pci_restore_config_dword(pdev, 4 * index,
1045 					 pdev->saved_config_space[index],
1046 					 retry);
1047 }
1048 
1049 static void pci_restore_config_space(struct pci_dev *pdev)
1050 {
1051 	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1052 		pci_restore_config_space_range(pdev, 10, 15, 0);
1053 		/* Restore BARs before the command register. */
1054 		pci_restore_config_space_range(pdev, 4, 9, 10);
1055 		pci_restore_config_space_range(pdev, 0, 3, 0);
1056 	} else {
1057 		pci_restore_config_space_range(pdev, 0, 15, 0);
1058 	}
1059 }
1060 
1061 /**
1062  * pci_restore_state - Restore the saved state of a PCI device
1063  * @dev: - PCI device that we're dealing with
1064  */
1065 void pci_restore_state(struct pci_dev *dev)
1066 {
1067 	if (!dev->state_saved)
1068 		return;
1069 
1070 	/* PCI Express register must be restored first */
1071 	pci_restore_pcie_state(dev);
1072 	pci_restore_ats_state(dev);
1073 	pci_restore_vc_state(dev);
1074 
1075 	pci_restore_config_space(dev);
1076 
1077 	pci_restore_pcix_state(dev);
1078 	pci_restore_msi_state(dev);
1079 	pci_restore_iov_state(dev);
1080 
1081 	dev->state_saved = false;
1082 }
1083 
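/*
 * Example (not from the original file): the usual pairing of pci_save_state()
 * on the suspend side with pci_restore_state() on the resume side.  The
 * function names are made up for illustration.
 */
static int __maybe_unused example_suspend_cfg(struct pci_dev *pdev)
{
	return pci_save_state(pdev);	/* snapshot config space and caps */
}

static void __maybe_unused example_resume_cfg(struct pci_dev *pdev)
{
	pci_restore_state(pdev);	/* no-op unless state was saved */
}
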
1084 struct pci_saved_state {
1085 	u32 config_space[16];
1086 	struct pci_cap_saved_data cap[0];
1087 };
1088 
1089 /**
1090  * pci_store_saved_state - Allocate and return an opaque struct containing
1091  *			   the device's saved state.
1092  * @dev: PCI device that we're dealing with
1093  *
1094  * Return NULL if no state or error.
1095  */
1096 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1097 {
1098 	struct pci_saved_state *state;
1099 	struct pci_cap_saved_state *tmp;
1100 	struct pci_cap_saved_data *cap;
1101 	size_t size;
1102 
1103 	if (!dev->state_saved)
1104 		return NULL;
1105 
1106 	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1107 
1108 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1109 		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1110 
1111 	state = kzalloc(size, GFP_KERNEL);
1112 	if (!state)
1113 		return NULL;
1114 
1115 	memcpy(state->config_space, dev->saved_config_space,
1116 	       sizeof(state->config_space));
1117 
1118 	cap = state->cap;
1119 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1120 		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1121 		memcpy(cap, &tmp->cap, len);
1122 		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1123 	}
1124 	/* Empty cap_save terminates list */
1125 
1126 	return state;
1127 }
1128 EXPORT_SYMBOL_GPL(pci_store_saved_state);
1129 
1130 /**
1131  * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1132  * @dev: PCI device that we're dealing with
1133  * @state: Saved state returned from pci_store_saved_state()
1134  */
1135 static int pci_load_saved_state(struct pci_dev *dev,
1136 				struct pci_saved_state *state)
1137 {
1138 	struct pci_cap_saved_data *cap;
1139 
1140 	dev->state_saved = false;
1141 
1142 	if (!state)
1143 		return 0;
1144 
1145 	memcpy(dev->saved_config_space, state->config_space,
1146 	       sizeof(state->config_space));
1147 
1148 	cap = state->cap;
1149 	while (cap->size) {
1150 		struct pci_cap_saved_state *tmp;
1151 
1152 		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1153 		if (!tmp || tmp->cap.size != cap->size)
1154 			return -EINVAL;
1155 
1156 		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1157 		cap = (struct pci_cap_saved_data *)((u8 *)cap +
1158 		       sizeof(struct pci_cap_saved_data) + cap->size);
1159 	}
1160 
1161 	dev->state_saved = true;
1162 	return 0;
1163 }
1164 
1165 /**
1166  * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1167  *				   and free the memory allocated for it.
1168  * @dev: PCI device that we're dealing with
1169  * @state: Pointer to saved state returned from pci_store_saved_state()
1170  */
1171 int pci_load_and_free_saved_state(struct pci_dev *dev,
1172 				  struct pci_saved_state **state)
1173 {
1174 	int ret = pci_load_saved_state(dev, *state);
1175 	kfree(*state);
1176 	*state = NULL;
1177 	return ret;
1178 }
1179 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
1180 
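/*
 * Example (not from the original file): a sketch of keeping a device's saved
 * state across an operation that disturbs it (e.g. a reset) using
 * pci_store_saved_state() and pci_load_and_free_saved_state().  The function
 * name is made up for illustration.
 */
static int __maybe_unused example_keep_state_across_reset(struct pci_dev *pdev)
{
	struct pci_saved_state *state;

	pci_save_state(pdev);
	state = pci_store_saved_state(pdev);	/* may be NULL */

	/* ... reset or otherwise disturb the device here ... */

	if (pci_load_and_free_saved_state(pdev, &state))
		return -EINVAL;

	pci_restore_state(pdev);
	return 0;
}
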
1181 static int do_pci_enable_device(struct pci_dev *dev, int bars)
1182 {
1183 	int err;
1184 
1185 	err = pci_set_power_state(dev, PCI_D0);
1186 	if (err < 0 && err != -EIO)
1187 		return err;
1188 	err = pcibios_enable_device(dev, bars);
1189 	if (err < 0)
1190 		return err;
1191 	pci_fixup_device(pci_fixup_enable, dev);
1192 
1193 	return 0;
1194 }
1195 
1196 /**
1197  * pci_reenable_device - Resume abandoned device
1198  * @dev: PCI device to be resumed
1199  *
1200  *  Note this function is a backend of pci_default_resume and is not supposed
1201  *  to be called by normal code; write a proper resume handler and use that instead.
1202  */
1203 int pci_reenable_device(struct pci_dev *dev)
1204 {
1205 	if (pci_is_enabled(dev))
1206 		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1207 	return 0;
1208 }
1209 
1210 static void pci_enable_bridge(struct pci_dev *dev)
1211 {
1212 	struct pci_dev *bridge;
1213 	int retval;
1214 
1215 	bridge = pci_upstream_bridge(dev);
1216 	if (bridge)
1217 		pci_enable_bridge(bridge);
1218 
1219 	if (pci_is_enabled(dev)) {
1220 		if (!dev->is_busmaster)
1221 			pci_set_master(dev);
1222 		return;
1223 	}
1224 
1225 	retval = pci_enable_device(dev);
1226 	if (retval)
1227 		dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
1228 			retval);
1229 	pci_set_master(dev);
1230 }
1231 
1232 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1233 {
1234 	struct pci_dev *bridge;
1235 	int err;
1236 	int i, bars = 0;
1237 
1238 	/*
1239 	 * Power state could be unknown at this point, either due to a fresh
1240 	 * boot or a device removal call.  So get the current power state
1241 	 * so that things like MSI message writing will behave as expected
1242 	 * (e.g. if the device really is in D0 at enable time).
1243 	 */
1244 	if (dev->pm_cap) {
1245 		u16 pmcsr;
1246 		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1247 		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1248 	}
1249 
1250 	if (atomic_inc_return(&dev->enable_cnt) > 1)
1251 		return 0;		/* already enabled */
1252 
1253 	bridge = pci_upstream_bridge(dev);
1254 	if (bridge)
1255 		pci_enable_bridge(bridge);
1256 
1257 	/* only skip sriov related */
1258 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1259 		if (dev->resource[i].flags & flags)
1260 			bars |= (1 << i);
1261 	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
1262 		if (dev->resource[i].flags & flags)
1263 			bars |= (1 << i);
1264 
1265 	err = do_pci_enable_device(dev, bars);
1266 	if (err < 0)
1267 		atomic_dec(&dev->enable_cnt);
1268 	return err;
1269 }
1270 
1271 /**
1272  * pci_enable_device_io - Initialize a device for use with IO space
1273  * @dev: PCI device to be initialized
1274  *
1275  *  Initialize device before it's used by a driver. Ask low-level code
1276  *  to enable I/O resources. Wake up the device if it was suspended.
1277  *  Beware, this function can fail.
1278  */
1279 int pci_enable_device_io(struct pci_dev *dev)
1280 {
1281 	return pci_enable_device_flags(dev, IORESOURCE_IO);
1282 }
1283 
1284 /**
1285  * pci_enable_device_mem - Initialize a device for use with Memory space
1286  * @dev: PCI device to be initialized
1287  *
1288  *  Initialize device before it's used by a driver. Ask low-level code
1289  *  to enable Memory resources. Wake up the device if it was suspended.
1290  *  Beware, this function can fail.
1291  */
1292 int pci_enable_device_mem(struct pci_dev *dev)
1293 {
1294 	return pci_enable_device_flags(dev, IORESOURCE_MEM);
1295 }
1296 
1297 /**
1298  * pci_enable_device - Initialize device before it's used by a driver.
1299  * @dev: PCI device to be initialized
1300  *
1301  *  Initialize device before it's used by a driver. Ask low-level code
1302  *  to enable I/O and memory. Wake up the device if it was suspended.
1303  *  Beware, this function can fail.
1304  *
1305  *  Note we don't actually enable the device many times if we call
1306  *  this function repeatedly (we just increment the count).
1307  */
1308 int pci_enable_device(struct pci_dev *dev)
1309 {
1310 	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1311 }
1312 
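/*
 * Example (not from the original file): the classic enable/disable pairing
 * in a driver's probe and remove paths.  The function names and the
 * "example" region name are made up for illustration.
 */
static int __maybe_unused example_probe_enable(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "example");
	if (err) {
		pci_disable_device(pdev);
		return err;
	}

	pci_set_master(pdev);
	return 0;
}

static void __maybe_unused example_remove_disable(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
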
1313 /*
1314  * Managed PCI resources.  This manages device on/off, intx/msi/msix
1315  * on/off and BAR regions.  pci_dev itself records msi/msix status, so
1316  * there's no need to track it separately.  pci_devres is initialized
1317  * when a device is enabled using managed PCI device enable interface.
1318  */
1319 struct pci_devres {
1320 	unsigned int enabled:1;
1321 	unsigned int pinned:1;
1322 	unsigned int orig_intx:1;
1323 	unsigned int restore_intx:1;
1324 	u32 region_mask;
1325 };
1326 
1327 static void pcim_release(struct device *gendev, void *res)
1328 {
1329 	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
1330 	struct pci_devres *this = res;
1331 	int i;
1332 
1333 	if (dev->msi_enabled)
1334 		pci_disable_msi(dev);
1335 	if (dev->msix_enabled)
1336 		pci_disable_msix(dev);
1337 
1338 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1339 		if (this->region_mask & (1 << i))
1340 			pci_release_region(dev, i);
1341 
1342 	if (this->restore_intx)
1343 		pci_intx(dev, this->orig_intx);
1344 
1345 	if (this->enabled && !this->pinned)
1346 		pci_disable_device(dev);
1347 }
1348 
1349 static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
1350 {
1351 	struct pci_devres *dr, *new_dr;
1352 
1353 	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1354 	if (dr)
1355 		return dr;
1356 
1357 	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1358 	if (!new_dr)
1359 		return NULL;
1360 	return devres_get(&pdev->dev, new_dr, NULL, NULL);
1361 }
1362 
1363 static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
1364 {
1365 	if (pci_is_managed(pdev))
1366 		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1367 	return NULL;
1368 }
1369 
1370 /**
1371  * pcim_enable_device - Managed pci_enable_device()
1372  * @pdev: PCI device to be initialized
1373  *
1374  * Managed pci_enable_device().
1375  */
1376 int pcim_enable_device(struct pci_dev *pdev)
1377 {
1378 	struct pci_devres *dr;
1379 	int rc;
1380 
1381 	dr = get_pci_dr(pdev);
1382 	if (unlikely(!dr))
1383 		return -ENOMEM;
1384 	if (dr->enabled)
1385 		return 0;
1386 
1387 	rc = pci_enable_device(pdev);
1388 	if (!rc) {
1389 		pdev->is_managed = 1;
1390 		dr->enabled = 1;
1391 	}
1392 	return rc;
1393 }
1394 
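/*
 * Example (not from the original file): the managed variant of the probe
 * sequence, where pcim_enable_device() arranges for the disable (and any
 * pcim_* mappings) to be undone automatically on driver detach.  The
 * function name is made up for illustration.
 */
static int __maybe_unused example_probe_managed(struct pci_dev *pdev)
{
	void __iomem *regs;
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	regs = pcim_iomap(pdev, 0, 0);	/* maxlen 0 maps the whole BAR */
	if (!regs)
		return -ENOMEM;

	pci_set_master(pdev);
	return 0;
}
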
1395 /**
1396  * pcim_pin_device - Pin managed PCI device
1397  * @pdev: PCI device to pin
1398  *
1399  * Pin managed PCI device @pdev.  Pinned device won't be disabled on
1400  * driver detach.  @pdev must have been enabled with
1401  * pcim_enable_device().
1402  */
1403 void pcim_pin_device(struct pci_dev *pdev)
1404 {
1405 	struct pci_devres *dr;
1406 
1407 	dr = find_pci_dr(pdev);
1408 	WARN_ON(!dr || !dr->enabled);
1409 	if (dr)
1410 		dr->pinned = 1;
1411 }
1412 
1413 /*
1414  * pcibios_add_device - provide arch specific hooks when adding device dev
1415  * @dev: the PCI device being added
1416  *
1417  * Permits the platform to provide architecture specific functionality when
1418  * devices are added. This is the default implementation. Architecture
1419  * implementations can override this.
1420  */
1421 int __weak pcibios_add_device (struct pci_dev *dev)
1422 {
1423 	return 0;
1424 }
1425 
1426 /**
1427  * pcibios_release_device - provide arch specific hooks when releasing device dev
1428  * @dev: the PCI device being released
1429  *
1430  * Permits the platform to provide architecture specific functionality when
1431  * devices are released. This is the default implementation. Architecture
1432  * implementations can override this.
1433  */
1434 void __weak pcibios_release_device(struct pci_dev *dev) {}
1435 
1436 /**
1437  * pcibios_disable_device - disable arch specific PCI resources for device dev
1438  * @dev: the PCI device to disable
1439  *
1440  * Disables architecture specific PCI resources for the device. This
1441  * is the default implementation. Architecture implementations can
1442  * override this.
1443  */
1444 void __weak pcibios_disable_device (struct pci_dev *dev) {}
1445 
1446 static void do_pci_disable_device(struct pci_dev *dev)
1447 {
1448 	u16 pci_command;
1449 
1450 	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1451 	if (pci_command & PCI_COMMAND_MASTER) {
1452 		pci_command &= ~PCI_COMMAND_MASTER;
1453 		pci_write_config_word(dev, PCI_COMMAND, pci_command);
1454 	}
1455 
1456 	pcibios_disable_device(dev);
1457 }
1458 
1459 /**
1460  * pci_disable_enabled_device - Disable device without updating enable_cnt
1461  * @dev: PCI device to disable
1462  *
1463  * NOTE: This function is a backend of PCI power management routines and is
1464  * not supposed to be called by drivers.
1465  */
1466 void pci_disable_enabled_device(struct pci_dev *dev)
1467 {
1468 	if (pci_is_enabled(dev))
1469 		do_pci_disable_device(dev);
1470 }
1471 
1472 /**
1473  * pci_disable_device - Disable PCI device after use
1474  * @dev: PCI device to be disabled
1475  *
1476  * Signal to the system that the PCI device is not in use by the system
1477  * anymore.  This only involves disabling PCI bus-mastering, if active.
1478  *
1479  * Note we don't actually disable the device until all callers of
1480  * pci_enable_device() have called pci_disable_device().
1481  */
1482 void
1483 pci_disable_device(struct pci_dev *dev)
1484 {
1485 	struct pci_devres *dr;
1486 
1487 	dr = find_pci_dr(dev);
1488 	if (dr)
1489 		dr->enabled = 0;
1490 
1491 	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
1492 		      "disabling already-disabled device");
1493 
1494 	if (atomic_dec_return(&dev->enable_cnt) != 0)
1495 		return;
1496 
1497 	do_pci_disable_device(dev);
1498 
1499 	dev->is_busmaster = 0;
1500 }
1501 
1502 /**
1503  * pcibios_set_pcie_reset_state - set reset state for device dev
1504  * @dev: the PCIe device reset
1505  * @state: Reset state to enter into
1506  *
1507  *
1508  * Sets the PCIe reset state for the device. This is the default
1509  * implementation. Architecture implementations can override this.
1510  */
1511 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
1512 					enum pcie_reset_state state)
1513 {
1514 	return -EINVAL;
1515 }
1516 
1517 /**
1518  * pci_set_pcie_reset_state - set reset state for device dev
1519  * @dev: the PCIe device reset
1520  * @state: Reset state to enter into
1521  *
1522  *
1523  * Sets the PCI reset state for the device.
1524  */
1525 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1526 {
1527 	return pcibios_set_pcie_reset_state(dev, state);
1528 }
1529 
1530 /**
1531  * pci_check_pme_status - Check if given device has generated PME.
1532  * @dev: Device to check.
1533  *
1534  * Check the PME status of the device and if set, clear it and clear PME enable
1535  * (if set).  Return 'true' if PME status and PME enable were both set or
1536  * 'false' otherwise.
1537  */
1538 bool pci_check_pme_status(struct pci_dev *dev)
1539 {
1540 	int pmcsr_pos;
1541 	u16 pmcsr;
1542 	bool ret = false;
1543 
1544 	if (!dev->pm_cap)
1545 		return false;
1546 
1547 	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1548 	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1549 	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1550 		return false;
1551 
1552 	/* Clear PME status. */
1553 	pmcsr |= PCI_PM_CTRL_PME_STATUS;
1554 	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1555 		/* Disable PME to avoid interrupt flood. */
1556 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1557 		ret = true;
1558 	}
1559 
1560 	pci_write_config_word(dev, pmcsr_pos, pmcsr);
1561 
1562 	return ret;
1563 }
1564 
1565 /**
1566  * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1567  * @dev: Device to handle.
1568  * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
1569  *
1570  * Check if @dev has generated PME and queue a resume request for it in that
1571  * case.
1572  */
1573 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
1574 {
1575 	if (pme_poll_reset && dev->pme_poll)
1576 		dev->pme_poll = false;
1577 
1578 	if (pci_check_pme_status(dev)) {
1579 		pci_wakeup_event(dev);
1580 		pm_request_resume(&dev->dev);
1581 	}
1582 	return 0;
1583 }
1584 
1585 /**
1586  * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1587  * @bus: Top bus of the subtree to walk.
1588  */
1589 void pci_pme_wakeup_bus(struct pci_bus *bus)
1590 {
1591 	if (bus)
1592 		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
1593 }
1594 
1595 
1596 /**
1597  * pci_pme_capable - check the capability of PCI device to generate PME#
1598  * @dev: PCI device to handle.
1599  * @state: PCI state from which device will issue PME#.
1600  */
1601 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1602 {
1603 	if (!dev->pm_cap)
1604 		return false;
1605 
1606 	return !!(dev->pme_support & (1 << state));
1607 }
1608 
1609 static void pci_pme_list_scan(struct work_struct *work)
1610 {
1611 	struct pci_pme_device *pme_dev, *n;
1612 
1613 	mutex_lock(&pci_pme_list_mutex);
1614 	if (!list_empty(&pci_pme_list)) {
1615 		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1616 			if (pme_dev->dev->pme_poll) {
1617 				struct pci_dev *bridge;
1618 
1619 				bridge = pme_dev->dev->bus->self;
1620 				/*
1621 				 * If bridge is in low power state, the
1622 				 * configuration space of subordinate devices
1623 				 * may not be accessible
1624 				 */
1625 				if (bridge && bridge->current_state != PCI_D0)
1626 					continue;
1627 				pci_pme_wakeup(pme_dev->dev, NULL);
1628 			} else {
1629 				list_del(&pme_dev->list);
1630 				kfree(pme_dev);
1631 			}
1632 		}
1633 		if (!list_empty(&pci_pme_list))
1634 			schedule_delayed_work(&pci_pme_work,
1635 					      msecs_to_jiffies(PME_TIMEOUT));
1636 	}
1637 	mutex_unlock(&pci_pme_list_mutex);
1638 }
1639 
1640 /**
1641  * pci_pme_active - enable or disable PCI device's PME# function
1642  * @dev: PCI device to handle.
1643  * @enable: 'true' to enable PME# generation; 'false' to disable it.
1644  *
1645  * The caller must verify that the device is capable of generating PME# before
1646  * calling this function with @enable equal to 'true'.
1647  */
1648 void pci_pme_active(struct pci_dev *dev, bool enable)
1649 {
1650 	u16 pmcsr;
1651 
1652 	if (!dev->pme_support)
1653 		return;
1654 
1655 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1656 	/* Clear PME_Status by writing 1 to it and enable PME# */
1657 	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1658 	if (!enable)
1659 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1660 
1661 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1662 
1663 	/*
1664 	 * PCI (as opposed to PCIe) PME requires that the device have
1665 	 * its PME# line hooked up correctly. Not all hardware vendors
1666 	 * do this, so the PME never gets delivered and the device
1667 	 * remains asleep. The easiest way around this is to
1668 	 * periodically walk the list of suspended devices and check
1669 	 * whether any have their PME flag set. The assumption is that
1670 	 * we'll wake up often enough anyway that this won't be a huge
1671 	 * hit, and the power savings from the devices will still be a
1672 	 * win.
1673 	 *
1674 	 * Although PCIe uses in-band PME message instead of PME# line
1675 	 * to report PME, PME does not work for some PCIe devices in
1676 	 * reality.  For example, there are devices that set their PME
1677 	 * status bits, but don't really bother to send a PME message;
1678 	 * there are PCI Express Root Ports that don't bother to
1679 	 * trigger interrupts when they receive PME messages from the
1680 	 * devices below.  So PME poll is used for PCIe devices too.
1681 	 */
1682 
1683 	if (dev->pme_poll) {
1684 		struct pci_pme_device *pme_dev;
1685 		if (enable) {
1686 			pme_dev = kmalloc(sizeof(struct pci_pme_device),
1687 					  GFP_KERNEL);
1688 			if (!pme_dev) {
1689 				dev_warn(&dev->dev, "can't enable PME#\n");
1690 				return;
1691 			}
1692 			pme_dev->dev = dev;
1693 			mutex_lock(&pci_pme_list_mutex);
1694 			list_add(&pme_dev->list, &pci_pme_list);
1695 			if (list_is_singular(&pci_pme_list))
1696 				schedule_delayed_work(&pci_pme_work,
1697 						      msecs_to_jiffies(PME_TIMEOUT));
1698 			mutex_unlock(&pci_pme_list_mutex);
1699 		} else {
1700 			mutex_lock(&pci_pme_list_mutex);
1701 			list_for_each_entry(pme_dev, &pci_pme_list, list) {
1702 				if (pme_dev->dev == dev) {
1703 					list_del(&pme_dev->list);
1704 					kfree(pme_dev);
1705 					break;
1706 				}
1707 			}
1708 			mutex_unlock(&pci_pme_list_mutex);
1709 		}
1710 	}
1711 
1712 	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
1713 }
1714 
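/*
 * Example (not from the original file): the check-then-enable pattern the
 * kernel-doc above requires, where pci_pme_capable() confirms the device can
 * signal PME# from the target state before pci_pme_active() is called.  The
 * function name is made up for illustration.
 */
static void __maybe_unused example_arm_pme(struct pci_dev *pdev)
{
	if (pci_pme_capable(pdev, PCI_D3hot))
		pci_pme_active(pdev, true);
}
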
1715 /**
1716  * __pci_enable_wake - enable PCI device as wakeup event source
1717  * @dev: PCI device affected
1718  * @state: PCI state from which device will issue wakeup events
1719  * @runtime: True if the events are to be generated at run time
1720  * @enable: True to enable event generation; false to disable
1721  *
1722  * This enables the device as a wakeup event source, or disables it.
1723  * When such events involve platform-specific hooks, those hooks are
1724  * called automatically by this routine.
1725  *
1726  * Devices with legacy power management (no standard PCI PM capabilities)
1727  * always require such platform hooks.
1728  *
1729  * RETURN VALUE:
1730  * 0 is returned on success
1731  * -EINVAL is returned if device is not supposed to wake up the system
1732  * Error code depending on the platform is returned if both the platform and
1733  * the native mechanism fail to enable the generation of wake-up events
1734  */
1735 int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1736 		      bool runtime, bool enable)
1737 {
1738 	int ret = 0;
1739 
1740 	if (enable && !runtime && !device_may_wakeup(&dev->dev))
1741 		return -EINVAL;
1742 
1743 	/* Don't do the same thing twice in a row for one device. */
1744 	if (!!enable == !!dev->wakeup_prepared)
1745 		return 0;
1746 
1747 	/*
1748 	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1749 	 * Anderson we should be doing PME# wake enable followed by ACPI wake
1750 	 * enable.  To disable wake-up we call the platform first, for symmetry.
1751 	 */
1752 
1753 	if (enable) {
1754 		int error;
1755 
1756 		if (pci_pme_capable(dev, state))
1757 			pci_pme_active(dev, true);
1758 		else
1759 			ret = 1;
1760 		error = runtime ? platform_pci_run_wake(dev, true) :
1761 					platform_pci_sleep_wake(dev, true);
1762 		if (ret)
1763 			ret = error;
1764 		if (!ret)
1765 			dev->wakeup_prepared = true;
1766 	} else {
1767 		if (runtime)
1768 			platform_pci_run_wake(dev, false);
1769 		else
1770 			platform_pci_sleep_wake(dev, false);
1771 		pci_pme_active(dev, false);
1772 		dev->wakeup_prepared = false;
1773 	}
1774 
1775 	return ret;
1776 }
1777 EXPORT_SYMBOL(__pci_enable_wake);
1778 
1779 /**
1780  * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1781  * @dev: PCI device to prepare
1782  * @enable: True to enable wake-up event generation; false to disable
1783  *
1784  * Many drivers want the device to wake up the system from D3_hot or D3_cold
1785  * and this function allows them to set that up cleanly - pci_enable_wake()
1786  * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1787  * ordering constraints.
1788  *
1789  * This function only returns error code if the device is not capable of
1790  * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1791  * enable wake-up power for it.
1792  */
1793 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1794 {
1795 	return pci_pme_capable(dev, PCI_D3cold) ?
1796 			pci_enable_wake(dev, PCI_D3cold, enable) :
1797 			pci_enable_wake(dev, PCI_D3hot, enable);
1798 }
1799 
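/*
 * Example (not from the original file): a typical Wake-on-LAN style call
 * site for pci_wake_from_d3(), driven by a hypothetical driver-private
 * "wol_enabled" flag.  The function name is made up for illustration.
 */
static void __maybe_unused example_configure_wol(struct pci_dev *pdev,
						 bool wol_enabled)
{
	if (pci_wake_from_d3(pdev, wol_enabled))
		dev_info(&pdev->dev, "wake-up from D3 could not be enabled\n");
}
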
1800 /**
1801  * pci_target_state - find an appropriate low power state for a given PCI dev
1802  * @dev: PCI device
1803  *
1804  * Use underlying platform code to find a supported low power state for @dev.
1805  * If the platform can't manage @dev, return the deepest state from which it
1806  * can generate wake events, based on any available PME info.
1807  */
1808 static pci_power_t pci_target_state(struct pci_dev *dev)
1809 {
1810 	pci_power_t target_state = PCI_D3hot;
1811 
1812 	if (platform_pci_power_manageable(dev)) {
1813 		/*
1814 		 * Call the platform to choose the target state of the device
1815 		 * and enable wake-up from this state if supported.
1816 		 */
1817 		pci_power_t state = platform_pci_choose_state(dev);
1818 
1819 		switch (state) {
1820 		case PCI_POWER_ERROR:
1821 		case PCI_UNKNOWN:
1822 			break;
1823 		case PCI_D1:
1824 		case PCI_D2:
1825 			if (pci_no_d1d2(dev))
1826 				break;
1827 		default:
1828 			target_state = state;
1829 		}
1830 	} else if (!dev->pm_cap) {
1831 		target_state = PCI_D0;
1832 	} else if (device_may_wakeup(&dev->dev)) {
1833 		/*
1834 		 * Find the deepest state from which the device can generate
1835 		 * wake-up events, make it the target state and enable device
1836 		 * to generate PME#.
1837 		 */
1838 		if (dev->pme_support) {
1839 			while (target_state
1840 			      && !(dev->pme_support & (1 << target_state)))
1841 				target_state--;
1842 		}
1843 	}
1844 
1845 	return target_state;
1846 }
1847 
1848 /**
1849  * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1850  * @dev: Device to handle.
1851  *
1852  * Choose the power state appropriate for the device depending on whether
1853  * it can wake up the system and/or is power manageable by the platform
1854  * (PCI_D3hot is the default) and put the device into that state.
1855  */
1856 int pci_prepare_to_sleep(struct pci_dev *dev)
1857 {
1858 	pci_power_t target_state = pci_target_state(dev);
1859 	int error;
1860 
1861 	if (target_state == PCI_POWER_ERROR)
1862 		return -EIO;
1863 
1864 	/* D3cold during system suspend/hibernate is not supported */
1865 	if (target_state > PCI_D3hot)
1866 		target_state = PCI_D3hot;
1867 
1868 	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1869 
1870 	error = pci_set_power_state(dev, target_state);
1871 
1872 	if (error)
1873 		pci_enable_wake(dev, target_state, false);
1874 
1875 	return error;
1876 }
1877 
1878 /**
1879  * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
1880  * @dev: Device to handle.
1881  *
1882  * Disable device's system wake-up capability and put it into D0.
1883  */
1884 int pci_back_from_sleep(struct pci_dev *dev)
1885 {
1886 	pci_enable_wake(dev, PCI_D0, false);
1887 	return pci_set_power_state(dev, PCI_D0);
1888 }
1889 
1890 /**
1891  * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1892  * @dev: PCI device being suspended.
1893  *
1894  * Prepare @dev to generate wake-up events at run time and put it into a low
1895  * power state.
1896  */
1897 int pci_finish_runtime_suspend(struct pci_dev *dev)
1898 {
1899 	pci_power_t target_state = pci_target_state(dev);
1900 	int error;
1901 
1902 	if (target_state == PCI_POWER_ERROR)
1903 		return -EIO;
1904 
1905 	dev->runtime_d3cold = target_state == PCI_D3cold;
1906 
1907 	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1908 
1909 	error = pci_set_power_state(dev, target_state);
1910 
1911 	if (error) {
1912 		__pci_enable_wake(dev, target_state, true, false);
1913 		dev->runtime_d3cold = false;
1914 	}
1915 
1916 	return error;
1917 }
1918 
1919 /**
1920  * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1921  * @dev: Device to check.
1922  *
1923  * Return true if the device itself is capable of generating wake-up events
1924  * (through the platform or using the native PCIe PME) or if the device supports
1925  * PME and one of its upstream bridges can generate wake-up events.
1926  */
1927 bool pci_dev_run_wake(struct pci_dev *dev)
1928 {
1929 	struct pci_bus *bus = dev->bus;
1930 
1931 	if (device_run_wake(&dev->dev))
1932 		return true;
1933 
1934 	if (!dev->pme_support)
1935 		return false;
1936 
1937 	while (bus->parent) {
1938 		struct pci_dev *bridge = bus->self;
1939 
1940 		if (device_run_wake(&bridge->dev))
1941 			return true;
1942 
1943 		bus = bus->parent;
1944 	}
1945 
1946 	/* We have reached the root bus. */
1947 	if (bus->bridge)
1948 		return device_run_wake(bus->bridge);
1949 
1950 	return false;
1951 }
1952 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1953 
1954 void pci_config_pm_runtime_get(struct pci_dev *pdev)
1955 {
1956 	struct device *dev = &pdev->dev;
1957 	struct device *parent = dev->parent;
1958 
1959 	if (parent)
1960 		pm_runtime_get_sync(parent);
1961 	pm_runtime_get_noresume(dev);
1962 	/*
1963 	 * pdev->current_state is set to PCI_D3cold during suspending,
1964 	 * so wait until suspending completes
1965 	 */
1966 	pm_runtime_barrier(dev);
1967 	/*
1968 	 * Only need to resume devices in D3cold, because config
1969 	 * registers are still accessible for devices suspended but
1970 	 * not in D3cold.
1971 	 */
1972 	if (pdev->current_state == PCI_D3cold)
1973 		pm_runtime_resume(dev);
1974 }
1975 
1976 void pci_config_pm_runtime_put(struct pci_dev *pdev)
1977 {
1978 	struct device *dev = &pdev->dev;
1979 	struct device *parent = dev->parent;
1980 
1981 	pm_runtime_put(dev);
1982 	if (parent)
1983 		pm_runtime_put_sync(parent);
1984 }
1985 
1986 /**
1987  * pci_pm_init - Initialize PM functions of given PCI device
1988  * @dev: PCI device to handle.
1989  */
1990 void pci_pm_init(struct pci_dev *dev)
1991 {
1992 	int pm;
1993 	u16 pmc;
1994 
1995 	pm_runtime_forbid(&dev->dev);
1996 	pm_runtime_set_active(&dev->dev);
1997 	pm_runtime_enable(&dev->dev);
1998 	device_enable_async_suspend(&dev->dev);
1999 	dev->wakeup_prepared = false;
2000 
2001 	dev->pm_cap = 0;
2002 	dev->pme_support = 0;
2003 
2004 	/* find PCI PM capability in list */
2005 	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
2006 	if (!pm)
2007 		return;
2008 	/* Check device's ability to generate PME# */
2009 	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
2010 
2011 	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
2012 		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
2013 			pmc & PCI_PM_CAP_VER_MASK);
2014 		return;
2015 	}
2016 
2017 	dev->pm_cap = pm;
2018 	dev->d3_delay = PCI_PM_D3_WAIT;
2019 	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
2020 	dev->d3cold_allowed = true;
2021 
2022 	dev->d1_support = false;
2023 	dev->d2_support = false;
2024 	if (!pci_no_d1d2(dev)) {
2025 		if (pmc & PCI_PM_CAP_D1)
2026 			dev->d1_support = true;
2027 		if (pmc & PCI_PM_CAP_D2)
2028 			dev->d2_support = true;
2029 
2030 		if (dev->d1_support || dev->d2_support)
2031 			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
2032 				   dev->d1_support ? " D1" : "",
2033 				   dev->d2_support ? " D2" : "");
2034 	}
2035 
2036 	pmc &= PCI_PM_CAP_PME_MASK;
2037 	if (pmc) {
2038 		dev_printk(KERN_DEBUG, &dev->dev,
2039 			 "PME# supported from%s%s%s%s%s\n",
2040 			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
2041 			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
2042 			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
2043 			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
2044 			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
2045 		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
2046 		dev->pme_poll = true;
2047 		/*
2048 		 * Make device's PM flags reflect the wake-up capability, but
2049 		 * let user space enable it to wake up the system as needed.
2050 		 */
2051 		device_set_wakeup_capable(&dev->dev, true);
2052 		/* Disable the PME# generation functionality */
2053 		pci_pme_active(dev, false);
2054 	}
2055 }
2056 
2057 static void pci_add_saved_cap(struct pci_dev *pci_dev,
2058 	struct pci_cap_saved_state *new_cap)
2059 {
2060 	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
2061 }
2062 
2063 /**
2064  * _pci_add_cap_save_buffer - allocate buffer for saving given
2065  *                            capability registers
2066  * @dev: the PCI device
2067  * @cap: the capability to allocate the buffer for
2068  * @extended: true if @cap is a PCI Express Extended Capability ID, false for a standard one
2069  * @size: requested size of the buffer
2070  */
2071 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
2072 				    bool extended, unsigned int size)
2073 {
2074 	int pos;
2075 	struct pci_cap_saved_state *save_state;
2076 
2077 	if (extended)
2078 		pos = pci_find_ext_capability(dev, cap);
2079 	else
2080 		pos = pci_find_capability(dev, cap);
2081 
2082 	if (pos <= 0)
2083 		return 0;
2084 
2085 	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
2086 	if (!save_state)
2087 		return -ENOMEM;
2088 
2089 	save_state->cap.cap_nr = cap;
2090 	save_state->cap.cap_extended = extended;
2091 	save_state->cap.size = size;
2092 	pci_add_saved_cap(dev, save_state);
2093 
2094 	return 0;
2095 }
2096 
2097 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
2098 {
2099 	return _pci_add_cap_save_buffer(dev, cap, false, size);
2100 }
2101 
2102 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
2103 {
2104 	return _pci_add_cap_save_buffer(dev, cap, true, size);
2105 }
2106 
2107 /**
2108  * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
2109  * @dev: the PCI device
2110  */
2111 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2112 {
2113 	int error;
2114 
2115 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
2116 					PCI_EXP_SAVE_REGS * sizeof(u16));
2117 	if (error)
2118 		dev_err(&dev->dev,
2119 			"unable to preallocate PCI Express save buffer\n");
2120 
2121 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
2122 	if (error)
2123 		dev_err(&dev->dev,
2124 			"unable to preallocate PCI-X save buffer\n");
2125 
2126 	pci_allocate_vc_save_buffers(dev);
2127 }
2128 
2129 void pci_free_cap_save_buffers(struct pci_dev *dev)
2130 {
2131 	struct pci_cap_saved_state *tmp;
2132 	struct hlist_node *n;
2133 
2134 	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
2135 		kfree(tmp);
2136 }
2137 
2138 /**
2139  * pci_configure_ari - enable or disable ARI forwarding
2140  * @dev: the PCI device
2141  *
2142  * If @dev and its upstream bridge both support ARI, enable ARI in the
2143  * bridge.  Otherwise, disable ARI in the bridge.
2144  */
2145 void pci_configure_ari(struct pci_dev *dev)
2146 {
2147 	u32 cap;
2148 	struct pci_dev *bridge;
2149 
2150 	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
2151 		return;
2152 
2153 	bridge = dev->bus->self;
2154 	if (!bridge)
2155 		return;
2156 
2157 	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
2158 	if (!(cap & PCI_EXP_DEVCAP2_ARI))
2159 		return;
2160 
2161 	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
2162 		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
2163 					 PCI_EXP_DEVCTL2_ARI);
2164 		bridge->ari_enabled = 1;
2165 	} else {
2166 		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
2167 					   PCI_EXP_DEVCTL2_ARI);
2168 		bridge->ari_enabled = 0;
2169 	}
2170 }
2171 
2172 static int pci_acs_enable;
2173 
2174 /**
2175  * pci_request_acs - ask for ACS to be enabled if supported
2176  */
2177 void pci_request_acs(void)
2178 {
2179 	pci_acs_enable = 1;
2180 }
2181 
2182 /**
2183  * pci_enable_acs - enable ACS if hardware supports it
2184  * @dev: the PCI device
2185  */
2186 void pci_enable_acs(struct pci_dev *dev)
2187 {
2188 	int pos;
2189 	u16 cap;
2190 	u16 ctrl;
2191 
2192 	if (!pci_acs_enable)
2193 		return;
2194 
2195 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2196 	if (!pos)
2197 		return;
2198 
2199 	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2200 	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2201 
2202 	/* Source Validation */
2203 	ctrl |= (cap & PCI_ACS_SV);
2204 
2205 	/* P2P Request Redirect */
2206 	ctrl |= (cap & PCI_ACS_RR);
2207 
2208 	/* P2P Completion Redirect */
2209 	ctrl |= (cap & PCI_ACS_CR);
2210 
2211 	/* Upstream Forwarding */
2212 	ctrl |= (cap & PCI_ACS_UF);
2213 
2214 	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2215 }
2216 
2217 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
2218 {
2219 	int pos;
2220 	u16 cap, ctrl;
2221 
2222 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2223 	if (!pos)
2224 		return false;
2225 
2226 	/*
2227 	 * Except for egress control, capabilities are either required
2228 	 * or only required if controllable.  Features missing from the
2229 	 * capability field can therefore be assumed to be hard-wired enabled.
2230 	 */
2231 	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
2232 	acs_flags &= (cap | PCI_ACS_EC);
2233 
2234 	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2235 	return (ctrl & acs_flags) == acs_flags;
2236 }
2237 
2238 /**
2239  * pci_acs_enabled - test ACS against required flags for a given device
2240  * @pdev: device to test
2241  * @acs_flags: required PCI ACS flags
2242  *
2243  * Return true if the device supports the provided flags.  Automatically
2244  * filters out flags that are not implemented on multifunction devices.
2245  *
2246  * Note that this interface checks the effective ACS capabilities of the
2247  * device rather than the actual capabilities.  For instance, most single
2248  * function endpoints are not required to support ACS because they have no
2249  * opportunity for peer-to-peer access.  We therefore return 'true'
2250  * regardless of whether the device exposes an ACS capability.  This makes
2251  * it much easier for callers of this function to ignore the actual type
2252  * or topology of the device when testing ACS support.
2253  */
2254 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2255 {
2256 	int ret;
2257 
2258 	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2259 	if (ret >= 0)
2260 		return ret > 0;
2261 
2262 	/*
2263 	 * Conventional PCI and PCI-X devices never support ACS, either
2264 	 * effectively or actually.  The shared bus topology implies that
2265 	 * any device on the bus can receive or snoop DMA.
2266 	 */
2267 	if (!pci_is_pcie(pdev))
2268 		return false;
2269 
2270 	switch (pci_pcie_type(pdev)) {
2271 	/*
2272 	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
2273 	 * but since their primary interface is PCI/X, we conservatively
2274 	 * handle them as we would a non-PCIe device.
2275 	 */
2276 	case PCI_EXP_TYPE_PCIE_BRIDGE:
2277 	/*
2278 	 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
2279 	 * applicable... must never implement an ACS Extended Capability...".
2280 	 * This seems arbitrary, but we take a conservative interpretation
2281 	 * of this statement.
2282 	 */
2283 	case PCI_EXP_TYPE_PCI_BRIDGE:
2284 	case PCI_EXP_TYPE_RC_EC:
2285 		return false;
2286 	/*
2287 	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
2288 	 * implement ACS in order to indicate their peer-to-peer capabilities,
2289 	 * regardless of whether they are single- or multi-function devices.
2290 	 */
2291 	case PCI_EXP_TYPE_DOWNSTREAM:
2292 	case PCI_EXP_TYPE_ROOT_PORT:
2293 		return pci_acs_flags_enabled(pdev, acs_flags);
2294 	/*
2295 	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
2296 	 * implemented by the remaining PCIe types to indicate peer-to-peer
2297 	 * capabilities, but only when they are part of a multifunction
2298 	 * device.  The footnote for section 6.12 indicates the specific
2299 	 * PCIe types included here.
2300 	 */
2301 	case PCI_EXP_TYPE_ENDPOINT:
2302 	case PCI_EXP_TYPE_UPSTREAM:
2303 	case PCI_EXP_TYPE_LEG_END:
2304 	case PCI_EXP_TYPE_RC_END:
2305 		if (!pdev->multifunction)
2306 			break;
2307 
2308 		return pci_acs_flags_enabled(pdev, acs_flags);
2309 	}
2310 
2311 	/*
2312 	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
2313 	 * to single function devices with the exception of downstream ports.
2314 	 */
2315 	return true;
2316 }
2317 
2318 /**
2319  * pci_acs_path_enable - test ACS flags from start to end in a hierarchy
2320  * @start: starting downstream device
2321  * @end: ending upstream device or NULL to search to the root bus
2322  * @acs_flags: required flags
2323  *
2324  * Walk up a device tree from start to end testing PCI ACS support.  If
2325  * any step along the way does not support the required flags, return false.
2326  */
2327 bool pci_acs_path_enabled(struct pci_dev *start,
2328 			  struct pci_dev *end, u16 acs_flags)
2329 {
2330 	struct pci_dev *pdev, *parent = start;
2331 
2332 	do {
2333 		pdev = parent;
2334 
2335 		if (!pci_acs_enabled(pdev, acs_flags))
2336 			return false;
2337 
2338 		if (pci_is_root_bus(pdev->bus))
2339 			return (end == NULL);
2340 
2341 		parent = pdev->bus->self;
2342 	} while (pdev != end);
2343 
2344 	return true;
2345 }
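/*
 * Illustrative sketch (an assumption, not code from this file): in-kernel
 * users such as IOMMU grouping code typically test a whole path with the
 * four flags that matter for peer-to-peer isolation, roughly:
 *
 *	#define MY_REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
 *
 *	if (pci_acs_path_enabled(pdev, NULL, MY_REQ_ACS_FLAGS))
 *		isolated = true;
 *
 * "MY_REQ_ACS_FLAGS", "pdev" and "isolated" are hypothetical names used
 * only for the example.
 */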
2346 
2347 /**
2348  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2349  * @dev: the PCI device
2350  * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2351  *
2352  * Perform INTx swizzling for a device behind one level of bridge.  This is
2353  * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2354  * behind bridges on add-in cards.  For devices with ARI enabled, the slot
2355  * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2356  * the PCI Express Base Specification, Revision 2.1)
2357  */
2358 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
2359 {
2360 	int slot;
2361 
2362 	if (pci_ari_enabled(dev->bus))
2363 		slot = 0;
2364 	else
2365 		slot = PCI_SLOT(dev->devfn);
2366 
2367 	return (((pin - 1) + slot) % 4) + 1;
2368 }
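/*
 * Worked example (illustrative, using the formula above): a device in slot 3
 * asserting INTB (pin 2) appears at the bridge as ((2 - 1) + 3) % 4 + 1 = 1,
 * i.e. INTA; a device in slot 4 asserting INTB maps to
 * ((2 - 1) + 4) % 4 + 1 = 2, i.e. INTB again after the wrap-around.
 */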
2369 
2370 int
2371 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2372 {
2373 	u8 pin;
2374 
2375 	pin = dev->pin;
2376 	if (!pin)
2377 		return -1;
2378 
2379 	while (!pci_is_root_bus(dev->bus)) {
2380 		pin = pci_swizzle_interrupt_pin(dev, pin);
2381 		dev = dev->bus->self;
2382 	}
2383 	*bridge = dev;
2384 	return pin;
2385 }
2386 
2387 /**
2388  * pci_common_swizzle - swizzle INTx all the way to root bridge
2389  * @dev: the PCI device
2390  * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2391  *
2392  * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
2393  * bridges all the way up to a PCI root bus.
2394  */
2395 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2396 {
2397 	u8 pin = *pinp;
2398 
2399 	while (!pci_is_root_bus(dev->bus)) {
2400 		pin = pci_swizzle_interrupt_pin(dev, pin);
2401 		dev = dev->bus->self;
2402 	}
2403 	*pinp = pin;
2404 	return PCI_SLOT(dev->devfn);
2405 }
2406 
2407 /**
2408  *	pci_release_region - Release a PCI BAR
2409  *	@pdev: PCI device whose resources were previously reserved by pci_request_region
2410  *	@bar: BAR to release
2411  *
2412  *	Releases the PCI I/O and memory resources previously reserved by a
2413  *	successful call to pci_request_region.  Call this function only
2414  *	after all use of the PCI regions has ceased.
2415  */
2416 void pci_release_region(struct pci_dev *pdev, int bar)
2417 {
2418 	struct pci_devres *dr;
2419 
2420 	if (pci_resource_len(pdev, bar) == 0)
2421 		return;
2422 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2423 		release_region(pci_resource_start(pdev, bar),
2424 				pci_resource_len(pdev, bar));
2425 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2426 		release_mem_region(pci_resource_start(pdev, bar),
2427 				pci_resource_len(pdev, bar));
2428 
2429 	dr = find_pci_dr(pdev);
2430 	if (dr)
2431 		dr->region_mask &= ~(1 << bar);
2432 }
2433 
2434 /**
2435  *	__pci_request_region - Reserve PCI I/O and memory resource
2436  *	@pdev: PCI device whose resources are to be reserved
2437  *	@bar: BAR to be reserved
2438  *	@res_name: Name to be associated with resource.
2439  *	@exclusive: whether the region access is exclusive or not
2440  *
2441  *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2442  *	being reserved by owner @res_name.  Do not access any
2443  *	address inside the PCI regions unless this call returns
2444  *	successfully.
2445  *
2446  *	If @exclusive is set, then the region is marked so that userspace
2447  *	is explicitly not allowed to map the resource via /dev/mem or
2448  *	sysfs MMIO access.
2449  *
2450  *	Returns 0 on success, or %EBUSY on error.  A warning
2451  *	message is also printed on failure.
2452  */
2453 static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2454 									int exclusive)
2455 {
2456 	struct pci_devres *dr;
2457 
2458 	if (pci_resource_len(pdev, bar) == 0)
2459 		return 0;
2460 
2461 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2462 		if (!request_region(pci_resource_start(pdev, bar),
2463 			    pci_resource_len(pdev, bar), res_name))
2464 			goto err_out;
2465 	}
2466 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
2467 		if (!__request_mem_region(pci_resource_start(pdev, bar),
2468 					pci_resource_len(pdev, bar), res_name,
2469 					exclusive))
2470 			goto err_out;
2471 	}
2472 
2473 	dr = find_pci_dr(pdev);
2474 	if (dr)
2475 		dr->region_mask |= 1 << bar;
2476 
2477 	return 0;
2478 
2479 err_out:
2480 	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
2481 		 &pdev->resource[bar]);
2482 	return -EBUSY;
2483 }
2484 
2485 /**
2486  *	pci_request_region - Reserve PCI I/O and memory resource
2487  *	@pdev: PCI device whose resources are to be reserved
2488  *	@bar: BAR to be reserved
2489  *	@res_name: Name to be associated with resource
2490  *
2491  *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2492  *	being reserved by owner @res_name.  Do not access any
2493  *	address inside the PCI regions unless this call returns
2494  *	successfully.
2495  *
2496  *	Returns 0 on success, or %EBUSY on error.  A warning
2497  *	message is also printed on failure.
2498  */
2499 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2500 {
2501 	return __pci_request_region(pdev, bar, res_name, 0);
2502 }
2503 
2504 /**
2505  *	pci_request_region_exclusive - Reserve PCI I/O and memory resource
2506  *	@pdev: PCI device whose resources are to be reserved
2507  *	@bar: BAR to be reserved
2508  *	@res_name: Name to be associated with resource.
2509  *
2510  *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2511  *	being reserved by owner @res_name.  Do not access any
2512  *	address inside the PCI regions unless this call returns
2513  *	successfully.
2514  *
2515  *	Returns 0 on success, or %EBUSY on error.  A warning
2516  *	message is also printed on failure.
2517  *
2518  *	The key difference that _exclusive makes is that userspace is
2519  *	explicitly not allowed to map the resource via /dev/mem or
2520  *	sysfs.
2521  */
2522 int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2523 {
2524 	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2525 }
2526 /**
2527  * pci_release_selected_regions - Release selected PCI I/O and memory resources
2528  * @pdev: PCI device whose resources were previously reserved
2529  * @bars: Bitmask of BARs to be released
2530  *
2531  * Release selected PCI I/O and memory resources previously reserved.
2532  * Call this function only after all use of the PCI regions has ceased.
2533  */
2534 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2535 {
2536 	int i;
2537 
2538 	for (i = 0; i < 6; i++)
2539 		if (bars & (1 << i))
2540 			pci_release_region(pdev, i);
2541 }
2542 
2543 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2544 				 const char *res_name, int excl)
2545 {
2546 	int i;
2547 
2548 	for (i = 0; i < 6; i++)
2549 		if (bars & (1 << i))
2550 			if (__pci_request_region(pdev, i, res_name, excl))
2551 				goto err_out;
2552 	return 0;
2553 
2554 err_out:
2555 	while (--i >= 0)
2556 		if (bars & (1 << i))
2557 			pci_release_region(pdev, i);
2558 
2559 	return -EBUSY;
2560 }
2561 
2562 
2563 /**
2564  * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2565  * @pdev: PCI device whose resources are to be reserved
2566  * @bars: Bitmask of BARs to be requested
2567  * @res_name: Name to be associated with resource
2568  */
2569 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2570 				 const char *res_name)
2571 {
2572 	return __pci_request_selected_regions(pdev, bars, res_name, 0);
2573 }
2574 
2575 int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2576 				 int bars, const char *res_name)
2577 {
2578 	return __pci_request_selected_regions(pdev, bars, res_name,
2579 			IORESOURCE_EXCLUSIVE);
2580 }
2581 
2582 /**
2583  *	pci_release_regions - Release reserved PCI I/O and memory resources
2584  *	@pdev: PCI device whose resources were previously reserved by pci_request_regions
2585  *
2586  *	Releases all PCI I/O and memory resources previously reserved by a
2587  *	successful call to pci_request_regions.  Call this function only
2588  *	after all use of the PCI regions has ceased.
2589  */
2590 
2591 void pci_release_regions(struct pci_dev *pdev)
2592 {
2593 	pci_release_selected_regions(pdev, (1 << 6) - 1);
2594 }
2595 
2596 /**
2597  *	pci_request_regions - Reserve PCI I/O and memory resources
2598  *	@pdev: PCI device whose resources are to be reserved
2599  *	@res_name: Name to be associated with resource.
2600  *
2601  *	Mark all PCI regions associated with PCI device @pdev as
2602  *	being reserved by owner @res_name.  Do not access any
2603  *	address inside the PCI regions unless this call returns
2604  *	successfully.
2605  *
2606  *	Returns 0 on success, or %EBUSY on error.  A warning
2607  *	message is also printed on failure.
2608  */
2609 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2610 {
2611 	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2612 }
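/*
 * Illustrative sketch of typical driver usage (an assumption, not code from
 * this file): the probe path usually pairs pci_request_regions() with
 * pci_release_regions() on the error/remove path.  "foo" and "regs" are
 * hypothetical names.
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	err = pci_request_regions(pdev, "foo");
 *	if (err)
 *		goto err_disable;
 *	regs = pci_iomap(pdev, 0, 0);	// map BAR 0
 *	...
 * err_disable:
 *	pci_disable_device(pdev);
 *	return err;
 */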
2613 
2614 /**
2615  *	pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2616  *	@pdev: PCI device whose resources are to be reserved
2617  *	@res_name: Name to be associated with resource.
2618  *
2619  *	Mark all PCI regions associated with PCI device @pdev as
2620  *	being reserved by owner @res_name.  Do not access any
2621  *	address inside the PCI regions unless this call returns
2622  *	successfully.
2623  *
2624  *	pci_request_regions_exclusive() will mark the region so that
2625  *	/dev/mem and the sysfs MMIO access will not be allowed.
2626  *
2627  *	Returns 0 on success, or %EBUSY on error.  A warning
2628  *	message is also printed on failure.
2629  */
2630 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2631 {
2632 	return pci_request_selected_regions_exclusive(pdev,
2633 					((1 << 6) - 1), res_name);
2634 }
2635 
2636 static void __pci_set_master(struct pci_dev *dev, bool enable)
2637 {
2638 	u16 old_cmd, cmd;
2639 
2640 	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2641 	if (enable)
2642 		cmd = old_cmd | PCI_COMMAND_MASTER;
2643 	else
2644 		cmd = old_cmd & ~PCI_COMMAND_MASTER;
2645 	if (cmd != old_cmd) {
2646 		dev_dbg(&dev->dev, "%s bus mastering\n",
2647 			enable ? "enabling" : "disabling");
2648 		pci_write_config_word(dev, PCI_COMMAND, cmd);
2649 	}
2650 	dev->is_busmaster = enable;
2651 }
2652 
2653 /**
2654  * pcibios_setup - process "pci=" kernel boot arguments
2655  * @str: string used to pass in "pci=" kernel boot arguments
2656  *
2657  * Process kernel boot arguments.  This is the default implementation.
2658  * Architecture specific implementations can override this as necessary.
2659  */
2660 char * __weak __init pcibios_setup(char *str)
2661 {
2662 	return str;
2663 }
2664 
2665 /**
2666  * pcibios_set_master - enable PCI bus-mastering for device dev
2667  * @dev: the PCI device to enable
2668  *
2669  * Enables PCI bus-mastering for the device.  This is the default
2670  * implementation.  Architecture specific implementations can override
2671  * this if necessary.
2672  */
2673 void __weak pcibios_set_master(struct pci_dev *dev)
2674 {
2675 	u8 lat;
2676 
2677 	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2678 	if (pci_is_pcie(dev))
2679 		return;
2680 
2681 	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2682 	if (lat < 16)
2683 		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2684 	else if (lat > pcibios_max_latency)
2685 		lat = pcibios_max_latency;
2686 	else
2687 		return;
2688 
2689 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2690 }
2691 
2692 /**
2693  * pci_set_master - enables bus-mastering for device dev
2694  * @dev: the PCI device to enable
2695  *
2696  * Enables bus-mastering on the device and calls pcibios_set_master()
2697  * to do the needed arch specific settings.
2698  */
2699 void pci_set_master(struct pci_dev *dev)
2700 {
2701 	__pci_set_master(dev, true);
2702 	pcibios_set_master(dev);
2703 }
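/*
 * Illustrative sketch (an assumption, not code from this file): a DMA-capable
 * driver normally enables bus mastering right after enabling the device,
 * e.g. early in its probe routine:
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);
 */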
2704 
2705 /**
2706  * pci_clear_master - disables bus-mastering for device dev
2707  * @dev: the PCI device to disable
2708  */
2709 void pci_clear_master(struct pci_dev *dev)
2710 {
2711 	__pci_set_master(dev, false);
2712 }
2713 
2714 /**
2715  * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2716  * @dev: the PCI device for which MWI is to be enabled
2717  *
2718  * Helper function for pci_set_mwi.
2719  * Originally copied from drivers/net/acenic.c.
2720  * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2721  *
2722  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2723  */
2724 int pci_set_cacheline_size(struct pci_dev *dev)
2725 {
2726 	u8 cacheline_size;
2727 
2728 	if (!pci_cache_line_size)
2729 		return -EINVAL;
2730 
2731 	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2732 	   equal to or a multiple of the right value. */
2733 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2734 	if (cacheline_size >= pci_cache_line_size &&
2735 	    (cacheline_size % pci_cache_line_size) == 0)
2736 		return 0;
2737 
2738 	/* Write the correct value. */
2739 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2740 	/* Read it back. */
2741 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2742 	if (cacheline_size == pci_cache_line_size)
2743 		return 0;
2744 
2745 	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2746 		   "supported\n", pci_cache_line_size << 2);
2747 
2748 	return -EINVAL;
2749 }
2750 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2751 
2752 #ifdef PCI_DISABLE_MWI
2753 int pci_set_mwi(struct pci_dev *dev)
2754 {
2755 	return 0;
2756 }
2757 
2758 int pci_try_set_mwi(struct pci_dev *dev)
2759 {
2760 	return 0;
2761 }
2762 
2763 void pci_clear_mwi(struct pci_dev *dev)
2764 {
2765 }
2766 
2767 #else
2768 
2769 /**
2770  * pci_set_mwi - enables memory-write-invalidate PCI transaction
2771  * @dev: the PCI device for which MWI is enabled
2772  *
2773  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2774  *
2775  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2776  */
2777 int
2778 pci_set_mwi(struct pci_dev *dev)
2779 {
2780 	int rc;
2781 	u16 cmd;
2782 
2783 	rc = pci_set_cacheline_size(dev);
2784 	if (rc)
2785 		return rc;
2786 
2787 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2788 	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
2789 		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2790 		cmd |= PCI_COMMAND_INVALIDATE;
2791 		pci_write_config_word(dev, PCI_COMMAND, cmd);
2792 	}
2793 
2794 	return 0;
2795 }
2796 
2797 /**
2798  * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2799  * @dev: the PCI device for which MWI is enabled
2800  *
2801  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2802  * Callers are not required to check the return value.
2803  *
2804  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2805  */
2806 int pci_try_set_mwi(struct pci_dev *dev)
2807 {
2808 	int rc = pci_set_mwi(dev);
2809 	return rc;
2810 }
2811 
2812 /**
2813  * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2814  * @dev: the PCI device to disable
2815  *
2816  * Disables PCI Memory-Write-Invalidate transaction on the device
2817  */
2818 void
2819 pci_clear_mwi(struct pci_dev *dev)
2820 {
2821 	u16 cmd;
2822 
2823 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2824 	if (cmd & PCI_COMMAND_INVALIDATE) {
2825 		cmd &= ~PCI_COMMAND_INVALIDATE;
2826 		pci_write_config_word(dev, PCI_COMMAND, cmd);
2827 	}
2828 }
2829 #endif /* ! PCI_DISABLE_MWI */
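/*
 * Illustrative sketch (an assumption): MWI is a performance optimization
 * only, so callers typically use pci_try_set_mwi() and carry on regardless
 * of the result, e.g.:
 *
 *	pci_try_set_mwi(pdev);	// best effort; failure is not fatal
 */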
2830 
2831 /**
2832  * pci_intx - enables/disables PCI INTx for device dev
2833  * @pdev: the PCI device to operate on
2834  * @enable: boolean: whether to enable or disable PCI INTx
2835  *
2836  * Enables/disables PCI INTx for device dev
2837  */
2838 void
2839 pci_intx(struct pci_dev *pdev, int enable)
2840 {
2841 	u16 pci_command, new;
2842 
2843 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2844 
2845 	if (enable) {
2846 		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2847 	} else {
2848 		new = pci_command | PCI_COMMAND_INTX_DISABLE;
2849 	}
2850 
2851 	if (new != pci_command) {
2852 		struct pci_devres *dr;
2853 
2854 		pci_write_config_word(pdev, PCI_COMMAND, new);
2855 
2856 		dr = find_pci_dr(pdev);
2857 		if (dr && !dr->restore_intx) {
2858 			dr->restore_intx = 1;
2859 			dr->orig_intx = !enable;
2860 		}
2861 	}
2862 }
2863 
2864 /**
2865  * pci_intx_mask_supported - probe for INTx masking support
2866  * @dev: the PCI device to operate on
2867  *
2868  * Check if the device dev supports INTx masking via the config space
2869  * command word.
2870  */
2871 bool pci_intx_mask_supported(struct pci_dev *dev)
2872 {
2873 	bool mask_supported = false;
2874 	u16 orig, new;
2875 
2876 	if (dev->broken_intx_masking)
2877 		return false;
2878 
2879 	pci_cfg_access_lock(dev);
2880 
2881 	pci_read_config_word(dev, PCI_COMMAND, &orig);
2882 	pci_write_config_word(dev, PCI_COMMAND,
2883 			      orig ^ PCI_COMMAND_INTX_DISABLE);
2884 	pci_read_config_word(dev, PCI_COMMAND, &new);
2885 
2886 	/*
2887 	 * There's no way to protect against hardware bugs or detect them
2888 	 * reliably, but as long as we know what the value should be, let's
2889 	 * go ahead and check it.
2890 	 */
2891 	if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
2892 		dev_err(&dev->dev, "Command register changed from "
2893 			"0x%x to 0x%x: driver or hardware bug?\n", orig, new);
2894 	} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
2895 		mask_supported = true;
2896 		pci_write_config_word(dev, PCI_COMMAND, orig);
2897 	}
2898 
2899 	pci_cfg_access_unlock(dev);
2900 	return mask_supported;
2901 }
2902 EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2903 
2904 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
2905 {
2906 	struct pci_bus *bus = dev->bus;
2907 	bool mask_updated = true;
2908 	u32 cmd_status_dword;
2909 	u16 origcmd, newcmd;
2910 	unsigned long flags;
2911 	bool irq_pending;
2912 
2913 	/*
2914 	 * We do a single dword read to retrieve both command and status.
2915 	 * Document assumptions that make this possible.
2916 	 */
2917 	BUILD_BUG_ON(PCI_COMMAND % 4);
2918 	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
2919 
2920 	raw_spin_lock_irqsave(&pci_lock, flags);
2921 
2922 	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
2923 
2924 	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
2925 
2926 	/*
2927 	 * Check interrupt status register to see whether our device
2928 	 * triggered the interrupt (when masking) or the next IRQ is
2929 	 * already pending (when unmasking).
2930 	 */
2931 	if (mask != irq_pending) {
2932 		mask_updated = false;
2933 		goto done;
2934 	}
2935 
2936 	origcmd = cmd_status_dword;
2937 	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
2938 	if (mask)
2939 		newcmd |= PCI_COMMAND_INTX_DISABLE;
2940 	if (newcmd != origcmd)
2941 		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
2942 
2943 done:
2944 	raw_spin_unlock_irqrestore(&pci_lock, flags);
2945 
2946 	return mask_updated;
2947 }
2948 
2949 /**
2950  * pci_check_and_mask_intx - mask INTx on pending interrupt
2951  * @dev: the PCI device to operate on
2952  *
2953  * Check if the device dev has its INTx line asserted, mask it and
2954  * return true in that case. False is returned if not interrupt was
2955  * pending.
2956  */
2957 bool pci_check_and_mask_intx(struct pci_dev *dev)
2958 {
2959 	return pci_check_and_set_intx_mask(dev, true);
2960 }
2961 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
2962 
2963 /**
2964  * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
2965  * @dev: the PCI device to operate on
2966  *
2967  * Check if the device dev has its INTx line asserted, unmask it if not
2968  * and return true. False is returned and the mask remains active if
2969  * there was still an interrupt pending.
2970  */
2971 bool pci_check_and_unmask_intx(struct pci_dev *dev)
2972 {
2973 	return pci_check_and_set_intx_mask(dev, false);
2974 }
2975 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
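/*
 * Illustrative sketch (an assumption, not code from this file): a driver
 * that services a shared legacy interrupt without device-specific masking
 * can combine the helpers above, roughly:
 *
 *	// at probe time
 *	if (!pci_intx_mask_supported(pdev))
 *		return -ENODEV;
 *
 *	// in the interrupt handler
 *	if (!pci_check_and_mask_intx(pdev))
 *		return IRQ_NONE;	// not our interrupt
 *	// handle the event, then later re-enable with
 *	// pci_check_and_unmask_intx(pdev)
 */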
2976 
2977 /**
2978  * pci_msi_off - disables any MSI or MSI-X capabilities
2979  * @dev: the PCI device to operate on
2980  *
2981  * If you want to use MSI, see pci_enable_msi() and friends.
2982  * This is a lower-level primitive that allows us to disable
2983  * MSI operation at the device level.
2984  */
2985 void pci_msi_off(struct pci_dev *dev)
2986 {
2987 	int pos;
2988 	u16 control;
2989 
2990 	/*
2991 	 * This looks like it could go in msi.c, but we need it even when
2992 	 * CONFIG_PCI_MSI=n.  For the same reason, we can't use
2993 	 * dev->msi_cap or dev->msix_cap here.
2994 	 */
2995 	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2996 	if (pos) {
2997 		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
2998 		control &= ~PCI_MSI_FLAGS_ENABLE;
2999 		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
3000 	}
3001 	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
3002 	if (pos) {
3003 		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
3004 		control &= ~PCI_MSIX_FLAGS_ENABLE;
3005 		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3006 	}
3007 }
3008 EXPORT_SYMBOL_GPL(pci_msi_off);
3009 
3010 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3011 {
3012 	return dma_set_max_seg_size(&dev->dev, size);
3013 }
3014 EXPORT_SYMBOL(pci_set_dma_max_seg_size);
3015 
3016 int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3017 {
3018 	return dma_set_seg_boundary(&dev->dev, mask);
3019 }
3020 EXPORT_SYMBOL(pci_set_dma_seg_boundary);
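/*
 * Illustrative sketch (an assumption): a storage or network driver whose DMA
 * engine limits scatter-gather elements might call, during probe:
 *
 *	pci_set_dma_max_seg_size(pdev, 65536);		// 64K max per segment
 *	pci_set_dma_seg_boundary(pdev, 0xffffffff);	// never cross a 4G boundary
 */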
3021 
3022 /**
3023  * pci_wait_for_pending_transaction - waits for pending transaction
3024  * @dev: the PCI device to operate on
3025  *
3026  * Return 0 if a transaction is still pending, 1 otherwise.
3027  */
3028 int pci_wait_for_pending_transaction(struct pci_dev *dev)
3029 {
3030 	if (!pci_is_pcie(dev))
3031 		return 1;
3032 
3033 	return pci_wait_for_pending(dev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND);
3034 }
3035 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
3036 
3037 static int pcie_flr(struct pci_dev *dev, int probe)
3038 {
3039 	u32 cap;
3040 
3041 	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
3042 	if (!(cap & PCI_EXP_DEVCAP_FLR))
3043 		return -ENOTTY;
3044 
3045 	if (probe)
3046 		return 0;
3047 
3048 	if (!pci_wait_for_pending_transaction(dev))
3049 		dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
3050 
3051 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
3052 
3053 	msleep(100);
3054 
3055 	return 0;
3056 }
3057 
3058 static int pci_af_flr(struct pci_dev *dev, int probe)
3059 {
3060 	int pos;
3061 	u8 cap;
3062 
3063 	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3064 	if (!pos)
3065 		return -ENOTTY;
3066 
3067 	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
3068 	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3069 		return -ENOTTY;
3070 
3071 	if (probe)
3072 		return 0;
3073 
3074 	/* Wait for Transaction Pending bit to clear */
3075 	if (pci_wait_for_pending(dev, PCI_AF_STATUS, PCI_AF_STATUS_TP))
3076 		goto clear;
3077 
3078 	dev_err(&dev->dev, "transaction is not cleared; "
3079 			"proceeding with reset anyway\n");
3080 
3081 clear:
3082 	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
3083 	msleep(100);
3084 
3085 	return 0;
3086 }
3087 
3088 /**
3089  * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3090  * @dev: Device to reset.
3091  * @probe: If set, only check if the device can be reset this way.
3092  *
3093  * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3094  * unset, it will be reinitialized internally when going from PCI_D3hot to
3095  * PCI_D0.  If that's the case and the device is not in a low-power state
3096  * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3097  *
3098  * NOTE: This causes the caller to sleep for twice the device power transition
3099  * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3100  * by default (i.e. unless the @dev's d3_delay field has a different value).
3101  * Moreover, only devices in D0 can be reset by this function.
3102  */
3103 static int pci_pm_reset(struct pci_dev *dev, int probe)
3104 {
3105 	u16 csr;
3106 
3107 	if (!dev->pm_cap)
3108 		return -ENOTTY;
3109 
3110 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3111 	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3112 		return -ENOTTY;
3113 
3114 	if (probe)
3115 		return 0;
3116 
3117 	if (dev->current_state != PCI_D0)
3118 		return -EINVAL;
3119 
3120 	csr &= ~PCI_PM_CTRL_STATE_MASK;
3121 	csr |= PCI_D3hot;
3122 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3123 	pci_dev_d3_sleep(dev);
3124 
3125 	csr &= ~PCI_PM_CTRL_STATE_MASK;
3126 	csr |= PCI_D0;
3127 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3128 	pci_dev_d3_sleep(dev);
3129 
3130 	return 0;
3131 }
3132 
3133 /**
3134  * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
3135  * @dev: Bridge device
3136  *
3137  * Use the bridge control register to assert reset on the secondary bus.
3138  * Devices on the secondary bus are left in power-on state.
3139  */
3140 void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
3141 {
3142 	u16 ctrl;
3143 
3144 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
3145 	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3146 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
3147 	/*
3148 	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
3149 	 * this to 2ms to ensure that we meet the minimum requirement.
3150 	 */
3151 	msleep(2);
3152 
3153 	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3154 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
3155 
3156 	/*
3157 	 * Trhfa for conventional PCI is 2^25 clock cycles.
3158 	 * Assuming a minimum 33MHz clock this results in a 1s
3159 	 * delay before we can consider subordinate devices to
3160 	 * be re-initialized.  PCIe has some ways to shorten this,
3161 	 * but we don't make use of them yet.
3162 	 */
3163 	ssleep(1);
3164 }
3165 EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
3166 
3167 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3168 {
3169 	struct pci_dev *pdev;
3170 
3171 	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
3172 		return -ENOTTY;
3173 
3174 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3175 		if (pdev != dev)
3176 			return -ENOTTY;
3177 
3178 	if (probe)
3179 		return 0;
3180 
3181 	pci_reset_bridge_secondary_bus(dev->bus->self);
3182 
3183 	return 0;
3184 }
3185 
3186 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
3187 {
3188 	int rc = -ENOTTY;
3189 
3190 	if (!hotplug || !try_module_get(hotplug->ops->owner))
3191 		return rc;
3192 
3193 	if (hotplug->ops->reset_slot)
3194 		rc = hotplug->ops->reset_slot(hotplug, probe);
3195 
3196 	module_put(hotplug->ops->owner);
3197 
3198 	return rc;
3199 }
3200 
3201 static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
3202 {
3203 	struct pci_dev *pdev;
3204 
3205 	if (dev->subordinate || !dev->slot)
3206 		return -ENOTTY;
3207 
3208 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3209 		if (pdev != dev && pdev->slot == dev->slot)
3210 			return -ENOTTY;
3211 
3212 	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
3213 }
3214 
3215 static int __pci_dev_reset(struct pci_dev *dev, int probe)
3216 {
3217 	int rc;
3218 
3219 	might_sleep();
3220 
3221 	rc = pci_dev_specific_reset(dev, probe);
3222 	if (rc != -ENOTTY)
3223 		goto done;
3224 
3225 	rc = pcie_flr(dev, probe);
3226 	if (rc != -ENOTTY)
3227 		goto done;
3228 
3229 	rc = pci_af_flr(dev, probe);
3230 	if (rc != -ENOTTY)
3231 		goto done;
3232 
3233 	rc = pci_pm_reset(dev, probe);
3234 	if (rc != -ENOTTY)
3235 		goto done;
3236 
3237 	rc = pci_dev_reset_slot_function(dev, probe);
3238 	if (rc != -ENOTTY)
3239 		goto done;
3240 
3241 	rc = pci_parent_bus_reset(dev, probe);
3242 done:
3243 	return rc;
3244 }
3245 
3246 static void pci_dev_lock(struct pci_dev *dev)
3247 {
3248 	pci_cfg_access_lock(dev);
3249 	/* block PM suspend, driver probe, etc. */
3250 	device_lock(&dev->dev);
3251 }
3252 
3253 /* Return 1 on successful lock, 0 on contention */
3254 static int pci_dev_trylock(struct pci_dev *dev)
3255 {
3256 	if (pci_cfg_access_trylock(dev)) {
3257 		if (device_trylock(&dev->dev))
3258 			return 1;
3259 		pci_cfg_access_unlock(dev);
3260 	}
3261 
3262 	return 0;
3263 }
3264 
3265 static void pci_dev_unlock(struct pci_dev *dev)
3266 {
3267 	device_unlock(&dev->dev);
3268 	pci_cfg_access_unlock(dev);
3269 }
3270 
3271 static void pci_dev_save_and_disable(struct pci_dev *dev)
3272 {
3273 	/*
3274 	 * Wake-up device prior to save.  PM registers default to D0 after
3275 	 * reset and a simple register restore doesn't reliably return
3276 	 * to a non-D0 state anyway.
3277 	 */
3278 	pci_set_power_state(dev, PCI_D0);
3279 
3280 	pci_save_state(dev);
3281 	/*
3282 	 * Disable the device by clearing the Command register, except for
3283 	 * INTx-disable which is set.  This not only disables MMIO and I/O port
3284 	 * BARs, but also prevents the device from being Bus Master, preventing
3285 	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
3286 	 * compliant devices, INTx-disable prevents legacy interrupts.
3287 	 */
3288 	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3289 }
3290 
3291 static void pci_dev_restore(struct pci_dev *dev)
3292 {
3293 	pci_restore_state(dev);
3294 }
3295 
3296 static int pci_dev_reset(struct pci_dev *dev, int probe)
3297 {
3298 	int rc;
3299 
3300 	if (!probe)
3301 		pci_dev_lock(dev);
3302 
3303 	rc = __pci_dev_reset(dev, probe);
3304 
3305 	if (!probe)
3306 		pci_dev_unlock(dev);
3307 
3308 	return rc;
3309 }
3310 /**
3311  * __pci_reset_function - reset a PCI device function
3312  * @dev: PCI device to reset
3313  *
3314  * Some devices allow an individual function to be reset without affecting
3315  * other functions in the same device.  The PCI device must be responsive
3316  * to PCI config space in order to use this function.
3317  *
3318  * The device function is presumed to be unused when this function is called.
3319  * Resetting the device will make the contents of PCI configuration space
3320  * random, so any caller of this must be prepared to reinitialise the
3321  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3322  * etc.
3323  *
3324  * Returns 0 if the device function was successfully reset or negative if the
3325  * device doesn't support resetting a single function.
3326  */
3327 int __pci_reset_function(struct pci_dev *dev)
3328 {
3329 	return pci_dev_reset(dev, 0);
3330 }
3331 EXPORT_SYMBOL_GPL(__pci_reset_function);
3332 
3333 /**
3334  * __pci_reset_function_locked - reset a PCI device function while holding
3335  * the @dev mutex lock.
3336  * @dev: PCI device to reset
3337  *
3338  * Some devices allow an individual function to be reset without affecting
3339  * other functions in the same device.  The PCI device must be responsive
3340  * to PCI config space in order to use this function.
3341  *
3342  * The device function is presumed to be unused and the caller is holding
3343  * the device mutex lock when this function is called.
3344  * Resetting the device will make the contents of PCI configuration space
3345  * random, so any caller of this must be prepared to reinitialise the
3346  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3347  * etc.
3348  *
3349  * Returns 0 if the device function was successfully reset or negative if the
3350  * device doesn't support resetting a single function.
3351  */
3352 int __pci_reset_function_locked(struct pci_dev *dev)
3353 {
3354 	return __pci_dev_reset(dev, 0);
3355 }
3356 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3357 
3358 /**
3359  * pci_probe_reset_function - check whether the device can be safely reset
3360  * @dev: PCI device to reset
3361  *
3362  * Some devices allow an individual function to be reset without affecting
3363  * other functions in the same device.  The PCI device must be responsive
3364  * to PCI config space in order to use this function.
3365  *
3366  * Returns 0 if the device function can be reset or negative if the
3367  * device doesn't support resetting a single function.
3368  */
3369 int pci_probe_reset_function(struct pci_dev *dev)
3370 {
3371 	return pci_dev_reset(dev, 1);
3372 }
3373 
3374 /**
3375  * pci_reset_function - quiesce and reset a PCI device function
3376  * @dev: PCI device to reset
3377  *
3378  * Some devices allow an individual function to be reset without affecting
3379  * other functions in the same device.  The PCI device must be responsive
3380  * to PCI config space in order to use this function.
3381  *
3382  * This function does not just reset the PCI portion of a device, but
3383  * clears all the state associated with the device.  This function differs
3384  * from __pci_reset_function in that it saves and restores device state
3385  * over the reset.
3386  *
3387  * Returns 0 if the device function was successfully reset or negative if the
3388  * device doesn't support resetting a single function.
3389  */
3390 int pci_reset_function(struct pci_dev *dev)
3391 {
3392 	int rc;
3393 
3394 	rc = pci_dev_reset(dev, 1);
3395 	if (rc)
3396 		return rc;
3397 
3398 	pci_dev_save_and_disable(dev);
3399 
3400 	rc = pci_dev_reset(dev, 0);
3401 
3402 	pci_dev_restore(dev);
3403 
3404 	return rc;
3405 }
3406 EXPORT_SYMBOL_GPL(pci_reset_function);
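/*
 * Illustrative sketch (an assumption, not code from this file): a caller that
 * owns a quiesced device, e.g. a device-assignment backend, can simply do:
 *
 *	err = pci_reset_function(pdev);
 *	if (err)
 *		dev_warn(&pdev->dev, "function reset failed: %d\n", err);
 *
 * and rely on this helper to save and restore config space around the reset.
 */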
3407 
3408 /**
3409  * pci_try_reset_function - quiesce and reset a PCI device function
3410  * @dev: PCI device to reset
3411  *
3412  * Same as above, except return -EAGAIN if unable to lock device.
3413  */
3414 int pci_try_reset_function(struct pci_dev *dev)
3415 {
3416 	int rc;
3417 
3418 	rc = pci_dev_reset(dev, 1);
3419 	if (rc)
3420 		return rc;
3421 
3422 	pci_dev_save_and_disable(dev);
3423 
3424 	if (pci_dev_trylock(dev)) {
3425 		rc = __pci_dev_reset(dev, 0);
3426 		pci_dev_unlock(dev);
3427 	} else
3428 		rc = -EAGAIN;
3429 
3430 	pci_dev_restore(dev);
3431 
3432 	return rc;
3433 }
3434 EXPORT_SYMBOL_GPL(pci_try_reset_function);
3435 
3436 /* Lock devices from the top of the tree down */
3437 static void pci_bus_lock(struct pci_bus *bus)
3438 {
3439 	struct pci_dev *dev;
3440 
3441 	list_for_each_entry(dev, &bus->devices, bus_list) {
3442 		pci_dev_lock(dev);
3443 		if (dev->subordinate)
3444 			pci_bus_lock(dev->subordinate);
3445 	}
3446 }
3447 
3448 /* Unlock devices from the bottom of the tree up */
3449 static void pci_bus_unlock(struct pci_bus *bus)
3450 {
3451 	struct pci_dev *dev;
3452 
3453 	list_for_each_entry(dev, &bus->devices, bus_list) {
3454 		if (dev->subordinate)
3455 			pci_bus_unlock(dev->subordinate);
3456 		pci_dev_unlock(dev);
3457 	}
3458 }
3459 
3460 /* Return 1 on successful lock, 0 on contention */
3461 static int pci_bus_trylock(struct pci_bus *bus)
3462 {
3463 	struct pci_dev *dev;
3464 
3465 	list_for_each_entry(dev, &bus->devices, bus_list) {
3466 		if (!pci_dev_trylock(dev))
3467 			goto unlock;
3468 		if (dev->subordinate) {
3469 			if (!pci_bus_trylock(dev->subordinate)) {
3470 				pci_dev_unlock(dev);
3471 				goto unlock;
3472 			}
3473 		}
3474 	}
3475 	return 1;
3476 
3477 unlock:
3478 	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
3479 		if (dev->subordinate)
3480 			pci_bus_unlock(dev->subordinate);
3481 		pci_dev_unlock(dev);
3482 	}
3483 	return 0;
3484 }
3485 
3486 /* Lock devices from the top of the tree down */
3487 static void pci_slot_lock(struct pci_slot *slot)
3488 {
3489 	struct pci_dev *dev;
3490 
3491 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3492 		if (!dev->slot || dev->slot != slot)
3493 			continue;
3494 		pci_dev_lock(dev);
3495 		if (dev->subordinate)
3496 			pci_bus_lock(dev->subordinate);
3497 	}
3498 }
3499 
3500 /* Unlock devices from the bottom of the tree up */
3501 static void pci_slot_unlock(struct pci_slot *slot)
3502 {
3503 	struct pci_dev *dev;
3504 
3505 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3506 		if (!dev->slot || dev->slot != slot)
3507 			continue;
3508 		if (dev->subordinate)
3509 			pci_bus_unlock(dev->subordinate);
3510 		pci_dev_unlock(dev);
3511 	}
3512 }
3513 
3514 /* Return 1 on successful lock, 0 on contention */
3515 static int pci_slot_trylock(struct pci_slot *slot)
3516 {
3517 	struct pci_dev *dev;
3518 
3519 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3520 		if (!dev->slot || dev->slot != slot)
3521 			continue;
3522 		if (!pci_dev_trylock(dev))
3523 			goto unlock;
3524 		if (dev->subordinate) {
3525 			if (!pci_bus_trylock(dev->subordinate)) {
3526 				pci_dev_unlock(dev);
3527 				goto unlock;
3528 			}
3529 		}
3530 	}
3531 	return 1;
3532 
3533 unlock:
3534 	list_for_each_entry_continue_reverse(dev,
3535 					     &slot->bus->devices, bus_list) {
3536 		if (!dev->slot || dev->slot != slot)
3537 			continue;
3538 		if (dev->subordinate)
3539 			pci_bus_unlock(dev->subordinate);
3540 		pci_dev_unlock(dev);
3541 	}
3542 	return 0;
3543 }
3544 
3545 /* Save and disable devices from the top of the tree down */
3546 static void pci_bus_save_and_disable(struct pci_bus *bus)
3547 {
3548 	struct pci_dev *dev;
3549 
3550 	list_for_each_entry(dev, &bus->devices, bus_list) {
3551 		pci_dev_save_and_disable(dev);
3552 		if (dev->subordinate)
3553 			pci_bus_save_and_disable(dev->subordinate);
3554 	}
3555 }
3556 
3557 /*
3558  * Restore devices from top of the tree down - parent bridges need to be
3559  * restored before we can get to subordinate devices.
3560  */
3561 static void pci_bus_restore(struct pci_bus *bus)
3562 {
3563 	struct pci_dev *dev;
3564 
3565 	list_for_each_entry(dev, &bus->devices, bus_list) {
3566 		pci_dev_restore(dev);
3567 		if (dev->subordinate)
3568 			pci_bus_restore(dev->subordinate);
3569 	}
3570 }
3571 
3572 /* Save and disable devices from the top of the tree down */
3573 static void pci_slot_save_and_disable(struct pci_slot *slot)
3574 {
3575 	struct pci_dev *dev;
3576 
3577 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3578 		if (!dev->slot || dev->slot != slot)
3579 			continue;
3580 		pci_dev_save_and_disable(dev);
3581 		if (dev->subordinate)
3582 			pci_bus_save_and_disable(dev->subordinate);
3583 	}
3584 }
3585 
3586 /*
3587  * Restore devices from top of the tree down - parent bridges need to be
3588  * restored before we can get to subordinate devices.
3589  */
3590 static void pci_slot_restore(struct pci_slot *slot)
3591 {
3592 	struct pci_dev *dev;
3593 
3594 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3595 		if (!dev->slot || dev->slot != slot)
3596 			continue;
3597 		pci_dev_restore(dev);
3598 		if (dev->subordinate)
3599 			pci_bus_restore(dev->subordinate);
3600 	}
3601 }
3602 
3603 static int pci_slot_reset(struct pci_slot *slot, int probe)
3604 {
3605 	int rc;
3606 
3607 	if (!slot)
3608 		return -ENOTTY;
3609 
3610 	if (!probe)
3611 		pci_slot_lock(slot);
3612 
3613 	might_sleep();
3614 
3615 	rc = pci_reset_hotplug_slot(slot->hotplug, probe);
3616 
3617 	if (!probe)
3618 		pci_slot_unlock(slot);
3619 
3620 	return rc;
3621 }
3622 
3623 /**
3624  * pci_probe_reset_slot - probe whether a PCI slot can be reset
3625  * @slot: PCI slot to probe
3626  *
3627  * Return 0 if slot can be reset, negative if a slot reset is not supported.
3628  */
3629 int pci_probe_reset_slot(struct pci_slot *slot)
3630 {
3631 	return pci_slot_reset(slot, 1);
3632 }
3633 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
3634 
3635 /**
3636  * pci_reset_slot - reset a PCI slot
3637  * @slot: PCI slot to reset
3638  *
3639  * A PCI bus may host multiple slots, and each slot may support a reset
3640  * mechanism independent of other slots.  For instance, some slots may support
3641  * slot power control.  In the case of a 1:1 bus-to-slot architecture, this
3642  * function may wrap the bus reset to avoid spurious slot-related events such
3643  * as hotplug.  Generally a slot reset should be attempted before a bus reset.
3644  * All of the functions of the slot and any subordinate buses behind the slot
3645  * are reset through this function.  PCI config space of all devices in the
3646  * slot and behind the slot is saved before and restored after the reset.
3647  *
3648  * Return 0 on success, non-zero on error.
3649  */
3650 int pci_reset_slot(struct pci_slot *slot)
3651 {
3652 	int rc;
3653 
3654 	rc = pci_slot_reset(slot, 1);
3655 	if (rc)
3656 		return rc;
3657 
3658 	pci_slot_save_and_disable(slot);
3659 
3660 	rc = pci_slot_reset(slot, 0);
3661 
3662 	pci_slot_restore(slot);
3663 
3664 	return rc;
3665 }
3666 EXPORT_SYMBOL_GPL(pci_reset_slot);
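
/*
 * Illustrative sketch, not part of the original file: a caller that wants to
 * reset the slot a device sits in would typically probe for support first and
 * only then perform the reset.  The helper name below is hypothetical.
 */
static int __maybe_unused example_reset_dev_slot(struct pci_dev *pdev)
{
	if (!pdev->slot || pci_probe_reset_slot(pdev->slot))
		return -ENOTTY;

	return pci_reset_slot(pdev->slot);
}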
3667 
3668 /**
3669  * pci_try_reset_slot - Try to reset a PCI slot
3670  * @slot: PCI slot to reset
3671  *
3672  * Same as pci_reset_slot() except it returns -EAGAIN if the slot cannot be locked.
3673  */
3674 int pci_try_reset_slot(struct pci_slot *slot)
3675 {
3676 	int rc;
3677 
3678 	rc = pci_slot_reset(slot, 1);
3679 	if (rc)
3680 		return rc;
3681 
3682 	pci_slot_save_and_disable(slot);
3683 
3684 	if (pci_slot_trylock(slot)) {
3685 		might_sleep();
3686 		rc = pci_reset_hotplug_slot(slot->hotplug, 0);
3687 		pci_slot_unlock(slot);
3688 	} else
3689 		rc = -EAGAIN;
3690 
3691 	pci_slot_restore(slot);
3692 
3693 	return rc;
3694 }
3695 EXPORT_SYMBOL_GPL(pci_try_reset_slot);
3696 
3697 static int pci_bus_reset(struct pci_bus *bus, int probe)
3698 {
3699 	if (!bus->self)
3700 		return -ENOTTY;
3701 
3702 	if (probe)
3703 		return 0;
3704 
3705 	pci_bus_lock(bus);
3706 
3707 	might_sleep();
3708 
3709 	pci_reset_bridge_secondary_bus(bus->self);
3710 
3711 	pci_bus_unlock(bus);
3712 
3713 	return 0;
3714 }
3715 
3716 /**
3717  * pci_probe_reset_bus - probe whether a PCI bus can be reset
3718  * @bus: PCI bus to probe
3719  *
3720  * Return 0 if the bus can be reset, negative if a bus reset is not supported.
3721  */
3722 int pci_probe_reset_bus(struct pci_bus *bus)
3723 {
3724 	return pci_bus_reset(bus, 1);
3725 }
3726 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
3727 
3728 /**
3729  * pci_reset_bus - reset a PCI bus
3730  * @bus: top level PCI bus to reset
3731  *
3732  * Do a bus reset on the given bus and any subordinate buses, saving
3733  * and restoring state of all devices.
3734  *
3735  * Return 0 on success, non-zero on error.
3736  */
3737 int pci_reset_bus(struct pci_bus *bus)
3738 {
3739 	int rc;
3740 
3741 	rc = pci_bus_reset(bus, 1);
3742 	if (rc)
3743 		return rc;
3744 
3745 	pci_bus_save_and_disable(bus);
3746 
3747 	rc = pci_bus_reset(bus, 0);
3748 
3749 	pci_bus_restore(bus);
3750 
3751 	return rc;
3752 }
3753 EXPORT_SYMBOL_GPL(pci_reset_bus);
3754 
3755 /**
3756  * pci_try_reset_bus - Try to reset a PCI bus
3757  * @bus: top level PCI bus to reset
3758  *
3759  * Same as pci_reset_bus() except it returns -EAGAIN if the bus cannot be locked.
3760  */
3761 int pci_try_reset_bus(struct pci_bus *bus)
3762 {
3763 	int rc;
3764 
3765 	rc = pci_bus_reset(bus, 1);
3766 	if (rc)
3767 		return rc;
3768 
3769 	pci_bus_save_and_disable(bus);
3770 
3771 	if (pci_bus_trylock(bus)) {
3772 		might_sleep();
3773 		pci_reset_bridge_secondary_bus(bus->self);
3774 		pci_bus_unlock(bus);
3775 	} else
3776 		rc = -EAGAIN;
3777 
3778 	pci_bus_restore(bus);
3779 
3780 	return rc;
3781 }
3782 EXPORT_SYMBOL_GPL(pci_try_reset_bus);
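
/*
 * Illustrative sketch, not part of the original file: pci_try_reset_bus()
 * returns -EAGAIN when one of the device locks is contended, so a caller may
 * retry later or, if it can sleep on the locks, fall back to the blocking
 * pci_reset_bus().  The helper name below is hypothetical.
 */
static int __maybe_unused example_reset_parent_bus(struct pci_dev *pdev)
{
	int rc;

	if (pci_probe_reset_bus(pdev->bus))
		return -ENOTTY;

	rc = pci_try_reset_bus(pdev->bus);
	if (rc == -EAGAIN)
		rc = pci_reset_bus(pdev->bus);

	return rc;
}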
3783 
3784 /**
3785  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3786  * @dev: PCI device to query
3787  *
3788  * Returns mmrbc: maximum designed memory read count in bytes
3789  *    or an appropriate error value.
3790  */
3791 int pcix_get_max_mmrbc(struct pci_dev *dev)
3792 {
3793 	int cap;
3794 	u32 stat;
3795 
3796 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3797 	if (!cap)
3798 		return -EINVAL;
3799 
3800 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3801 		return -EINVAL;
3802 
3803 	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
3804 }
3805 EXPORT_SYMBOL(pcix_get_max_mmrbc);
3806 
3807 /**
3808  * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3809  * @dev: PCI device to query
3810  *
3811  * Returns mmrbc: maximum memory read count in bytes
3812  *    or an appropriate error value.
3813  */
3814 int pcix_get_mmrbc(struct pci_dev *dev)
3815 {
3816 	int cap;
3817 	u16 cmd;
3818 
3819 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3820 	if (!cap)
3821 		return -EINVAL;
3822 
3823 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3824 		return -EINVAL;
3825 
3826 	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
3827 }
3828 EXPORT_SYMBOL(pcix_get_mmrbc);
3829 
3830 /**
3831  * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3832  * @dev: PCI device to query
3833  * @mmrbc: maximum memory read count in bytes
3834  *    valid values are 512, 1024, 2048, 4096
3835  *
3836  * If possible, sets the maximum memory read byte count; some bridges have
3837  * errata that prevent this.
3838  */
3839 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3840 {
3841 	int cap;
3842 	u32 stat, v, o;
3843 	u16 cmd;
3844 
3845 	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
3846 		return -EINVAL;
3847 
3848 	v = ffs(mmrbc) - 10;
3849 
3850 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3851 	if (!cap)
3852 		return -EINVAL;
3853 
3854 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3855 		return -EINVAL;
3856 
3857 	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3858 		return -E2BIG;
3859 
3860 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3861 		return -EINVAL;
3862 
3863 	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3864 	if (o != v) {
3865 		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3866 			return -EIO;
3867 
3868 		cmd &= ~PCI_X_CMD_MAX_READ;
3869 		cmd |= v << 2;
3870 		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3871 			return -EIO;
3872 	}
3873 	return 0;
3874 }
3875 EXPORT_SYMBOL(pcix_set_mmrbc);
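
/*
 * Illustrative sketch, not part of the original file: clamp a requested MMRBC
 * to what the device was designed for before programming it.  The helper name
 * is hypothetical and @wanted is assumed to be one of the valid values
 * (512, 1024, 2048 or 4096).
 */
static int __maybe_unused example_tune_mmrbc(struct pci_dev *pdev, int wanted)
{
	int max = pcix_get_max_mmrbc(pdev);

	if (max < 0)
		return max;

	return pcix_set_mmrbc(pdev, min(wanted, max));
}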
3876 
3877 /**
3878  * pcie_get_readrq - get PCI Express read request size
3879  * @dev: PCI device to query
3880  *
3881  * Returns maximum memory read request in bytes
3882  *    or an appropriate error value.
3883  */
3884 int pcie_get_readrq(struct pci_dev *dev)
3885 {
3886 	u16 ctl;
3887 
3888 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
3889 
3890 	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3891 }
3892 EXPORT_SYMBOL(pcie_get_readrq);
3893 
3894 /**
3895  * pcie_set_readrq - set PCI Express maximum memory read request
3896  * @dev: PCI device to query
3897  * @rq: maximum memory read request size in bytes
3898  *    valid values are 128, 256, 512, 1024, 2048, 4096
3899  *
3900  * If possible, sets the maximum memory read request size in bytes.
3901  */
3902 int pcie_set_readrq(struct pci_dev *dev, int rq)
3903 {
3904 	u16 v;
3905 
3906 	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
3907 		return -EINVAL;
3908 
3909 	/*
3910 	 * If using the "performance" PCIe config, we clamp the
3911 	 * read request size to the maximum payload size to prevent
3912 	 * the host bridge from generating requests larger than we
3913 	 * can cope with.
3914 	 */
3915 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3916 		int mps = pcie_get_mps(dev);
3917 
3918 		if (mps < rq)
3919 			rq = mps;
3920 	}
3921 
3922 	v = (ffs(rq) - 8) << 12;
3923 
3924 	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
3925 						  PCI_EXP_DEVCTL_READRQ, v);
3926 }
3927 EXPORT_SYMBOL(pcie_set_readrq);
3928 
3929 /**
3930  * pcie_get_mps - get PCI Express maximum payload size
3931  * @dev: PCI device to query
3932  *
3933  * Returns maximum payload size in bytes
3934  */
3935 int pcie_get_mps(struct pci_dev *dev)
3936 {
3937 	u16 ctl;
3938 
3939 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
3940 
3941 	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3942 }
3943 EXPORT_SYMBOL(pcie_get_mps);
3944 
3945 /**
3946  * pcie_set_mps - set PCI Express maximum payload size
3947  * @dev: PCI device to query
3948  * @mps: maximum payload size in bytes
3949  *    valid values are 128, 256, 512, 1024, 2048, 4096
3950  *
3951  * If possible, sets the maximum payload size in bytes.
3952  */
3953 int pcie_set_mps(struct pci_dev *dev, int mps)
3954 {
3955 	u16 v;
3956 
3957 	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3958 		return -EINVAL;
3959 
3960 	v = ffs(mps) - 8;
3961 	if (v > dev->pcie_mpss)
3962 		return -EINVAL;
3963 	v <<= 5;
3964 
3965 	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
3966 						  PCI_EXP_DEVCTL_PAYLOAD, v);
3967 }
3968 EXPORT_SYMBOL(pcie_set_mps);
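
/*
 * Illustrative sketch, not part of the original file: mirror what the
 * "performance" bus configuration above does by capping a device's read
 * request size at its negotiated maximum payload size.  The helper name is
 * hypothetical.
 */
static int __maybe_unused example_match_readrq_to_mps(struct pci_dev *pdev)
{
	/* pcie_get_mps() returns a power of two between 128 and 4096 bytes */
	return pcie_set_readrq(pdev, pcie_get_mps(pdev));
}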
3969 
3970 /**
3971  * pcie_get_minimum_link - determine minimum link settings of a PCI device
3972  * @dev: PCI device to query
3973  * @speed: storage for minimum speed
3974  * @width: storage for minimum width
3975  *
3976  * This function will walk up the PCI device chain and determine the minimum
3977  * link width and speed of the device.
3978  */
3979 int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
3980 			  enum pcie_link_width *width)
3981 {
3982 	int ret;
3983 
3984 	*speed = PCI_SPEED_UNKNOWN;
3985 	*width = PCIE_LNK_WIDTH_UNKNOWN;
3986 
3987 	while (dev) {
3988 		u16 lnksta;
3989 		enum pci_bus_speed next_speed;
3990 		enum pcie_link_width next_width;
3991 
3992 		ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
3993 		if (ret)
3994 			return ret;
3995 
3996 		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
3997 		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
3998 			PCI_EXP_LNKSTA_NLW_SHIFT;
3999 
4000 		if (next_speed < *speed)
4001 			*speed = next_speed;
4002 
4003 		if (next_width < *width)
4004 			*width = next_width;
4005 
4006 		dev = dev->bus->self;
4007 	}
4008 
4009 	return 0;
4010 }
4011 EXPORT_SYMBOL(pcie_get_minimum_link);
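
/*
 * Illustrative sketch, not part of the original file: a driver that needs a
 * certain amount of bandwidth can use pcie_get_minimum_link() to warn when
 * the device sits behind a slow or narrow link.  The helper name and the
 * 8 GT/s x8 thresholds are arbitrary examples.
 */
static void __maybe_unused example_warn_on_slow_link(struct pci_dev *pdev)
{
	enum pci_bus_speed speed;
	enum pcie_link_width width;

	if (pcie_get_minimum_link(pdev, &speed, &width))
		return;

	if (speed < PCIE_SPEED_8_0GT || width < PCIE_LNK_X8)
		dev_warn(&pdev->dev,
			 "PCIe link is slower than the device supports\n");
}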
4012 
4013 /**
4014  * pci_select_bars - Make BAR mask from the type of resource
4015  * @dev: the PCI device for which the BAR mask is made
4016  * @flags: resource type mask to be selected
4017  *
4018  * This helper routine makes a BAR mask from the type of resource.
4019  */
4020 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
4021 {
4022 	int i, bars = 0;
4023 	for (i = 0; i < PCI_NUM_RESOURCES; i++)
4024 		if (pci_resource_flags(dev, i) & flags)
4025 			bars |= (1 << i);
4026 	return bars;
4027 }
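
/*
 * Illustrative sketch, not part of the original file: pci_select_bars() is
 * typically paired with the selected-region request helpers, e.g. to claim
 * only the memory BARs of a device.  The driver name string is a made-up
 * example.
 */
static int __maybe_unused example_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "example-driver");
}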
4028 
4029 /**
4030  * pci_resource_bar - get position of the BAR associated with a resource
4031  * @dev: the PCI device
4032  * @resno: the resource number
4033  * @type: the BAR type to be filled in
4034  *
4035  * Returns BAR position in config space, or 0 if the BAR is invalid.
4036  */
4037 int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
4038 {
4039 	int reg;
4040 
4041 	if (resno < PCI_ROM_RESOURCE) {
4042 		*type = pci_bar_unknown;
4043 		return PCI_BASE_ADDRESS_0 + 4 * resno;
4044 	} else if (resno == PCI_ROM_RESOURCE) {
4045 		*type = pci_bar_mem32;
4046 		return dev->rom_base_reg;
4047 	} else if (resno < PCI_BRIDGE_RESOURCES) {
4048 		/* device specific resource */
4049 		reg = pci_iov_resource_bar(dev, resno, type);
4050 		if (reg)
4051 			return reg;
4052 	}
4053 
4054 	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
4055 	return 0;
4056 }
4057 
4058 /* Some architectures require additional programming to enable VGA */
4059 static arch_set_vga_state_t arch_set_vga_state;
4060 
4061 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
4062 {
4063 	arch_set_vga_state = func;	/* NULL disables */
4064 }
4065 
4066 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
4067 		      unsigned int command_bits, u32 flags)
4068 {
4069 	if (arch_set_vga_state)
4070 		return arch_set_vga_state(dev, decode, command_bits,
4071 						flags);
4072 	return 0;
4073 }
4074 
4075 /**
4076  * pci_set_vga_state - set VGA decode state on device and parents if requested
4077  * @dev: the PCI device
4078  * @decode: true = enable decoding, false = disable decoding
4079  * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
4080  * @flags: PCI_VGA_STATE_CHANGE_DECODES to update @dev's own decoding and/or
4081  * PCI_VGA_STATE_CHANGE_BRIDGE to also traverse and update ancestor bridges
4082  */
4083 int pci_set_vga_state(struct pci_dev *dev, bool decode,
4084 		      unsigned int command_bits, u32 flags)
4085 {
4086 	struct pci_bus *bus;
4087 	struct pci_dev *bridge;
4088 	u16 cmd;
4089 	int rc;
4090 
4091 	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
4092 
4093 	/* ARCH specific VGA enables */
4094 	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
4095 	if (rc)
4096 		return rc;
4097 
4098 	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
4099 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
4100 		if (decode)
4101 			cmd |= command_bits;
4102 		else
4103 			cmd &= ~command_bits;
4104 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4105 	}
4106 
4107 	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
4108 		return 0;
4109 
4110 	bus = dev->bus;
4111 	while (bus) {
4112 		bridge = bus->self;
4113 		if (bridge) {
4114 			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
4115 					     &cmd);
4116 			if (decode)
4117 				cmd |= PCI_BRIDGE_CTL_VGA;
4118 			else
4119 				cmd &= ~PCI_BRIDGE_CTL_VGA;
4120 			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
4121 					      cmd);
4122 		}
4123 		bus = bus->parent;
4124 	}
4125 	return 0;
4126 }
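
/*
 * Illustrative sketch, not part of the original file: the VGA arbiter uses
 * this interface to route the legacy VGA ranges to one GPU at a time.  The
 * call below enables both I/O and memory decode on the device and on every
 * bridge above it; the helper name is hypothetical.
 */
static int __maybe_unused example_route_vga_to(struct pci_dev *pdev)
{
	return pci_set_vga_state(pdev, true,
				 PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
				 PCI_VGA_STATE_CHANGE_DECODES |
				 PCI_VGA_STATE_CHANGE_BRIDGE);
}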
4127 
4128 bool pci_device_is_present(struct pci_dev *pdev)
4129 {
4130 	u32 v;
4131 
4132 	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
4133 }
4134 EXPORT_SYMBOL_GPL(pci_device_is_present);
4135 
4136 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
4137 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
4138 static DEFINE_SPINLOCK(resource_alignment_lock);
4139 
4140 /**
4141  * pci_specified_resource_alignment - get resource alignment specified by user.
4142  * @dev: the PCI device to query
4143  *
4144  * RETURNS: Resource alignment if it is specified.
4145  *          Zero if it is not specified.
4146  */
4147 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
4148 {
4149 	int seg, bus, slot, func, align_order, count;
4150 	resource_size_t align = 0;
4151 	char *p;
4152 
4153 	spin_lock(&resource_alignment_lock);
4154 	p = resource_alignment_param;
4155 	while (*p) {
4156 		count = 0;
4157 		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
4158 							p[count] == '@') {
4159 			p += count + 1;
4160 		} else {
4161 			align_order = -1;
4162 		}
4163 		if (sscanf(p, "%x:%x:%x.%x%n",
4164 			&seg, &bus, &slot, &func, &count) != 4) {
4165 			seg = 0;
4166 			if (sscanf(p, "%x:%x.%x%n",
4167 					&bus, &slot, &func, &count) != 3) {
4168 				/* Invalid format */
4169 				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
4170 					p);
4171 				break;
4172 			}
4173 		}
4174 		p += count;
4175 		if (seg == pci_domain_nr(dev->bus) &&
4176 			bus == dev->bus->number &&
4177 			slot == PCI_SLOT(dev->devfn) &&
4178 			func == PCI_FUNC(dev->devfn)) {
4179 			if (align_order == -1) {
4180 				align = PAGE_SIZE;
4181 			} else {
4182 				align = 1 << align_order;
4183 			}
4184 			/* Found */
4185 			break;
4186 		}
4187 		if (*p != ';' && *p != ',') {
4188 			/* End of param or invalid format */
4189 			break;
4190 		}
4191 		p++;
4192 	}
4193 	spin_unlock(&resource_alignment_lock);
4194 	return align;
4195 }
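
/*
 * Example (illustrative): booting with
 *	pci=resource_alignment=20@0000:02:00.0
 * requests 2^20 (1 MiB) alignment for the device at domain 0000, bus 02,
 * slot 00, function 0.  Leaving out the "<order>@" prefix falls back to
 * PAGE_SIZE alignment, matching the parsing above.
 */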
4196 
4197 /*
4198  * This function disables memory decoding and releases memory resources
4199  * of the device specified by the kernel's boot parameter 'pci=resource_alignment='.
4200  * It also rounds up the resource size to the specified alignment.
4201  * Later on, the kernel will assign page-aligned memory resources back
4202  * to the device.
4203  */
4204 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
4205 {
4206 	int i;
4207 	struct resource *r;
4208 	resource_size_t align, size;
4209 	u16 command;
4210 
4211 	/* Check whether the specified PCI device is a target for reassignment */
4212 	align = pci_specified_resource_alignment(dev);
4213 	if (!align)
4214 		return;
4215 
4216 	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
4217 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
4218 		dev_warn(&dev->dev,
4219 			"Can't reassign resources to host bridge.\n");
4220 		return;
4221 	}
4222 
4223 	dev_info(&dev->dev,
4224 		"Disabling memory decoding and releasing memory resources.\n");
4225 	pci_read_config_word(dev, PCI_COMMAND, &command);
4226 	command &= ~PCI_COMMAND_MEMORY;
4227 	pci_write_config_word(dev, PCI_COMMAND, command);
4228 
4229 	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
4230 		r = &dev->resource[i];
4231 		if (!(r->flags & IORESOURCE_MEM))
4232 			continue;
4233 		size = resource_size(r);
4234 		if (size < align) {
4235 			size = align;
4236 			dev_info(&dev->dev,
4237 				"Rounding up size of resource #%d to %#llx.\n",
4238 				i, (unsigned long long)size);
4239 		}
4240 		r->end = size - 1;
4241 		r->start = 0;
4242 	}
4243 	/*
4244 	 * Need to disable the bridge's resource windows to allow the
4245 	 * kernel to reassign new resource windows later on.
4246 	 */
4247 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
4248 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
4249 		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
4250 			r = &dev->resource[i];
4251 			if (!(r->flags & IORESOURCE_MEM))
4252 				continue;
4253 			r->end = resource_size(r) - 1;
4254 			r->start = 0;
4255 		}
4256 		pci_disable_bridge_window(dev);
4257 	}
4258 }
4259 
4260 static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
4261 {
4262 	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
4263 		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
4264 	spin_lock(&resource_alignment_lock);
4265 	strncpy(resource_alignment_param, buf, count);
4266 	resource_alignment_param[count] = '\0';
4267 	spin_unlock(&resource_alignment_lock);
4268 	return count;
4269 }
4270 
4271 static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
4272 {
4273 	size_t count;
4274 	spin_lock(&resource_alignment_lock);
4275 	count = snprintf(buf, size, "%s", resource_alignment_param);
4276 	spin_unlock(&resource_alignment_lock);
4277 	return count;
4278 }
4279 
4280 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
4281 {
4282 	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
4283 }
4284 
4285 static ssize_t pci_resource_alignment_store(struct bus_type *bus,
4286 					const char *buf, size_t count)
4287 {
4288 	return pci_set_resource_alignment_param(buf, count);
4289 }
4290 
4291 BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
4292 					pci_resource_alignment_store);
4293 
4294 static int __init pci_resource_alignment_sysfs_init(void)
4295 {
4296 	return bus_create_file(&pci_bus_type,
4297 					&bus_attr_resource_alignment);
4298 }
4299 
4300 late_initcall(pci_resource_alignment_sysfs_init);
4301 
4302 static void pci_no_domains(void)
4303 {
4304 #ifdef CONFIG_PCI_DOMAINS
4305 	pci_domains_supported = 0;
4306 #endif
4307 }
4308 
4309 /**
4310  * pci_ext_cfg_avail - can we access extended PCI config space?
4311  *
4312  * Returns 1 if we can access PCI extended config space (offsets
4313  * greater than 0xff). This is the default implementation. Architecture
4314  * implementations can override this.
4315  */
4316 int __weak pci_ext_cfg_avail(void)
4317 {
4318 	return 1;
4319 }
4320 
4321 void __weak pci_fixup_cardbus(struct pci_bus *bus)
4322 {
4323 }
4324 EXPORT_SYMBOL(pci_fixup_cardbus);
4325 
4326 static int __init pci_setup(char *str)
4327 {
4328 	while (str) {
4329 		char *k = strchr(str, ',');
4330 		if (k)
4331 			*k++ = 0;
4332 		if (*str && (str = pcibios_setup(str)) && *str) {
4333 			if (!strcmp(str, "nomsi")) {
4334 				pci_no_msi();
4335 			} else if (!strcmp(str, "noaer")) {
4336 				pci_no_aer();
4337 			} else if (!strncmp(str, "realloc=", 8)) {
4338 				pci_realloc_get_opt(str + 8);
4339 			} else if (!strncmp(str, "realloc", 7)) {
4340 				pci_realloc_get_opt("on");
4341 			} else if (!strcmp(str, "nodomains")) {
4342 				pci_no_domains();
4343 			} else if (!strncmp(str, "noari", 5)) {
4344 				pcie_ari_disabled = true;
4345 			} else if (!strncmp(str, "cbiosize=", 9)) {
4346 				pci_cardbus_io_size = memparse(str + 9, &str);
4347 			} else if (!strncmp(str, "cbmemsize=", 10)) {
4348 				pci_cardbus_mem_size = memparse(str + 10, &str);
4349 			} else if (!strncmp(str, "resource_alignment=", 19)) {
4350 				pci_set_resource_alignment_param(str + 19,
4351 							strlen(str + 19));
4352 			} else if (!strncmp(str, "ecrc=", 5)) {
4353 				pcie_ecrc_get_policy(str + 5);
4354 			} else if (!strncmp(str, "hpiosize=", 9)) {
4355 				pci_hotplug_io_size = memparse(str + 9, &str);
4356 			} else if (!strncmp(str, "hpmemsize=", 10)) {
4357 				pci_hotplug_mem_size = memparse(str + 10, &str);
4358 			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
4359 				pcie_bus_config = PCIE_BUS_TUNE_OFF;
4360 			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
4361 				pcie_bus_config = PCIE_BUS_SAFE;
4362 			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
4363 				pcie_bus_config = PCIE_BUS_PERFORMANCE;
4364 			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
4365 				pcie_bus_config = PCIE_BUS_PEER2PEER;
4366 			} else if (!strncmp(str, "pcie_scan_all", 13)) {
4367 				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
4368 			} else {
4369 				printk(KERN_ERR "PCI: Unknown option `%s'\n",
4370 						str);
4371 			}
4372 		}
4373 		str = k;
4374 	}
4375 	return 0;
4376 }
4377 early_param("pci", pci_setup);
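
/*
 * Example (illustrative): a command line such as
 *	pci=nomsi,pcie_bus_safe,hpmemsize=8M
 * is split on commas above, disabling MSI, selecting the "safe" MPS
 * configuration and reserving 8 MB of memory space per hotplug bridge.
 */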
4378 
4379 EXPORT_SYMBOL(pci_reenable_device);
4380 EXPORT_SYMBOL(pci_enable_device_io);
4381 EXPORT_SYMBOL(pci_enable_device_mem);
4382 EXPORT_SYMBOL(pci_enable_device);
4383 EXPORT_SYMBOL(pcim_enable_device);
4384 EXPORT_SYMBOL(pcim_pin_device);
4385 EXPORT_SYMBOL(pci_disable_device);
4386 EXPORT_SYMBOL(pci_find_capability);
4387 EXPORT_SYMBOL(pci_bus_find_capability);
4388 EXPORT_SYMBOL(pci_release_regions);
4389 EXPORT_SYMBOL(pci_request_regions);
4390 EXPORT_SYMBOL(pci_request_regions_exclusive);
4391 EXPORT_SYMBOL(pci_release_region);
4392 EXPORT_SYMBOL(pci_request_region);
4393 EXPORT_SYMBOL(pci_request_region_exclusive);
4394 EXPORT_SYMBOL(pci_release_selected_regions);
4395 EXPORT_SYMBOL(pci_request_selected_regions);
4396 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
4397 EXPORT_SYMBOL(pci_set_master);
4398 EXPORT_SYMBOL(pci_clear_master);
4399 EXPORT_SYMBOL(pci_set_mwi);
4400 EXPORT_SYMBOL(pci_try_set_mwi);
4401 EXPORT_SYMBOL(pci_clear_mwi);
4402 EXPORT_SYMBOL_GPL(pci_intx);
4403 EXPORT_SYMBOL(pci_assign_resource);
4404 EXPORT_SYMBOL(pci_find_parent_resource);
4405 EXPORT_SYMBOL(pci_select_bars);
4406 
4407 EXPORT_SYMBOL(pci_set_power_state);
4408 EXPORT_SYMBOL(pci_save_state);
4409 EXPORT_SYMBOL(pci_restore_state);
4410 EXPORT_SYMBOL(pci_pme_capable);
4411 EXPORT_SYMBOL(pci_pme_active);
4412 EXPORT_SYMBOL(pci_wake_from_d3);
4413 EXPORT_SYMBOL(pci_prepare_to_sleep);
4414 EXPORT_SYMBOL(pci_back_from_sleep);
4415 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
4416