xref: /openbmc/linux/drivers/pci/pci.c (revision a2fb4d78)
1 /*
2  *	PCI Bus Services, see include/linux/pci.h for further explanation.
3  *
4  *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
5  *	David Mosberger-Tang
6  *
7  *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/delay.h>
12 #include <linux/init.h>
13 #include <linux/pci.h>
14 #include <linux/pm.h>
15 #include <linux/slab.h>
16 #include <linux/module.h>
17 #include <linux/spinlock.h>
18 #include <linux/string.h>
19 #include <linux/log2.h>
20 #include <linux/pci-aspm.h>
21 #include <linux/pm_wakeup.h>
22 #include <linux/interrupt.h>
23 #include <linux/device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pci_hotplug.h>
26 #include <asm-generic/pci-bridge.h>
27 #include <asm/setup.h>
28 #include "pci.h"
29 
30 const char *pci_power_names[] = {
31 	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
32 };
33 EXPORT_SYMBOL_GPL(pci_power_names);
34 
35 int isa_dma_bridge_buggy;
36 EXPORT_SYMBOL(isa_dma_bridge_buggy);
37 
38 int pci_pci_problems;
39 EXPORT_SYMBOL(pci_pci_problems);
40 
41 unsigned int pci_pm_d3_delay;
42 
43 static void pci_pme_list_scan(struct work_struct *work);
44 
45 static LIST_HEAD(pci_pme_list);
46 static DEFINE_MUTEX(pci_pme_list_mutex);
47 static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
48 
49 struct pci_pme_device {
50 	struct list_head list;
51 	struct pci_dev *dev;
52 };
53 
54 #define PME_TIMEOUT 1000 /* How long between PME checks */
55 
56 static void pci_dev_d3_sleep(struct pci_dev *dev)
57 {
58 	unsigned int delay = dev->d3_delay;
59 
60 	if (delay < pci_pm_d3_delay)
61 		delay = pci_pm_d3_delay;
62 
63 	msleep(delay);
64 }
65 
66 #ifdef CONFIG_PCI_DOMAINS
67 int pci_domains_supported = 1;
68 #endif
69 
70 #define DEFAULT_CARDBUS_IO_SIZE		(256)
71 #define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
72 /* pci=cbmemsize=nnM,cbiosize=nn can override this */
73 unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
74 unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
75 
76 #define DEFAULT_HOTPLUG_IO_SIZE		(256)
77 #define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
78 /* pci=hpmemsize=nnM,hpiosize=nn can override this */
79 unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
80 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
81 
82 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
83 
84 /*
85  * The default CLS is used if the arch didn't set CLS explicitly and not
86  * all PCI devices agree on the same value.  The arch can override either
87  * the default or the actual value as it sees fit.  Don't forget this is
88  * measured in 32-bit words, not bytes.
89  */
90 u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
91 u8 pci_cache_line_size;
92 
93 /*
94  * If we set up a device for bus mastering, we need to check the latency
95  * timer as certain BIOSes forget to set it properly.
96  */
97 unsigned int pcibios_max_latency = 255;
98 
99 /* If set, the PCIe ARI capability will not be used. */
100 static bool pcie_ari_disabled;
101 
102 /**
103  * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
104  * @bus: pointer to PCI bus structure to search
105  *
106  * Given a PCI bus, returns the highest PCI bus number present in the set
107  * including the given PCI bus and its list of child PCI buses.
108  */
109 unsigned char pci_bus_max_busnr(struct pci_bus *bus)
110 {
111 	struct list_head *tmp;
112 	unsigned char max, n;
113 
114 	max = bus->busn_res.end;
115 	list_for_each(tmp, &bus->children) {
116 		n = pci_bus_max_busnr(pci_bus_b(tmp));
117 		if (n > max)
118 			max = n;
119 	}
120 	return max;
121 }
122 EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
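
/*
 * Illustrative sketch (not part of the original file): a host bridge or
 * hotplug path might use pci_bus_max_busnr() to learn the highest bus
 * number already consumed by a subtree before assigning new ones.  The
 * "parent" pointer is assumed to come from the caller.
 */
static unsigned char __maybe_unused example_last_busnr(struct pci_bus *parent)
{
	/* Highest bus number used by "parent" and all of its children */
	return pci_bus_max_busnr(parent);
}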
123 
124 #ifdef CONFIG_HAS_IOMEM
125 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
126 {
127 	/*
128 	 * Make sure the BAR is actually a memory resource, not an IO resource
129 	 */
130 	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
131 		WARN_ON(1);
132 		return NULL;
133 	}
134 	return ioremap_nocache(pci_resource_start(pdev, bar),
135 				     pci_resource_len(pdev, bar));
136 }
137 EXPORT_SYMBOL_GPL(pci_ioremap_bar);
138 #endif
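
#ifdef CONFIG_HAS_IOMEM
/*
 * Illustrative sketch (not part of the original file): a typical driver
 * maps a memory BAR once during probe and accesses registers through the
 * returned cookie.  BAR index 0 and the readl() offset are assumptions.
 */
static int __maybe_unused example_map_bar0(struct pci_dev *pdev)
{
	void __iomem *regs = pci_ioremap_bar(pdev, 0);

	if (!regs)
		return -ENOMEM;

	/* Read a (hypothetical) 32-bit register at offset 0 */
	(void)readl(regs);
	iounmap(regs);
	return 0;
}
#endif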
139 
140 #define PCI_FIND_CAP_TTL	48
141 
142 static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
143 				   u8 pos, int cap, int *ttl)
144 {
145 	u8 id;
146 
147 	while ((*ttl)--) {
148 		pci_bus_read_config_byte(bus, devfn, pos, &pos);
149 		if (pos < 0x40)
150 			break;
151 		pos &= ~3;
152 		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
153 					 &id);
154 		if (id == 0xff)
155 			break;
156 		if (id == cap)
157 			return pos;
158 		pos += PCI_CAP_LIST_NEXT;
159 	}
160 	return 0;
161 }
162 
163 static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
164 			       u8 pos, int cap)
165 {
166 	int ttl = PCI_FIND_CAP_TTL;
167 
168 	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
169 }
170 
171 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
172 {
173 	return __pci_find_next_cap(dev->bus, dev->devfn,
174 				   pos + PCI_CAP_LIST_NEXT, cap);
175 }
176 EXPORT_SYMBOL_GPL(pci_find_next_capability);
177 
178 static int __pci_bus_find_cap_start(struct pci_bus *bus,
179 				    unsigned int devfn, u8 hdr_type)
180 {
181 	u16 status;
182 
183 	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
184 	if (!(status & PCI_STATUS_CAP_LIST))
185 		return 0;
186 
187 	switch (hdr_type) {
188 	case PCI_HEADER_TYPE_NORMAL:
189 	case PCI_HEADER_TYPE_BRIDGE:
190 		return PCI_CAPABILITY_LIST;
191 	case PCI_HEADER_TYPE_CARDBUS:
192 		return PCI_CB_CAPABILITY_LIST;
193 	default:
194 		return 0;
195 	}
196 
197 	return 0;
198 }
199 
200 /**
201  * pci_find_capability - query for devices' capabilities
202  * @dev: PCI device to query
203  * @cap: capability code
204  *
205  * Tell if a device supports a given PCI capability.
206  * Returns the address of the requested capability structure within the
207  * device's PCI configuration space or 0 in case the device does not
208  * support it.  Possible values for @cap:
209  *
210  *  %PCI_CAP_ID_PM           Power Management
211  *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
212  *  %PCI_CAP_ID_VPD          Vital Product Data
213  *  %PCI_CAP_ID_SLOTID       Slot Identification
214  *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
215  *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
216  *  %PCI_CAP_ID_PCIX         PCI-X
217  *  %PCI_CAP_ID_EXP          PCI Express
218  */
219 int pci_find_capability(struct pci_dev *dev, int cap)
220 {
221 	int pos;
222 
223 	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
224 	if (pos)
225 		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
226 
227 	return pos;
228 }
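
/*
 * Illustrative sketch (not part of the original file): a driver that needs
 * the PCI Express capability block might locate it like this before poking
 * at PCIe-specific registers.  Returning -ENODEV on a missing capability is
 * the caller's choice, not something this file mandates.
 */
static int __maybe_unused example_locate_pcie_cap(struct pci_dev *pdev)
{
	int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);

	if (!pos)
		return -ENODEV;	/* no PCI Express capability present */

	/* "pos" is the config space offset of the capability structure */
	return pos;
}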
229 
230 /**
231  * pci_bus_find_capability - query for devices' capabilities
232  * @bus:   the PCI bus to query
233  * @devfn: PCI device to query
234  * @cap:   capability code
235  *
236  * Like pci_find_capability() but works for pci devices that do not have a
237  * pci_dev structure set up yet.
238  *
239  * Returns the address of the requested capability structure within the
240  * device's PCI configuration space or 0 in case the device does not
241  * support it.
242  */
243 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
244 {
245 	int pos;
246 	u8 hdr_type;
247 
248 	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
249 
250 	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
251 	if (pos)
252 		pos = __pci_find_next_cap(bus, devfn, pos, cap);
253 
254 	return pos;
255 }
256 
257 /**
258  * pci_find_next_ext_capability - Find an extended capability
259  * @dev: PCI device to query
260  * @start: address at which to start looking (0 to start at beginning of list)
261  * @cap: capability code
262  *
263  * Returns the address of the next matching extended capability structure
264  * within the device's PCI configuration space or 0 if the device does
265  * not support it.  Some capabilities can occur several times, e.g., the
266  * vendor-specific capability, and this provides a way to find them all.
267  */
268 int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
269 {
270 	u32 header;
271 	int ttl;
272 	int pos = PCI_CFG_SPACE_SIZE;
273 
274 	/* minimum 8 bytes per capability */
275 	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
276 
277 	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
278 		return 0;
279 
280 	if (start)
281 		pos = start;
282 
283 	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
284 		return 0;
285 
286 	/*
287 	 * If we have no capabilities, this is indicated by cap ID,
288 	 * cap version and next pointer all being 0.
289 	 */
290 	if (header == 0)
291 		return 0;
292 
293 	while (ttl-- > 0) {
294 		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
295 			return pos;
296 
297 		pos = PCI_EXT_CAP_NEXT(header);
298 		if (pos < PCI_CFG_SPACE_SIZE)
299 			break;
300 
301 		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
302 			break;
303 	}
304 
305 	return 0;
306 }
307 EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
308 
309 /**
310  * pci_find_ext_capability - Find an extended capability
311  * @dev: PCI device to query
312  * @cap: capability code
313  *
314  * Returns the address of the requested extended capability structure
315  * within the device's PCI configuration space or 0 if the device does
316  * not support it.  Possible values for @cap:
317  *
318  *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
319  *  %PCI_EXT_CAP_ID_VC		Virtual Channel
320  *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
321  *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
322  */
323 int pci_find_ext_capability(struct pci_dev *dev, int cap)
324 {
325 	return pci_find_next_ext_capability(dev, 0, cap);
326 }
327 EXPORT_SYMBOL_GPL(pci_find_ext_capability);
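
/*
 * Illustrative sketch (not part of the original file): locating the
 * Advanced Error Reporting extended capability and reading its
 * Uncorrectable Error Status register.
 */
static u32 __maybe_unused example_read_aer_status(struct pci_dev *pdev)
{
	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	u32 status = 0;

	if (pos)
		pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_STATUS,
				      &status);
	return status;
}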
328 
329 static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
330 {
331 	int rc, ttl = PCI_FIND_CAP_TTL;
332 	u8 cap, mask;
333 
334 	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
335 		mask = HT_3BIT_CAP_MASK;
336 	else
337 		mask = HT_5BIT_CAP_MASK;
338 
339 	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
340 				      PCI_CAP_ID_HT, &ttl);
341 	while (pos) {
342 		rc = pci_read_config_byte(dev, pos + 3, &cap);
343 		if (rc != PCIBIOS_SUCCESSFUL)
344 			return 0;
345 
346 		if ((cap & mask) == ht_cap)
347 			return pos;
348 
349 		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
350 					      pos + PCI_CAP_LIST_NEXT,
351 					      PCI_CAP_ID_HT, &ttl);
352 	}
353 
354 	return 0;
355 }
356 /**
357  * pci_find_next_ht_capability - query a device's Hypertransport capabilities
358  * @dev: PCI device to query
359  * @pos: Position from which to continue searching
360  * @ht_cap: Hypertransport capability code
361  *
362  * To be used in conjunction with pci_find_ht_capability() to search for
363  * all capabilities matching @ht_cap. @pos should always be a value returned
364  * from pci_find_ht_capability().
365  *
366  * NB. To be 100% safe against broken PCI devices, the caller should take
367  * steps to avoid an infinite loop.
368  */
369 int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
370 {
371 	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
372 }
373 EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
374 
375 /**
376  * pci_find_ht_capability - query a device's Hypertransport capabilities
377  * @dev: PCI device to query
378  * @ht_cap: Hypertransport capability code
379  *
380  * Tell if a device supports a given Hypertransport capability.
381  * Returns an address within the device's PCI configuration space
382  * or 0 in case the device does not support the request capability.
383  * The address points to the PCI capability, of type PCI_CAP_ID_HT,
384  * which has a Hypertransport capability matching @ht_cap.
385  */
386 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
387 {
388 	int pos;
389 
390 	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
391 	if (pos)
392 		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
393 
394 	return pos;
395 }
396 EXPORT_SYMBOL_GPL(pci_find_ht_capability);
397 
398 /**
399  * pci_find_parent_resource - return resource region of parent bus of given region
400  * @dev: PCI device structure contains resources to be searched
401  * @res: child resource record for which parent is sought
402  *
403  *  For a given resource region of a given device, return the resource
404  *  region of the parent bus the given region is contained in, or from
405  *  which it should be allocated.
406  */
407 struct resource *
408 pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
409 {
410 	const struct pci_bus *bus = dev->bus;
411 	int i;
412 	struct resource *best = NULL, *r;
413 
414 	pci_bus_for_each_resource(bus, r, i) {
415 		if (!r)
416 			continue;
417 		if (res->start && !(res->start >= r->start && res->end <= r->end))
418 			continue;	/* Not contained */
419 		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
420 			continue;	/* Wrong type */
421 		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
422 			return r;	/* Exact match */
423 		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
424 		if (r->flags & IORESOURCE_PREFETCH)
425 			continue;
426 		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
427 		if (!best)
428 			best = r;
429 	}
430 	return best;
431 }
432 
433 /**
434  * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
435  * @dev: the PCI device to operate on
436  * @pos: config space offset of status word
437  * @mask: mask of bit(s) to care about in status word
438  *
439  * Return 1 when mask bit(s) in status word clear, 0 otherwise.
440  */
441 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
442 {
443 	int i;
444 
445 	/* Wait for the Transaction Pending bit to clear */
446 	for (i = 0; i < 4; i++) {
447 		u16 status;
448 		if (i)
449 			msleep((1 << (i - 1)) * 100);
450 
451 		pci_read_config_word(dev, pos, &status);
452 		if (!(status & mask))
453 			return 1;
454 	}
455 
456 	return 0;
457 }
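
/*
 * Illustrative sketch (not part of the original file): waiting for the PCIe
 * Transaction Pending bit to clear before issuing a function reset, similar
 * to what function-level-reset paths elsewhere in the kernel do.
 */
static bool __maybe_unused example_wait_for_pending_transaction(struct pci_dev *pdev)
{
	if (!pci_is_pcie(pdev))
		return true;

	return pci_wait_for_pending(pdev, pci_pcie_cap(pdev) + PCI_EXP_DEVSTA,
				    PCI_EXP_DEVSTA_TRPND);
}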
458 
459 /**
460  * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
461  * @dev: PCI device to have its BARs restored
462  *
463  * Restore the BAR values for a given device, so as to make it
464  * accessible by its driver.
465  */
466 static void
467 pci_restore_bars(struct pci_dev *dev)
468 {
469 	int i;
470 
471 	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
472 		pci_update_resource(dev, i);
473 }
474 
475 static struct pci_platform_pm_ops *pci_platform_pm;
476 
477 int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
478 {
479 	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
480 	    || !ops->sleep_wake)
481 		return -EINVAL;
482 	pci_platform_pm = ops;
483 	return 0;
484 }
485 
486 static inline bool platform_pci_power_manageable(struct pci_dev *dev)
487 {
488 	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
489 }
490 
491 static inline int platform_pci_set_power_state(struct pci_dev *dev,
492                                                 pci_power_t t)
493 {
494 	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
495 }
496 
497 static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
498 {
499 	return pci_platform_pm ?
500 			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
501 }
502 
503 static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
504 {
505 	return pci_platform_pm ?
506 			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
507 }
508 
509 static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
510 {
511 	return pci_platform_pm ?
512 			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
513 }
514 
515 /**
516  * pci_raw_set_power_state - Use PCI PM registers to set the power state of
517  *                           given PCI device
518  * @dev: PCI device to handle.
519  * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
520  *
521  * RETURN VALUE:
522  * -EINVAL if the requested state is invalid.
523  * -EIO if device does not support PCI PM or its PM capabilities register has a
524  * wrong version, or device doesn't support the requested state.
525  * 0 if device already is in the requested state.
526  * 0 if device's power state has been successfully changed.
527  */
528 static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
529 {
530 	u16 pmcsr;
531 	bool need_restore = false;
532 
533 	/* Check if we're already there */
534 	if (dev->current_state == state)
535 		return 0;
536 
537 	if (!dev->pm_cap)
538 		return -EIO;
539 
540 	if (state < PCI_D0 || state > PCI_D3hot)
541 		return -EINVAL;
542 
543 	/* Validate current state:
544 	 * Can enter D0 from any state, but we can only go deeper
545 	 * to sleep if we're already in a low power state.
546 	 */
547 	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
548 	    && dev->current_state > state) {
549 		dev_err(&dev->dev, "invalid power transition "
550 			"(from state %d to %d)\n", dev->current_state, state);
551 		return -EINVAL;
552 	}
553 
554 	/* check if this device supports the desired state */
555 	if ((state == PCI_D1 && !dev->d1_support)
556 	   || (state == PCI_D2 && !dev->d2_support))
557 		return -EIO;
558 
559 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
560 
561 	/* If we're (effectively) in D3, force entire word to 0.
562 	 * This doesn't affect PME_Status, disables PME_En, and
563 	 * sets PowerState to 0.
564 	 */
565 	switch (dev->current_state) {
566 	case PCI_D0:
567 	case PCI_D1:
568 	case PCI_D2:
569 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
570 		pmcsr |= state;
571 		break;
572 	case PCI_D3hot:
573 	case PCI_D3cold:
574 	case PCI_UNKNOWN: /* Boot-up */
575 		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
576 		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
577 			need_restore = true;
578 		/* Fall-through: force to D0 */
579 	default:
580 		pmcsr = 0;
581 		break;
582 	}
583 
584 	/* enter specified state */
585 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
586 
587 	/* Mandatory power management transition delays */
588 	/* see PCI PM 1.1 5.6.1 table 18 */
589 	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
590 		pci_dev_d3_sleep(dev);
591 	else if (state == PCI_D2 || dev->current_state == PCI_D2)
592 		udelay(PCI_PM_D2_DELAY);
593 
594 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
595 	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
596 	if (dev->current_state != state && printk_ratelimit())
597 		dev_info(&dev->dev, "Refused to change power state, "
598 			"currently in D%d\n", dev->current_state);
599 
600 	/*
601 	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
602 	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
603 	 * from D3hot to D0 _may_ perform an internal reset, thereby
604 	 * going to "D0 Uninitialized" rather than "D0 Initialized".
605 	 * For example, at least some versions of the 3c905B and the
606 	 * 3c556B exhibit this behaviour.
607 	 *
608 	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
609 	 * devices in a D3hot state at boot.  Consequently, we need to
610 	 * restore at least the BARs so that the device will be
611 	 * accessible to its driver.
612 	 */
613 	if (need_restore)
614 		pci_restore_bars(dev);
615 
616 	if (dev->bus->self)
617 		pcie_aspm_pm_state_change(dev->bus->self);
618 
619 	return 0;
620 }
621 
622 /**
623  * pci_update_current_state - Read PCI power state of given device from its
624  *                            PCI PM registers and cache it
625  * @dev: PCI device to handle.
626  * @state: State to cache in case the device doesn't have the PM capability
627  */
628 void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
629 {
630 	if (dev->pm_cap) {
631 		u16 pmcsr;
632 
633 		/*
634 		 * Configuration space is not accessible for a device in
635 		 * D3cold, so just keep or set D3cold for safety.
636 		 */
637 		if (dev->current_state == PCI_D3cold)
638 			return;
639 		if (state == PCI_D3cold) {
640 			dev->current_state = PCI_D3cold;
641 			return;
642 		}
643 		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
644 		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
645 	} else {
646 		dev->current_state = state;
647 	}
648 }
649 
650 /**
651  * pci_power_up - Put the given device into D0 forcibly
652  * @dev: PCI device to power up
653  */
654 void pci_power_up(struct pci_dev *dev)
655 {
656 	if (platform_pci_power_manageable(dev))
657 		platform_pci_set_power_state(dev, PCI_D0);
658 
659 	pci_raw_set_power_state(dev, PCI_D0);
660 	pci_update_current_state(dev, PCI_D0);
661 }
662 
663 /**
664  * pci_platform_power_transition - Use platform to change device power state
665  * @dev: PCI device to handle.
666  * @state: State to put the device into.
667  */
668 static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
669 {
670 	int error;
671 
672 	if (platform_pci_power_manageable(dev)) {
673 		error = platform_pci_set_power_state(dev, state);
674 		if (!error)
675 			pci_update_current_state(dev, state);
676 	} else
677 		error = -ENODEV;
678 
679 	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
680 		dev->current_state = PCI_D0;
681 
682 	return error;
683 }
684 
685 /**
686  * pci_wakeup - Wake up a PCI device
687  * @pci_dev: Device to handle.
688  * @ign: ignored parameter
689  */
690 static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
691 {
692 	pci_wakeup_event(pci_dev);
693 	pm_request_resume(&pci_dev->dev);
694 	return 0;
695 }
696 
697 /**
698  * pci_wakeup_bus - Walk given bus and wake up devices on it
699  * @bus: Top bus of the subtree to walk.
700  */
701 static void pci_wakeup_bus(struct pci_bus *bus)
702 {
703 	if (bus)
704 		pci_walk_bus(bus, pci_wakeup, NULL);
705 }
706 
707 /**
708  * __pci_start_power_transition - Start power transition of a PCI device
709  * @dev: PCI device to handle.
710  * @state: State to put the device into.
711  */
712 static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
713 {
714 	if (state == PCI_D0) {
715 		pci_platform_power_transition(dev, PCI_D0);
716 		/*
717 		 * Mandatory power management transition delays, see
718 		 * PCI Express Base Specification Revision 2.0 Section
719 		 * 6.6.1: Conventional Reset.  Do not delay for
720 		 * devices powered on/off by the corresponding bridge,
721 		 * because we have already delayed for the bridge.
722 		 */
723 		if (dev->runtime_d3cold) {
724 			msleep(dev->d3cold_delay);
725 			/*
726 			 * When powering on a bridge from D3cold, the
727 			 * whole hierarchy may be powered on into
728 			 * D0uninitialized state; resume the devices to give
729 			 * them a chance to suspend again.
730 			 */
731 			pci_wakeup_bus(dev->subordinate);
732 		}
733 	}
734 }
735 
736 /**
737  * __pci_dev_set_current_state - Set current state of a PCI device
738  * @dev: Device to handle
739  * @data: pointer to state to be set
740  */
741 static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
742 {
743 	pci_power_t state = *(pci_power_t *)data;
744 
745 	dev->current_state = state;
746 	return 0;
747 }
748 
749 /**
750  * __pci_bus_set_current_state - Walk given bus and set current state of devices
751  * @bus: Top bus of the subtree to walk.
752  * @state: state to be set
753  */
754 static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
755 {
756 	if (bus)
757 		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
758 }
759 
760 /**
761  * __pci_complete_power_transition - Complete power transition of a PCI device
762  * @dev: PCI device to handle.
763  * @state: State to put the device into.
764  *
765  * This function should not be called directly by device drivers.
766  */
767 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
768 {
769 	int ret;
770 
771 	if (state <= PCI_D0)
772 		return -EINVAL;
773 	ret = pci_platform_power_transition(dev, state);
774 	/* Powering off the bridge may power off the whole hierarchy */
775 	if (!ret && state == PCI_D3cold)
776 		__pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
777 	return ret;
778 }
779 EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
780 
781 /**
782  * pci_set_power_state - Set the power state of a PCI device
783  * @dev: PCI device to handle.
784  * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
785  *
786  * Transition a device to a new power state, using the platform firmware and/or
787  * the device's PCI PM registers.
788  *
789  * RETURN VALUE:
790  * -EINVAL if the requested state is invalid.
791  * -EIO if device does not support PCI PM or its PM capabilities register has a
792  * wrong version, or device doesn't support the requested state.
793  * 0 if device already is in the requested state.
794  * 0 if device's power state has been successfully changed.
795  */
796 int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
797 {
798 	int error;
799 
800 	/* bound the state we're entering */
801 	if (state > PCI_D3cold)
802 		state = PCI_D3cold;
803 	else if (state < PCI_D0)
804 		state = PCI_D0;
805 	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
806 		/*
807 		 * If the device or the parent bridge does not support PCI PM,
808 		 * ignore the request if we're doing anything other than putting
809 		 * it into D0 (which would only happen on boot).
810 		 */
811 		return 0;
812 
813 	/* Check if we're already there */
814 	if (dev->current_state == state)
815 		return 0;
816 
817 	__pci_start_power_transition(dev, state);
818 
819 	/* This device is quirked not to be put into D3, so
820 	   don't put it in D3 */
821 	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
822 		return 0;
823 
824 	/*
825 	 * To put the device in D3cold, we put it into D3hot the native
826 	 * way, then put it into D3cold via the platform ops.
827 	 */
828 	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
829 					PCI_D3hot : state);
830 
831 	if (!__pci_complete_power_transition(dev, state))
832 		error = 0;
833 	/*
834 	 * When aspm_policy is "powersave" this call ensures
835 	 * that ASPM is configured.
836 	 */
837 	if (!error && dev->bus->self)
838 		pcie_aspm_powersave_config_link(dev->bus->self);
839 
840 	return error;
841 }
842 
843 /**
844  * pci_choose_state - Choose the power state of a PCI device
845  * @dev: PCI device to be suspended
846  * @state: target sleep state for the whole system. This is the value
847  *	that is passed to suspend() function.
848  *
849  * Returns PCI power state suitable for given device and given system
850  * message.
851  */
852 
853 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
854 {
855 	pci_power_t ret;
856 
857 	if (!dev->pm_cap)
858 		return PCI_D0;
859 
860 	ret = platform_pci_choose_state(dev);
861 	if (ret != PCI_POWER_ERROR)
862 		return ret;
863 
864 	switch (state.event) {
865 	case PM_EVENT_ON:
866 		return PCI_D0;
867 	case PM_EVENT_FREEZE:
868 	case PM_EVENT_PRETHAW:
869 		/* REVISIT both freeze and pre-thaw "should" use D0 */
870 	case PM_EVENT_SUSPEND:
871 	case PM_EVENT_HIBERNATE:
872 		return PCI_D3hot;
873 	default:
874 		dev_info(&dev->dev, "unrecognized suspend event %d\n",
875 			 state.event);
876 		BUG();
877 	}
878 	return PCI_D0;
879 }
880 
881 EXPORT_SYMBOL(pci_choose_state);
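
/*
 * Illustrative sketch (not part of the original file): a legacy .suspend()
 * callback typically saves state, disables the device, and then uses
 * pci_choose_state() to pick the state handed to pci_set_power_state().
 */
static int __maybe_unused example_legacy_suspend(struct pci_dev *pdev,
						 pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
}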
882 
883 #define PCI_EXP_SAVE_REGS	7
884 
885 
886 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
887 						       u16 cap, bool extended)
888 {
889 	struct pci_cap_saved_state *tmp;
890 
891 	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
892 		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
893 			return tmp;
894 	}
895 	return NULL;
896 }
897 
898 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
899 {
900 	return _pci_find_saved_cap(dev, cap, false);
901 }
902 
903 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
904 {
905 	return _pci_find_saved_cap(dev, cap, true);
906 }
907 
908 static int pci_save_pcie_state(struct pci_dev *dev)
909 {
910 	int i = 0;
911 	struct pci_cap_saved_state *save_state;
912 	u16 *cap;
913 
914 	if (!pci_is_pcie(dev))
915 		return 0;
916 
917 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
918 	if (!save_state) {
919 		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
920 		return -ENOMEM;
921 	}
922 
923 	cap = (u16 *)&save_state->cap.data[0];
924 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
925 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
926 	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
927 	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
928 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
929 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
930 	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
931 
932 	return 0;
933 }
934 
935 static void pci_restore_pcie_state(struct pci_dev *dev)
936 {
937 	int i = 0;
938 	struct pci_cap_saved_state *save_state;
939 	u16 *cap;
940 
941 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
942 	if (!save_state)
943 		return;
944 
945 	cap = (u16 *)&save_state->cap.data[0];
946 	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
947 	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
948 	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
949 	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
950 	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
951 	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
952 	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
953 }
954 
955 
956 static int pci_save_pcix_state(struct pci_dev *dev)
957 {
958 	int pos;
959 	struct pci_cap_saved_state *save_state;
960 
961 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
962 	if (pos <= 0)
963 		return 0;
964 
965 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
966 	if (!save_state) {
967 		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
968 		return -ENOMEM;
969 	}
970 
971 	pci_read_config_word(dev, pos + PCI_X_CMD,
972 			     (u16 *)save_state->cap.data);
973 
974 	return 0;
975 }
976 
977 static void pci_restore_pcix_state(struct pci_dev *dev)
978 {
979 	int i = 0, pos;
980 	struct pci_cap_saved_state *save_state;
981 	u16 *cap;
982 
983 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
984 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
985 	if (!save_state || pos <= 0)
986 		return;
987 	cap = (u16 *)&save_state->cap.data[0];
988 
989 	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
990 }
991 
992 
993 /**
994  * pci_save_state - save the PCI configuration space of a device before suspending
995  * @dev: PCI device that we're dealing with
996  */
997 int
998 pci_save_state(struct pci_dev *dev)
999 {
1000 	int i;
1001 	/* XXX: 100% dword access ok here? */
1002 	for (i = 0; i < 16; i++)
1003 		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1004 	dev->state_saved = true;
1005 	if ((i = pci_save_pcie_state(dev)) != 0)
1006 		return i;
1007 	if ((i = pci_save_pcix_state(dev)) != 0)
1008 		return i;
1009 	if ((i = pci_save_vc_state(dev)) != 0)
1010 		return i;
1011 	return 0;
1012 }
1013 
1014 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1015 				     u32 saved_val, int retry)
1016 {
1017 	u32 val;
1018 
1019 	pci_read_config_dword(pdev, offset, &val);
1020 	if (val == saved_val)
1021 		return;
1022 
1023 	for (;;) {
1024 		dev_dbg(&pdev->dev, "restoring config space at offset "
1025 			"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
1026 		pci_write_config_dword(pdev, offset, saved_val);
1027 		if (retry-- <= 0)
1028 			return;
1029 
1030 		pci_read_config_dword(pdev, offset, &val);
1031 		if (val == saved_val)
1032 			return;
1033 
1034 		mdelay(1);
1035 	}
1036 }
1037 
1038 static void pci_restore_config_space_range(struct pci_dev *pdev,
1039 					   int start, int end, int retry)
1040 {
1041 	int index;
1042 
1043 	for (index = end; index >= start; index--)
1044 		pci_restore_config_dword(pdev, 4 * index,
1045 					 pdev->saved_config_space[index],
1046 					 retry);
1047 }
1048 
1049 static void pci_restore_config_space(struct pci_dev *pdev)
1050 {
1051 	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1052 		pci_restore_config_space_range(pdev, 10, 15, 0);
1053 		/* Restore BARs before the command register. */
1054 		pci_restore_config_space_range(pdev, 4, 9, 10);
1055 		pci_restore_config_space_range(pdev, 0, 3, 0);
1056 	} else {
1057 		pci_restore_config_space_range(pdev, 0, 15, 0);
1058 	}
1059 }
1060 
1061 /**
1062  * pci_restore_state - Restore the saved state of a PCI device
1063  * @dev: PCI device that we're dealing with
1064  */
1065 void pci_restore_state(struct pci_dev *dev)
1066 {
1067 	if (!dev->state_saved)
1068 		return;
1069 
1070 	/* PCI Express register must be restored first */
1071 	pci_restore_pcie_state(dev);
1072 	pci_restore_ats_state(dev);
1073 	pci_restore_vc_state(dev);
1074 
1075 	pci_restore_config_space(dev);
1076 
1077 	pci_restore_pcix_state(dev);
1078 	pci_restore_msi_state(dev);
1079 	pci_restore_iov_state(dev);
1080 
1081 	dev->state_saved = false;
1082 }
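
/*
 * Illustrative sketch (not part of the original file): pci_save_state() and
 * pci_restore_state() are normally used as a pair around a low power
 * transition, e.g. from dev_pm_ops suspend/resume callbacks.
 */
static int __maybe_unused example_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_save_state(pdev);
	return pci_set_power_state(pdev, PCI_D3hot);
}

static int __maybe_unused example_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret = pci_set_power_state(pdev, PCI_D0);

	if (ret)
		return ret;
	pci_restore_state(pdev);
	return 0;
}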
1083 
1084 struct pci_saved_state {
1085 	u32 config_space[16];
1086 	struct pci_cap_saved_data cap[0];
1087 };
1088 
1089 /**
1090  * pci_store_saved_state - Allocate and return an opaque struct containing
1091  *			   the device's saved state.
1092  * @dev: PCI device that we're dealing with
1093  *
1094  * Return NULL if no state or error.
1095  */
1096 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1097 {
1098 	struct pci_saved_state *state;
1099 	struct pci_cap_saved_state *tmp;
1100 	struct pci_cap_saved_data *cap;
1101 	size_t size;
1102 
1103 	if (!dev->state_saved)
1104 		return NULL;
1105 
1106 	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1107 
1108 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1109 		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1110 
1111 	state = kzalloc(size, GFP_KERNEL);
1112 	if (!state)
1113 		return NULL;
1114 
1115 	memcpy(state->config_space, dev->saved_config_space,
1116 	       sizeof(state->config_space));
1117 
1118 	cap = state->cap;
1119 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1120 		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1121 		memcpy(cap, &tmp->cap, len);
1122 		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1123 	}
1124 	/* Empty cap_save terminates list */
1125 
1126 	return state;
1127 }
1128 EXPORT_SYMBOL_GPL(pci_store_saved_state);
1129 
1130 /**
1131  * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1132  * @dev: PCI device that we're dealing with
1133  * @state: Saved state returned from pci_store_saved_state()
1134  */
1135 static int pci_load_saved_state(struct pci_dev *dev,
1136 				struct pci_saved_state *state)
1137 {
1138 	struct pci_cap_saved_data *cap;
1139 
1140 	dev->state_saved = false;
1141 
1142 	if (!state)
1143 		return 0;
1144 
1145 	memcpy(dev->saved_config_space, state->config_space,
1146 	       sizeof(state->config_space));
1147 
1148 	cap = state->cap;
1149 	while (cap->size) {
1150 		struct pci_cap_saved_state *tmp;
1151 
1152 		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1153 		if (!tmp || tmp->cap.size != cap->size)
1154 			return -EINVAL;
1155 
1156 		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1157 		cap = (struct pci_cap_saved_data *)((u8 *)cap +
1158 		       sizeof(struct pci_cap_saved_data) + cap->size);
1159 	}
1160 
1161 	dev->state_saved = true;
1162 	return 0;
1163 }
1164 
1165 /**
1166  * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1167  *				   and free the memory allocated for it.
1168  * @dev: PCI device that we're dealing with
1169  * @state: Pointer to saved state returned from pci_store_saved_state()
1170  */
1171 int pci_load_and_free_saved_state(struct pci_dev *dev,
1172 				  struct pci_saved_state **state)
1173 {
1174 	int ret = pci_load_saved_state(dev, *state);
1175 	kfree(*state);
1176 	*state = NULL;
1177 	return ret;
1178 }
1179 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
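
/*
 * Illustrative sketch (not part of the original file): callers such as
 * device assignment code snapshot the saved state across an operation that
 * clobbers it, then load and free the snapshot afterwards.
 */
static int __maybe_unused example_snapshot_state(struct pci_dev *pdev)
{
	struct pci_saved_state *snapshot;
	int ret;

	pci_save_state(pdev);
	snapshot = pci_store_saved_state(pdev);
	if (!snapshot)
		return -ENOMEM;

	/* ... something that overwrites the saved state would go here ... */

	ret = pci_load_and_free_saved_state(pdev, &snapshot);
	if (!ret)
		pci_restore_state(pdev);
	return ret;
}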
1180 
1181 static int do_pci_enable_device(struct pci_dev *dev, int bars)
1182 {
1183 	int err;
1184 	u16 cmd;
1185 	u8 pin;
1186 
1187 	err = pci_set_power_state(dev, PCI_D0);
1188 	if (err < 0 && err != -EIO)
1189 		return err;
1190 	err = pcibios_enable_device(dev, bars);
1191 	if (err < 0)
1192 		return err;
1193 	pci_fixup_device(pci_fixup_enable, dev);
1194 
1195 	if (dev->msi_enabled || dev->msix_enabled)
1196 		return 0;
1197 
1198 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1199 	if (pin) {
1200 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
1201 		if (cmd & PCI_COMMAND_INTX_DISABLE)
1202 			pci_write_config_word(dev, PCI_COMMAND,
1203 					      cmd & ~PCI_COMMAND_INTX_DISABLE);
1204 	}
1205 
1206 	return 0;
1207 }
1208 
1209 /**
1210  * pci_reenable_device - Resume abandoned device
1211  * @dev: PCI device to be resumed
1212  *
1213  *  Note this function is a backend of pci_default_resume and is not supposed
1214  *  to be called by normal code; write a proper resume handler and use it instead.
1215  */
1216 int pci_reenable_device(struct pci_dev *dev)
1217 {
1218 	if (pci_is_enabled(dev))
1219 		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1220 	return 0;
1221 }
1222 
1223 static void pci_enable_bridge(struct pci_dev *dev)
1224 {
1225 	struct pci_dev *bridge;
1226 	int retval;
1227 
1228 	bridge = pci_upstream_bridge(dev);
1229 	if (bridge)
1230 		pci_enable_bridge(bridge);
1231 
1232 	if (pci_is_enabled(dev)) {
1233 		if (!dev->is_busmaster)
1234 			pci_set_master(dev);
1235 		return;
1236 	}
1237 
1238 	retval = pci_enable_device(dev);
1239 	if (retval)
1240 		dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
1241 			retval);
1242 	pci_set_master(dev);
1243 }
1244 
1245 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1246 {
1247 	struct pci_dev *bridge;
1248 	int err;
1249 	int i, bars = 0;
1250 
1251 	/*
1252 	 * Power state could be unknown at this point, either due to a fresh
1253 	 * boot or a device removal call.  So get the current power state
1254 	 * so that things like MSI message writing will behave as expected
1255 	 * (e.g. if the device really is in D0 at enable time).
1256 	 */
1257 	if (dev->pm_cap) {
1258 		u16 pmcsr;
1259 		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1260 		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1261 	}
1262 
1263 	if (atomic_inc_return(&dev->enable_cnt) > 1)
1264 		return 0;		/* already enabled */
1265 
1266 	bridge = pci_upstream_bridge(dev);
1267 	if (bridge)
1268 		pci_enable_bridge(bridge);
1269 
1270 	/* only skip sriov related */
1271 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1272 		if (dev->resource[i].flags & flags)
1273 			bars |= (1 << i);
1274 	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
1275 		if (dev->resource[i].flags & flags)
1276 			bars |= (1 << i);
1277 
1278 	err = do_pci_enable_device(dev, bars);
1279 	if (err < 0)
1280 		atomic_dec(&dev->enable_cnt);
1281 	return err;
1282 }
1283 
1284 /**
1285  * pci_enable_device_io - Initialize a device for use with IO space
1286  * @dev: PCI device to be initialized
1287  *
1288  *  Initialize device before it's used by a driver. Ask low-level code
1289  *  to enable I/O resources. Wake up the device if it was suspended.
1290  *  Beware, this function can fail.
1291  */
1292 int pci_enable_device_io(struct pci_dev *dev)
1293 {
1294 	return pci_enable_device_flags(dev, IORESOURCE_IO);
1295 }
1296 
1297 /**
1298  * pci_enable_device_mem - Initialize a device for use with Memory space
1299  * @dev: PCI device to be initialized
1300  *
1301  *  Initialize device before it's used by a driver. Ask low-level code
1302  *  to enable Memory resources. Wake up the device if it was suspended.
1303  *  Beware, this function can fail.
1304  */
1305 int pci_enable_device_mem(struct pci_dev *dev)
1306 {
1307 	return pci_enable_device_flags(dev, IORESOURCE_MEM);
1308 }
1309 
1310 /**
1311  * pci_enable_device - Initialize device before it's used by a driver.
1312  * @dev: PCI device to be initialized
1313  *
1314  *  Initialize device before it's used by a driver. Ask low-level code
1315  *  to enable I/O and memory. Wake up the device if it was suspended.
1316  *  Beware, this function can fail.
1317  *
1318  *  Note we don't actually enable the device many times if we call
1319  *  this function repeatedly (we just increment the count).
1320  */
1321 int pci_enable_device(struct pci_dev *dev)
1322 {
1323 	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1324 }
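
/*
 * Illustrative sketch (not part of the original file): the usual probe()
 * and remove() pairing around pci_enable_device().  The resource name used
 * for pci_request_regions() is an assumption.
 */
static int __maybe_unused example_probe(struct pci_dev *pdev,
					const struct pci_device_id *id)
{
	int err = pci_enable_device(pdev);

	if (err)
		return err;

	err = pci_request_regions(pdev, "example");
	if (err)
		pci_disable_device(pdev);
	return err;
}

static void __maybe_unused example_remove(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}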
1325 
1326 /*
1327  * Managed PCI resources.  This manages device on/off, intx/msi/msix
1328  * on/off and BAR regions.  pci_dev itself records msi/msix status, so
1329  * there's no need to track it separately.  pci_devres is initialized
1330  * when a device is enabled using managed PCI device enable interface.
1331  */
1332 struct pci_devres {
1333 	unsigned int enabled:1;
1334 	unsigned int pinned:1;
1335 	unsigned int orig_intx:1;
1336 	unsigned int restore_intx:1;
1337 	u32 region_mask;
1338 };
1339 
1340 static void pcim_release(struct device *gendev, void *res)
1341 {
1342 	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
1343 	struct pci_devres *this = res;
1344 	int i;
1345 
1346 	if (dev->msi_enabled)
1347 		pci_disable_msi(dev);
1348 	if (dev->msix_enabled)
1349 		pci_disable_msix(dev);
1350 
1351 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1352 		if (this->region_mask & (1 << i))
1353 			pci_release_region(dev, i);
1354 
1355 	if (this->restore_intx)
1356 		pci_intx(dev, this->orig_intx);
1357 
1358 	if (this->enabled && !this->pinned)
1359 		pci_disable_device(dev);
1360 }
1361 
1362 static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
1363 {
1364 	struct pci_devres *dr, *new_dr;
1365 
1366 	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1367 	if (dr)
1368 		return dr;
1369 
1370 	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1371 	if (!new_dr)
1372 		return NULL;
1373 	return devres_get(&pdev->dev, new_dr, NULL, NULL);
1374 }
1375 
1376 static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
1377 {
1378 	if (pci_is_managed(pdev))
1379 		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1380 	return NULL;
1381 }
1382 
1383 /**
1384  * pcim_enable_device - Managed pci_enable_device()
1385  * @pdev: PCI device to be initialized
1386  *
1387  * Managed pci_enable_device().
1388  */
1389 int pcim_enable_device(struct pci_dev *pdev)
1390 {
1391 	struct pci_devres *dr;
1392 	int rc;
1393 
1394 	dr = get_pci_dr(pdev);
1395 	if (unlikely(!dr))
1396 		return -ENOMEM;
1397 	if (dr->enabled)
1398 		return 0;
1399 
1400 	rc = pci_enable_device(pdev);
1401 	if (!rc) {
1402 		pdev->is_managed = 1;
1403 		dr->enabled = 1;
1404 	}
1405 	return rc;
1406 }
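
/*
 * Illustrative sketch (not part of the original file): with the managed
 * variant there is no explicit disable in the error or remove paths; the
 * devres core invokes pcim_release() on driver detach.
 */
static int __maybe_unused example_managed_probe(struct pci_dev *pdev,
						const struct pci_device_id *id)
{
	int err = pcim_enable_device(pdev);

	if (err)
		return err;

	/* Further pcim_* setup (e.g. pcim_iomap_regions()) would follow. */
	return 0;
}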
1407 
1408 /**
1409  * pcim_pin_device - Pin managed PCI device
1410  * @pdev: PCI device to pin
1411  *
1412  * Pin managed PCI device @pdev.  Pinned device won't be disabled on
1413  * driver detach.  @pdev must have been enabled with
1414  * pcim_enable_device().
1415  */
1416 void pcim_pin_device(struct pci_dev *pdev)
1417 {
1418 	struct pci_devres *dr;
1419 
1420 	dr = find_pci_dr(pdev);
1421 	WARN_ON(!dr || !dr->enabled);
1422 	if (dr)
1423 		dr->pinned = 1;
1424 }
1425 
1426 /**
1427  * pcibios_add_device - provide arch specific hooks when adding device dev
1428  * @dev: the PCI device being added
1429  *
1430  * Permits the platform to provide architecture specific functionality when
1431  * devices are added. This is the default implementation. Architecture
1432  * implementations can override this.
1433  */
1434 int __weak pcibios_add_device(struct pci_dev *dev)
1435 {
1436 	return 0;
1437 }
1438 
1439 /**
1440  * pcibios_release_device - provide arch specific hooks when releasing device dev
1441  * @dev: the PCI device being released
1442  *
1443  * Permits the platform to provide architecture specific functionality when
1444  * devices are released. This is the default implementation. Architecture
1445  * implementations can override this.
1446  */
1447 void __weak pcibios_release_device(struct pci_dev *dev) {}
1448 
1449 /**
1450  * pcibios_disable_device - disable arch specific PCI resources for device dev
1451  * @dev: the PCI device to disable
1452  *
1453  * Disables architecture specific PCI resources for the device. This
1454  * is the default implementation. Architecture implementations can
1455  * override this.
1456  */
1457 void __weak pcibios_disable_device(struct pci_dev *dev) {}
1458 
1459 static void do_pci_disable_device(struct pci_dev *dev)
1460 {
1461 	u16 pci_command;
1462 
1463 	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1464 	if (pci_command & PCI_COMMAND_MASTER) {
1465 		pci_command &= ~PCI_COMMAND_MASTER;
1466 		pci_write_config_word(dev, PCI_COMMAND, pci_command);
1467 	}
1468 
1469 	pcibios_disable_device(dev);
1470 }
1471 
1472 /**
1473  * pci_disable_enabled_device - Disable device without updating enable_cnt
1474  * @dev: PCI device to disable
1475  *
1476  * NOTE: This function is a backend of PCI power management routines and is
1477  * not supposed to be called by drivers.
1478  */
1479 void pci_disable_enabled_device(struct pci_dev *dev)
1480 {
1481 	if (pci_is_enabled(dev))
1482 		do_pci_disable_device(dev);
1483 }
1484 
1485 /**
1486  * pci_disable_device - Disable PCI device after use
1487  * @dev: PCI device to be disabled
1488  *
1489  * Signal to the system that the PCI device is not in use by the system
1490  * anymore.  This only involves disabling PCI bus-mastering, if active.
1491  *
1492  * Note we don't actually disable the device until all callers of
1493  * pci_enable_device() have called pci_disable_device().
1494  */
1495 void
1496 pci_disable_device(struct pci_dev *dev)
1497 {
1498 	struct pci_devres *dr;
1499 
1500 	dr = find_pci_dr(dev);
1501 	if (dr)
1502 		dr->enabled = 0;
1503 
1504 	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
1505 		      "disabling already-disabled device");
1506 
1507 	if (atomic_dec_return(&dev->enable_cnt) != 0)
1508 		return;
1509 
1510 	do_pci_disable_device(dev);
1511 
1512 	dev->is_busmaster = 0;
1513 }
1514 
1515 /**
1516  * pcibios_set_pcie_reset_state - set reset state for device dev
1517  * @dev: the PCIe device to reset
1518  * @state: Reset state to enter into
1519  *
1520  *
1521  * Sets the PCIe reset state for the device. This is the default
1522  * implementation. Architecture implementations can override this.
1523  */
1524 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
1525 					enum pcie_reset_state state)
1526 {
1527 	return -EINVAL;
1528 }
1529 
1530 /**
1531  * pci_set_pcie_reset_state - set reset state for device dev
1532  * @dev: the PCIe device to reset
1533  * @state: Reset state to enter into
1534  *
1535  *
1536  * Sets the PCI reset state for the device.
1537  */
1538 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1539 {
1540 	return pcibios_set_pcie_reset_state(dev, state);
1541 }
1542 
1543 /**
1544  * pci_check_pme_status - Check if given device has generated PME.
1545  * @dev: Device to check.
1546  *
1547  * Check the PME status of the device and if set, clear it and clear PME enable
1548  * (if set).  Return 'true' if PME status and PME enable were both set or
1549  * 'false' otherwise.
1550  */
1551 bool pci_check_pme_status(struct pci_dev *dev)
1552 {
1553 	int pmcsr_pos;
1554 	u16 pmcsr;
1555 	bool ret = false;
1556 
1557 	if (!dev->pm_cap)
1558 		return false;
1559 
1560 	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1561 	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1562 	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1563 		return false;
1564 
1565 	/* Clear PME status. */
1566 	pmcsr |= PCI_PM_CTRL_PME_STATUS;
1567 	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1568 		/* Disable PME to avoid interrupt flood. */
1569 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1570 		ret = true;
1571 	}
1572 
1573 	pci_write_config_word(dev, pmcsr_pos, pmcsr);
1574 
1575 	return ret;
1576 }
1577 
1578 /**
1579  * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1580  * @dev: Device to handle.
1581  * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
1582  *
1583  * Check if @dev has generated PME and queue a resume request for it in that
1584  * case.
1585  */
1586 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
1587 {
1588 	if (pme_poll_reset && dev->pme_poll)
1589 		dev->pme_poll = false;
1590 
1591 	if (pci_check_pme_status(dev)) {
1592 		pci_wakeup_event(dev);
1593 		pm_request_resume(&dev->dev);
1594 	}
1595 	return 0;
1596 }
1597 
1598 /**
1599  * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1600  * @bus: Top bus of the subtree to walk.
1601  */
1602 void pci_pme_wakeup_bus(struct pci_bus *bus)
1603 {
1604 	if (bus)
1605 		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
1606 }
1607 
1608 
1609 /**
1610  * pci_pme_capable - check the capability of PCI device to generate PME#
1611  * @dev: PCI device to handle.
1612  * @state: PCI state from which device will issue PME#.
1613  */
1614 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1615 {
1616 	if (!dev->pm_cap)
1617 		return false;
1618 
1619 	return !!(dev->pme_support & (1 << state));
1620 }
1621 
1622 static void pci_pme_list_scan(struct work_struct *work)
1623 {
1624 	struct pci_pme_device *pme_dev, *n;
1625 
1626 	mutex_lock(&pci_pme_list_mutex);
1627 	if (!list_empty(&pci_pme_list)) {
1628 		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1629 			if (pme_dev->dev->pme_poll) {
1630 				struct pci_dev *bridge;
1631 
1632 				bridge = pme_dev->dev->bus->self;
1633 				/*
1634 				 * If bridge is in low power state, the
1635 				 * configuration space of subordinate devices
1636 				 * may be not accessible
1637 				 * may not be accessible
1638 				if (bridge && bridge->current_state != PCI_D0)
1639 					continue;
1640 				pci_pme_wakeup(pme_dev->dev, NULL);
1641 			} else {
1642 				list_del(&pme_dev->list);
1643 				kfree(pme_dev);
1644 			}
1645 		}
1646 		if (!list_empty(&pci_pme_list))
1647 			schedule_delayed_work(&pci_pme_work,
1648 					      msecs_to_jiffies(PME_TIMEOUT));
1649 	}
1650 	mutex_unlock(&pci_pme_list_mutex);
1651 }
1652 
1653 /**
1654  * pci_pme_active - enable or disable PCI device's PME# function
1655  * @dev: PCI device to handle.
1656  * @enable: 'true' to enable PME# generation; 'false' to disable it.
1657  *
1658  * The caller must verify that the device is capable of generating PME# before
1659  * calling this function with @enable equal to 'true'.
1660  */
1661 void pci_pme_active(struct pci_dev *dev, bool enable)
1662 {
1663 	u16 pmcsr;
1664 
1665 	if (!dev->pme_support)
1666 		return;
1667 
1668 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1669 	/* Clear PME_Status by writing 1 to it and enable PME# */
1670 	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1671 	if (!enable)
1672 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1673 
1674 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1675 
1676 	/*
1677 	 * PCI (as opposed to PCIe) PME requires that the device have
1678 	 * its PME# line hooked up correctly. Not all hardware vendors
1679 	 * do this, so the PME never gets delivered and the device
1680 	 * remains asleep. The easiest way around this is to
1681 	 * periodically walk the list of suspended devices and check
1682 	 * whether any have their PME flag set. The assumption is that
1683 	 * we'll wake up often enough anyway that this won't be a huge
1684 	 * hit, and the power savings from the devices will still be a
1685 	 * win.
1686 	 *
1687 	 * Although PCIe uses in-band PME messages instead of the PME# line
1688 	 * to report PME, PME does not work for some PCIe devices in
1689 	 * reality.  For example, there are devices that set their PME
1690 	 * status bits, but don't really bother to send a PME message;
1691 	 * there are PCI Express Root Ports that don't bother to
1692 	 * trigger interrupts when they receive PME messages from the
1693 	 * devices below.  So PME poll is used for PCIe devices too.
1694 	 */
1695 
1696 	if (dev->pme_poll) {
1697 		struct pci_pme_device *pme_dev;
1698 		if (enable) {
1699 			pme_dev = kmalloc(sizeof(struct pci_pme_device),
1700 					  GFP_KERNEL);
1701 			if (!pme_dev) {
1702 				dev_warn(&dev->dev, "can't enable PME#\n");
1703 				return;
1704 			}
1705 			pme_dev->dev = dev;
1706 			mutex_lock(&pci_pme_list_mutex);
1707 			list_add(&pme_dev->list, &pci_pme_list);
1708 			if (list_is_singular(&pci_pme_list))
1709 				schedule_delayed_work(&pci_pme_work,
1710 						      msecs_to_jiffies(PME_TIMEOUT));
1711 			mutex_unlock(&pci_pme_list_mutex);
1712 		} else {
1713 			mutex_lock(&pci_pme_list_mutex);
1714 			list_for_each_entry(pme_dev, &pci_pme_list, list) {
1715 				if (pme_dev->dev == dev) {
1716 					list_del(&pme_dev->list);
1717 					kfree(pme_dev);
1718 					break;
1719 				}
1720 			}
1721 			mutex_unlock(&pci_pme_list_mutex);
1722 		}
1723 	}
1724 
1725 	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
1726 }
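
/*
 * Illustrative sketch (not part of the original file): a suspend path that
 * wants the device to signal PME# from D3hot first checks the capability,
 * then arms PME generation.
 */
static void __maybe_unused example_arm_pme(struct pci_dev *pdev)
{
	if (pci_pme_capable(pdev, PCI_D3hot))
		pci_pme_active(pdev, true);
}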
1727 
1728 /**
1729  * __pci_enable_wake - enable PCI device as wakeup event source
1730  * @dev: PCI device affected
1731  * @state: PCI state from which device will issue wakeup events
1732  * @runtime: True if the events are to be generated at run time
1733  * @enable: True to enable event generation; false to disable
1734  *
1735  * This enables the device as a wakeup event source, or disables it.
1736  * When such events involve platform-specific hooks, those hooks are
1737  * called automatically by this routine.
1738  *
1739  * Devices with legacy power management (no standard PCI PM capabilities)
1740  * always require such platform hooks.
1741  *
1742  * RETURN VALUE:
1743  * 0 is returned on success
1744  * -EINVAL is returned if device is not supposed to wake up the system
1745  * Error code depending on the platform is returned if both the platform and
1746  * the native mechanism fail to enable the generation of wake-up events
1747  */
1748 int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1749 		      bool runtime, bool enable)
1750 {
1751 	int ret = 0;
1752 
1753 	if (enable && !runtime && !device_may_wakeup(&dev->dev))
1754 		return -EINVAL;
1755 
1756 	/* Don't do the same thing twice in a row for one device. */
1757 	if (!!enable == !!dev->wakeup_prepared)
1758 		return 0;
1759 
1760 	/*
1761 	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1762 	 * Anderson we should be doing PME# wake enable followed by ACPI wake
1763 	 * enable.  To disable wake-up we call the platform first, for symmetry.
1764 	 */
1765 
1766 	if (enable) {
1767 		int error;
1768 
1769 		if (pci_pme_capable(dev, state))
1770 			pci_pme_active(dev, true);
1771 		else
1772 			ret = 1;
1773 		error = runtime ? platform_pci_run_wake(dev, true) :
1774 					platform_pci_sleep_wake(dev, true);
1775 		if (ret)
1776 			ret = error;
1777 		if (!ret)
1778 			dev->wakeup_prepared = true;
1779 	} else {
1780 		if (runtime)
1781 			platform_pci_run_wake(dev, false);
1782 		else
1783 			platform_pci_sleep_wake(dev, false);
1784 		pci_pme_active(dev, false);
1785 		dev->wakeup_prepared = false;
1786 	}
1787 
1788 	return ret;
1789 }
1790 EXPORT_SYMBOL(__pci_enable_wake);
1791 
1792 /**
1793  * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1794  * @dev: PCI device to prepare
1795  * @enable: True to enable wake-up event generation; false to disable
1796  *
1797  * Many drivers want the device to wake up the system from D3_hot or D3_cold
1798  * and this function allows them to set that up cleanly - pci_enable_wake()
1799  * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1800  * ordering constraints.
1801  *
1802  * This function only returns error code if the device is not capable of
1803  * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1804  * enable wake-up power for it.
1805  */
1806 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1807 {
1808 	return pci_pme_capable(dev, PCI_D3cold) ?
1809 			pci_enable_wake(dev, PCI_D3cold, enable) :
1810 			pci_enable_wake(dev, PCI_D3hot, enable);
1811 }
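
/*
 * Illustrative sketch only (not part of the PCI core): a hypothetical
 * network driver with Wake-on-LAN support might arm wake-up from D3 in its
 * legacy suspend hook roughly as below; the foo_*() helpers are assumptions.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		foo_stop_hw(pdev);
 *		pci_save_state(pdev);
 *		pci_wake_from_d3(pdev, foo_wol_enabled(pdev));
 *		pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *		return 0;
 *	}
 */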
1812 
1813 /**
1814  * pci_target_state - find an appropriate low power state for a given PCI dev
1815  * @dev: PCI device
1816  *
1817  * Use underlying platform code to find a supported low power state for @dev.
1818  * If the platform can't manage @dev, return the deepest state from which it
1819  * can generate wake events, based on any available PME info.
1820  */
1821 static pci_power_t pci_target_state(struct pci_dev *dev)
1822 {
1823 	pci_power_t target_state = PCI_D3hot;
1824 
1825 	if (platform_pci_power_manageable(dev)) {
1826 		/*
1827 		 * Call the platform to choose the target state of the device
1828 		 * and enable wake-up from this state if supported.
1829 		 */
1830 		pci_power_t state = platform_pci_choose_state(dev);
1831 
1832 		switch (state) {
1833 		case PCI_POWER_ERROR:
1834 		case PCI_UNKNOWN:
1835 			break;
1836 		case PCI_D1:
1837 		case PCI_D2:
1838 			if (pci_no_d1d2(dev))
1839 				break;
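			/* fall through - use the state chosen by the platform */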
1840 		default:
1841 			target_state = state;
1842 		}
1843 	} else if (!dev->pm_cap) {
1844 		target_state = PCI_D0;
1845 	} else if (device_may_wakeup(&dev->dev)) {
1846 		/*
1847 		 * Find the deepest state from which the device can generate
1848 		 * wake-up events, make it the target state and enable device
1849 		 * to generate PME#.
1850 		 */
1851 		if (dev->pme_support) {
1852 			while (target_state
1853 			      && !(dev->pme_support & (1 << target_state)))
1854 				target_state--;
1855 		}
1856 	}
1857 
1858 	return target_state;
1859 }
1860 
1861 /**
1862  * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1863  * @dev: Device to handle.
1864  *
1865  * Choose the power state appropriate for the device depending on whether
1866  * it can wake up the system and/or is power manageable by the platform
1867  * (PCI_D3hot is the default) and put the device into that state.
1868  */
1869 int pci_prepare_to_sleep(struct pci_dev *dev)
1870 {
1871 	pci_power_t target_state = pci_target_state(dev);
1872 	int error;
1873 
1874 	if (target_state == PCI_POWER_ERROR)
1875 		return -EIO;
1876 
1877 	/* D3cold during system suspend/hibernate is not supported */
1878 	if (target_state > PCI_D3hot)
1879 		target_state = PCI_D3hot;
1880 
1881 	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1882 
1883 	error = pci_set_power_state(dev, target_state);
1884 
1885 	if (error)
1886 		pci_enable_wake(dev, target_state, false);
1887 
1888 	return error;
1889 }
1890 
1891 /**
1892  * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
1893  * @dev: Device to handle.
1894  *
1895  * Disable device's system wake-up capability and put it into D0.
1896  */
1897 int pci_back_from_sleep(struct pci_dev *dev)
1898 {
1899 	pci_enable_wake(dev, PCI_D0, false);
1900 	return pci_set_power_state(dev, PCI_D0);
1901 }
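
/*
 * Minimal sketch, assuming a driver that owns its own legacy suspend/resume
 * transitions (the foo_*() helpers are hypothetical): the two calls above
 * are meant to be used as a pair so that wake-up setup and the power-state
 * choice stay consistent.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		return pci_prepare_to_sleep(pdev);
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_back_from_sleep(pdev);
 *		pci_restore_state(pdev);
 *		return foo_start_hw(pdev);
 *	}
 */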
1902 
1903 /**
1904  * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1905  * @dev: PCI device being suspended.
1906  *
1907  * Prepare @dev to generate wake-up events at run time and put it into a low
1908  * power state.
1909  */
1910 int pci_finish_runtime_suspend(struct pci_dev *dev)
1911 {
1912 	pci_power_t target_state = pci_target_state(dev);
1913 	int error;
1914 
1915 	if (target_state == PCI_POWER_ERROR)
1916 		return -EIO;
1917 
1918 	dev->runtime_d3cold = target_state == PCI_D3cold;
1919 
1920 	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1921 
1922 	error = pci_set_power_state(dev, target_state);
1923 
1924 	if (error) {
1925 		__pci_enable_wake(dev, target_state, true, false);
1926 		dev->runtime_d3cold = false;
1927 	}
1928 
1929 	return error;
1930 }
1931 
1932 /**
1933  * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1934  * @dev: Device to check.
1935  *
1936  * Return true if the device itself is capable of generating wake-up events
1937  * (through the platform or using the native PCIe PME) or if the device supports
1938  * PME and one of its upstream bridges can generate wake-up events.
1939  */
1940 bool pci_dev_run_wake(struct pci_dev *dev)
1941 {
1942 	struct pci_bus *bus = dev->bus;
1943 
1944 	if (device_run_wake(&dev->dev))
1945 		return true;
1946 
1947 	if (!dev->pme_support)
1948 		return false;
1949 
1950 	while (bus->parent) {
1951 		struct pci_dev *bridge = bus->self;
1952 
1953 		if (device_run_wake(&bridge->dev))
1954 			return true;
1955 
1956 		bus = bus->parent;
1957 	}
1958 
1959 	/* We have reached the root bus. */
1960 	if (bus->bridge)
1961 		return device_run_wake(bus->bridge);
1962 
1963 	return false;
1964 }
1965 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1966 
1967 void pci_config_pm_runtime_get(struct pci_dev *pdev)
1968 {
1969 	struct device *dev = &pdev->dev;
1970 	struct device *parent = dev->parent;
1971 
1972 	if (parent)
1973 		pm_runtime_get_sync(parent);
1974 	pm_runtime_get_noresume(dev);
1975 	/*
1976 	 * pdev->current_state is set to PCI_D3cold during suspending,
1977 	 * so wait until suspending completes
1978 	 */
1979 	pm_runtime_barrier(dev);
1980 	/*
1981 	 * Only need to resume devices in D3cold, because config
1982 	 * registers are still accessible for devices suspended but
1983 	 * not in D3cold.
1984 	 */
1985 	if (pdev->current_state == PCI_D3cold)
1986 		pm_runtime_resume(dev);
1987 }
1988 
1989 void pci_config_pm_runtime_put(struct pci_dev *pdev)
1990 {
1991 	struct device *dev = &pdev->dev;
1992 	struct device *parent = dev->parent;
1993 
1994 	pm_runtime_put(dev);
1995 	if (parent)
1996 		pm_runtime_put_sync(parent);
1997 }
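
/*
 * Illustrative use of the get/put pair above, e.g. from a sysfs-style config
 * access path ("where" and "val" are placeholders): bracket the raw access
 * so a device that was runtime-suspended into D3cold is resumed first.
 *
 *	pci_config_pm_runtime_get(pdev);
 *	pci_read_config_dword(pdev, where, &val);
 *	pci_config_pm_runtime_put(pdev);
 */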
1998 
1999 /**
2000  * pci_pm_init - Initialize PM functions of given PCI device
2001  * @dev: PCI device to handle.
2002  */
2003 void pci_pm_init(struct pci_dev *dev)
2004 {
2005 	int pm;
2006 	u16 pmc;
2007 
2008 	pm_runtime_forbid(&dev->dev);
2009 	pm_runtime_set_active(&dev->dev);
2010 	pm_runtime_enable(&dev->dev);
2011 	device_enable_async_suspend(&dev->dev);
2012 	dev->wakeup_prepared = false;
2013 
2014 	dev->pm_cap = 0;
2015 	dev->pme_support = 0;
2016 
2017 	/* find PCI PM capability in list */
2018 	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
2019 	if (!pm)
2020 		return;
2021 	/* Check device's ability to generate PME# */
2022 	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
2023 
2024 	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
2025 		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
2026 			pmc & PCI_PM_CAP_VER_MASK);
2027 		return;
2028 	}
2029 
2030 	dev->pm_cap = pm;
2031 	dev->d3_delay = PCI_PM_D3_WAIT;
2032 	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
2033 	dev->d3cold_allowed = true;
2034 
2035 	dev->d1_support = false;
2036 	dev->d2_support = false;
2037 	if (!pci_no_d1d2(dev)) {
2038 		if (pmc & PCI_PM_CAP_D1)
2039 			dev->d1_support = true;
2040 		if (pmc & PCI_PM_CAP_D2)
2041 			dev->d2_support = true;
2042 
2043 		if (dev->d1_support || dev->d2_support)
2044 			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
2045 				   dev->d1_support ? " D1" : "",
2046 				   dev->d2_support ? " D2" : "");
2047 	}
2048 
2049 	pmc &= PCI_PM_CAP_PME_MASK;
2050 	if (pmc) {
2051 		dev_printk(KERN_DEBUG, &dev->dev,
2052 			 "PME# supported from%s%s%s%s%s\n",
2053 			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
2054 			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
2055 			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
2056 			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
2057 			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
2058 		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
2059 		dev->pme_poll = true;
2060 		/*
2061 		 * Make device's PM flags reflect the wake-up capability, but
2062 		 * let the user space enable it to wake up the system as needed.
2063 		 */
2064 		device_set_wakeup_capable(&dev->dev, true);
2065 		/* Disable the PME# generation functionality */
2066 		pci_pme_active(dev, false);
2067 	}
2068 }
2069 
2070 static void pci_add_saved_cap(struct pci_dev *pci_dev,
2071 	struct pci_cap_saved_state *new_cap)
2072 {
2073 	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
2074 }
2075 
2076 /**
2077  * _pci_add_cap_save_buffer - allocate buffer for saving given
2078  *                            capability registers
2079  * @dev: the PCI device
2080  * @cap: the capability to allocate the buffer for
2081  * @extended: Standard or Extended capability ID
2082  * @size: requested size of the buffer
2083  */
2084 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
2085 				    bool extended, unsigned int size)
2086 {
2087 	int pos;
2088 	struct pci_cap_saved_state *save_state;
2089 
2090 	if (extended)
2091 		pos = pci_find_ext_capability(dev, cap);
2092 	else
2093 		pos = pci_find_capability(dev, cap);
2094 
2095 	if (pos <= 0)
2096 		return 0;
2097 
2098 	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
2099 	if (!save_state)
2100 		return -ENOMEM;
2101 
2102 	save_state->cap.cap_nr = cap;
2103 	save_state->cap.cap_extended = extended;
2104 	save_state->cap.size = size;
2105 	pci_add_saved_cap(dev, save_state);
2106 
2107 	return 0;
2108 }
2109 
2110 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
2111 {
2112 	return _pci_add_cap_save_buffer(dev, cap, false, size);
2113 }
2114 
2115 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
2116 {
2117 	return _pci_add_cap_save_buffer(dev, cap, true, size);
2118 }
2119 
2120 /**
2121  * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
2122  * @dev: the PCI device
2123  */
2124 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2125 {
2126 	int error;
2127 
2128 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
2129 					PCI_EXP_SAVE_REGS * sizeof(u16));
2130 	if (error)
2131 		dev_err(&dev->dev,
2132 			"unable to preallocate PCI Express save buffer\n");
2133 
2134 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
2135 	if (error)
2136 		dev_err(&dev->dev,
2137 			"unable to preallocate PCI-X save buffer\n");
2138 
2139 	pci_allocate_vc_save_buffers(dev);
2140 }
2141 
2142 void pci_free_cap_save_buffers(struct pci_dev *dev)
2143 {
2144 	struct pci_cap_saved_state *tmp;
2145 	struct hlist_node *n;
2146 
2147 	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
2148 		kfree(tmp);
2149 }
2150 
2151 /**
2152  * pci_configure_ari - enable or disable ARI forwarding
2153  * @dev: the PCI device
2154  *
2155  * If @dev and its upstream bridge both support ARI, enable ARI in the
2156  * bridge.  Otherwise, disable ARI in the bridge.
2157  */
2158 void pci_configure_ari(struct pci_dev *dev)
2159 {
2160 	u32 cap;
2161 	struct pci_dev *bridge;
2162 
2163 	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
2164 		return;
2165 
2166 	bridge = dev->bus->self;
2167 	if (!bridge)
2168 		return;
2169 
2170 	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
2171 	if (!(cap & PCI_EXP_DEVCAP2_ARI))
2172 		return;
2173 
2174 	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
2175 		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
2176 					 PCI_EXP_DEVCTL2_ARI);
2177 		bridge->ari_enabled = 1;
2178 	} else {
2179 		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
2180 					   PCI_EXP_DEVCTL2_ARI);
2181 		bridge->ari_enabled = 0;
2182 	}
2183 }
2184 
2185 static int pci_acs_enable;
2186 
2187 /**
2188  * pci_request_acs - ask for ACS to be enabled if supported
2189  */
2190 void pci_request_acs(void)
2191 {
2192 	pci_acs_enable = 1;
2193 }
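
/*
 * Illustrative note: an IOMMU driver that relies on ACS for isolation is
 * expected to call pci_request_acs() early, before the devices it cares
 * about are scanned and added, so that pci_enable_acs() below actually
 * programs the ACS control bits.  A minimal, hypothetical init sketch:
 *
 *	static int __init foo_iommu_init(void)
 *	{
 *		pci_request_acs();
 *		return foo_iommu_probe_hw();
 *	}
 */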
2194 
2195 /**
2196  * pci_enable_acs - enable ACS if hardware supports it
2197  * @dev: the PCI device
2198  */
2199 void pci_enable_acs(struct pci_dev *dev)
2200 {
2201 	int pos;
2202 	u16 cap;
2203 	u16 ctrl;
2204 
2205 	if (!pci_acs_enable)
2206 		return;
2207 
2208 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2209 	if (!pos)
2210 		return;
2211 
2212 	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2213 	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2214 
2215 	/* Source Validation */
2216 	ctrl |= (cap & PCI_ACS_SV);
2217 
2218 	/* P2P Request Redirect */
2219 	ctrl |= (cap & PCI_ACS_RR);
2220 
2221 	/* P2P Completion Redirect */
2222 	ctrl |= (cap & PCI_ACS_CR);
2223 
2224 	/* Upstream Forwarding */
2225 	ctrl |= (cap & PCI_ACS_UF);
2226 
2227 	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2228 }
2229 
2230 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
2231 {
2232 	int pos;
2233 	u16 cap, ctrl;
2234 
2235 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2236 	if (!pos)
2237 		return false;
2238 
2239 	/*
2240 	 * Except for egress control, capabilities are either required
2241 	 * or only required if controllable.  Features missing from the
2242 	 * capability field can therefore be assumed as hard-wired enabled.
2243 	 */
2244 	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
2245 	acs_flags &= (cap | PCI_ACS_EC);
2246 
2247 	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2248 	return (ctrl & acs_flags) == acs_flags;
2249 }
2250 
2251 /**
2252  * pci_acs_enabled - test ACS against required flags for a given device
2253  * @pdev: device to test
2254  * @acs_flags: required PCI ACS flags
2255  *
2256  * Return true if the device supports the provided flags.  Automatically
2257  * filters out flags that are not implemented on multifunction devices.
2258  *
2259  * Note that this interface checks the effective ACS capabilities of the
2260  * device rather than the actual capabilities.  For instance, most single
2261  * function endpoints are not required to support ACS because they have no
2262  * opportunity for peer-to-peer access.  We therefore return 'true'
2263  * regardless of whether the device exposes an ACS capability.  This makes
2264  * it much easier for callers of this function to ignore the actual type
2265  * or topology of the device when testing ACS support.
2266  */
2267 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2268 {
2269 	int ret;
2270 
2271 	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2272 	if (ret >= 0)
2273 		return ret > 0;
2274 
2275 	/*
2276 	 * Conventional PCI and PCI-X devices never support ACS, either
2277 	 * effectively or actually.  The shared bus topology implies that
2278 	 * any device on the bus can receive or snoop DMA.
2279 	 */
2280 	if (!pci_is_pcie(pdev))
2281 		return false;
2282 
2283 	switch (pci_pcie_type(pdev)) {
2284 	/*
2285 	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
2286 	 * but since their primary interface is PCI/X, we conservatively
2287 	 * handle them as we would a non-PCIe device.
2288 	 */
2289 	case PCI_EXP_TYPE_PCIE_BRIDGE:
2290 	/*
2291 	 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
2292 	 * applicable... must never implement an ACS Extended Capability...".
2293 	 * This seems arbitrary, but we take a conservative interpretation
2294 	 * of this statement.
2295 	 */
2296 	case PCI_EXP_TYPE_PCI_BRIDGE:
2297 	case PCI_EXP_TYPE_RC_EC:
2298 		return false;
2299 	/*
2300 	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
2301 	 * implement ACS in order to indicate their peer-to-peer capabilities,
2302 	 * regardless of whether they are single- or multi-function devices.
2303 	 */
2304 	case PCI_EXP_TYPE_DOWNSTREAM:
2305 	case PCI_EXP_TYPE_ROOT_PORT:
2306 		return pci_acs_flags_enabled(pdev, acs_flags);
2307 	/*
2308 	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
2309 	 * implemented by the remaining PCIe types to indicate peer-to-peer
2310 	 * capabilities, but only when they are part of a multifunction
2311 	 * device.  The footnote for section 6.12 indicates the specific
2312 	 * PCIe types included here.
2313 	 */
2314 	case PCI_EXP_TYPE_ENDPOINT:
2315 	case PCI_EXP_TYPE_UPSTREAM:
2316 	case PCI_EXP_TYPE_LEG_END:
2317 	case PCI_EXP_TYPE_RC_END:
2318 		if (!pdev->multifunction)
2319 			break;
2320 
2321 		return pci_acs_flags_enabled(pdev, acs_flags);
2322 	}
2323 
2324 	/*
2325 	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
2326 	 * to single function devices with the exception of downstream ports.
2327 	 */
2328 	return true;
2329 }
2330 
2331 /**
2332  * pci_acs_path_enable - test ACS flags from start to end in a hierarchy
2333  * @start: starting downstream device
2334  * @end: ending upstream device or NULL to search to the root bus
2335  * @acs_flags: required flags
2336  *
2337  * Walk up a device tree from start to end testing PCI ACS support.  If
2338  * any step along the way does not support the required flags, return false.
2339  */
2340 bool pci_acs_path_enabled(struct pci_dev *start,
2341 			  struct pci_dev *end, u16 acs_flags)
2342 {
2343 	struct pci_dev *pdev, *parent = start;
2344 
2345 	do {
2346 		pdev = parent;
2347 
2348 		if (!pci_acs_enabled(pdev, acs_flags))
2349 			return false;
2350 
2351 		if (pci_is_root_bus(pdev->bus))
2352 			return (end == NULL);
2353 
2354 		parent = pdev->bus->self;
2355 	} while (pdev != end);
2356 
2357 	return true;
2358 }
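
/*
 * Illustrative sketch: code that decides whether devices can be isolated
 * from each other (in the spirit of IOMMU grouping) could require that the
 * whole path up to the root redirects peer-to-peer requests and completions.
 * foo_share_iommu_group() is a hypothetical helper.
 *
 *	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;
 *
 *	if (!pci_acs_path_enabled(pdev, NULL, flags))
 *		foo_share_iommu_group(pdev);
 */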
2359 
2360 /**
2361  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2362  * @dev: the PCI device
2363  * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2364  *
2365  * Perform INTx swizzling for a device behind one level of bridge.  This is
2366  * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2367  * behind bridges on add-in cards.  For devices with ARI enabled, the slot
2368  * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2369  * the PCI Express Base Specification, Revision 2.1)
2370  */
2371 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
2372 {
2373 	int slot;
2374 
2375 	if (pci_ari_enabled(dev->bus))
2376 		slot = 0;
2377 	else
2378 		slot = PCI_SLOT(dev->devfn);
2379 
2380 	return (((pin - 1) + slot) % 4) + 1;
2381 }
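
/*
 * Worked example: a device in slot 3 behind a single bridge, wired to INTB
 * (pin == 2), is seen on the bridge's primary side as
 * (((2 - 1) + 3) % 4) + 1 == 1, i.e. INTA.
 */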
2382 
2383 int
2384 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2385 {
2386 	u8 pin;
2387 
2388 	pin = dev->pin;
2389 	if (!pin)
2390 		return -1;
2391 
2392 	while (!pci_is_root_bus(dev->bus)) {
2393 		pin = pci_swizzle_interrupt_pin(dev, pin);
2394 		dev = dev->bus->self;
2395 	}
2396 	*bridge = dev;
2397 	return pin;
2398 }
2399 
2400 /**
2401  * pci_common_swizzle - swizzle INTx all the way to root bridge
2402  * @dev: the PCI device
2403  * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2404  *
2405  * Perform INTx swizzling for a device.  This traverses all PCI-to-PCI
2406  * bridges all the way up to a PCI root bus.
2407  */
2408 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2409 {
2410 	u8 pin = *pinp;
2411 
2412 	while (!pci_is_root_bus(dev->bus)) {
2413 		pin = pci_swizzle_interrupt_pin(dev, pin);
2414 		dev = dev->bus->self;
2415 	}
2416 	*pinp = pin;
2417 	return PCI_SLOT(dev->devfn);
2418 }
2419 
2420 /**
2421  *	pci_release_region - Release a PCI BAR
2422  *	@pdev: PCI device whose resources were previously reserved by pci_request_region
2423  *	@bar: BAR to release
2424  *
2425  *	Releases the PCI I/O and memory resources previously reserved by a
2426  *	successful call to pci_request_region.  Call this function only
2427  *	after all use of the PCI regions has ceased.
2428  */
2429 void pci_release_region(struct pci_dev *pdev, int bar)
2430 {
2431 	struct pci_devres *dr;
2432 
2433 	if (pci_resource_len(pdev, bar) == 0)
2434 		return;
2435 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2436 		release_region(pci_resource_start(pdev, bar),
2437 				pci_resource_len(pdev, bar));
2438 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2439 		release_mem_region(pci_resource_start(pdev, bar),
2440 				pci_resource_len(pdev, bar));
2441 
2442 	dr = find_pci_dr(pdev);
2443 	if (dr)
2444 		dr->region_mask &= ~(1 << bar);
2445 }
2446 
2447 /**
2448  *	__pci_request_region - Reserve PCI I/O and memory resource
2449  *	@pdev: PCI device whose resources are to be reserved
2450  *	@bar: BAR to be reserved
2451  *	@res_name: Name to be associated with resource.
2452  *	@exclusive: whether the region access is exclusive or not
2453  *
2454  *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2455  *	being reserved by owner @res_name.  Do not access any
2456  *	address inside the PCI regions unless this call returns
2457  *	successfully.
2458  *
2459  *	If @exclusive is set, then the region is marked so that userspace
2460  *	is explicitly not allowed to map the resource via /dev/mem or
2461  *	sysfs MMIO access.
2462  *
2463  *	Returns 0 on success, or %EBUSY on error.  A warning
2464  *	message is also printed on failure.
2465  */
2466 static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2467 									int exclusive)
2468 {
2469 	struct pci_devres *dr;
2470 
2471 	if (pci_resource_len(pdev, bar) == 0)
2472 		return 0;
2473 
2474 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2475 		if (!request_region(pci_resource_start(pdev, bar),
2476 			    pci_resource_len(pdev, bar), res_name))
2477 			goto err_out;
2478 	}
2479 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
2480 		if (!__request_mem_region(pci_resource_start(pdev, bar),
2481 					pci_resource_len(pdev, bar), res_name,
2482 					exclusive))
2483 			goto err_out;
2484 	}
2485 
2486 	dr = find_pci_dr(pdev);
2487 	if (dr)
2488 		dr->region_mask |= 1 << bar;
2489 
2490 	return 0;
2491 
2492 err_out:
2493 	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
2494 		 &pdev->resource[bar]);
2495 	return -EBUSY;
2496 }
2497 
2498 /**
2499  *	pci_request_region - Reserve PCI I/O and memory resource
2500  *	@pdev: PCI device whose resources are to be reserved
2501  *	@bar: BAR to be reserved
2502  *	@res_name: Name to be associated with resource
2503  *
2504  *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2505  *	being reserved by owner @res_name.  Do not access any
2506  *	address inside the PCI regions unless this call returns
2507  *	successfully.
2508  *
2509  *	Returns 0 on success, or %EBUSY on error.  A warning
2510  *	message is also printed on failure.
2511  */
2512 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2513 {
2514 	return __pci_request_region(pdev, bar, res_name, 0);
2515 }
2516 
2517 /**
2518  *	pci_request_region_exclusive - Reserve PCI I/O and memory resource
2519  *	@pdev: PCI device whose resources are to be reserved
2520  *	@bar: BAR to be reserved
2521  *	@res_name: Name to be associated with resource.
2522  *
2523  *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2524  *	being reserved by owner @res_name.  Do not access any
2525  *	address inside the PCI regions unless this call returns
2526  *	successfully.
2527  *
2528  *	Returns 0 on success, or %EBUSY on error.  A warning
2529  *	message is also printed on failure.
2530  *
2531  *	The key difference that _exclusive makes is that userspace is
2532  *	explicitly not allowed to map the resource via /dev/mem or
2533  *	sysfs.
2534  */
2535 int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2536 {
2537 	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2538 }
2539 /**
2540  * pci_release_selected_regions - Release selected PCI I/O and memory resources
2541  * @pdev: PCI device whose resources were previously reserved
2542  * @bars: Bitmask of BARs to be released
2543  *
2544  * Release selected PCI I/O and memory resources previously reserved.
2545  * Call this function only after all use of the PCI regions has ceased.
2546  */
2547 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2548 {
2549 	int i;
2550 
2551 	for (i = 0; i < 6; i++)
2552 		if (bars & (1 << i))
2553 			pci_release_region(pdev, i);
2554 }
2555 
2556 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2557 				 const char *res_name, int excl)
2558 {
2559 	int i;
2560 
2561 	for (i = 0; i < 6; i++)
2562 		if (bars & (1 << i))
2563 			if (__pci_request_region(pdev, i, res_name, excl))
2564 				goto err_out;
2565 	return 0;
2566 
2567 err_out:
2568 	while (--i >= 0)
2569 		if (bars & (1 << i))
2570 			pci_release_region(pdev, i);
2571 
2572 	return -EBUSY;
2573 }
2574 
2575 
2576 /**
2577  * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2578  * @pdev: PCI device whose resources are to be reserved
2579  * @bars: Bitmask of BARs to be requested
2580  * @res_name: Name to be associated with resource
2581  */
2582 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2583 				 const char *res_name)
2584 {
2585 	return __pci_request_selected_regions(pdev, bars, res_name, 0);
2586 }
2587 
2588 int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2589 				 int bars, const char *res_name)
2590 {
2591 	return __pci_request_selected_regions(pdev, bars, res_name,
2592 			IORESOURCE_EXCLUSIVE);
2593 }
2594 
2595 /**
2596  *	pci_release_regions - Release reserved PCI I/O and memory resources
2597  *	@pdev: PCI device whose resources were previously reserved by pci_request_regions
2598  *
2599  *	Releases all PCI I/O and memory resources previously reserved by a
2600  *	successful call to pci_request_regions.  Call this function only
2601  *	after all use of the PCI regions has ceased.
2602  */
2603 
2604 void pci_release_regions(struct pci_dev *pdev)
2605 {
2606 	pci_release_selected_regions(pdev, (1 << 6) - 1);
2607 }
2608 
2609 /**
2610  *	pci_request_regions - Reserve PCI I/O and memory resources
2611  *	@pdev: PCI device whose resources are to be reserved
2612  *	@res_name: Name to be associated with resource.
2613  *
2614  *	Mark all PCI regions associated with PCI device @pdev as
2615  *	being reserved by owner @res_name.  Do not access any
2616  *	address inside the PCI regions unless this call returns
2617  *	successfully.
2618  *
2619  *	Returns 0 on success, or %EBUSY on error.  A warning
2620  *	message is also printed on failure.
2621  */
2622 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2623 {
2624 	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2625 }
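
/*
 * Illustrative probe-time pattern for a hypothetical driver "foo": enable
 * the device, claim all of its regions under the driver's name, and unwind
 * in reverse order on failure (and again in remove()).  Sketch only.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc;
 *
 *		rc = pci_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		rc = pci_request_regions(pdev, "foo");
 *		if (rc)
 *			goto err_disable;
 *
 *		return 0;
 *
 *	err_disable:
 *		pci_disable_device(pdev);
 *		return rc;
 *	}
 */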
2626 
2627 /**
2628  *	pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2629  *	@pdev: PCI device whose resources are to be reserved
2630  *	@res_name: Name to be associated with resource.
2631  *
2632  *	Mark all PCI regions associated with PCI device @pdev as
2633  *	being reserved by owner @res_name.  Do not access any
2634  *	address inside the PCI regions unless this call returns
2635  *	successfully.
2636  *
2637  *	pci_request_regions_exclusive() will mark the region so that
2638  *	/dev/mem and the sysfs MMIO access will not be allowed.
2639  *
2640  *	Returns 0 on success, or %EBUSY on error.  A warning
2641  *	message is also printed on failure.
2642  */
2643 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2644 {
2645 	return pci_request_selected_regions_exclusive(pdev,
2646 					((1 << 6) - 1), res_name);
2647 }
2648 
2649 static void __pci_set_master(struct pci_dev *dev, bool enable)
2650 {
2651 	u16 old_cmd, cmd;
2652 
2653 	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2654 	if (enable)
2655 		cmd = old_cmd | PCI_COMMAND_MASTER;
2656 	else
2657 		cmd = old_cmd & ~PCI_COMMAND_MASTER;
2658 	if (cmd != old_cmd) {
2659 		dev_dbg(&dev->dev, "%s bus mastering\n",
2660 			enable ? "enabling" : "disabling");
2661 		pci_write_config_word(dev, PCI_COMMAND, cmd);
2662 	}
2663 	dev->is_busmaster = enable;
2664 }
2665 
2666 /**
2667  * pcibios_setup - process "pci=" kernel boot arguments
2668  * @str: string used to pass in "pci=" kernel boot arguments
2669  *
2670  * Process kernel boot arguments.  This is the default implementation.
2671  * Architecture specific implementations can override this as necessary.
2672  */
2673 char * __weak __init pcibios_setup(char *str)
2674 {
2675 	return str;
2676 }
2677 
2678 /**
2679  * pcibios_set_master - enable PCI bus-mastering for device dev
2680  * @dev: the PCI device to enable
2681  *
2682  * Enables PCI bus-mastering for the device.  This is the default
2683  * implementation.  Architecture specific implementations can override
2684  * this if necessary.
2685  */
2686 void __weak pcibios_set_master(struct pci_dev *dev)
2687 {
2688 	u8 lat;
2689 
2690 	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2691 	if (pci_is_pcie(dev))
2692 		return;
2693 
2694 	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2695 	if (lat < 16)
2696 		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2697 	else if (lat > pcibios_max_latency)
2698 		lat = pcibios_max_latency;
2699 	else
2700 		return;
2701 
2702 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2703 }
2704 
2705 /**
2706  * pci_set_master - enables bus-mastering for device dev
2707  * @dev: the PCI device to enable
2708  *
2709  * Enables bus-mastering on the device and calls pcibios_set_master()
2710  * to do the needed arch specific settings.
2711  */
2712 void pci_set_master(struct pci_dev *dev)
2713 {
2714 	__pci_set_master(dev, true);
2715 	pcibios_set_master(dev);
2716 }
2717 
2718 /**
2719  * pci_clear_master - disables bus-mastering for device dev
2720  * @dev: the PCI device to disable
2721  */
2722 void pci_clear_master(struct pci_dev *dev)
2723 {
2724 	__pci_set_master(dev, false);
2725 }
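
/*
 * Illustrative ordering, assuming a DMA-capable driver with hypothetical
 * foo_*() helpers: enable bus mastering once the DMA resources exist, and
 * disable it again before tearing them down.
 *
 *	foo_setup_dma_rings(pdev);
 *	pci_set_master(pdev);
 *	...
 *	pci_clear_master(pdev);
 *	foo_free_dma_rings(pdev);
 */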
2726 
2727 /**
2728  * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2729  * @dev: the PCI device for which MWI is to be enabled
2730  *
2731  * Helper function for pci_set_mwi.
2732  * Originally copied from drivers/net/acenic.c.
2733  * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2734  *
2735  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2736  */
2737 int pci_set_cacheline_size(struct pci_dev *dev)
2738 {
2739 	u8 cacheline_size;
2740 
2741 	if (!pci_cache_line_size)
2742 		return -EINVAL;
2743 
2744 	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2745 	   equal to or a multiple of the correct value. */
2746 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2747 	if (cacheline_size >= pci_cache_line_size &&
2748 	    (cacheline_size % pci_cache_line_size) == 0)
2749 		return 0;
2750 
2751 	/* Write the correct value. */
2752 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2753 	/* Read it back. */
2754 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2755 	if (cacheline_size == pci_cache_line_size)
2756 		return 0;
2757 
2758 	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not supported\n",
2759 		   pci_cache_line_size << 2);
2760 
2761 	return -EINVAL;
2762 }
2763 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2764 
2765 #ifdef PCI_DISABLE_MWI
2766 int pci_set_mwi(struct pci_dev *dev)
2767 {
2768 	return 0;
2769 }
2770 
2771 int pci_try_set_mwi(struct pci_dev *dev)
2772 {
2773 	return 0;
2774 }
2775 
2776 void pci_clear_mwi(struct pci_dev *dev)
2777 {
2778 }
2779 
2780 #else
2781 
2782 /**
2783  * pci_set_mwi - enables memory-write-invalidate PCI transaction
2784  * @dev: the PCI device for which MWI is enabled
2785  *
2786  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2787  *
2788  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2789  */
2790 int
2791 pci_set_mwi(struct pci_dev *dev)
2792 {
2793 	int rc;
2794 	u16 cmd;
2795 
2796 	rc = pci_set_cacheline_size(dev);
2797 	if (rc)
2798 		return rc;
2799 
2800 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2801 	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
2802 		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2803 		cmd |= PCI_COMMAND_INVALIDATE;
2804 		pci_write_config_word(dev, PCI_COMMAND, cmd);
2805 	}
2806 
2807 	return 0;
2808 }
2809 
2810 /**
2811  * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2812  * @dev: the PCI device for which MWI is enabled
2813  *
2814  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2815  * Callers are not required to check the return value.
2816  *
2817  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2818  */
2819 int pci_try_set_mwi(struct pci_dev *dev)
2820 {
2821 	int rc = pci_set_mwi(dev);
2822 	return rc;
2823 }
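
/*
 * Illustrative use: a driver that merely prefers MWI (best effort, harmless
 * when unsupported) can call the _try variant during probe and ignore the
 * return value.
 *
 *	pci_try_set_mwi(pdev);
 */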
2824 
2825 /**
2826  * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2827  * @dev: the PCI device to disable
2828  *
2829  * Disables PCI Memory-Write-Invalidate transaction on the device
2830  */
2831 void
2832 pci_clear_mwi(struct pci_dev *dev)
2833 {
2834 	u16 cmd;
2835 
2836 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2837 	if (cmd & PCI_COMMAND_INVALIDATE) {
2838 		cmd &= ~PCI_COMMAND_INVALIDATE;
2839 		pci_write_config_word(dev, PCI_COMMAND, cmd);
2840 	}
2841 }
2842 #endif /* ! PCI_DISABLE_MWI */
2843 
2844 /**
2845  * pci_intx - enables/disables PCI INTx for device dev
2846  * @pdev: the PCI device to operate on
2847  * @enable: boolean: whether to enable or disable PCI INTx
2848  *
2849  * Enables/disables PCI INTx for device dev
2850  */
2851 void
2852 pci_intx(struct pci_dev *pdev, int enable)
2853 {
2854 	u16 pci_command, new;
2855 
2856 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2857 
2858 	if (enable) {
2859 		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2860 	} else {
2861 		new = pci_command | PCI_COMMAND_INTX_DISABLE;
2862 	}
2863 
2864 	if (new != pci_command) {
2865 		struct pci_devres *dr;
2866 
2867 		pci_write_config_word(pdev, PCI_COMMAND, new);
2868 
2869 		dr = find_pci_dr(pdev);
2870 		if (dr && !dr->restore_intx) {
2871 			dr->restore_intx = 1;
2872 			dr->orig_intx = !enable;
2873 		}
2874 	}
2875 }
2876 
2877 /**
2878  * pci_intx_mask_supported - probe for INTx masking support
2879  * @dev: the PCI device to operate on
2880  *
2881  * Check if the device dev support INTx masking via the config space
2882  * command word.
2883  */
2884 bool pci_intx_mask_supported(struct pci_dev *dev)
2885 {
2886 	bool mask_supported = false;
2887 	u16 orig, new;
2888 
2889 	if (dev->broken_intx_masking)
2890 		return false;
2891 
2892 	pci_cfg_access_lock(dev);
2893 
2894 	pci_read_config_word(dev, PCI_COMMAND, &orig);
2895 	pci_write_config_word(dev, PCI_COMMAND,
2896 			      orig ^ PCI_COMMAND_INTX_DISABLE);
2897 	pci_read_config_word(dev, PCI_COMMAND, &new);
2898 
2899 	/*
2900 	 * There's no way to protect against hardware bugs or detect them
2901 	 * reliably, but as long as we know what the value should be, let's
2902 	 * go ahead and check it.
2903 	 */
2904 	if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
2905 		dev_err(&dev->dev,
2906 			"Command register changed from 0x%x to 0x%x: driver or hardware bug?\n", orig, new);
2907 	} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
2908 		mask_supported = true;
2909 		pci_write_config_word(dev, PCI_COMMAND, orig);
2910 	}
2911 
2912 	pci_cfg_access_unlock(dev);
2913 	return mask_supported;
2914 }
2915 EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2916 
2917 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
2918 {
2919 	struct pci_bus *bus = dev->bus;
2920 	bool mask_updated = true;
2921 	u32 cmd_status_dword;
2922 	u16 origcmd, newcmd;
2923 	unsigned long flags;
2924 	bool irq_pending;
2925 
2926 	/*
2927 	 * We do a single dword read to retrieve both command and status.
2928 	 * Document assumptions that make this possible.
2929 	 */
2930 	BUILD_BUG_ON(PCI_COMMAND % 4);
2931 	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
2932 
2933 	raw_spin_lock_irqsave(&pci_lock, flags);
2934 
2935 	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
2936 
2937 	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
2938 
2939 	/*
2940 	 * Check interrupt status register to see whether our device
2941 	 * triggered the interrupt (when masking) or the next IRQ is
2942 	 * already pending (when unmasking).
2943 	 */
2944 	if (mask != irq_pending) {
2945 		mask_updated = false;
2946 		goto done;
2947 	}
2948 
2949 	origcmd = cmd_status_dword;
2950 	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
2951 	if (mask)
2952 		newcmd |= PCI_COMMAND_INTX_DISABLE;
2953 	if (newcmd != origcmd)
2954 		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
2955 
2956 done:
2957 	raw_spin_unlock_irqrestore(&pci_lock, flags);
2958 
2959 	return mask_updated;
2960 }
2961 
2962 /**
2963  * pci_check_and_mask_intx - mask INTx on pending interrupt
2964  * @dev: the PCI device to operate on
2965  *
2966  * Check if the device dev has its INTx line asserted, mask it and
2967  * return true in that case. False is returned if no interrupt was
2968  * pending.
2969  */
2970 bool pci_check_and_mask_intx(struct pci_dev *dev)
2971 {
2972 	return pci_check_and_set_intx_mask(dev, true);
2973 }
2974 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
2975 
2976 /**
2977  * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
2978  * @dev: the PCI device to operate on
2979  *
2980  * Check if the device dev has its INTx line asserted, unmask it if not
2981  * and return true. False is returned and the mask remains active if
2982  * there was still an interrupt pending.
2983  */
2984 bool pci_check_and_unmask_intx(struct pci_dev *dev)
2985 {
2986 	return pci_check_and_set_intx_mask(dev, false);
2987 }
2988 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
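
/*
 * Illustrative sketch of how the two helpers above might be used by code
 * that defers INTx handling (struct foo and its members are hypothetical):
 * the hard IRQ handler masks the line only if this device asserted it,
 * returning IRQ_NONE for a shared line that belongs to someone else, and
 * the consumer unmasks once it has serviced the device.
 *
 *	static irqreturn_t foo_intx_handler(int irq, void *data)
 *	{
 *		struct foo *ctx = data;
 *
 *		if (!pci_check_and_mask_intx(ctx->pdev))
 *			return IRQ_NONE;
 *
 *		complete(&ctx->intx_done);
 *		return IRQ_HANDLED;
 *	}
 *
 * and later, from the consumer:
 *
 *	pci_check_and_unmask_intx(ctx->pdev);
 */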
2989 
2990 /**
2991  * pci_msi_off - disables any MSI or MSI-X capabilities
2992  * @dev: the PCI device to operate on
2993  *
2994  * If you want to use MSI, see pci_enable_msi() and friends.
2995  * This is a lower-level primitive that allows us to disable
2996  * MSI operation at the device level.
2997  */
2998 void pci_msi_off(struct pci_dev *dev)
2999 {
3000 	int pos;
3001 	u16 control;
3002 
3003 	/*
3004 	 * This looks like it could go in msi.c, but we need it even when
3005 	 * CONFIG_PCI_MSI=n.  For the same reason, we can't use
3006 	 * dev->msi_cap or dev->msix_cap here.
3007 	 */
3008 	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
3009 	if (pos) {
3010 		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
3011 		control &= ~PCI_MSI_FLAGS_ENABLE;
3012 		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
3013 	}
3014 	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
3015 	if (pos) {
3016 		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
3017 		control &= ~PCI_MSIX_FLAGS_ENABLE;
3018 		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3019 	}
3020 }
3021 EXPORT_SYMBOL_GPL(pci_msi_off);
3022 
3023 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3024 {
3025 	return dma_set_max_seg_size(&dev->dev, size);
3026 }
3027 EXPORT_SYMBOL(pci_set_dma_max_seg_size);
3028 
3029 int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3030 {
3031 	return dma_set_seg_boundary(&dev->dev, mask);
3032 }
3033 EXPORT_SYMBOL(pci_set_dma_seg_boundary);
3034 
3035 /**
3036  * pci_wait_for_pending_transaction - waits for pending transaction
3037  * @dev: the PCI device to operate on
3038  *
3039  * Return 0 if the transaction is still pending after the timeout, 1 otherwise.
3040  */
3041 int pci_wait_for_pending_transaction(struct pci_dev *dev)
3042 {
3043 	if (!pci_is_pcie(dev))
3044 		return 1;
3045 
3046 	return pci_wait_for_pending(dev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND);
3047 }
3048 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
3049 
3050 static int pcie_flr(struct pci_dev *dev, int probe)
3051 {
3052 	u32 cap;
3053 
3054 	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
3055 	if (!(cap & PCI_EXP_DEVCAP_FLR))
3056 		return -ENOTTY;
3057 
3058 	if (probe)
3059 		return 0;
3060 
3061 	if (!pci_wait_for_pending_transaction(dev))
3062 		dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
3063 
3064 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
3065 
3066 	msleep(100);
3067 
3068 	return 0;
3069 }
3070 
3071 static int pci_af_flr(struct pci_dev *dev, int probe)
3072 {
3073 	int pos;
3074 	u8 cap;
3075 
3076 	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3077 	if (!pos)
3078 		return -ENOTTY;
3079 
3080 	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
3081 	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3082 		return -ENOTTY;
3083 
3084 	if (probe)
3085 		return 0;
3086 
3087 	/* Wait for Transaction Pending bit clear */
3088 	if (pci_wait_for_pending(dev, PCI_AF_STATUS, PCI_AF_STATUS_TP))
3089 		goto clear;
3090 
3091 	dev_err(&dev->dev,
3092 		"transaction is not cleared; proceeding with reset anyway\n");
3093 
3094 clear:
3095 	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
3096 	msleep(100);
3097 
3098 	return 0;
3099 }
3100 
3101 /**
3102  * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3103  * @dev: Device to reset.
3104  * @probe: If set, only check if the device can be reset this way.
3105  *
3106  * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3107  * unset, it will be reinitialized internally when going from PCI_D3hot to
3108  * PCI_D0.  If that's the case and the device is not in a low-power state
3109  * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3110  *
3111  * NOTE: This causes the caller to sleep for twice the device power transition
3112  * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3113  * by default (i.e. unless the @dev's d3_delay field has a different value).
3114  * Moreover, only devices in D0 can be reset by this function.
3115  */
3116 static int pci_pm_reset(struct pci_dev *dev, int probe)
3117 {
3118 	u16 csr;
3119 
3120 	if (!dev->pm_cap)
3121 		return -ENOTTY;
3122 
3123 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3124 	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3125 		return -ENOTTY;
3126 
3127 	if (probe)
3128 		return 0;
3129 
3130 	if (dev->current_state != PCI_D0)
3131 		return -EINVAL;
3132 
3133 	csr &= ~PCI_PM_CTRL_STATE_MASK;
3134 	csr |= PCI_D3hot;
3135 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3136 	pci_dev_d3_sleep(dev);
3137 
3138 	csr &= ~PCI_PM_CTRL_STATE_MASK;
3139 	csr |= PCI_D0;
3140 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3141 	pci_dev_d3_sleep(dev);
3142 
3143 	return 0;
3144 }
3145 
3146 /**
3147  * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
3148  * @dev: Bridge device
3149  *
3150  * Use the bridge control register to assert reset on the secondary bus.
3151  * Devices on the secondary bus are left in power-on state.
3152  */
3153 void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
3154 {
3155 	u16 ctrl;
3156 
3157 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
3158 	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3159 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
3160 	/*
3161 	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
3162 	 * this to 2ms to ensure that we meet the minimum requirement.
3163 	 */
3164 	msleep(2);
3165 
3166 	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3167 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
3168 
3169 	/*
3170 	 * Trhfa for conventional PCI is 2^25 clock cycles.
3171 	 * Assuming a minimum 33MHz clock this results in a 1s
3172 	 * delay before we can consider subordinate devices to
3173 	 * be re-initialized.  PCIe has some ways to shorten this,
3174 	 * but we don't make use of them yet.
3175 	 */
3176 	ssleep(1);
3177 }
3178 EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
3179 
3180 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3181 {
3182 	struct pci_dev *pdev;
3183 
3184 	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
3185 		return -ENOTTY;
3186 
3187 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3188 		if (pdev != dev)
3189 			return -ENOTTY;
3190 
3191 	if (probe)
3192 		return 0;
3193 
3194 	pci_reset_bridge_secondary_bus(dev->bus->self);
3195 
3196 	return 0;
3197 }
3198 
3199 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
3200 {
3201 	int rc = -ENOTTY;
3202 
3203 	if (!hotplug || !try_module_get(hotplug->ops->owner))
3204 		return rc;
3205 
3206 	if (hotplug->ops->reset_slot)
3207 		rc = hotplug->ops->reset_slot(hotplug, probe);
3208 
3209 	module_put(hotplug->ops->owner);
3210 
3211 	return rc;
3212 }
3213 
3214 static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
3215 {
3216 	struct pci_dev *pdev;
3217 
3218 	if (dev->subordinate || !dev->slot)
3219 		return -ENOTTY;
3220 
3221 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3222 		if (pdev != dev && pdev->slot == dev->slot)
3223 			return -ENOTTY;
3224 
3225 	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
3226 }
3227 
3228 static int __pci_dev_reset(struct pci_dev *dev, int probe)
3229 {
3230 	int rc;
3231 
3232 	might_sleep();
3233 
3234 	rc = pci_dev_specific_reset(dev, probe);
3235 	if (rc != -ENOTTY)
3236 		goto done;
3237 
3238 	rc = pcie_flr(dev, probe);
3239 	if (rc != -ENOTTY)
3240 		goto done;
3241 
3242 	rc = pci_af_flr(dev, probe);
3243 	if (rc != -ENOTTY)
3244 		goto done;
3245 
3246 	rc = pci_pm_reset(dev, probe);
3247 	if (rc != -ENOTTY)
3248 		goto done;
3249 
3250 	rc = pci_dev_reset_slot_function(dev, probe);
3251 	if (rc != -ENOTTY)
3252 		goto done;
3253 
3254 	rc = pci_parent_bus_reset(dev, probe);
3255 done:
3256 	return rc;
3257 }
3258 
3259 static void pci_dev_lock(struct pci_dev *dev)
3260 {
3261 	pci_cfg_access_lock(dev);
3262 	/* block PM suspend, driver probe, etc. */
3263 	device_lock(&dev->dev);
3264 }
3265 
3266 /* Return 1 on successful lock, 0 on contention */
3267 static int pci_dev_trylock(struct pci_dev *dev)
3268 {
3269 	if (pci_cfg_access_trylock(dev)) {
3270 		if (device_trylock(&dev->dev))
3271 			return 1;
3272 		pci_cfg_access_unlock(dev);
3273 	}
3274 
3275 	return 0;
3276 }
3277 
3278 static void pci_dev_unlock(struct pci_dev *dev)
3279 {
3280 	device_unlock(&dev->dev);
3281 	pci_cfg_access_unlock(dev);
3282 }
3283 
3284 static void pci_dev_save_and_disable(struct pci_dev *dev)
3285 {
3286 	/*
3287 	 * Wake-up device prior to save.  PM registers default to D0 after
3288 	 * reset and a simple register restore doesn't reliably return
3289 	 * to a non-D0 state anyway.
3290 	 */
3291 	pci_set_power_state(dev, PCI_D0);
3292 
3293 	pci_save_state(dev);
3294 	/*
3295 	 * Disable the device by clearing the Command register, except for
3296 	 * INTx-disable which is set.  This not only disables MMIO and I/O port
3297 	 * BARs, but also prevents the device from being Bus Master, preventing
3298 	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
3299 	 * compliant devices, INTx-disable prevents legacy interrupts.
3300 	 */
3301 	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3302 }
3303 
3304 static void pci_dev_restore(struct pci_dev *dev)
3305 {
3306 	pci_restore_state(dev);
3307 }
3308 
3309 static int pci_dev_reset(struct pci_dev *dev, int probe)
3310 {
3311 	int rc;
3312 
3313 	if (!probe)
3314 		pci_dev_lock(dev);
3315 
3316 	rc = __pci_dev_reset(dev, probe);
3317 
3318 	if (!probe)
3319 		pci_dev_unlock(dev);
3320 
3321 	return rc;
3322 }
3323 /**
3324  * __pci_reset_function - reset a PCI device function
3325  * @dev: PCI device to reset
3326  *
3327  * Some devices allow an individual function to be reset without affecting
3328  * other functions in the same device.  The PCI device must be responsive
3329  * to PCI config space in order to use this function.
3330  *
3331  * The device function is presumed to be unused when this function is called.
3332  * Resetting the device will make the contents of PCI configuration space
3333  * random, so any caller of this must be prepared to reinitialise the
3334  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3335  * etc.
3336  *
3337  * Returns 0 if the device function was successfully reset or negative if the
3338  * device doesn't support resetting a single function.
3339  */
3340 int __pci_reset_function(struct pci_dev *dev)
3341 {
3342 	return pci_dev_reset(dev, 0);
3343 }
3344 EXPORT_SYMBOL_GPL(__pci_reset_function);
3345 
3346 /**
3347  * __pci_reset_function_locked - reset a PCI device function while holding
3348  * the @dev mutex lock.
3349  * @dev: PCI device to reset
3350  *
3351  * Some devices allow an individual function to be reset without affecting
3352  * other functions in the same device.  The PCI device must be responsive
3353  * to PCI config space in order to use this function.
3354  *
3355  * The device function is presumed to be unused and the caller is holding
3356  * the device mutex lock when this function is called.
3357  * Resetting the device will make the contents of PCI configuration space
3358  * random, so any caller of this must be prepared to reinitialise the
3359  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3360  * etc.
3361  *
3362  * Returns 0 if the device function was successfully reset or negative if the
3363  * device doesn't support resetting a single function.
3364  */
3365 int __pci_reset_function_locked(struct pci_dev *dev)
3366 {
3367 	return __pci_dev_reset(dev, 0);
3368 }
3369 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3370 
3371 /**
3372  * pci_probe_reset_function - check whether the device can be safely reset
3373  * @dev: PCI device to reset
3374  *
3375  * Some devices allow an individual function to be reset without affecting
3376  * other functions in the same device.  The PCI device must be responsive
3377  * to PCI config space in order to use this function.
3378  *
3379  * Returns 0 if the device function can be reset or negative if the
3380  * device doesn't support resetting a single function.
3381  */
3382 int pci_probe_reset_function(struct pci_dev *dev)
3383 {
3384 	return pci_dev_reset(dev, 1);
3385 }
3386 
3387 /**
3388  * pci_reset_function - quiesce and reset a PCI device function
3389  * @dev: PCI device to reset
3390  *
3391  * Some devices allow an individual function to be reset without affecting
3392  * other functions in the same device.  The PCI device must be responsive
3393  * to PCI config space in order to use this function.
3394  *
3395  * This function does not just reset the PCI portion of a device, but
3396  * clears all the state associated with the device.  This function differs
3397  * from __pci_reset_function in that it saves and restores device state
3398  * over the reset.
3399  *
3400  * Returns 0 if the device function was successfully reset or negative if the
3401  * device doesn't support resetting a single function.
3402  */
3403 int pci_reset_function(struct pci_dev *dev)
3404 {
3405 	int rc;
3406 
3407 	rc = pci_dev_reset(dev, 1);
3408 	if (rc)
3409 		return rc;
3410 
3411 	pci_dev_save_and_disable(dev);
3412 
3413 	rc = pci_dev_reset(dev, 0);
3414 
3415 	pci_dev_restore(dev);
3416 
3417 	return rc;
3418 }
3419 EXPORT_SYMBOL_GPL(pci_reset_function);
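
/*
 * Illustrative recovery path, assuming a driver that can tolerate losing all
 * device state (which pci_reset_function() saves and restores around the
 * reset); the foo_*() helpers are hypothetical.
 *
 *	if (foo_hw_is_wedged(pdev)) {
 *		rc = pci_reset_function(pdev);
 *		if (rc)
 *			dev_err(&pdev->dev, "function reset failed: %d\n", rc);
 *		else
 *			foo_reinit_hw(pdev);
 *	}
 */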
3420 
3421 /**
3422  * pci_try_reset_function - quiesce and reset a PCI device function
3423  * @dev: PCI device to reset
3424  *
3425  * Same as above, except return -EAGAIN if unable to lock device.
3426  */
3427 int pci_try_reset_function(struct pci_dev *dev)
3428 {
3429 	int rc;
3430 
3431 	rc = pci_dev_reset(dev, 1);
3432 	if (rc)
3433 		return rc;
3434 
3435 	pci_dev_save_and_disable(dev);
3436 
3437 	if (pci_dev_trylock(dev)) {
3438 		rc = __pci_dev_reset(dev, 0);
3439 		pci_dev_unlock(dev);
3440 	} else
3441 		rc = -EAGAIN;
3442 
3443 	pci_dev_restore(dev);
3444 
3445 	return rc;
3446 }
3447 EXPORT_SYMBOL_GPL(pci_try_reset_function);
3448 
3449 /* Lock devices from the top of the tree down */
3450 static void pci_bus_lock(struct pci_bus *bus)
3451 {
3452 	struct pci_dev *dev;
3453 
3454 	list_for_each_entry(dev, &bus->devices, bus_list) {
3455 		pci_dev_lock(dev);
3456 		if (dev->subordinate)
3457 			pci_bus_lock(dev->subordinate);
3458 	}
3459 }
3460 
3461 /* Unlock devices from the bottom of the tree up */
3462 static void pci_bus_unlock(struct pci_bus *bus)
3463 {
3464 	struct pci_dev *dev;
3465 
3466 	list_for_each_entry(dev, &bus->devices, bus_list) {
3467 		if (dev->subordinate)
3468 			pci_bus_unlock(dev->subordinate);
3469 		pci_dev_unlock(dev);
3470 	}
3471 }
3472 
3473 /* Return 1 on successful lock, 0 on contention */
3474 static int pci_bus_trylock(struct pci_bus *bus)
3475 {
3476 	struct pci_dev *dev;
3477 
3478 	list_for_each_entry(dev, &bus->devices, bus_list) {
3479 		if (!pci_dev_trylock(dev))
3480 			goto unlock;
3481 		if (dev->subordinate) {
3482 			if (!pci_bus_trylock(dev->subordinate)) {
3483 				pci_dev_unlock(dev);
3484 				goto unlock;
3485 			}
3486 		}
3487 	}
3488 	return 1;
3489 
3490 unlock:
3491 	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
3492 		if (dev->subordinate)
3493 			pci_bus_unlock(dev->subordinate);
3494 		pci_dev_unlock(dev);
3495 	}
3496 	return 0;
3497 }
3498 
3499 /* Lock devices from the top of the tree down */
3500 static void pci_slot_lock(struct pci_slot *slot)
3501 {
3502 	struct pci_dev *dev;
3503 
3504 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3505 		if (!dev->slot || dev->slot != slot)
3506 			continue;
3507 		pci_dev_lock(dev);
3508 		if (dev->subordinate)
3509 			pci_bus_lock(dev->subordinate);
3510 	}
3511 }
3512 
3513 /* Unlock devices from the bottom of the tree up */
3514 static void pci_slot_unlock(struct pci_slot *slot)
3515 {
3516 	struct pci_dev *dev;
3517 
3518 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3519 		if (!dev->slot || dev->slot != slot)
3520 			continue;
3521 		if (dev->subordinate)
3522 			pci_bus_unlock(dev->subordinate);
3523 		pci_dev_unlock(dev);
3524 	}
3525 }
3526 
3527 /* Return 1 on successful lock, 0 on contention */
3528 static int pci_slot_trylock(struct pci_slot *slot)
3529 {
3530 	struct pci_dev *dev;
3531 
3532 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3533 		if (!dev->slot || dev->slot != slot)
3534 			continue;
3535 		if (!pci_dev_trylock(dev))
3536 			goto unlock;
3537 		if (dev->subordinate) {
3538 			if (!pci_bus_trylock(dev->subordinate)) {
3539 				pci_dev_unlock(dev);
3540 				goto unlock;
3541 			}
3542 		}
3543 	}
3544 	return 1;
3545 
3546 unlock:
3547 	list_for_each_entry_continue_reverse(dev,
3548 					     &slot->bus->devices, bus_list) {
3549 		if (!dev->slot || dev->slot != slot)
3550 			continue;
3551 		if (dev->subordinate)
3552 			pci_bus_unlock(dev->subordinate);
3553 		pci_dev_unlock(dev);
3554 	}
3555 	return 0;
3556 }
3557 
3558 /* Save and disable devices from the top of the tree down */
3559 static void pci_bus_save_and_disable(struct pci_bus *bus)
3560 {
3561 	struct pci_dev *dev;
3562 
3563 	list_for_each_entry(dev, &bus->devices, bus_list) {
3564 		pci_dev_save_and_disable(dev);
3565 		if (dev->subordinate)
3566 			pci_bus_save_and_disable(dev->subordinate);
3567 	}
3568 }
3569 
3570 /*
3571  * Restore devices from top of the tree down - parent bridges need to be
3572  * restored before we can get to subordinate devices.
3573  */
3574 static void pci_bus_restore(struct pci_bus *bus)
3575 {
3576 	struct pci_dev *dev;
3577 
3578 	list_for_each_entry(dev, &bus->devices, bus_list) {
3579 		pci_dev_restore(dev);
3580 		if (dev->subordinate)
3581 			pci_bus_restore(dev->subordinate);
3582 	}
3583 }
3584 
3585 /* Save and disable devices from the top of the tree down */
3586 static void pci_slot_save_and_disable(struct pci_slot *slot)
3587 {
3588 	struct pci_dev *dev;
3589 
3590 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3591 		if (!dev->slot || dev->slot != slot)
3592 			continue;
3593 		pci_dev_save_and_disable(dev);
3594 		if (dev->subordinate)
3595 			pci_bus_save_and_disable(dev->subordinate);
3596 	}
3597 }
3598 
3599 /*
3600  * Restore devices from top of the tree down - parent bridges need to be
3601  * restored before we can get to subordinate devices.
3602  */
3603 static void pci_slot_restore(struct pci_slot *slot)
3604 {
3605 	struct pci_dev *dev;
3606 
3607 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3608 		if (!dev->slot || dev->slot != slot)
3609 			continue;
3610 		pci_dev_restore(dev);
3611 		if (dev->subordinate)
3612 			pci_bus_restore(dev->subordinate);
3613 	}
3614 }
3615 
3616 static int pci_slot_reset(struct pci_slot *slot, int probe)
3617 {
3618 	int rc;
3619 
3620 	if (!slot)
3621 		return -ENOTTY;
3622 
3623 	if (!probe)
3624 		pci_slot_lock(slot);
3625 
3626 	might_sleep();
3627 
3628 	rc = pci_reset_hotplug_slot(slot->hotplug, probe);
3629 
3630 	if (!probe)
3631 		pci_slot_unlock(slot);
3632 
3633 	return rc;
3634 }
3635 
3636 /**
3637  * pci_probe_reset_slot - probe whether a PCI slot can be reset
3638  * @slot: PCI slot to probe
3639  *
3640  * Return 0 if slot can be reset, negative if a slot reset is not supported.
3641  */
3642 int pci_probe_reset_slot(struct pci_slot *slot)
3643 {
3644 	return pci_slot_reset(slot, 1);
3645 }
3646 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
3647 
3648 /**
3649  * pci_reset_slot - reset a PCI slot
3650  * @slot: PCI slot to reset
3651  *
3652  * A PCI bus may host multiple slots, and each slot may support a reset
3653  * mechanism independent of other slots.  For instance, some slots may support
3654  * slot power control.  In the case of a 1:1 bus-to-slot architecture, this
3655  * function may wrap the bus reset to avoid spurious slot-related events such
3656  * as hotplug.  Generally a slot reset should be attempted before a bus reset.
3657  * All functions of the slot and any subordinate buses behind the slot are
3658  * reset through this function.  PCI config space of all devices in the slot
3659  * and behind the slot is saved before and restored after the reset.
3660  *
3661  * Return 0 on success, non-zero on error.
3662  */
3663 int pci_reset_slot(struct pci_slot *slot)
3664 {
3665 	int rc;
3666 
3667 	rc = pci_slot_reset(slot, 1);
3668 	if (rc)
3669 		return rc;
3670 
3671 	pci_slot_save_and_disable(slot);
3672 
3673 	rc = pci_slot_reset(slot, 0);
3674 
3675 	pci_slot_restore(slot);
3676 
3677 	return rc;
3678 }
3679 EXPORT_SYMBOL_GPL(pci_reset_slot);
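
/*
 * Editor's illustrative sketch (not part of the original file): a driver
 * whose device sits in a resettable slot could use the probe/reset pair,
 * assuming "pdev" is its struct pci_dev and "err" an int:
 *
 *	err = -ENOTTY;
 *	if (pdev->slot && pci_probe_reset_slot(pdev->slot) == 0)
 *		err = pci_reset_slot(pdev->slot);
 */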
3680 
3681 /**
3682  * pci_try_reset_slot - Try to reset a PCI slot
3683  * @slot: PCI slot to reset
3684  *
3685  * Same as pci_reset_slot(), except it returns -EAGAIN if the slot cannot be locked.
3686  */
3687 int pci_try_reset_slot(struct pci_slot *slot)
3688 {
3689 	int rc;
3690 
3691 	rc = pci_slot_reset(slot, 1);
3692 	if (rc)
3693 		return rc;
3694 
3695 	pci_slot_save_and_disable(slot);
3696 
3697 	if (pci_slot_trylock(slot)) {
3698 		might_sleep();
3699 		rc = pci_reset_hotplug_slot(slot->hotplug, 0);
3700 		pci_slot_unlock(slot);
3701 	} else
3702 		rc = -EAGAIN;
3703 
3704 	pci_slot_restore(slot);
3705 
3706 	return rc;
3707 }
3708 EXPORT_SYMBOL_GPL(pci_try_reset_slot);
3709 
3710 static int pci_bus_reset(struct pci_bus *bus, int probe)
3711 {
3712 	if (!bus->self)
3713 		return -ENOTTY;
3714 
3715 	if (probe)
3716 		return 0;
3717 
3718 	pci_bus_lock(bus);
3719 
3720 	might_sleep();
3721 
3722 	pci_reset_bridge_secondary_bus(bus->self);
3723 
3724 	pci_bus_unlock(bus);
3725 
3726 	return 0;
3727 }
3728 
3729 /**
3730  * pci_probe_reset_bus - probe whether a PCI bus can be reset
3731  * @bus: PCI bus to probe
3732  *
3733  * Return 0 if the bus can be reset, negative if a bus reset is not supported.
3734  */
3735 int pci_probe_reset_bus(struct pci_bus *bus)
3736 {
3737 	return pci_bus_reset(bus, 1);
3738 }
3739 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
3740 
3741 /**
3742  * pci_reset_bus - reset a PCI bus
3743  * @bus: top level PCI bus to reset
3744  *
3745  * Do a bus reset on the given bus and any subordinate buses, saving
3746  * and restoring state of all devices.
3747  *
3748  * Return 0 on success, non-zero on error.
3749  */
3750 int pci_reset_bus(struct pci_bus *bus)
3751 {
3752 	int rc;
3753 
3754 	rc = pci_bus_reset(bus, 1);
3755 	if (rc)
3756 		return rc;
3757 
3758 	pci_bus_save_and_disable(bus);
3759 
3760 	rc = pci_bus_reset(bus, 0);
3761 
3762 	pci_bus_restore(bus);
3763 
3764 	return rc;
3765 }
3766 EXPORT_SYMBOL_GPL(pci_reset_bus);
3767 
3768 /**
3769  * pci_try_reset_bus - Try to reset a PCI bus
3770  * @bus: top level PCI bus to reset
3771  *
3772  * Same as pci_reset_bus(), except it returns -EAGAIN if the bus cannot be locked.
3773  */
3774 int pci_try_reset_bus(struct pci_bus *bus)
3775 {
3776 	int rc;
3777 
3778 	rc = pci_bus_reset(bus, 1);
3779 	if (rc)
3780 		return rc;
3781 
3782 	pci_bus_save_and_disable(bus);
3783 
3784 	if (pci_bus_trylock(bus)) {
3785 		might_sleep();
3786 		pci_reset_bridge_secondary_bus(bus->self);
3787 		pci_bus_unlock(bus);
3788 	} else
3789 		rc = -EAGAIN;
3790 
3791 	pci_bus_restore(bus);
3792 
3793 	return rc;
3794 }
3795 EXPORT_SYMBOL_GPL(pci_try_reset_bus);
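
/*
 * Editor's illustrative sketch (not part of the original file): callers that
 * cannot block on contended device locks can prefer the try variants,
 * attempting a slot reset first and falling back to a bus reset; "pdev" is a
 * hypothetical struct pci_dev and "ret" an int:
 *
 *	ret = -ENOTTY;
 *	if (pdev->slot)
 *		ret = pci_try_reset_slot(pdev->slot);
 *	if (ret)
 *		ret = pci_try_reset_bus(pdev->bus);
 */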
3796 
3797 /**
3798  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3799  * @dev: PCI device to query
3800  *
3801  * Returns mmrbc: maximum designed memory read count in bytes
3802  *    or appropriate error value.
3803  */
3804 int pcix_get_max_mmrbc(struct pci_dev *dev)
3805 {
3806 	int cap;
3807 	u32 stat;
3808 
3809 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3810 	if (!cap)
3811 		return -EINVAL;
3812 
3813 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3814 		return -EINVAL;
3815 
3816 	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
3817 }
3818 EXPORT_SYMBOL(pcix_get_max_mmrbc);
3819 
3820 /**
3821  * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3822  * @dev: PCI device to query
3823  *
3824  * Returns mmrbc: maximum memory read count in bytes
3825  *    or appropriate error value.
3826  */
3827 int pcix_get_mmrbc(struct pci_dev *dev)
3828 {
3829 	int cap;
3830 	u16 cmd;
3831 
3832 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3833 	if (!cap)
3834 		return -EINVAL;
3835 
3836 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3837 		return -EINVAL;
3838 
3839 	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
3840 }
3841 EXPORT_SYMBOL(pcix_get_mmrbc);
3842 
3843 /**
3844  * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3845  * @dev: PCI device to modify
3846  * @mmrbc: maximum memory read count in bytes
3847  *    valid values are 512, 1024, 2048, 4096
3848  *
3849  * If possible, sets the maximum memory read byte count; some bridges have
3850  * errata that prevent this.
3851  */
3852 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3853 {
3854 	int cap;
3855 	u32 stat, v, o;
3856 	u16 cmd;
3857 
3858 	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
3859 		return -EINVAL;
3860 
3861 	v = ffs(mmrbc) - 10;
3862 
3863 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3864 	if (!cap)
3865 		return -EINVAL;
3866 
3867 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3868 		return -EINVAL;
3869 
3870 	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3871 		return -E2BIG;
3872 
3873 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3874 		return -EINVAL;
3875 
3876 	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3877 	if (o != v) {
3878 		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3879 			return -EIO;
3880 
3881 		cmd &= ~PCI_X_CMD_MAX_READ;
3882 		cmd |= v << 2;
3883 		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3884 			return -EIO;
3885 	}
3886 	return 0;
3887 }
3888 EXPORT_SYMBOL(pcix_set_mmrbc);
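
/*
 * Editor's illustrative sketch (not part of the original file): a PCI-X
 * driver might raise the current MMRBC to the designed maximum, assuming
 * "pdev" is its struct pci_dev:
 *
 *	int max = pcix_get_max_mmrbc(pdev);
 *
 *	if (max > 0 && pcix_get_mmrbc(pdev) < max)
 *		pcix_set_mmrbc(pdev, max);
 */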
3889 
3890 /**
3891  * pcie_get_readrq - get PCI Express read request size
3892  * @dev: PCI device to query
3893  *
3894  * Returns maximum memory read request in bytes
3895  *    or appropriate error value.
3896  */
3897 int pcie_get_readrq(struct pci_dev *dev)
3898 {
3899 	u16 ctl;
3900 
3901 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
3902 
3903 	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3904 }
3905 EXPORT_SYMBOL(pcie_get_readrq);
3906 
3907 /**
3908  * pcie_set_readrq - set PCI Express maximum memory read request
3909  * @dev: PCI device to modify
3910  * @rq: maximum memory read count in bytes
3911  *    valid values are 128, 256, 512, 1024, 2048, 4096
3912  *
3913  * If possible, sets the maximum memory read request in bytes.
3914  */
3915 int pcie_set_readrq(struct pci_dev *dev, int rq)
3916 {
3917 	u16 v;
3918 
3919 	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
3920 		return -EINVAL;
3921 
3922 	/*
3923 	 * If using the "performance" PCIe config, we clamp the
3924 	 * read rq size to the max packet size to prevent the
3925 	 * host bridge generating requests larger than we can
3926 	 * cope with
3927 	 */
3928 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3929 		int mps = pcie_get_mps(dev);
3930 
3931 		if (mps < rq)
3932 			rq = mps;
3933 	}
3934 
3935 	v = (ffs(rq) - 8) << 12;
3936 
3937 	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
3938 						  PCI_EXP_DEVCTL_READRQ, v);
3939 }
3940 EXPORT_SYMBOL(pcie_set_readrq);
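
/*
 * Editor's illustrative sketch (not part of the original file): a driver
 * whose hardware cannot handle large completions might clamp the read
 * request size once at probe time; "pdev" is a hypothetical struct pci_dev:
 *
 *	if (pcie_get_readrq(pdev) > 512)
 *		pcie_set_readrq(pdev, 512);
 */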
3941 
3942 /**
3943  * pcie_get_mps - get PCI Express maximum payload size
3944  * @dev: PCI device to query
3945  *
3946  * Returns maximum payload size in bytes
3947  */
3948 int pcie_get_mps(struct pci_dev *dev)
3949 {
3950 	u16 ctl;
3951 
3952 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
3953 
3954 	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3955 }
3956 EXPORT_SYMBOL(pcie_get_mps);
3957 
3958 /**
3959  * pcie_set_mps - set PCI Express maximum payload size
3960  * @dev: PCI device to modify
3961  * @mps: maximum payload size in bytes
3962  *    valid values are 128, 256, 512, 1024, 2048, 4096
3963  *
3964  * If possible, sets the maximum payload size.
3965  */
3966 int pcie_set_mps(struct pci_dev *dev, int mps)
3967 {
3968 	u16 v;
3969 
3970 	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3971 		return -EINVAL;
3972 
3973 	v = ffs(mps) - 8;
3974 	if (v > dev->pcie_mpss)
3975 		return -EINVAL;
3976 	v <<= 5;
3977 
3978 	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
3979 						  PCI_EXP_DEVCTL_PAYLOAD, v);
3980 }
3981 EXPORT_SYMBOL(pcie_set_mps);
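
/*
 * Editor's illustrative sketch (not part of the original file): requests
 * above the device's advertised capability (dev->pcie_mpss) are rejected,
 * so a caller would clamp first; "pdev" is a hypothetical struct pci_dev:
 *
 *	pcie_set_mps(pdev, min(4096, 128 << pdev->pcie_mpss));
 */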
3982 
3983 /**
3984  * pcie_get_minimum_link - determine minimum link settings of a PCI device
3985  * @dev: PCI device to query
3986  * @speed: storage for minimum speed
3987  * @width: storage for minimum width
3988  *
3989  * This function will walk up the PCI device chain and determine the minimum
3990  * link width and speed of the device.
3991  */
3992 int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
3993 			  enum pcie_link_width *width)
3994 {
3995 	int ret;
3996 
3997 	*speed = PCI_SPEED_UNKNOWN;
3998 	*width = PCIE_LNK_WIDTH_UNKNOWN;
3999 
4000 	while (dev) {
4001 		u16 lnksta;
4002 		enum pci_bus_speed next_speed;
4003 		enum pcie_link_width next_width;
4004 
4005 		ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
4006 		if (ret)
4007 			return ret;
4008 
4009 		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
4010 		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
4011 			PCI_EXP_LNKSTA_NLW_SHIFT;
4012 
4013 		if (next_speed < *speed)
4014 			*speed = next_speed;
4015 
4016 		if (next_width < *width)
4017 			*width = next_width;
4018 
4019 		dev = dev->bus->self;
4020 	}
4021 
4022 	return 0;
4023 }
4024 EXPORT_SYMBOL(pcie_get_minimum_link);
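
/*
 * Editor's illustrative sketch (not part of the original file): a
 * bandwidth-sensitive driver could warn when its device sits behind a slow
 * or narrow link; "pdev" is a hypothetical struct pci_dev:
 *
 *	enum pci_bus_speed speed;
 *	enum pcie_link_width width;
 *
 *	if (!pcie_get_minimum_link(pdev, &speed, &width) &&
 *	    (speed < PCIE_SPEED_8_0GT || width < PCIE_LNK_X8))
 *		dev_warn(&pdev->dev, "PCIe link limits device performance\n");
 */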
4025 
4026 /**
4027  * pci_select_bars - Make BAR mask from the type of resource
4028  * @dev: the PCI device for which BAR mask is made
4029  * @flags: resource type mask to be selected
4030  *
4031  * This helper routine makes a BAR mask from the type of resource.
4032  */
4033 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
4034 {
4035 	int i, bars = 0;
4036 	for (i = 0; i < PCI_NUM_RESOURCES; i++)
4037 		if (pci_resource_flags(dev, i) & flags)
4038 			bars |= (1 << i);
4039 	return bars;
4040 }
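
/*
 * Editor's illustrative sketch (not part of the original file): the returned
 * mask is typically passed to pci_request_selected_regions(); "pdev" and the
 * driver name are hypothetical:
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	if (pci_request_selected_regions(pdev, bars, "mydriver"))
 *		return -EBUSY;
 */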
4041 
4042 /**
4043  * pci_resource_bar - get position of the BAR associated with a resource
4044  * @dev: the PCI device
4045  * @resno: the resource number
4046  * @type: the BAR type to be filled in
4047  *
4048  * Returns BAR position in config space, or 0 if the BAR is invalid.
4049  */
4050 int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
4051 {
4052 	int reg;
4053 
4054 	if (resno < PCI_ROM_RESOURCE) {
4055 		*type = pci_bar_unknown;
4056 		return PCI_BASE_ADDRESS_0 + 4 * resno;
4057 	} else if (resno == PCI_ROM_RESOURCE) {
4058 		*type = pci_bar_mem32;
4059 		return dev->rom_base_reg;
4060 	} else if (resno < PCI_BRIDGE_RESOURCES) {
4061 		/* device specific resource */
4062 		reg = pci_iov_resource_bar(dev, resno, type);
4063 		if (reg)
4064 			return reg;
4065 	}
4066 
4067 	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
4068 	return 0;
4069 }
4070 
4071 /* Some architectures require additional programming to enable VGA */
4072 static arch_set_vga_state_t arch_set_vga_state;
4073 
4074 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
4075 {
4076 	arch_set_vga_state = func;	/* NULL disables */
4077 }
4078 
4079 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
4080 		      unsigned int command_bits, u32 flags)
4081 {
4082 	if (arch_set_vga_state)
4083 		return arch_set_vga_state(dev, decode, command_bits,
4084 						flags);
4085 	return 0;
4086 }
4087 
4088 /**
4089  * pci_set_vga_state - set VGA decode state on device and parents if requested
4090  * @dev: the PCI device
4091  * @decode: true = enable decoding, false = disable decoding
4092  * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
4093  * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE;
4094  *    the latter also traverses ancestors and changes bridges
4095  */
4096 int pci_set_vga_state(struct pci_dev *dev, bool decode,
4097 		      unsigned int command_bits, u32 flags)
4098 {
4099 	struct pci_bus *bus;
4100 	struct pci_dev *bridge;
4101 	u16 cmd;
4102 	int rc;
4103 
4104 	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
4105 
4106 	/* ARCH specific VGA enables */
4107 	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
4108 	if (rc)
4109 		return rc;
4110 
4111 	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
4112 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
4113 		if (decode)
4114 			cmd |= command_bits;
4115 		else
4116 			cmd &= ~command_bits;
4117 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4118 	}
4119 
4120 	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
4121 		return 0;
4122 
4123 	bus = dev->bus;
4124 	while (bus) {
4125 		bridge = bus->self;
4126 		if (bridge) {
4127 			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
4128 					     &cmd);
4129 			if (decode)
4130 				cmd |= PCI_BRIDGE_CTL_VGA;
4131 			else
4132 				cmd &= ~PCI_BRIDGE_CTL_VGA;
4133 			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
4134 					      cmd);
4135 		}
4136 		bus = bus->parent;
4137 	}
4138 	return 0;
4139 }
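
/*
 * Editor's illustrative sketch (not part of the original file): a VGA
 * arbitration-style caller could route legacy VGA cycles to "pdev" by
 * enabling decode on the device and on every bridge above it:
 *
 *	pci_set_vga_state(pdev, true, PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
 *			  PCI_VGA_STATE_CHANGE_DECODES |
 *			  PCI_VGA_STATE_CHANGE_BRIDGE);
 */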
4140 
4141 bool pci_device_is_present(struct pci_dev *pdev)
4142 {
4143 	u32 v;
4144 
4145 	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
4146 }
4147 EXPORT_SYMBOL_GPL(pci_device_is_present);
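
/*
 * Editor's illustrative sketch (not part of the original file): an error
 * recovery or hotplug path might check that the device still answers config
 * reads before restoring it; "pdev" is hypothetical:
 *
 *	if (!pci_device_is_present(pdev))
 *		return -ENODEV;
 */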
4148 
4149 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
4150 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
4151 static DEFINE_SPINLOCK(resource_alignment_lock);
4152 
4153 /**
4154  * pci_specified_resource_alignment - get resource alignment specified by user.
4155  * @dev: the PCI device to check
4156  *
4157  * RETURNS: Resource alignment if it is specified.
4158  *          Zero if it is not specified.
4159  */
4160 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
4161 {
4162 	int seg, bus, slot, func, align_order, count;
4163 	resource_size_t align = 0;
4164 	char *p;
4165 
4166 	spin_lock(&resource_alignment_lock);
4167 	p = resource_alignment_param;
4168 	while (*p) {
4169 		count = 0;
4170 		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
4171 							p[count] == '@') {
4172 			p += count + 1;
4173 		} else {
4174 			align_order = -1;
4175 		}
4176 		if (sscanf(p, "%x:%x:%x.%x%n",
4177 			&seg, &bus, &slot, &func, &count) != 4) {
4178 			seg = 0;
4179 			if (sscanf(p, "%x:%x.%x%n",
4180 					&bus, &slot, &func, &count) != 3) {
4181 				/* Invalid format */
4182 				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
4183 					p);
4184 				break;
4185 			}
4186 		}
4187 		p += count;
4188 		if (seg == pci_domain_nr(dev->bus) &&
4189 			bus == dev->bus->number &&
4190 			slot == PCI_SLOT(dev->devfn) &&
4191 			func == PCI_FUNC(dev->devfn)) {
4192 			if (align_order == -1) {
4193 				align = PAGE_SIZE;
4194 			} else {
4195 				align = 1 << align_order;
4196 			}
4197 			/* Found */
4198 			break;
4199 		}
4200 		if (*p != ';' && *p != ',') {
4201 			/* End of param or invalid format */
4202 			break;
4203 		}
4204 		p++;
4205 	}
4206 	spin_unlock(&resource_alignment_lock);
4207 	return align;
4208 }
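
/*
 * Editor's note (not part of the original file): the format parsed above is
 * "[<order>@][<domain>:]<bus>:<slot>.<func>", with entries separated by ';'
 * or ','; <order> is the requested alignment as a power of two, defaulting
 * to PAGE_SIZE when omitted.  For example, 1MB (2^20) alignment for device
 * 0000:01:00.0:
 *
 *	pci=resource_alignment=20@0000:01:00.0
 */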
4209 
4210 /*
4211  * This function disables memory decoding and releases memory resources
4212  * of the device specified by the kernel's boot parameter 'pci=resource_alignment='.
4213  * It also rounds up the resource size to the specified alignment.
4214  * Later on, the kernel will assign page-aligned memory resources back
4215  * to the device.
4216  */
4217 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
4218 {
4219 	int i;
4220 	struct resource *r;
4221 	resource_size_t align, size;
4222 	u16 command;
4223 
4224 	/* Check whether the specified PCI device is a reassignment target */
4225 	align = pci_specified_resource_alignment(dev);
4226 	if (!align)
4227 		return;
4228 
4229 	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
4230 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
4231 		dev_warn(&dev->dev,
4232 			"Can't reassign resources to host bridge.\n");
4233 		return;
4234 	}
4235 
4236 	dev_info(&dev->dev,
4237 		"Disabling memory decoding and releasing memory resources.\n");
4238 	pci_read_config_word(dev, PCI_COMMAND, &command);
4239 	command &= ~PCI_COMMAND_MEMORY;
4240 	pci_write_config_word(dev, PCI_COMMAND, command);
4241 
4242 	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
4243 		r = &dev->resource[i];
4244 		if (!(r->flags & IORESOURCE_MEM))
4245 			continue;
4246 		size = resource_size(r);
4247 		if (size < align) {
4248 			size = align;
4249 			dev_info(&dev->dev,
4250 				"Rounding up size of resource #%d to %#llx.\n",
4251 				i, (unsigned long long)size);
4252 		}
4253 		r->end = size - 1;
4254 		r->start = 0;
4255 	}
4256 	/*
4257 	 * Need to disable the bridge's resource windows to enable the
4258 	 * kernel to reassign new resource windows later on.
4259 	 */
4260 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
4261 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
4262 		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
4263 			r = &dev->resource[i];
4264 			if (!(r->flags & IORESOURCE_MEM))
4265 				continue;
4266 			r->end = resource_size(r) - 1;
4267 			r->start = 0;
4268 		}
4269 		pci_disable_bridge_window(dev);
4270 	}
4271 }
4272 
4273 static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
4274 {
4275 	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
4276 		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
4277 	spin_lock(&resource_alignment_lock);
4278 	strncpy(resource_alignment_param, buf, count);
4279 	resource_alignment_param[count] = '\0';
4280 	spin_unlock(&resource_alignment_lock);
4281 	return count;
4282 }
4283 
4284 static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
4285 {
4286 	size_t count;
4287 	spin_lock(&resource_alignment_lock);
4288 	count = snprintf(buf, size, "%s", resource_alignment_param);
4289 	spin_unlock(&resource_alignment_lock);
4290 	return count;
4291 }
4292 
4293 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
4294 {
4295 	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
4296 }
4297 
4298 static ssize_t pci_resource_alignment_store(struct bus_type *bus,
4299 					const char *buf, size_t count)
4300 {
4301 	return pci_set_resource_alignment_param(buf, count);
4302 }
4303 
4304 BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
4305 					pci_resource_alignment_store);
4306 
4307 static int __init pci_resource_alignment_sysfs_init(void)
4308 {
4309 	return bus_create_file(&pci_bus_type,
4310 					&bus_attr_resource_alignment);
4311 }
4312 
4313 late_initcall(pci_resource_alignment_sysfs_init);
4314 
4315 static void pci_no_domains(void)
4316 {
4317 #ifdef CONFIG_PCI_DOMAINS
4318 	pci_domains_supported = 0;
4319 #endif
4320 }
4321 
4322 /**
4323  * pci_ext_cfg_avail - can we access extended PCI config space?
4324  *
4325  * Returns 1 if we can access PCI extended config space (offsets
4326  * greater than 0xff). This is the default implementation. Architecture
4327  * implementations can override this.
4328  */
4329 int __weak pci_ext_cfg_avail(void)
4330 {
4331 	return 1;
4332 }
4333 
4334 void __weak pci_fixup_cardbus(struct pci_bus *bus)
4335 {
4336 }
4337 EXPORT_SYMBOL(pci_fixup_cardbus);
4338 
4339 static int __init pci_setup(char *str)
4340 {
4341 	while (str) {
4342 		char *k = strchr(str, ',');
4343 		if (k)
4344 			*k++ = 0;
4345 		if (*str && (str = pcibios_setup(str)) && *str) {
4346 			if (!strcmp(str, "nomsi")) {
4347 				pci_no_msi();
4348 			} else if (!strcmp(str, "noaer")) {
4349 				pci_no_aer();
4350 			} else if (!strncmp(str, "realloc=", 8)) {
4351 				pci_realloc_get_opt(str + 8);
4352 			} else if (!strncmp(str, "realloc", 7)) {
4353 				pci_realloc_get_opt("on");
4354 			} else if (!strcmp(str, "nodomains")) {
4355 				pci_no_domains();
4356 			} else if (!strncmp(str, "noari", 5)) {
4357 				pcie_ari_disabled = true;
4358 			} else if (!strncmp(str, "cbiosize=", 9)) {
4359 				pci_cardbus_io_size = memparse(str + 9, &str);
4360 			} else if (!strncmp(str, "cbmemsize=", 10)) {
4361 				pci_cardbus_mem_size = memparse(str + 10, &str);
4362 			} else if (!strncmp(str, "resource_alignment=", 19)) {
4363 				pci_set_resource_alignment_param(str + 19,
4364 							strlen(str + 19));
4365 			} else if (!strncmp(str, "ecrc=", 5)) {
4366 				pcie_ecrc_get_policy(str + 5);
4367 			} else if (!strncmp(str, "hpiosize=", 9)) {
4368 				pci_hotplug_io_size = memparse(str + 9, &str);
4369 			} else if (!strncmp(str, "hpmemsize=", 10)) {
4370 				pci_hotplug_mem_size = memparse(str + 10, &str);
4371 			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
4372 				pcie_bus_config = PCIE_BUS_TUNE_OFF;
4373 			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
4374 				pcie_bus_config = PCIE_BUS_SAFE;
4375 			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
4376 				pcie_bus_config = PCIE_BUS_PERFORMANCE;
4377 			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
4378 				pcie_bus_config = PCIE_BUS_PEER2PEER;
4379 			} else if (!strncmp(str, "pcie_scan_all", 13)) {
4380 				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
4381 			} else {
4382 				printk(KERN_ERR "PCI: Unknown option `%s'\n",
4383 						str);
4384 			}
4385 		}
4386 		str = k;
4387 	}
4388 	return 0;
4389 }
4390 early_param("pci", pci_setup);
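
/*
 * Editor's note (not part of the original file): the options above are given
 * on the kernel command line via the "pci=" parameter and may be combined
 * with commas, e.g.:
 *
 *	pci=nomsi,pcie_bus_safe,hpmemsize=8M,resource_alignment=20@0000:01:00.0
 */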
4391 
4392 EXPORT_SYMBOL(pci_reenable_device);
4393 EXPORT_SYMBOL(pci_enable_device_io);
4394 EXPORT_SYMBOL(pci_enable_device_mem);
4395 EXPORT_SYMBOL(pci_enable_device);
4396 EXPORT_SYMBOL(pcim_enable_device);
4397 EXPORT_SYMBOL(pcim_pin_device);
4398 EXPORT_SYMBOL(pci_disable_device);
4399 EXPORT_SYMBOL(pci_find_capability);
4400 EXPORT_SYMBOL(pci_bus_find_capability);
4401 EXPORT_SYMBOL(pci_release_regions);
4402 EXPORT_SYMBOL(pci_request_regions);
4403 EXPORT_SYMBOL(pci_request_regions_exclusive);
4404 EXPORT_SYMBOL(pci_release_region);
4405 EXPORT_SYMBOL(pci_request_region);
4406 EXPORT_SYMBOL(pci_request_region_exclusive);
4407 EXPORT_SYMBOL(pci_release_selected_regions);
4408 EXPORT_SYMBOL(pci_request_selected_regions);
4409 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
4410 EXPORT_SYMBOL(pci_set_master);
4411 EXPORT_SYMBOL(pci_clear_master);
4412 EXPORT_SYMBOL(pci_set_mwi);
4413 EXPORT_SYMBOL(pci_try_set_mwi);
4414 EXPORT_SYMBOL(pci_clear_mwi);
4415 EXPORT_SYMBOL_GPL(pci_intx);
4416 EXPORT_SYMBOL(pci_assign_resource);
4417 EXPORT_SYMBOL(pci_find_parent_resource);
4418 EXPORT_SYMBOL(pci_select_bars);
4419 
4420 EXPORT_SYMBOL(pci_set_power_state);
4421 EXPORT_SYMBOL(pci_save_state);
4422 EXPORT_SYMBOL(pci_restore_state);
4423 EXPORT_SYMBOL(pci_pme_capable);
4424 EXPORT_SYMBOL(pci_pme_active);
4425 EXPORT_SYMBOL(pci_wake_from_d3);
4426 EXPORT_SYMBOL(pci_prepare_to_sleep);
4427 EXPORT_SYMBOL(pci_back_from_sleep);
4428 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
4429