/*
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	if (delay)
		msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;

/*
 * The default CLS is used if the arch didn't set CLS explicitly and not
 * all PCI devices agree on the same value.  The arch can override either
 * the default or the actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);
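
/*
 * Usage note (not part of the original file): the setup hook above is
 * driven from the kernel command line; booting with "pcie_port_pm=off"
 * keeps PCIe ports out of D3, while "pcie_port_pm=force" allows D3 even
 * for ports the kernel would otherwise treat conservatively.
 */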

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		dev_warn(&pdev->dev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}
	return ioremap_nocache(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
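
/*
 * Illustrative sketch (not from this file; "foo" names are hypothetical):
 * a driver would typically map a BAR in its probe routine:
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *
 *		if (!regs)
 *			return -ENOMEM;
 *		...
 *	}
 */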

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_wc(pci_resource_start(pdev, bar),
			  pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif


static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
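
/*
 * Illustrative sketch (an assumption, not from this file): a caller can
 * locate a capability before touching its registers, e.g. the Power
 * Management capability:
 *
 *	int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *	u16 pmc;
 *
 *	if (pm)
 *		pci_read_config_word(pdev, pm + PCI_PM_PMC, &pmc);
 */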

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
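
/*
 * Illustrative sketch: because some extended capabilities can occur more
 * than once, callers may walk all instances, e.g. every vendor-specific
 * capability (hedged example; handle_vsec() is hypothetical):
 *
 *	int pos = 0;
 *
 *	while ((pos = pci_find_next_ext_capability(pdev, pos,
 *						   PCI_EXT_CAP_ID_VNDR)))
 *		handle_vsec(pdev, pos);
 */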

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 *  For given resource region of given device, return the resource
 *  region of parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (res->start && resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_find_pcie_root_port - return PCIe Root Port
 * @dev: PCI device to query
 *
 * Traverse up the parent chain and return the PCIe Root Port PCI Device
 * for a given PCI Device.
 */
struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
{
	struct pci_dev *bridge, *highest_pcie_bridge = NULL;

	bridge = pci_upstream_bridge(dev);
	while (bridge && pci_is_pcie(bridge)) {
		highest_pcie_bridge = bridge;
		bridge = pci_upstream_bridge(bridge);
	}

	if (!highest_pcie_bridge ||
	    pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
		return NULL;

	return highest_pcie_bridge;
}
EXPORT_SYMBOL(pci_find_pcie_root_port);
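
/*
 * Illustrative sketch (hedged, not from this file): a driver that needs
 * to inspect its Root Port, e.g. to read the port's link status:
 *
 *	struct pci_dev *rp = pci_find_pcie_root_port(pdev);
 *	u16 lnksta;
 *
 *	if (rp)
 *		pcie_capability_read_word(rp, PCI_EXP_LNKSTA, &lnksta);
 */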

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for the Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}
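
/*
 * Illustrative sketch (an assumption about typical use): waiting for a
 * PCIe function to retire pending transactions before a reset:
 *
 *	if (!pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
 *				  PCI_EXP_DEVSTA_TRPND))
 *		dev_warn(&dev->dev, "transactions still pending\n");
 */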

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state  || !ops->get_state ||
	    !ops->choose_state  || !ops->sleep_wake || !ops->run_wake  ||
	    !ops->need_resume)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate current state:
	 * We can enter D0 from any state, but otherwise we can only
	 * move deeper into a low-power state, never back toward D0.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition (from state %d to %d)\n",
			dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, currently in D%d\n",
			 dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_set_power_state(dev, PCI_D0);

	pci_raw_set_power_state(dev, PCI_D0);
	pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
static void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0) {
		pci_platform_power_transition(dev, PCI_D0);
		/*
		 * Mandatory power management transition delays, see
		 * PCI Express Base Specification Revision 2.0 Section
		 * 6.6.1: Conventional Reset.  Do not delay for
		 * devices powered on/off by the corresponding bridge,
		 * because we have already delayed for the bridge.
		 */
		if (dev->runtime_d3cold) {
			if (dev->d3cold_delay)
				msleep(dev->d3cold_delay);
			/*
			 * When powering on a bridge from D3cold, the
			 * whole hierarchy may be powered on into
			 * D0uninitialized state, resume them to give
			 * them a chance to suspend again
			 */
			pci_wakeup_bus(dev->subordinate);
		}
	}
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * __pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int ret;

	if (state <= PCI_D0)
		return -EINVAL;
	ret = pci_platform_power_transition(dev, state);
	/* Powering off the bridge may power off the whole hierarchy */
	if (!ret && state == PCI_D3cold)
		__pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
	return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge does not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	return error;
}
EXPORT_SYMBOL(pci_set_power_state);
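
/*
 * Illustrative sketch (hypothetical driver code, not from this file): a
 * runtime-suspend hook can simply request D3hot and let the core pick
 * the deepest state the platform supports:
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		pci_save_state(pdev);
 *		return pci_set_power_state(pdev, PCI_D3hot);
 *	}
 */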

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);
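
/*
 * Illustrative sketch: a legacy .suspend() method combines
 * pci_choose_state() with the save/disable helpers ("foo" is
 * hypothetical):
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_disable_device(pdev);
 *		return pci_set_power_state(pdev,
 *					   pci_choose_state(pdev, state));
 *	}
 */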

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}


static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}


/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: - PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		dev_dbg(&pdev->dev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
			offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);

	pci_cleanup_aer_error_status_regs(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	/* Restore ACS and IOV configuration state */
	pci_enable_acs(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);
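
/*
 * Illustrative sketch (hypothetical resume counterpart to a suspend
 * routine that used pci_save_state()):
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return pci_enable_device(pdev);
 *	}
 */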

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
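
/*
 * Illustrative sketch (an assumption, not from this file): a caller that
 * resets a device behind the core's back can snapshot the saved state
 * and reapply it around the reset:
 *
 *	struct pci_saved_state *state;
 *
 *	pci_save_state(pdev);
 *	state = pci_store_saved_state(pdev);
 *	... device-specific reset ...
 *	pci_load_and_free_saved_state(pdev, &state);
 *	pci_restore_state(pdev);
 */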

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
	return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;
	struct pci_dev *bridge;
	u16 cmd;
	u8 pin;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pcie_aspm_powersave_config_link(bridge);

	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (pin) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 *  Note this function is a backend of pci_default_resume and is not supposed
 *  to be called by normal code; write a proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
EXPORT_SYMBOL(pci_reenable_device);

static void pci_enable_bridge(struct pci_dev *dev)
{
	struct pci_dev *bridge;
	int retval;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	if (pci_is_enabled(dev)) {
		if (!dev->is_busmaster)
			pci_set_master(dev);
		return;
	}

	retval = pci_enable_device(dev);
	if (retval)
		dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
			retval);
	pci_set_master(dev);
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	struct pci_dev *bridge;
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device_io);

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable Memory resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O and memory. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 *
 *  Note we don't actually enable the device many times if we call
 *  this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);
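
/*
 * Illustrative sketch (hypothetical probe, not from this file): the
 * classic unmanaged enable sequence pairs pci_enable_device() with
 * pci_request_regions() and pci_set_master():
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	err = pci_request_regions(pdev, "foo");
 *	if (err)
 *		goto err_disable;
 *	pci_set_master(pdev);
 */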

/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);
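
/*
 * Illustrative sketch: with the managed variant the explicit error path
 * disappears, since devres disables the device automatically on driver
 * detach ("foo" names are hypothetical):
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int err = pcim_enable_device(pdev);
 *
 *		if (err)
 *			return err;
 *		...
 *	}
 */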

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
EXPORT_SYMBOL(pcim_pin_device);

/*
 * pcibios_add_device - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pcibios_add_device(struct pci_dev *dev)
{
	return 0;
}

/**
 * pcibios_release_device - provide arch specific hooks when releasing device dev
 * @dev: the PCI device being released
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are released. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_release_device(struct pci_dev *dev) {}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

/**
 * pcibios_penalize_isa_irq - penalize an ISA IRQ
 * @irq: ISA IRQ to penalize
 * @active: IRQ active or not
 *
 * Permits the platform to provide architecture-specific functionality when
 * penalizing ISA IRQs. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_penalize_isa_irq(int irq, int active) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
		      "disabling already-disabled device");

	if (atomic_dec_return(&dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
EXPORT_SYMBOL(pci_disable_device);
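
/*
 * Illustrative sketch (hypothetical remove path matching the unmanaged
 * probe sketch above):
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		pci_release_regions(pdev);
 *		pci_disable_device(pdev);
 *	}
 */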

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}


/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
EXPORT_SYMBOL(pci_pme_capable);

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
		if (pme_dev->dev->pme_poll) {
			struct pci_dev *bridge;

			bridge = pme_dev->dev->bus->self;
			/*
			 * If bridge is in low power state, the
			 * configuration space of subordinate devices
			 * may not be accessible
1777 			 */
1778 			if (bridge && bridge->current_state != PCI_D0)
1779 				continue;
1780 			pci_pme_wakeup(pme_dev->dev, NULL);
1781 		} else {
1782 			list_del(&pme_dev->list);
1783 			kfree(pme_dev);
1784 		}
1785 	}
1786 	if (!list_empty(&pci_pme_list))
1787 		queue_delayed_work(system_freezable_wq, &pci_pme_work,
1788 				   msecs_to_jiffies(PME_TIMEOUT));
1789 	mutex_unlock(&pci_pme_list_mutex);
1790 }
1791 
1792 static void __pci_pme_active(struct pci_dev *dev, bool enable)
1793 {
1794 	u16 pmcsr;
1795 
1796 	if (!dev->pme_support)
1797 		return;
1798 
1799 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1800 	/* Clear PME_Status by writing 1 to it and enable PME# */
1801 	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1802 	if (!enable)
1803 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1804 
1805 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1806 }
1807 
1808 /**
1809  * pci_pme_active - enable or disable PCI device's PME# function
1810  * @dev: PCI device to handle.
1811  * @enable: 'true' to enable PME# generation; 'false' to disable it.
1812  *
1813  * The caller must verify that the device is capable of generating PME# before
1814  * calling this function with @enable equal to 'true'.
1815  */
1816 void pci_pme_active(struct pci_dev *dev, bool enable)
1817 {
1818 	__pci_pme_active(dev, enable);
1819 
1820 	/*
1821 	 * PCI (as opposed to PCIe) PME requires that the device have
1822 	 * its PME# line hooked up correctly. Not all hardware vendors
1823 	 * do this, so the PME never gets delivered and the device
1824 	 * remains asleep. The easiest way around this is to
1825 	 * periodically walk the list of suspended devices and check
1826 	 * whether any have their PME flag set. The assumption is that
1827 	 * we'll wake up often enough anyway that this won't be a huge
1828 	 * hit, and the power savings from the devices will still be a
1829 	 * win.
1830 	 *
1831 	 * Although PCIe uses an in-band PME message instead of the PME#
1832 	 * line to report PME, PME does not work for some PCIe devices in
1833 	 * reality.  For example, there are devices that set their PME
1834 	 * status bits, but don't really bother to send a PME message;
1835 	 * there are PCI Express Root Ports that don't bother to
1836 	 * trigger interrupts when they receive PME messages from the
1837 	 * devices below.  So PME poll is used for PCIe devices too.
1838 	 */
1839 
1840 	if (dev->pme_poll) {
1841 		struct pci_pme_device *pme_dev;
1842 		if (enable) {
1843 			pme_dev = kmalloc(sizeof(struct pci_pme_device),
1844 					  GFP_KERNEL);
1845 			if (!pme_dev) {
1846 				dev_warn(&dev->dev, "can't enable PME#\n");
1847 				return;
1848 			}
1849 			pme_dev->dev = dev;
1850 			mutex_lock(&pci_pme_list_mutex);
1851 			list_add(&pme_dev->list, &pci_pme_list);
1852 			if (list_is_singular(&pci_pme_list))
1853 				queue_delayed_work(system_freezable_wq,
1854 						   &pci_pme_work,
1855 						   msecs_to_jiffies(PME_TIMEOUT));
1856 			mutex_unlock(&pci_pme_list_mutex);
1857 		} else {
1858 			mutex_lock(&pci_pme_list_mutex);
1859 			list_for_each_entry(pme_dev, &pci_pme_list, list) {
1860 				if (pme_dev->dev == dev) {
1861 					list_del(&pme_dev->list);
1862 					kfree(pme_dev);
1863 					break;
1864 				}
1865 			}
1866 			mutex_unlock(&pci_pme_list_mutex);
1867 		}
1868 	}
1869 
1870 	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
1871 }
1872 EXPORT_SYMBOL(pci_pme_active);
1873 
1874 /**
1875  * __pci_enable_wake - enable PCI device as wakeup event source
1876  * @dev: PCI device affected
1877  * @state: PCI state from which device will issue wakeup events
1878  * @runtime: True if the events are to be generated at run time
1879  * @enable: True to enable event generation; false to disable
1880  *
1881  * This enables the device as a wakeup event source, or disables it.
1882  * When such events involve platform-specific hooks, those hooks are
1883  * called automatically by this routine.
1884  *
1885  * Devices with legacy power management (no standard PCI PM capabilities)
1886  * always require such platform hooks.
1887  *
1888  * RETURN VALUE:
1889  * 0 is returned on success
1890  * -EINVAL is returned if device is not supposed to wake up the system
1891  * A platform-dependent error code is returned if both the platform and
1892  * the native mechanism fail to enable the generation of wake-up events
1893  */
1894 int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1895 		      bool runtime, bool enable)
1896 {
1897 	int ret = 0;
1898 
1899 	if (enable && !runtime && !device_may_wakeup(&dev->dev))
1900 		return -EINVAL;
1901 
1902 	/* Don't do the same thing twice in a row for one device. */
1903 	if (!!enable == !!dev->wakeup_prepared)
1904 		return 0;
1905 
1906 	/*
1907 	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1908 	 * Anderson we should be doing PME# wake enable followed by ACPI wake
1909 	 * enable.  To disable wake-up we call the platform first, for symmetry.
1910 	 */
1911 
1912 	if (enable) {
1913 		int error;
1914 
1915 		if (pci_pme_capable(dev, state))
1916 			pci_pme_active(dev, true);
1917 		else
1918 			ret = 1;
1919 		error = runtime ? platform_pci_run_wake(dev, true) :
1920 					platform_pci_sleep_wake(dev, true);
1921 		if (ret)
1922 			ret = error;
1923 		if (!ret)
1924 			dev->wakeup_prepared = true;
1925 	} else {
1926 		if (runtime)
1927 			platform_pci_run_wake(dev, false);
1928 		else
1929 			platform_pci_sleep_wake(dev, false);
1930 		pci_pme_active(dev, false);
1931 		dev->wakeup_prepared = false;
1932 	}
1933 
1934 	return ret;
1935 }
1936 EXPORT_SYMBOL(__pci_enable_wake);
1937 
1938 /**
1939  * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1940  * @dev: PCI device to prepare
1941  * @enable: True to enable wake-up event generation; false to disable
1942  *
1943  * Many drivers want the device to wake up the system from D3_hot or D3_cold
1944  * and this function allows them to set that up cleanly - pci_enable_wake()
1945  * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1946  * ordering constraints.
1947  *
1948  * This function only returns error code if the device is not capable of
1949  * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1950  * enable wake-up power for it.
1951  */
1952 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1953 {
1954 	return pci_pme_capable(dev, PCI_D3cold) ?
1955 			pci_enable_wake(dev, PCI_D3cold, enable) :
1956 			pci_enable_wake(dev, PCI_D3hot, enable);
1957 }
1958 EXPORT_SYMBOL(pci_wake_from_d3);
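
/*
 * Illustrative use (a sketch): a wake-capable driver would typically
 * call this from its suspend path, honoring the user's wakeup setting:
 *
 *	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
 */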
1959 
1960 /**
1961  * pci_target_state - find an appropriate low power state for a given PCI dev
1962  * @dev: PCI device
1963  *
1964  * Use underlying platform code to find a supported low power state for @dev.
1965  * If the platform can't manage @dev, return the deepest state from which it
1966  * can generate wake events, based on any available PME info.
1967  */
1968 static pci_power_t pci_target_state(struct pci_dev *dev)
1969 {
1970 	pci_power_t target_state = PCI_D3hot;
1971 
1972 	if (platform_pci_power_manageable(dev)) {
1973 		/*
1974 		 * Call the platform to choose the target state of the device
1975 		 * and enable wake-up from this state if supported.
1976 		 */
1977 		pci_power_t state = platform_pci_choose_state(dev);
1978 
1979 		switch (state) {
1980 		case PCI_POWER_ERROR:
1981 		case PCI_UNKNOWN:
1982 			break;
1983 		case PCI_D1:
1984 		case PCI_D2:
1985 			if (pci_no_d1d2(dev))
1986 				break;
1987 		default:
1988 			target_state = state;
1989 		}
1990 
1991 		return target_state;
1992 	}
1993 
1994 	if (!dev->pm_cap)
1995 		target_state = PCI_D0;
1996 
1997 	/*
1998 	 * If the device is in D3cold even though it's not power-manageable by
1999 	 * the platform, it may have been powered down by non-standard means.
2000 	 * Best to let it slumber.
2001 	 */
2002 	if (dev->current_state == PCI_D3cold)
2003 		target_state = PCI_D3cold;
2004 
2005 	if (device_may_wakeup(&dev->dev)) {
2006 		/*
2007 		 * Find the deepest state from which the device can generate
2008 		 * wake-up events, make it the target state and enable device
2009 		 * to generate PME#.
2010 		 */
2011 		if (dev->pme_support) {
2012 			while (target_state
2013 			      && !(dev->pme_support & (1 << target_state)))
2014 				target_state--;
2015 		}
2016 	}
2017 
2018 	return target_state;
2019 }
2020 
2021 /**
2022  * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
2023  * @dev: Device to handle.
2024  *
2025  * Choose the power state appropriate for the device depending on whether
2026  * it can wake up the system and/or is power manageable by the platform
2027  * (PCI_D3hot is the default) and put the device into that state.
2028  */
2029 int pci_prepare_to_sleep(struct pci_dev *dev)
2030 {
2031 	pci_power_t target_state = pci_target_state(dev);
2032 	int error;
2033 
2034 	if (target_state == PCI_POWER_ERROR)
2035 		return -EIO;
2036 
2037 	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
2038 
2039 	error = pci_set_power_state(dev, target_state);
2040 
2041 	if (error)
2042 		pci_enable_wake(dev, target_state, false);
2043 
2044 	return error;
2045 }
2046 EXPORT_SYMBOL(pci_prepare_to_sleep);
2047 
2048 /**
2049  * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
2050  * @dev: Device to handle.
2051  *
2052  * Disable device's system wake-up capability and put it into D0.
2053  */
2054 int pci_back_from_sleep(struct pci_dev *dev)
2055 {
2056 	pci_enable_wake(dev, PCI_D0, false);
2057 	return pci_set_power_state(dev, PCI_D0);
2058 }
2059 EXPORT_SYMBOL(pci_back_from_sleep);
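
/*
 * Conceptually, a legacy PCI driver could pair the two helpers above in
 * its suspend/resume callbacks (a sketch; the foo_* names are
 * hypothetical and error handling is omitted):
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		return pci_prepare_to_sleep(pdev);
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		return pci_back_from_sleep(pdev);
 *	}
 */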
2060 
2061 /**
2062  * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2063  * @dev: PCI device being suspended.
2064  *
2065  * Prepare @dev to generate wake-up events at run time and put it into a low
2066  * power state.
2067  */
2068 int pci_finish_runtime_suspend(struct pci_dev *dev)
2069 {
2070 	pci_power_t target_state = pci_target_state(dev);
2071 	int error;
2072 
2073 	if (target_state == PCI_POWER_ERROR)
2074 		return -EIO;
2075 
2076 	dev->runtime_d3cold = target_state == PCI_D3cold;
2077 
2078 	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
2079 
2080 	error = pci_set_power_state(dev, target_state);
2081 
2082 	if (error) {
2083 		__pci_enable_wake(dev, target_state, true, false);
2084 		dev->runtime_d3cold = false;
2085 	}
2086 
2087 	return error;
2088 }
2089 
2090 /**
2091  * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2092  * @dev: Device to check.
2093  *
2094  * Return true if the device itself is capable of generating wake-up events
2095  * (through the platform or using the native PCIe PME) or if the device supports
2096  * PME and one of its upstream bridges can generate wake-up events.
2097  */
2098 bool pci_dev_run_wake(struct pci_dev *dev)
2099 {
2100 	struct pci_bus *bus = dev->bus;
2101 
2102 	if (device_run_wake(&dev->dev))
2103 		return true;
2104 
2105 	if (!dev->pme_support)
2106 		return false;
2107 
2108 	/* PME-capable in principle, but not from the intended sleep state */
2109 	if (!pci_pme_capable(dev, pci_target_state(dev)))
2110 		return false;
2111 
2112 	while (bus->parent) {
2113 		struct pci_dev *bridge = bus->self;
2114 
2115 		if (device_run_wake(&bridge->dev))
2116 			return true;
2117 
2118 		bus = bus->parent;
2119 	}
2120 
2121 	/* We have reached the root bus. */
2122 	if (bus->bridge)
2123 		return device_run_wake(bus->bridge);
2124 
2125 	return false;
2126 }
2127 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2128 
2129 /**
2130  * pci_dev_keep_suspended - Check if the device can stay in the suspended state.
2131  * @pci_dev: Device to check.
2132  *
2133  * Return 'true' if the device is runtime-suspended, it doesn't have to be
2134  * reconfigured due to wakeup settings differing between system and runtime
2135  * suspend, and its current power state is suitable for the upcoming
2136  * (system) transition.
2137  *
2138  * If the device is not configured for system wakeup, disable PME for it before
2139  * returning 'true' to prevent it from waking up the system unnecessarily.
2140  */
2141 bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
2142 {
2143 	struct device *dev = &pci_dev->dev;
2144 
2145 	if (!pm_runtime_suspended(dev)
2146 	    || pci_target_state(pci_dev) != pci_dev->current_state
2147 	    || platform_pci_need_resume(pci_dev)
2148 	    || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
2149 		return false;
2150 
2151 	/*
2152 	 * At this point the device is good to go unless it's been configured
2153 	 * to generate PME at runtime suspend time, but it is not supposed
2154 	 * to wake up the system.  In that case, simply disable PME for it
2155 	 * (it will have to be re-enabled on exit from system resume).
2156 	 *
2157 	 * If the device's power state is D3cold and the platform check above
2158 	 * hasn't triggered, the device's configuration is suitable and we don't
2159 	 * need to manipulate it at all.
2160 	 */
2161 	spin_lock_irq(&dev->power.lock);
2162 
2163 	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
2164 	    !device_may_wakeup(dev))
2165 		__pci_pme_active(pci_dev, false);
2166 
2167 	spin_unlock_irq(&dev->power.lock);
2168 	return true;
2169 }
2170 
2171 /**
2172  * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2173  * @pci_dev: Device to handle.
2174  *
2175  * If the device is runtime suspended and wakeup-capable, enable PME for it as
2176  * it might have been disabled during the prepare phase of system suspend if
2177  * the device was not configured for system wakeup.
2178  */
2179 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2180 {
2181 	struct device *dev = &pci_dev->dev;
2182 
2183 	if (!pci_dev_run_wake(pci_dev))
2184 		return;
2185 
2186 	spin_lock_irq(&dev->power.lock);
2187 
2188 	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2189 		__pci_pme_active(pci_dev, true);
2190 
2191 	spin_unlock_irq(&dev->power.lock);
2192 }
2193 
2194 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2195 {
2196 	struct device *dev = &pdev->dev;
2197 	struct device *parent = dev->parent;
2198 
2199 	if (parent)
2200 		pm_runtime_get_sync(parent);
2201 	pm_runtime_get_noresume(dev);
2202 	/*
2203 	 * pdev->current_state is set to PCI_D3cold during suspending,
2204 	 * so wait until suspending completes
2205 	 */
2206 	pm_runtime_barrier(dev);
2207 	/*
2208 	 * Only need to resume devices in D3cold, because config
2209 	 * registers are still accessible for devices suspended but
2210 	 * not in D3cold.
2211 	 */
2212 	if (pdev->current_state == PCI_D3cold)
2213 		pm_runtime_resume(dev);
2214 }
2215 
2216 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2217 {
2218 	struct device *dev = &pdev->dev;
2219 	struct device *parent = dev->parent;
2220 
2221 	pm_runtime_put(dev);
2222 	if (parent)
2223 		pm_runtime_put_sync(parent);
2224 }
2225 
2226 /**
2227  * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2228  * @bridge: Bridge to check
2229  *
2230  * This function checks if it is possible to move the bridge to D3.
2231  * Currently we only allow D3 for recent enough PCIe ports.
2232  */
2233 bool pci_bridge_d3_possible(struct pci_dev *bridge)
2234 {
2235 	unsigned int year;
2236 
2237 	if (!pci_is_pcie(bridge))
2238 		return false;
2239 
2240 	switch (pci_pcie_type(bridge)) {
2241 	case PCI_EXP_TYPE_ROOT_PORT:
2242 	case PCI_EXP_TYPE_UPSTREAM:
2243 	case PCI_EXP_TYPE_DOWNSTREAM:
2244 		if (pci_bridge_d3_disable)
2245 			return false;
2246 
2247 		/*
2248 		 * Hotplug interrupts cannot be delivered if the link is down,
2249 		 * so parents of a hotplug port must stay awake. In addition,
2250 		 * hotplug ports handled by firmware in System Management Mode
2251 		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
2252 		 * For simplicity, disallow in general for now.
2253 		 */
2254 		if (bridge->is_hotplug_bridge)
2255 			return false;
2256 
2257 		if (pci_bridge_d3_force)
2258 			return true;
2259 
2260 		/*
2261 		 * It should be safe to put PCIe ports from 2015 or newer
2262 		 * into D3.
2263 		 */
2264 		if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
2265 		    year >= 2015) {
2266 			return true;
2267 		}
2268 		break;
2269 	}
2270 
2271 	return false;
2272 }
2273 
2274 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2275 {
2276 	bool *d3cold_ok = data;
2277 
2278 	if (/* The device needs to be allowed to go D3cold ... */
2279 	    dev->no_d3cold || !dev->d3cold_allowed ||
2280 
2281 	    /* ... and if it is wakeup capable to do so from D3cold. */
2282 	    (device_may_wakeup(&dev->dev) &&
2283 	     !pci_pme_capable(dev, PCI_D3cold)) ||
2284 
2285 	    /* If it is a bridge it must be allowed to go to D3. */
2286 	    !pci_power_manageable(dev))
2287 
2288 		*d3cold_ok = false;
2289 
2290 	return !*d3cold_ok;
2291 }
2292 
2293 /**
2294  * pci_bridge_d3_update - Update bridge D3 capabilities
2295  * @dev: PCI device which is changed
2296  *
2297  * Update upstream bridge PM capabilities depending on whether the
2298  * device's PM configuration was changed or the device is being removed.  The
2299  * change is also propagated upstream.
2300  */
2301 void pci_bridge_d3_update(struct pci_dev *dev)
2302 {
2303 	bool remove = !device_is_registered(&dev->dev);
2304 	struct pci_dev *bridge;
2305 	bool d3cold_ok = true;
2306 
2307 	bridge = pci_upstream_bridge(dev);
2308 	if (!bridge || !pci_bridge_d3_possible(bridge))
2309 		return;
2310 
2311 	/*
2312 	 * If D3 is currently allowed for the bridge, removing one of its
2313 	 * children won't change that.
2314 	 */
2315 	if (remove && bridge->bridge_d3)
2316 		return;
2317 
2318 	/*
2319 	 * If D3 is currently allowed for the bridge and a child is added or
2320 	 * changed, disallowance of D3 can only be caused by that child, so
2321 	 * we only need to check that single device, not any of its siblings.
2322 	 *
2323 	 * If D3 is currently not allowed for the bridge, checking the device
2324 	 * first may allow us to skip checking its siblings.
2325 	 */
2326 	if (!remove)
2327 		pci_dev_check_d3cold(dev, &d3cold_ok);
2328 
2329 	/*
2330 	 * If D3 is currently not allowed for the bridge, this may be caused
2331 	 * either by the device being changed/removed or any of its siblings,
2332 	 * so we need to go through all children to find out if one of them
2333 	 * continues to block D3.
2334 	 */
2335 	if (d3cold_ok && !bridge->bridge_d3)
2336 		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
2337 			     &d3cold_ok);
2338 
2339 	if (bridge->bridge_d3 != d3cold_ok) {
2340 		bridge->bridge_d3 = d3cold_ok;
2341 		/* Propagate change to upstream bridges */
2342 		pci_bridge_d3_update(bridge);
2343 	}
2344 }
2345 
2346 /**
2347  * pci_d3cold_enable - Enable D3cold for device
2348  * @dev: PCI device to handle
2349  *
2350  * This function can be used in drivers to enable D3cold from the device
2351  * they handle.  It also updates upstream PCI bridge PM capabilities
2352  * accordingly.
2353  */
2354 void pci_d3cold_enable(struct pci_dev *dev)
2355 {
2356 	if (dev->no_d3cold) {
2357 		dev->no_d3cold = false;
2358 		pci_bridge_d3_update(dev);
2359 	}
2360 }
2361 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
2362 
2363 /**
2364  * pci_d3cold_disable - Disable D3cold for device
2365  * @dev: PCI device to handle
2366  *
2367  * This function can be used in drivers to disable D3cold from the device
2368  * they handle.  It also updates upstream PCI bridge PM capabilities
2369  * accordingly.
2370  */
2371 void pci_d3cold_disable(struct pci_dev *dev)
2372 {
2373 	if (!dev->no_d3cold) {
2374 		dev->no_d3cold = true;
2375 		pci_bridge_d3_update(dev);
2376 	}
2377 }
2378 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
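
/*
 * Illustrative use (a sketch): a driver whose device does not come back
 * reliably from D3cold can opt out during probe:
 *
 *	pci_d3cold_disable(pdev);
 */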
2379 
2380 /**
2381  * pci_pm_init - Initialize PM functions of given PCI device
2382  * @dev: PCI device to handle.
2383  */
2384 void pci_pm_init(struct pci_dev *dev)
2385 {
2386 	int pm;
2387 	u16 pmc;
2388 
2389 	pm_runtime_forbid(&dev->dev);
2390 	pm_runtime_set_active(&dev->dev);
2391 	pm_runtime_enable(&dev->dev);
2392 	device_enable_async_suspend(&dev->dev);
2393 	dev->wakeup_prepared = false;
2394 
2395 	dev->pm_cap = 0;
2396 	dev->pme_support = 0;
2397 
2398 	/* find PCI PM capability in list */
2399 	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
2400 	if (!pm)
2401 		return;
2402 	/* Check device's ability to generate PME# */
2403 	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
2404 
2405 	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
2406 		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
2407 			pmc & PCI_PM_CAP_VER_MASK);
2408 		return;
2409 	}
2410 
2411 	dev->pm_cap = pm;
2412 	dev->d3_delay = PCI_PM_D3_WAIT;
2413 	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
2414 	dev->bridge_d3 = pci_bridge_d3_possible(dev);
2415 	dev->d3cold_allowed = true;
2416 
2417 	dev->d1_support = false;
2418 	dev->d2_support = false;
2419 	if (!pci_no_d1d2(dev)) {
2420 		if (pmc & PCI_PM_CAP_D1)
2421 			dev->d1_support = true;
2422 		if (pmc & PCI_PM_CAP_D2)
2423 			dev->d2_support = true;
2424 
2425 		if (dev->d1_support || dev->d2_support)
2426 			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
2427 				   dev->d1_support ? " D1" : "",
2428 				   dev->d2_support ? " D2" : "");
2429 	}
2430 
2431 	pmc &= PCI_PM_CAP_PME_MASK;
2432 	if (pmc) {
2433 		dev_printk(KERN_DEBUG, &dev->dev,
2434 			 "PME# supported from%s%s%s%s%s\n",
2435 			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
2436 			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
2437 			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
2438 			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
2439 			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
2440 		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
2441 		dev->pme_poll = true;
2442 		/*
2443 		 * Make device's PM flags reflect the wake-up capability, but
2444 		 * let user space enable it to wake up the system as needed.
2445 		 */
2446 		device_set_wakeup_capable(&dev->dev, true);
2447 		/* Disable the PME# generation functionality */
2448 		pci_pme_active(dev, false);
2449 	}
2450 }
2451 
2452 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
2453 {
2454 	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
2455 
2456 	switch (prop) {
2457 	case PCI_EA_P_MEM:
2458 	case PCI_EA_P_VF_MEM:
2459 		flags |= IORESOURCE_MEM;
2460 		break;
2461 	case PCI_EA_P_MEM_PREFETCH:
2462 	case PCI_EA_P_VF_MEM_PREFETCH:
2463 		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
2464 		break;
2465 	case PCI_EA_P_IO:
2466 		flags |= IORESOURCE_IO;
2467 		break;
2468 	default:
2469 		return 0;
2470 	}
2471 
2472 	return flags;
2473 }
2474 
2475 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
2476 					    u8 prop)
2477 {
2478 	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
2479 		return &dev->resource[bei];
2480 #ifdef CONFIG_PCI_IOV
2481 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
2482 		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
2483 		return &dev->resource[PCI_IOV_RESOURCES +
2484 				      bei - PCI_EA_BEI_VF_BAR0];
2485 #endif
2486 	else if (bei == PCI_EA_BEI_ROM)
2487 		return &dev->resource[PCI_ROM_RESOURCE];
2488 	else
2489 		return NULL;
2490 }
2491 
2492 /* Read an Enhanced Allocation (EA) entry */
2493 static int pci_ea_read(struct pci_dev *dev, int offset)
2494 {
2495 	struct resource *res;
2496 	int ent_size, ent_offset = offset;
2497 	resource_size_t start, end;
2498 	unsigned long flags;
2499 	u32 dw0, bei, base, max_offset;
2500 	u8 prop;
2501 	bool support_64 = (sizeof(resource_size_t) >= 8);
2502 
2503 	pci_read_config_dword(dev, ent_offset, &dw0);
2504 	ent_offset += 4;
2505 
2506 	/* Entry size field indicates DWORDs after 1st */
2507 	ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
2508 
2509 	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
2510 		goto out;
2511 
2512 	bei = (dw0 & PCI_EA_BEI) >> 4;
2513 	prop = (dw0 & PCI_EA_PP) >> 8;
2514 
2515 	/*
2516 	 * If the Property is in the reserved range, try the Secondary
2517 	 * Property instead.
2518 	 */
2519 	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
2520 		prop = (dw0 & PCI_EA_SP) >> 16;
2521 	if (prop > PCI_EA_P_BRIDGE_IO)
2522 		goto out;
2523 
2524 	res = pci_ea_get_resource(dev, bei, prop);
2525 	if (!res) {
2526 		dev_err(&dev->dev, "Unsupported EA entry BEI: %u\n", bei);
2527 		goto out;
2528 	}
2529 
2530 	flags = pci_ea_flags(dev, prop);
2531 	if (!flags) {
2532 		dev_err(&dev->dev, "Unsupported EA properties: %#x\n", prop);
2533 		goto out;
2534 	}
2535 
2536 	/* Read Base */
2537 	pci_read_config_dword(dev, ent_offset, &base);
2538 	start = (base & PCI_EA_FIELD_MASK);
2539 	ent_offset += 4;
2540 
2541 	/* Read MaxOffset */
2542 	pci_read_config_dword(dev, ent_offset, &max_offset);
2543 	ent_offset += 4;
2544 
2545 	/* Read Base MSBs (if 64-bit entry) */
2546 	if (base & PCI_EA_IS_64) {
2547 		u32 base_upper;
2548 
2549 		pci_read_config_dword(dev, ent_offset, &base_upper);
2550 		ent_offset += 4;
2551 
2552 		flags |= IORESOURCE_MEM_64;
2553 
2554 		/* entry starts above 32-bit boundary, can't use */
2555 		if (!support_64 && base_upper)
2556 			goto out;
2557 
2558 		if (support_64)
2559 			start |= ((u64)base_upper << 32);
2560 	}
2561 
2562 	end = start + (max_offset | 0x03);
2563 
2564 	/* Read MaxOffset MSBs (if 64-bit entry) */
2565 	if (max_offset & PCI_EA_IS_64) {
2566 		u32 max_offset_upper;
2567 
2568 		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
2569 		ent_offset += 4;
2570 
2571 		flags |= IORESOURCE_MEM_64;
2572 
2573 		/* entry too big, can't use */
2574 		if (!support_64 && max_offset_upper)
2575 			goto out;
2576 
2577 		if (support_64)
2578 			end += ((u64)max_offset_upper << 32);
2579 	}
2580 
2581 	if (end < start) {
2582 		dev_err(&dev->dev, "EA Entry crosses address boundary\n");
2583 		goto out;
2584 	}
2585 
2586 	if (ent_size != ent_offset - offset) {
2587 		dev_err(&dev->dev,
2588 			"EA Entry Size (%d) does not match length read (%d)\n",
2589 			ent_size, ent_offset - offset);
2590 		goto out;
2591 	}
2592 
2593 	res->name = pci_name(dev);
2594 	res->start = start;
2595 	res->end = end;
2596 	res->flags = flags;
2597 
2598 	if (bei <= PCI_EA_BEI_BAR5)
2599 		dev_printk(KERN_DEBUG, &dev->dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
2600 			   bei, res, prop);
2601 	else if (bei == PCI_EA_BEI_ROM)
2602 		dev_printk(KERN_DEBUG, &dev->dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
2603 			   res, prop);
2604 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
2605 		dev_printk(KERN_DEBUG, &dev->dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
2606 			   bei - PCI_EA_BEI_VF_BAR0, res, prop);
2607 	else
2608 		dev_printk(KERN_DEBUG, &dev->dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
2609 			   bei, res, prop);
2610 
2611 out:
2612 	return offset + ent_size;
2613 }
2614 
2615 /* Enhanced Allocation Initialization */
2616 void pci_ea_init(struct pci_dev *dev)
2617 {
2618 	int ea;
2619 	u8 num_ent;
2620 	int offset;
2621 	int i;
2622 
2623 	/* find PCI EA capability in list */
2624 	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
2625 	if (!ea)
2626 		return;
2627 
2628 	/* determine the number of entries */
2629 	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
2630 					&num_ent);
2631 	num_ent &= PCI_EA_NUM_ENT_MASK;
2632 
2633 	offset = ea + PCI_EA_FIRST_ENT;
2634 
2635 	/* Skip DWORD 2 for type 1 functions */
2636 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
2637 		offset += 4;
2638 
2639 	/* parse each EA entry */
2640 	for (i = 0; i < num_ent; ++i)
2641 		offset = pci_ea_read(dev, offset);
2642 }
2643 
2644 static void pci_add_saved_cap(struct pci_dev *pci_dev,
2645 	struct pci_cap_saved_state *new_cap)
2646 {
2647 	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
2648 }
2649 
2650 /**
2651  * _pci_add_cap_save_buffer - allocate buffer for saving given
2652  *                            capability registers
2653  * @dev: the PCI device
2654  * @cap: the capability to allocate the buffer for
2655  * @extended: 'true' for an Extended capability ID, 'false' for a Standard one
2656  * @size: requested size of the buffer
2657  */
2658 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
2659 				    bool extended, unsigned int size)
2660 {
2661 	int pos;
2662 	struct pci_cap_saved_state *save_state;
2663 
2664 	if (extended)
2665 		pos = pci_find_ext_capability(dev, cap);
2666 	else
2667 		pos = pci_find_capability(dev, cap);
2668 
2669 	if (!pos)
2670 		return 0;
2671 
2672 	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
2673 	if (!save_state)
2674 		return -ENOMEM;
2675 
2676 	save_state->cap.cap_nr = cap;
2677 	save_state->cap.cap_extended = extended;
2678 	save_state->cap.size = size;
2679 	pci_add_saved_cap(dev, save_state);
2680 
2681 	return 0;
2682 }
2683 
2684 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
2685 {
2686 	return _pci_add_cap_save_buffer(dev, cap, false, size);
2687 }
2688 
2689 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
2690 {
2691 	return _pci_add_cap_save_buffer(dev, cap, true, size);
2692 }
2693 
2694 /**
2695  * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
2696  * @dev: the PCI device
2697  */
2698 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2699 {
2700 	int error;
2701 
2702 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
2703 					PCI_EXP_SAVE_REGS * sizeof(u16));
2704 	if (error)
2705 		dev_err(&dev->dev,
2706 			"unable to preallocate PCI Express save buffer\n");
2707 
2708 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
2709 	if (error)
2710 		dev_err(&dev->dev,
2711 			"unable to preallocate PCI-X save buffer\n");
2712 
2713 	pci_allocate_vc_save_buffers(dev);
2714 }
2715 
2716 void pci_free_cap_save_buffers(struct pci_dev *dev)
2717 {
2718 	struct pci_cap_saved_state *tmp;
2719 	struct hlist_node *n;
2720 
2721 	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
2722 		kfree(tmp);
2723 }
2724 
2725 /**
2726  * pci_configure_ari - enable or disable ARI forwarding
2727  * @dev: the PCI device
2728  *
2729  * If @dev and its upstream bridge both support ARI, enable ARI in the
2730  * bridge.  Otherwise, disable ARI in the bridge.
2731  */
2732 void pci_configure_ari(struct pci_dev *dev)
2733 {
2734 	u32 cap;
2735 	struct pci_dev *bridge;
2736 
2737 	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
2738 		return;
2739 
2740 	bridge = dev->bus->self;
2741 	if (!bridge)
2742 		return;
2743 
2744 	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
2745 	if (!(cap & PCI_EXP_DEVCAP2_ARI))
2746 		return;
2747 
2748 	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
2749 		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
2750 					 PCI_EXP_DEVCTL2_ARI);
2751 		bridge->ari_enabled = 1;
2752 	} else {
2753 		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
2754 					   PCI_EXP_DEVCTL2_ARI);
2755 		bridge->ari_enabled = 0;
2756 	}
2757 }
2758 
2759 static int pci_acs_enable;
2760 
2761 /**
2762  * pci_request_acs - ask for ACS to be enabled if supported
2763  */
2764 void pci_request_acs(void)
2765 {
2766 	pci_acs_enable = 1;
2767 }
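
/*
 * Illustrative use (a sketch): an IOMMU driver that depends on ACS
 * isolation calls this from its early init code, before PCI
 * enumeration:
 *
 *	pci_request_acs();
 */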
2768 
2769 /**
2770  * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
2771  * @dev: the PCI device
2772  */
2773 static void pci_std_enable_acs(struct pci_dev *dev)
2774 {
2775 	int pos;
2776 	u16 cap;
2777 	u16 ctrl;
2778 
2779 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2780 	if (!pos)
2781 		return;
2782 
2783 	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2784 	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2785 
2786 	/* Source Validation */
2787 	ctrl |= (cap & PCI_ACS_SV);
2788 
2789 	/* P2P Request Redirect */
2790 	ctrl |= (cap & PCI_ACS_RR);
2791 
2792 	/* P2P Completion Redirect */
2793 	ctrl |= (cap & PCI_ACS_CR);
2794 
2795 	/* Upstream Forwarding */
2796 	ctrl |= (cap & PCI_ACS_UF);
2797 
2798 	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2799 }
2800 
2801 /**
2802  * pci_enable_acs - enable ACS if hardware supports it
2803  * @dev: the PCI device
2804  */
2805 void pci_enable_acs(struct pci_dev *dev)
2806 {
2807 	if (!pci_acs_enable)
2808 		return;
2809 
2810 	if (!pci_dev_specific_enable_acs(dev))
2811 		return;
2812 
2813 	pci_std_enable_acs(dev);
2814 }
2815 
2816 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
2817 {
2818 	int pos;
2819 	u16 cap, ctrl;
2820 
2821 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2822 	if (!pos)
2823 		return false;
2824 
2825 	/*
2826 	 * Except for egress control, capabilities are either required
2827 	 * or only required if controllable.  Features missing from the
2828 	 * capability field can therefore be assumed as hard-wired enabled.
2829 	 * capability field can therefore be assumed to be hard-wired enabled.
2830 	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
2831 	acs_flags &= (cap | PCI_ACS_EC);
2832 
2833 	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2834 	return (ctrl & acs_flags) == acs_flags;
2835 }
2836 
2837 /**
2838  * pci_acs_enabled - test ACS against required flags for a given device
2839  * @pdev: device to test
2840  * @acs_flags: required PCI ACS flags
2841  *
2842  * Return true if the device supports the provided flags.  Automatically
2843  * filters out flags that are not implemented on multifunction devices.
2844  *
2845  * Note that this interface checks the effective ACS capabilities of the
2846  * device rather than the actual capabilities.  For instance, most single
2847  * function endpoints are not required to support ACS because they have no
2848  * opportunity for peer-to-peer access.  We therefore return 'true'
2849  * regardless of whether the device exposes an ACS capability.  This makes
2850  * it much easier for callers of this function to ignore the actual type
2851  * or topology of the device when testing ACS support.
2852  */
2853 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2854 {
2855 	int ret;
2856 
2857 	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2858 	if (ret >= 0)
2859 		return ret > 0;
2860 
2861 	/*
2862 	 * Conventional PCI and PCI-X devices never support ACS, either
2863 	 * effectively or actually.  The shared bus topology implies that
2864 	 * any device on the bus can receive or snoop DMA.
2865 	 */
2866 	if (!pci_is_pcie(pdev))
2867 		return false;
2868 
2869 	switch (pci_pcie_type(pdev)) {
2870 	/*
2871 	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
2872 	 * but since their primary interface is PCI/X, we conservatively
2873 	 * handle them as we would a non-PCIe device.
2874 	 */
2875 	case PCI_EXP_TYPE_PCIE_BRIDGE:
2876 	/*
2877 	 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
2878 	 * applicable... must never implement an ACS Extended Capability...".
2879 	 * This seems arbitrary, but we take a conservative interpretation
2880 	 * of this statement.
2881 	 */
2882 	case PCI_EXP_TYPE_PCI_BRIDGE:
2883 	case PCI_EXP_TYPE_RC_EC:
2884 		return false;
2885 	/*
2886 	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
2887 	 * implement ACS in order to indicate their peer-to-peer capabilities,
2888 	 * regardless of whether they are single- or multi-function devices.
2889 	 */
2890 	case PCI_EXP_TYPE_DOWNSTREAM:
2891 	case PCI_EXP_TYPE_ROOT_PORT:
2892 		return pci_acs_flags_enabled(pdev, acs_flags);
2893 	/*
2894 	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
2895 	 * implemented by the remaining PCIe types to indicate peer-to-peer
2896 	 * capabilities, but only when they are part of a multifunction
2897 	 * device.  The footnote for section 6.12 indicates the specific
2898 	 * PCIe types included here.
2899 	 */
2900 	case PCI_EXP_TYPE_ENDPOINT:
2901 	case PCI_EXP_TYPE_UPSTREAM:
2902 	case PCI_EXP_TYPE_LEG_END:
2903 	case PCI_EXP_TYPE_RC_END:
2904 		if (!pdev->multifunction)
2905 			break;
2906 
2907 		return pci_acs_flags_enabled(pdev, acs_flags);
2908 	}
2909 
2910 	/*
2911 	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
2912 	 * to single function devices with the exception of downstream ports.
2913 	 */
2914 	return true;
2915 }
2916 
2917 /**
2918  * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
2919  * @start: starting downstream device
2920  * @end: ending upstream device or NULL to search to the root bus
2921  * @acs_flags: required flags
2922  *
2923  * Walk up a device tree from start to end testing PCI ACS support.  If
2924  * any step along the way does not support the required flags, return false.
2925  */
2926 bool pci_acs_path_enabled(struct pci_dev *start,
2927 			  struct pci_dev *end, u16 acs_flags)
2928 {
2929 	struct pci_dev *pdev, *parent = start;
2930 
2931 	do {
2932 		pdev = parent;
2933 
2934 		if (!pci_acs_enabled(pdev, acs_flags))
2935 			return false;
2936 
2937 		if (pci_is_root_bus(pdev->bus))
2938 			return (end == NULL);
2939 
2940 		parent = pdev->bus->self;
2941 	} while (pdev != end);
2942 
2943 	return true;
2944 }
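
/*
 * Illustrative check (a sketch): before placing two endpoints in
 * separate IOMMU groups, one can verify that every bridge from the
 * device up to the root redirects peer-to-peer traffic upstream:
 *
 *	if (pci_acs_path_enabled(pdev, NULL, PCI_ACS_RR | PCI_ACS_CR))
 *		... peer-to-peer DMA is isolated along the whole path ...
 */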
2945 
2946 /**
2947  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2948  * @dev: the PCI device
2949  * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2950  *
2951  * Perform INTx swizzling for a device behind one level of bridge.  This is
2952  * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2953  * behind bridges on add-in cards.  For devices with ARI enabled, the slot
2954  * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2955  * the PCI Express Base Specification, Revision 2.1)
2956  */
2957 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
2958 {
2959 	int slot;
2960 
2961 	if (pci_ari_enabled(dev->bus))
2962 		slot = 0;
2963 	else
2964 		slot = PCI_SLOT(dev->devfn);
2965 
2966 	return (((pin - 1) + slot) % 4) + 1;
2967 }
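
/*
 * Worked example: a device in slot 2 asserting INTB (pin 2) appears at
 * the upstream bridge as (((2 - 1) + 2) % 4) + 1 = 4, i.e. INTD.
 */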
2968 
2969 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2970 {
2971 	u8 pin;
2972 
2973 	pin = dev->pin;
2974 	if (!pin)
2975 		return -1;
2976 
2977 	while (!pci_is_root_bus(dev->bus)) {
2978 		pin = pci_swizzle_interrupt_pin(dev, pin);
2979 		dev = dev->bus->self;
2980 	}
2981 	*bridge = dev;
2982 	return pin;
2983 }
2984 
2985 /**
2986  * pci_common_swizzle - swizzle INTx all the way to root bridge
2987  * @dev: the PCI device
2988  * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2989  *
2990  * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
2991  * bridges all the way up to a PCI root bus.
2992  */
2993 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2994 {
2995 	u8 pin = *pinp;
2996 
2997 	while (!pci_is_root_bus(dev->bus)) {
2998 		pin = pci_swizzle_interrupt_pin(dev, pin);
2999 		dev = dev->bus->self;
3000 	}
3001 	*pinp = pin;
3002 	return PCI_SLOT(dev->devfn);
3003 }
3004 EXPORT_SYMBOL_GPL(pci_common_swizzle);
3005 
3006 /**
3007  *	pci_release_region - Release a PCI BAR
3008  *	@pdev: PCI device whose resources were previously reserved by pci_request_region
3009  *	@bar: BAR to release
3010  *
3011  *	Releases the PCI I/O and memory resources previously reserved by a
3012  *	successful call to pci_request_region.  Call this function only
3013  *	after all use of the PCI regions has ceased.
3014  */
3015 void pci_release_region(struct pci_dev *pdev, int bar)
3016 {
3017 	struct pci_devres *dr;
3018 
3019 	if (pci_resource_len(pdev, bar) == 0)
3020 		return;
3021 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3022 		release_region(pci_resource_start(pdev, bar),
3023 				pci_resource_len(pdev, bar));
3024 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3025 		release_mem_region(pci_resource_start(pdev, bar),
3026 				pci_resource_len(pdev, bar));
3027 
3028 	dr = find_pci_dr(pdev);
3029 	if (dr)
3030 		dr->region_mask &= ~(1 << bar);
3031 }
3032 EXPORT_SYMBOL(pci_release_region);
3033 
3034 /**
3035  *	__pci_request_region - Reserve PCI I/O and memory resource
3036  *	@pdev: PCI device whose resources are to be reserved
3037  *	@bar: BAR to be reserved
3038  *	@res_name: Name to be associated with resource.
3039  *	@exclusive: whether the region access is exclusive or not
3040  *
3041  *	Mark the PCI region associated with PCI device @pdev BAR @bar as
3042  *	being reserved by owner @res_name.  Do not access any
3043  *	address inside the PCI regions unless this call returns
3044  *	successfully.
3045  *
3046  *	If @exclusive is set, then the region is marked so that userspace
3047  *	is explicitly not allowed to map the resource via /dev/mem or
3048  *	sysfs MMIO access.
3049  *
3050  *	Returns 0 on success, or -EBUSY on error.  A warning
3051  *	message is also printed on failure.
3052  */
3053 static int __pci_request_region(struct pci_dev *pdev, int bar,
3054 				const char *res_name, int exclusive)
3055 {
3056 	struct pci_devres *dr;
3057 
3058 	if (pci_resource_len(pdev, bar) == 0)
3059 		return 0;
3060 
3061 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3062 		if (!request_region(pci_resource_start(pdev, bar),
3063 			    pci_resource_len(pdev, bar), res_name))
3064 			goto err_out;
3065 	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3066 		if (!__request_mem_region(pci_resource_start(pdev, bar),
3067 					pci_resource_len(pdev, bar), res_name,
3068 					exclusive))
3069 			goto err_out;
3070 	}
3071 
3072 	dr = find_pci_dr(pdev);
3073 	if (dr)
3074 		dr->region_mask |= 1 << bar;
3075 
3076 	return 0;
3077 
3078 err_out:
3079 	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
3080 		 &pdev->resource[bar]);
3081 	return -EBUSY;
3082 }
3083 
3084 /**
3085  *	pci_request_region - Reserve PCI I/O and memory resource
3086  *	@pdev: PCI device whose resources are to be reserved
3087  *	@bar: BAR to be reserved
3088  *	@res_name: Name to be associated with resource
3089  *
3090  *	Mark the PCI region associated with PCI device @pdev BAR @bar as
3091  *	being reserved by owner @res_name.  Do not access any
3092  *	address inside the PCI regions unless this call returns
3093  *	successfully.
3094  *
3095  *	Returns 0 on success, or -EBUSY on error.  A warning
3096  *	message is also printed on failure.
3097  */
3098 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3099 {
3100 	return __pci_request_region(pdev, bar, res_name, 0);
3101 }
3102 EXPORT_SYMBOL(pci_request_region);
3103 
3104 /**
3105  *	pci_request_region_exclusive - Reserve PCI I/O and memory resource
3106  *	@pdev: PCI device whose resources are to be reserved
3107  *	@bar: BAR to be reserved
3108  *	@res_name: Name to be associated with resource.
3109  *
3110  *	Mark the PCI region associated with PCI device @pdev BAR @bar as
3111  *	being reserved by owner @res_name.  Do not access any
3112  *	address inside the PCI regions unless this call returns
3113  *	successfully.
3114  *
3115  *	Returns 0 on success, or -EBUSY on error.  A warning
3116  *	message is also printed on failure.
3117  *
3118  *	The key difference that _exclusive makes is that userspace is
3119  *	explicitly not allowed to map the resource via /dev/mem or
3120  *	sysfs.
3121  */
3122 int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
3123 				 const char *res_name)
3124 {
3125 	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
3126 }
3127 EXPORT_SYMBOL(pci_request_region_exclusive);
3128 
3129 /**
3130  * pci_release_selected_regions - Release selected PCI I/O and memory resources
3131  * @pdev: PCI device whose resources were previously reserved
3132  * @bars: Bitmask of BARs to be released
3133  *
3134  * Release selected PCI I/O and memory resources previously reserved.
3135  * Call this function only after all use of the PCI regions has ceased.
3136  */
3137 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3138 {
3139 	int i;
3140 
3141 	for (i = 0; i < 6; i++)
3142 		if (bars & (1 << i))
3143 			pci_release_region(pdev, i);
3144 }
3145 EXPORT_SYMBOL(pci_release_selected_regions);
3146 
3147 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3148 					  const char *res_name, int excl)
3149 {
3150 	int i;
3151 
3152 	for (i = 0; i < 6; i++)
3153 		if (bars & (1 << i))
3154 			if (__pci_request_region(pdev, i, res_name, excl))
3155 				goto err_out;
3156 	return 0;
3157 
3158 err_out:
3159 	while (--i >= 0)
3160 		if (bars & (1 << i))
3161 			pci_release_region(pdev, i);
3162 
3163 	return -EBUSY;
3164 }
3165 
3166 
3167 /**
3168  * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
3169  * @pdev: PCI device whose resources are to be reserved
3170  * @bars: Bitmask of BARs to be requested
3171  * @res_name: Name to be associated with resource
3172  */
3173 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3174 				 const char *res_name)
3175 {
3176 	return __pci_request_selected_regions(pdev, bars, res_name, 0);
3177 }
3178 EXPORT_SYMBOL(pci_request_selected_regions);
3179 
3180 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3181 					   const char *res_name)
3182 {
3183 	return __pci_request_selected_regions(pdev, bars, res_name,
3184 			IORESOURCE_EXCLUSIVE);
3185 }
3186 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3187 
3188 /**
3189  *	pci_release_regions - Release reserved PCI I/O and memory resources
3190  *	@pdev: PCI device whose resources were previously reserved by pci_request_regions
3191  *
3192  *	Releases all PCI I/O and memory resources previously reserved by a
3193  *	successful call to pci_request_regions.  Call this function only
3194  *	after all use of the PCI regions has ceased.
3195  */
3196 
3197 void pci_release_regions(struct pci_dev *pdev)
3198 {
3199 	pci_release_selected_regions(pdev, (1 << 6) - 1);
3200 }
3201 EXPORT_SYMBOL(pci_release_regions);
3202 
3203 /**
3204  *	pci_request_regions - Reserve PCI I/O and memory resources
3205  *	@pdev: PCI device whose resources are to be reserved
3206  *	@res_name: Name to be associated with resource.
3207  *
3208  *	Mark all PCI regions associated with PCI device @pdev as
3209  *	being reserved by owner @res_name.  Do not access any
3210  *	address inside the PCI regions unless this call returns
3211  *	successfully.
3212  *
3213  *	Returns 0 on success, or -EBUSY on error.  A warning
3214  *	message is also printed on failure.
3215  */
3216 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3217 {
3218 	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
3219 }
3220 EXPORT_SYMBOL(pci_request_regions);
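
/*
 * Typical probe-time usage of the region helpers above (a sketch; the
 * "foo" name is hypothetical and error handling is abbreviated):
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	err = pci_request_regions(pdev, "foo");
 *	if (err)
 *		goto err_disable_device;
 *	regs = pci_iomap(pdev, 0, 0);	-- map BAR 0
 */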
3221 
3222 /**
3223  *	pci_request_regions_exclusive - Reserve PCI I/O and memory resources
3224  *	@pdev: PCI device whose resources are to be reserved
3225  *	@res_name: Name to be associated with resource.
3226  *
3227  *	Mark all PCI regions associated with PCI device @pdev as
3228  *	being reserved by owner @res_name.  Do not access any
3229  *	address inside the PCI regions unless this call returns
3230  *	successfully.
3231  *
3232  *	pci_request_regions_exclusive() will mark the region so that
3233  *	/dev/mem and sysfs MMIO access will not be allowed.
3234  *
3235  *	Returns 0 on success, or -EBUSY on error.  A warning
3236  *	message is also printed on failure.
3237  */
3238 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3239 {
3240 	return pci_request_selected_regions_exclusive(pdev,
3241 					((1 << 6) - 1), res_name);
3242 }
3243 EXPORT_SYMBOL(pci_request_regions_exclusive);
3244 
3245 #ifdef PCI_IOBASE
3246 struct io_range {
3247 	struct list_head list;
3248 	phys_addr_t start;
3249 	resource_size_t size;
3250 };
3251 
3252 static LIST_HEAD(io_range_list);
3253 static DEFINE_SPINLOCK(io_range_lock);
3254 #endif
3255 
3256 /*
3257  * Record the PCI IO range (expressed as CPU physical address + size).
3258  * Return a negative value if an error has occurred, zero otherwise
3259  */
3260 int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
3261 {
3262 	int err = 0;
3263 
3264 #ifdef PCI_IOBASE
3265 	struct io_range *range;
3266 	resource_size_t allocated_size = 0;
3267 
3268 	/* check whether the range has already been recorded */
3269 	spin_lock(&io_range_lock);
3270 	list_for_each_entry(range, &io_range_list, list) {
3271 		if (addr >= range->start && addr + size <= range->start + range->size) {
3272 			/* range already registered, bail out */
3273 			goto end_register;
3274 		}
3275 		allocated_size += range->size;
3276 	}
3277 
3278 	/* range not registered yet, check for available space */
3279 	if (allocated_size + size - 1 > IO_SPACE_LIMIT) {
3280 		/* if it's too big check if 64K space can be reserved */
3281 		if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) {
3282 			err = -E2BIG;
3283 			goto end_register;
3284 		}
3285 
3286 		size = SZ_64K;
3287 		pr_warn("Requested IO range too big, new size set to 64K\n");
3288 	}
3289 
3290 	/* add the range to the list */
3291 	range = kzalloc(sizeof(*range), GFP_ATOMIC);
3292 	if (!range) {
3293 		err = -ENOMEM;
3294 		goto end_register;
3295 	}
3296 
3297 	range->start = addr;
3298 	range->size = size;
3299 
3300 	list_add_tail(&range->list, &io_range_list);
3301 
3302 end_register:
3303 	spin_unlock(&io_range_lock);
3304 #endif
3305 
3306 	return err;
3307 }
3308 
3309 phys_addr_t pci_pio_to_address(unsigned long pio)
3310 {
3311 	phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
3312 
3313 #ifdef PCI_IOBASE
3314 	struct io_range *range;
3315 	resource_size_t allocated_size = 0;
3316 
3317 	if (pio > IO_SPACE_LIMIT)
3318 		return address;
3319 
3320 	spin_lock(&io_range_lock);
3321 	list_for_each_entry(range, &io_range_list, list) {
3322 		if (pio >= allocated_size && pio < allocated_size + range->size) {
3323 			address = range->start + pio - allocated_size;
3324 			break;
3325 		}
3326 		allocated_size += range->size;
3327 	}
3328 	spin_unlock(&io_range_lock);
3329 #endif
3330 
3331 	return address;
3332 }
3333 
3334 unsigned long __weak pci_address_to_pio(phys_addr_t address)
3335 {
3336 #ifdef PCI_IOBASE
3337 	struct io_range *res;
3338 	resource_size_t offset = 0;
3339 	unsigned long addr = -1;
3340 
3341 	spin_lock(&io_range_lock);
3342 	list_for_each_entry(res, &io_range_list, list) {
3343 		if (address >= res->start && address < res->start + res->size) {
3344 			addr = address - res->start + offset;
3345 			break;
3346 		}
3347 		offset += res->size;
3348 	}
3349 	spin_unlock(&io_range_lock);
3350 
3351 	return addr;
3352 #else
3353 	if (address > IO_SPACE_LIMIT)
3354 		return (unsigned long)-1;
3355 
3356 	return (unsigned long) address;
3357 #endif
3358 }
3359 
3360 /**
3361  *	pci_remap_iospace - Remap the memory mapped I/O space
3362  *	@res: Resource describing the I/O space
3363  *	@phys_addr: physical address of range to be mapped
3364  *
3365  *	Remap the memory mapped I/O space described by the @res
3366  *	and the CPU physical address @phys_addr into virtual address space.
3367  *	Only architectures that have memory mapped IO functions defined
3368  *	(and the PCI_IOBASE value defined) should call this function.
3369  */
3370 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
3371 {
3372 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3373 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3374 
3375 	if (!(res->flags & IORESOURCE_IO))
3376 		return -EINVAL;
3377 
3378 	if (res->end > IO_SPACE_LIMIT)
3379 		return -EINVAL;
3380 
3381 	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
3382 				  pgprot_device(PAGE_KERNEL));
3383 #else
3384 	/* this architecture does not have memory mapped I/O space,
3385 	   so this function should never be called */
3386 	WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
3387 	return -ENODEV;
3388 #endif
3389 }
3390 EXPORT_SYMBOL(pci_remap_iospace);
3391 
3392 /**
3393  *	pci_unmap_iospace - Unmap the memory mapped I/O space
3394  *	@res: resource to be unmapped
3395  *
3396  *	Unmap the CPU virtual address @res from virtual address space.
3397  *	Only architectures that have memory mapped IO functions defined
3398  *	(and the PCI_IOBASE value defined) should call this function.
3399  */
3400 void pci_unmap_iospace(struct resource *res)
3401 {
3402 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3403 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3404 
3405 	unmap_kernel_range(vaddr, resource_size(res));
3406 #endif
3407 }
3408 EXPORT_SYMBOL(pci_unmap_iospace);
3409 
3410 /**
3411  * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
3412  * @dev: Generic device to remap IO address for
3413  * @offset: Resource address to map
3414  * @size: Size of map
3415  *
3416  * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
3417  * detach.
3418  */
3419 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
3420 				      resource_size_t offset,
3421 				      resource_size_t size)
3422 {
3423 	void __iomem **ptr, *addr;
3424 
3425 	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
3426 	if (!ptr)
3427 		return NULL;
3428 
3429 	addr = pci_remap_cfgspace(offset, size);
3430 	if (addr) {
3431 		*ptr = addr;
3432 		devres_add(dev, ptr);
3433 	} else
3434 		devres_free(ptr);
3435 
3436 	return addr;
3437 }
3438 EXPORT_SYMBOL(devm_pci_remap_cfgspace);
3439 
3440 /**
3441  * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
3442  * @dev: generic device to handle the resource for
3443  * @res: configuration space resource to be handled
3444  *
3445  * Checks that a resource is a valid memory region, requests the memory
3446  * region and ioremaps with pci_remap_cfgspace() API that ensures the
3447  * proper PCI configuration space memory attributes are guaranteed.
3448  *
3449  * All operations are managed and will be undone on driver detach.
3450  *
3451  * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
3452  * on failure. Usage example:
3453  *
3454  *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3455  *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
3456  *	if (IS_ERR(base))
3457  *		return PTR_ERR(base);
3458  */
3459 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
3460 					  struct resource *res)
3461 {
3462 	resource_size_t size;
3463 	const char *name;
3464 	void __iomem *dest_ptr;
3465 
3466 	BUG_ON(!dev);
3467 
3468 	if (!res || resource_type(res) != IORESOURCE_MEM) {
3469 		dev_err(dev, "invalid resource\n");
3470 		return IOMEM_ERR_PTR(-EINVAL);
3471 	}
3472 
3473 	size = resource_size(res);
3474 	name = res->name ?: dev_name(dev);
3475 
3476 	if (!devm_request_mem_region(dev, res->start, size, name)) {
3477 		dev_err(dev, "can't request region for resource %pR\n", res);
3478 		return IOMEM_ERR_PTR(-EBUSY);
3479 	}
3480 
3481 	dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
3482 	if (!dest_ptr) {
3483 		dev_err(dev, "ioremap failed for resource %pR\n", res);
3484 		devm_release_mem_region(dev, res->start, size);
3485 		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
3486 	}
3487 
3488 	return dest_ptr;
3489 }
3490 EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
3491 
3492 static void __pci_set_master(struct pci_dev *dev, bool enable)
3493 {
3494 	u16 old_cmd, cmd;
3495 
3496 	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
3497 	if (enable)
3498 		cmd = old_cmd | PCI_COMMAND_MASTER;
3499 	else
3500 		cmd = old_cmd & ~PCI_COMMAND_MASTER;
3501 	if (cmd != old_cmd) {
3502 		dev_dbg(&dev->dev, "%s bus mastering\n",
3503 			enable ? "enabling" : "disabling");
3504 		pci_write_config_word(dev, PCI_COMMAND, cmd);
3505 	}
3506 	dev->is_busmaster = enable;
3507 }
3508 
3509 /**
3510  * pcibios_setup - process "pci=" kernel boot arguments
3511  * @str: string used to pass in "pci=" kernel boot arguments
3512  *
3513  * Process kernel boot arguments.  This is the default implementation.
3514  * Architecture specific implementations can override this as necessary.
3515  */
3516 char * __weak __init pcibios_setup(char *str)
3517 {
3518 	return str;
3519 }
3520 
3521 /**
3522  * pcibios_set_master - enable PCI bus-mastering for device dev
3523  * @dev: the PCI device to enable
3524  *
3525  * Enables PCI bus-mastering for the device.  This is the default
3526  * implementation.  Architecture specific implementations can override
3527  * this if necessary.
3528  */
3529 void __weak pcibios_set_master(struct pci_dev *dev)
3530 {
3531 	u8 lat;
3532 
3533 	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
3534 	if (pci_is_pcie(dev))
3535 		return;
3536 
3537 	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
3538 	if (lat < 16)
3539 		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
3540 	else if (lat > pcibios_max_latency)
3541 		lat = pcibios_max_latency;
3542 	else
3543 		return;
3544 
3545 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
3546 }
3547 
3548 /**
3549  * pci_set_master - enables bus-mastering for device dev
3550  * @dev: the PCI device to enable
3551  *
3552  * Enables bus-mastering on the device and calls pcibios_set_master()
3553  * to do the needed arch specific settings.
3554  */
3555 void pci_set_master(struct pci_dev *dev)
3556 {
3557 	__pci_set_master(dev, true);
3558 	pcibios_set_master(dev);
3559 }
3560 EXPORT_SYMBOL(pci_set_master);
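
/*
 * Illustrative use (a sketch): drivers enable bus mastering once the
 * device is set up and before starting DMA, typically from probe:
 *
 *	pci_set_master(pdev);
 */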
3561 
3562 /**
3563  * pci_clear_master - disables bus-mastering for device dev
3564  * @dev: the PCI device to disable
3565  */
3566 void pci_clear_master(struct pci_dev *dev)
3567 {
3568 	__pci_set_master(dev, false);
3569 }
3570 EXPORT_SYMBOL(pci_clear_master);
3571 
3572 /**
3573  * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
3574  * @dev: the PCI device for which MWI is to be enabled
3575  *
3576  * Helper function for pci_set_mwi.
3577  * Originally copied from drivers/net/acenic.c.
3578  * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
3579  *
3580  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3581  */
3582 int pci_set_cacheline_size(struct pci_dev *dev)
3583 {
3584 	u8 cacheline_size;
3585 
3586 	if (!pci_cache_line_size)
3587 		return -EINVAL;
3588 
3589 	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
3590 	   equal to or a multiple of the right value. */
3591 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3592 	if (cacheline_size >= pci_cache_line_size &&
3593 	    (cacheline_size % pci_cache_line_size) == 0)
3594 		return 0;
3595 
3596 	/* Write the correct value. */
3597 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
3598 	/* Read it back. */
3599 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3600 	if (cacheline_size == pci_cache_line_size)
3601 		return 0;
3602 
3603 	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not supported\n",
3604 		   pci_cache_line_size << 2);
3605 
3606 	return -EINVAL;
3607 }
3608 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
3609 
3610 /**
3611  * pci_set_mwi - enables memory-write-invalidate PCI transaction
3612  * @dev: the PCI device for which MWI is enabled
3613  *
3614  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
3615  *
3616  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3617  */
3618 int pci_set_mwi(struct pci_dev *dev)
3619 {
3620 #ifdef PCI_DISABLE_MWI
3621 	return 0;
3622 #else
3623 	int rc;
3624 	u16 cmd;
3625 
3626 	rc = pci_set_cacheline_size(dev);
3627 	if (rc)
3628 		return rc;
3629 
3630 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
3631 	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
3632 		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
3633 		cmd |= PCI_COMMAND_INVALIDATE;
3634 		pci_write_config_word(dev, PCI_COMMAND, cmd);
3635 	}
3636 	return 0;
3637 #endif
3638 }
3639 EXPORT_SYMBOL(pci_set_mwi);
3640 
3641 /**
3642  * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
3643  * @dev: the PCI device for which MWI is enabled
3644  *
3645  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
3646  * Callers are not required to check the return value.
3647  *
3648  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3649  */
3650 int pci_try_set_mwi(struct pci_dev *dev)
3651 {
3652 #ifdef PCI_DISABLE_MWI
3653 	return 0;
3654 #else
3655 	return pci_set_mwi(dev);
3656 #endif
3657 }
3658 EXPORT_SYMBOL(pci_try_set_mwi);
3659 
3660 /**
3661  * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
3662  * @dev: the PCI device to disable
3663  *
3664  * Disables PCI Memory-Write-Invalidate transaction on the device
3665  */
3666 void pci_clear_mwi(struct pci_dev *dev)
3667 {
3668 #ifndef PCI_DISABLE_MWI
3669 	u16 cmd;
3670 
3671 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
3672 	if (cmd & PCI_COMMAND_INVALIDATE) {
3673 		cmd &= ~PCI_COMMAND_INVALIDATE;
3674 		pci_write_config_word(dev, PCI_COMMAND, cmd);
3675 	}
3676 #endif
3677 }
3678 EXPORT_SYMBOL(pci_clear_mwi);
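
/*
 * Illustrative usage (sketch): MWI is only a performance hint, so most
 * callers use pci_try_set_mwi() and ignore the result; "pdev" is assumed
 * to be an already-enabled device.
 *
 *	pci_try_set_mwi(pdev);		// best effort, failure is harmless
 *	...
 *	pci_clear_mwi(pdev);		// optional, on teardown
 */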
3679 
3680 /**
3681  * pci_intx - enables/disables PCI INTx for device dev
3682  * @pdev: the PCI device to operate on
3683  * @enable: boolean: whether to enable or disable PCI INTx
3684  *
3685  * Enables/disables PCI INTx for device dev
3686  */
3687 void pci_intx(struct pci_dev *pdev, int enable)
3688 {
3689 	u16 pci_command, new;
3690 
3691 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
3692 
3693 	if (enable)
3694 		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
3695 	else
3696 		new = pci_command | PCI_COMMAND_INTX_DISABLE;
3697 
3698 	if (new != pci_command) {
3699 		struct pci_devres *dr;
3700 
3701 		pci_write_config_word(pdev, PCI_COMMAND, new);
3702 
3703 		dr = find_pci_dr(pdev);
3704 		if (dr && !dr->restore_intx) {
3705 			dr->restore_intx = 1;
3706 			dr->orig_intx = !enable;
3707 		}
3708 	}
3709 }
3710 EXPORT_SYMBOL_GPL(pci_intx);
3711 
3712 /**
3713  * pci_intx_mask_supported - probe for INTx masking support
3714  * @dev: the PCI device to operate on
3715  *
3716  * Check if the device dev support INTx masking via the config space
3717  * Check if the device dev supports INTx masking via the config space
3718  */
3719 bool pci_intx_mask_supported(struct pci_dev *dev)
3720 {
3721 	bool mask_supported = false;
3722 	u16 orig, new;
3723 
3724 	if (dev->broken_intx_masking)
3725 		return false;
3726 
3727 	pci_cfg_access_lock(dev);
3728 
3729 	pci_read_config_word(dev, PCI_COMMAND, &orig);
3730 	pci_write_config_word(dev, PCI_COMMAND,
3731 			      orig ^ PCI_COMMAND_INTX_DISABLE);
3732 	pci_read_config_word(dev, PCI_COMMAND, &new);
3733 
3734 	/*
3735 	 * There's no way to protect against hardware bugs or detect them
3736 	 * reliably, but as long as we know what the value should be, let's
3737 	 * go ahead and check it.
3738 	 */
3739 	if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
3740 		dev_err(&dev->dev, "Command register changed from 0x%x to 0x%x: driver or hardware bug?\n",
3741 			orig, new);
3742 	} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
3743 		mask_supported = true;
3744 		pci_write_config_word(dev, PCI_COMMAND, orig);
3745 	}
3746 
3747 	pci_cfg_access_unlock(dev);
3748 	return mask_supported;
3749 }
3750 EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
3751 
3752 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
3753 {
3754 	struct pci_bus *bus = dev->bus;
3755 	bool mask_updated = true;
3756 	u32 cmd_status_dword;
3757 	u16 origcmd, newcmd;
3758 	unsigned long flags;
3759 	bool irq_pending;
3760 
3761 	/*
3762 	 * We do a single dword read to retrieve both command and status.
3763 	 * Document assumptions that make this possible.
3764 	 */
3765 	BUILD_BUG_ON(PCI_COMMAND % 4);
3766 	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
3767 
3768 	raw_spin_lock_irqsave(&pci_lock, flags);
3769 
3770 	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
3771 
3772 	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
3773 
3774 	/*
3775 	 * Check interrupt status register to see whether our device
3776 	 * triggered the interrupt (when masking) or the next IRQ is
3777 	 * already pending (when unmasking).
3778 	 */
3779 	if (mask != irq_pending) {
3780 		mask_updated = false;
3781 		goto done;
3782 	}
3783 
3784 	origcmd = cmd_status_dword;
3785 	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
3786 	if (mask)
3787 		newcmd |= PCI_COMMAND_INTX_DISABLE;
3788 	if (newcmd != origcmd)
3789 		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
3790 
3791 done:
3792 	raw_spin_unlock_irqrestore(&pci_lock, flags);
3793 
3794 	return mask_updated;
3795 }
3796 
3797 /**
3798  * pci_check_and_mask_intx - mask INTx on pending interrupt
3799  * @dev: the PCI device to operate on
3800  *
3801  * Check if the device dev has its INTx line asserted, mask it and
3802  * return true in that case. False is returned if not interrupt was
3803  * return true in that case. False is returned if no interrupt was
3804  */
3805 bool pci_check_and_mask_intx(struct pci_dev *dev)
3806 {
3807 	return pci_check_and_set_intx_mask(dev, true);
3808 }
3809 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3810 
3811 /**
3812  * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
3813  * @dev: the PCI device to operate on
3814  *
3815  * Check if the device dev has its INTx line asserted, unmask it if not
3816  * and return true. False is returned and the mask remains active if
3817  * there was still an interrupt pending.
3818  */
3819 bool pci_check_and_unmask_intx(struct pci_dev *dev)
3820 {
3821 	return pci_check_and_set_intx_mask(dev, false);
3822 }
3823 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
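
/*
 * Illustrative sketch of the intended usage pattern for the two helpers
 * above (similar to what uio/vfio-style drivers do): probe for masking
 * support once, then mask from the interrupt handler and unmask later
 * when the consumer is done.  The "foo" identifiers are hypothetical.
 *
 *	if (!pci_intx_mask_supported(pdev))
 *		return -ENODEV;
 *	...
 *	static irqreturn_t foo_irq(int irq, void *arg)
 *	{
 *		struct foo *foo = arg;
 *
 *		if (!pci_check_and_mask_intx(foo->pdev))
 *			return IRQ_NONE;	// not our interrupt
 *		// handle the event, then pci_check_and_unmask_intx()
 *		return IRQ_HANDLED;
 *	}
 */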
3824 
3825 /**
3826  * pci_wait_for_pending_transaction - waits for pending transaction
3827  * @dev: the PCI device to operate on
3828  *
3829  * Return 0 if transaction is pending 1 otherwise.
3830  * Return 0 if a transaction is pending, 1 otherwise.
3831 int pci_wait_for_pending_transaction(struct pci_dev *dev)
3832 {
3833 	if (!pci_is_pcie(dev))
3834 		return 1;
3835 
3836 	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
3837 				    PCI_EXP_DEVSTA_TRPND);
3838 }
3839 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
3840 
3841 /*
3842  * We should only need to wait 100ms after FLR, but some devices take longer.
3843  * Wait for up to 1000ms for config space to return something other than -1.
3844  * Intel IGD requires this when an LCD panel is attached.  We read the 2nd
3845  * dword because VFs don't implement the 1st dword.
3846  */
3847 static void pci_flr_wait(struct pci_dev *dev)
3848 {
3849 	int i = 0;
3850 	u32 id;
3851 
3852 	do {
3853 		msleep(100);
3854 		pci_read_config_dword(dev, PCI_COMMAND, &id);
3855 	} while (i++ < 10 && id == ~0);
3856 
3857 	if (id == ~0)
3858 		dev_warn(&dev->dev, "Failed to return from FLR\n");
3859 	else if (i > 1)
3860 		dev_info(&dev->dev, "Required additional %dms to return from FLR\n",
3861 			 (i - 1) * 100);
3862 }
3863 
3864 /**
3865  * pcie_has_flr - check if a device supports function level resets
3866  * @dev:	device to check
3867  *
3868  * Returns true if the device advertises support for PCIe function level
3869  * resets.
3870  */
3871 static bool pcie_has_flr(struct pci_dev *dev)
3872 {
3873 	u32 cap;
3874 
3875 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
3876 		return false;
3877 
3878 	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
3879 	return cap & PCI_EXP_DEVCAP_FLR;
3880 }
3881 
3882 /**
3883  * pcie_flr - initiate a PCIe function level reset
3884  * @dev:	device to reset
3885  *
3886  * Initiate a function level reset on @dev.  The caller should ensure the
3887  * device supports FLR before calling this function, e.g. by using the
3888  * pcie_has_flr() helper.
3889  */
3890 void pcie_flr(struct pci_dev *dev)
3891 {
3892 	if (!pci_wait_for_pending_transaction(dev))
3893 		dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
3894 
3895 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
3896 	pci_flr_wait(dev);
3897 }
3898 EXPORT_SYMBOL_GPL(pcie_flr);
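
/*
 * Illustrative usage (sketch): the two helpers are meant to be paired,
 * e.g.
 *
 *	if (pcie_has_flr(pdev))
 *		pcie_flr(pdev);
 *
 * Since pcie_has_flr() is static to this file, external callers reach
 * FLR through the pci_reset_function() family below.
 */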
3899 
3900 static int pci_af_flr(struct pci_dev *dev, int probe)
3901 {
3902 	int pos;
3903 	u8 cap;
3904 
3905 	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3906 	if (!pos)
3907 		return -ENOTTY;
3908 
3909 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
3910 		return -ENOTTY;
3911 
3912 	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
3913 	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3914 		return -ENOTTY;
3915 
3916 	if (probe)
3917 		return 0;
3918 
3919 	/*
3920 	 * Wait for Transaction Pending bit to clear.  A word-aligned test
3921 	 * is used, so we use the control offset rather than status and shift
3922 	 * the test bit to match.
3923 	 */
3924 	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
3925 				 PCI_AF_STATUS_TP << 8))
3926 		dev_err(&dev->dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
3927 
3928 	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
3929 	pci_flr_wait(dev);
3930 	return 0;
3931 }
3932 
3933 /**
3934  * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3935  * @dev: Device to reset.
3936  * @probe: If set, only check if the device can be reset this way.
3937  *
3938  * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3939  * unset, it will be reinitialized internally when going from PCI_D3hot to
3940  * PCI_D0.  If that's the case and the device is not in a low-power state
3941  * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3942  *
3943  * NOTE: This causes the caller to sleep for twice the device power transition
3944  * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3945  * by default (i.e. unless the @dev's d3_delay field has a different value).
3946  * Moreover, only devices in D0 can be reset by this function.
3947  */
3948 static int pci_pm_reset(struct pci_dev *dev, int probe)
3949 {
3950 	u16 csr;
3951 
3952 	if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
3953 		return -ENOTTY;
3954 
3955 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3956 	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3957 		return -ENOTTY;
3958 
3959 	if (probe)
3960 		return 0;
3961 
3962 	if (dev->current_state != PCI_D0)
3963 		return -EINVAL;
3964 
3965 	csr &= ~PCI_PM_CTRL_STATE_MASK;
3966 	csr |= PCI_D3hot;
3967 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3968 	pci_dev_d3_sleep(dev);
3969 
3970 	csr &= ~PCI_PM_CTRL_STATE_MASK;
3971 	csr |= PCI_D0;
3972 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3973 	pci_dev_d3_sleep(dev);
3974 
3975 	return 0;
3976 }
3977 
3978 void pci_reset_secondary_bus(struct pci_dev *dev)
3979 {
3980 	u16 ctrl;
3981 
3982 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
3983 	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3984 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
3985 	/*
3986 	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
3987 	 * this to 2ms to ensure that we meet the minimum requirement.
3988 	 */
3989 	msleep(2);
3990 
3991 	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3992 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
3993 
3994 	/*
3995 	 * Trhfa for conventional PCI is 2^25 clock cycles.
3996 	 * Assuming a minimum 33MHz clock this results in a 1s
3997 	 * delay before we can consider subordinate devices to
3998 	 * be re-initialized.  PCIe has some ways to shorten this,
3999 	 * but we don't make use of them yet.
4000 	 */
4001 	ssleep(1);
4002 }
4003 
4004 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4005 {
4006 	pci_reset_secondary_bus(dev);
4007 }
4008 
4009 /**
4010  * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
4011  * @dev: Bridge device
4012  *
4013  * Use the bridge control register to assert reset on the secondary bus.
4014  * Devices on the secondary bus are left in power-on state.
4015  */
4016 void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
4017 {
4018 	pcibios_reset_secondary_bus(dev);
4019 }
4020 EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
4021 
4022 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4023 {
4024 	struct pci_dev *pdev;
4025 
4026 	if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4027 	    !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4028 		return -ENOTTY;
4029 
4030 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4031 		if (pdev != dev)
4032 			return -ENOTTY;
4033 
4034 	if (probe)
4035 		return 0;
4036 
4037 	pci_reset_bridge_secondary_bus(dev->bus->self);
4038 
4039 	return 0;
4040 }
4041 
4042 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4043 {
4044 	int rc = -ENOTTY;
4045 
4046 	if (!hotplug || !try_module_get(hotplug->ops->owner))
4047 		return rc;
4048 
4049 	if (hotplug->ops->reset_slot)
4050 		rc = hotplug->ops->reset_slot(hotplug, probe);
4051 
4052 	module_put(hotplug->ops->owner);
4053 
4054 	return rc;
4055 }
4056 
4057 static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4058 {
4059 	struct pci_dev *pdev;
4060 
4061 	if (dev->subordinate || !dev->slot ||
4062 	    dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4063 		return -ENOTTY;
4064 
4065 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4066 		if (pdev != dev && pdev->slot == dev->slot)
4067 			return -ENOTTY;
4068 
4069 	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4070 }
4071 
4072 static int __pci_dev_reset(struct pci_dev *dev, int probe)
4073 {
4074 	int rc;
4075 
4076 	might_sleep();
4077 
4078 	rc = pci_dev_specific_reset(dev, probe);
4079 	if (rc != -ENOTTY)
4080 		goto done;
4081 
4082 	if (pcie_has_flr(dev)) {
4083 		if (!probe)
4084 			pcie_flr(dev);
4085 		rc = 0;
4086 		goto done;
4087 	}
4088 
4089 	rc = pci_af_flr(dev, probe);
4090 	if (rc != -ENOTTY)
4091 		goto done;
4092 
4093 	rc = pci_pm_reset(dev, probe);
4094 	if (rc != -ENOTTY)
4095 		goto done;
4096 
4097 	rc = pci_dev_reset_slot_function(dev, probe);
4098 	if (rc != -ENOTTY)
4099 		goto done;
4100 
4101 	rc = pci_parent_bus_reset(dev, probe);
4102 done:
4103 	return rc;
4104 }
4105 
4106 static void pci_dev_lock(struct pci_dev *dev)
4107 {
4108 	pci_cfg_access_lock(dev);
4109 	/* block PM suspend, driver probe, etc. */
4110 	device_lock(&dev->dev);
4111 }
4112 
4113 /* Return 1 on successful lock, 0 on contention */
4114 static int pci_dev_trylock(struct pci_dev *dev)
4115 {
4116 	if (pci_cfg_access_trylock(dev)) {
4117 		if (device_trylock(&dev->dev))
4118 			return 1;
4119 		pci_cfg_access_unlock(dev);
4120 	}
4121 
4122 	return 0;
4123 }
4124 
4125 static void pci_dev_unlock(struct pci_dev *dev)
4126 {
4127 	device_unlock(&dev->dev);
4128 	pci_cfg_access_unlock(dev);
4129 }
4130 
4131 /**
4132  * pci_reset_notify - notify device driver of reset
4133  * @dev: device to be notified of reset
4134  * @prepare: 'true' if device is about to be reset; 'false' if reset attempt
4135  *           completed
4136  *
4137  * Must be called prior to device access being disabled and after device
4138  * access is restored.
4139  */
4140 static void pci_reset_notify(struct pci_dev *dev, bool prepare)
4141 {
4142 	const struct pci_error_handlers *err_handler =
4143 			dev->driver ? dev->driver->err_handler : NULL;
4144 	if (err_handler && err_handler->reset_notify)
4145 		err_handler->reset_notify(dev, prepare);
4146 }
4147 
4148 static void pci_dev_save_and_disable(struct pci_dev *dev)
4149 {
4150 	pci_reset_notify(dev, true);
4151 
4152 	/*
4153 	 * Wake-up device prior to save.  PM registers default to D0 after
4154 	 * reset and a simple register restore doesn't reliably return
4155 	 * to a non-D0 state anyway.
4156 	 */
4157 	pci_set_power_state(dev, PCI_D0);
4158 
4159 	pci_save_state(dev);
4160 	/*
4161 	 * Disable the device by clearing the Command register, except for
4162 	 * INTx-disable which is set.  This not only disables MMIO and I/O port
4163 	 * BARs, but also prevents the device from being Bus Master, preventing
4164 	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
4165 	 * compliant devices, INTx-disable prevents legacy interrupts.
4166 	 */
4167 	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
4168 }
4169 
4170 static void pci_dev_restore(struct pci_dev *dev)
4171 {
4172 	pci_restore_state(dev);
4173 	pci_reset_notify(dev, false);
4174 }
4175 
4176 static int pci_dev_reset(struct pci_dev *dev, int probe)
4177 {
4178 	int rc;
4179 
4180 	if (!probe)
4181 		pci_dev_lock(dev);
4182 
4183 	rc = __pci_dev_reset(dev, probe);
4184 
4185 	if (!probe)
4186 		pci_dev_unlock(dev);
4187 
4188 	return rc;
4189 }
4190 
4191 /**
4192  * __pci_reset_function - reset a PCI device function
4193  * @dev: PCI device to reset
4194  *
4195  * Some devices allow an individual function to be reset without affecting
4196  * other functions in the same device.  The PCI device must be responsive
4197  * to PCI config space in order to use this function.
4198  *
4199  * The device function is presumed to be unused when this function is called.
4200  * Resetting the device will make the contents of PCI configuration space
4201  * random, so any caller of this must be prepared to reinitialise the
4202  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
4203  * etc.
4204  *
4205  * Returns 0 if the device function was successfully reset or negative if the
4206  * device doesn't support resetting a single function.
4207  */
4208 int __pci_reset_function(struct pci_dev *dev)
4209 {
4210 	return pci_dev_reset(dev, 0);
4211 }
4212 EXPORT_SYMBOL_GPL(__pci_reset_function);
4213 
4214 /**
4215  * __pci_reset_function_locked - reset a PCI device function while holding
4216  * the @dev mutex lock.
4217  * @dev: PCI device to reset
4218  *
4219  * Some devices allow an individual function to be reset without affecting
4220  * other functions in the same device.  The PCI device must be responsive
4221  * to PCI config space in order to use this function.
4222  *
4223  * The device function is presumed to be unused and the caller is holding
4224  * the device mutex lock when this function is called.
4225  * Resetting the device will make the contents of PCI configuration space
4226  * random, so any caller of this must be prepared to reinitialise the
4227  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
4228  * etc.
4229  *
4230  * Returns 0 if the device function was successfully reset or negative if the
4231  * device doesn't support resetting a single function.
4232  */
4233 int __pci_reset_function_locked(struct pci_dev *dev)
4234 {
4235 	return __pci_dev_reset(dev, 0);
4236 }
4237 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
4238 
4239 /**
4240  * pci_probe_reset_function - check whether the device can be safely reset
4241  * @dev: PCI device to reset
4242  *
4243  * Some devices allow an individual function to be reset without affecting
4244  * other functions in the same device.  The PCI device must be responsive
4245  * to PCI config space in order to use this function.
4246  *
4247  * Returns 0 if the device function can be reset or negative if the
4248  * device doesn't support resetting a single function.
4249  */
4250 int pci_probe_reset_function(struct pci_dev *dev)
4251 {
4252 	return pci_dev_reset(dev, 1);
4253 }
4254 
4255 /**
4256  * pci_reset_function - quiesce and reset a PCI device function
4257  * @dev: PCI device to reset
4258  *
4259  * Some devices allow an individual function to be reset without affecting
4260  * other functions in the same device.  The PCI device must be responsive
4261  * to PCI config space in order to use this function.
4262  *
4263  * This function does not just reset the PCI portion of a device, but
4264  * clears all the state associated with the device.  This function differs
4265  * from __pci_reset_function in that it saves and restores device state
4266  * over the reset.
4267  *
4268  * Returns 0 if the device function was successfully reset or negative if the
4269  * device doesn't support resetting a single function.
4270  */
4271 int pci_reset_function(struct pci_dev *dev)
4272 {
4273 	int rc;
4274 
4275 	rc = pci_dev_reset(dev, 1);
4276 	if (rc)
4277 		return rc;
4278 
4279 	pci_dev_save_and_disable(dev);
4280 
4281 	rc = pci_dev_reset(dev, 0);
4282 
4283 	pci_dev_restore(dev);
4284 
4285 	return rc;
4286 }
4287 EXPORT_SYMBOL_GPL(pci_reset_function);
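
/*
 * Illustrative usage (sketch): a caller that needs a known-clean device,
 * e.g. before handing it to a guest or re-loading firmware, with "pdev"
 * already quiesced:
 *
 *	if (pci_reset_function(pdev))
 *		dev_warn(&pdev->dev, "function reset not supported\n");
 */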
4288 
4289 /**
4290  * pci_try_reset_function - quiesce and reset a PCI device function
4291  * @dev: PCI device to reset
4292  *
4293  * Same as above, except return -EAGAIN if unable to lock device.
4294  */
4295 int pci_try_reset_function(struct pci_dev *dev)
4296 {
4297 	int rc;
4298 
4299 	rc = pci_dev_reset(dev, 1);
4300 	if (rc)
4301 		return rc;
4302 
4303 	pci_dev_save_and_disable(dev);
4304 
4305 	if (pci_dev_trylock(dev)) {
4306 		rc = __pci_dev_reset(dev, 0);
4307 		pci_dev_unlock(dev);
4308 	} else
4309 		rc = -EAGAIN;
4310 
4311 	pci_dev_restore(dev);
4312 
4313 	return rc;
4314 }
4315 EXPORT_SYMBOL_GPL(pci_try_reset_function);
4316 
4317 /* Do any devices on or below this bus prevent a bus reset? */
4318 static bool pci_bus_resetable(struct pci_bus *bus)
4319 {
4320 	struct pci_dev *dev;
4321 
4322 	list_for_each_entry(dev, &bus->devices, bus_list) {
4323 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
4324 		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
4325 			return false;
4326 	}
4327 
4328 	return true;
4329 }
4330 
4331 /* Lock devices from the top of the tree down */
4332 static void pci_bus_lock(struct pci_bus *bus)
4333 {
4334 	struct pci_dev *dev;
4335 
4336 	list_for_each_entry(dev, &bus->devices, bus_list) {
4337 		pci_dev_lock(dev);
4338 		if (dev->subordinate)
4339 			pci_bus_lock(dev->subordinate);
4340 	}
4341 }
4342 
4343 /* Unlock devices from the bottom of the tree up */
4344 static void pci_bus_unlock(struct pci_bus *bus)
4345 {
4346 	struct pci_dev *dev;
4347 
4348 	list_for_each_entry(dev, &bus->devices, bus_list) {
4349 		if (dev->subordinate)
4350 			pci_bus_unlock(dev->subordinate);
4351 		pci_dev_unlock(dev);
4352 	}
4353 }
4354 
4355 /* Return 1 on successful lock, 0 on contention */
4356 static int pci_bus_trylock(struct pci_bus *bus)
4357 {
4358 	struct pci_dev *dev;
4359 
4360 	list_for_each_entry(dev, &bus->devices, bus_list) {
4361 		if (!pci_dev_trylock(dev))
4362 			goto unlock;
4363 		if (dev->subordinate) {
4364 			if (!pci_bus_trylock(dev->subordinate)) {
4365 				pci_dev_unlock(dev);
4366 				goto unlock;
4367 			}
4368 		}
4369 	}
4370 	return 1;
4371 
4372 unlock:
4373 	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
4374 		if (dev->subordinate)
4375 			pci_bus_unlock(dev->subordinate);
4376 		pci_dev_unlock(dev);
4377 	}
4378 	return 0;
4379 }
4380 
4381 /* Do any devices on or below this slot prevent a bus reset? */
4382 static bool pci_slot_resetable(struct pci_slot *slot)
4383 {
4384 	struct pci_dev *dev;
4385 
4386 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4387 		if (!dev->slot || dev->slot != slot)
4388 			continue;
4389 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
4390 		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
4391 			return false;
4392 	}
4393 
4394 	return true;
4395 }
4396 
4397 /* Lock devices from the top of the tree down */
4398 static void pci_slot_lock(struct pci_slot *slot)
4399 {
4400 	struct pci_dev *dev;
4401 
4402 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4403 		if (!dev->slot || dev->slot != slot)
4404 			continue;
4405 		pci_dev_lock(dev);
4406 		if (dev->subordinate)
4407 			pci_bus_lock(dev->subordinate);
4408 	}
4409 }
4410 
4411 /* Unlock devices from the bottom of the tree up */
4412 static void pci_slot_unlock(struct pci_slot *slot)
4413 {
4414 	struct pci_dev *dev;
4415 
4416 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4417 		if (!dev->slot || dev->slot != slot)
4418 			continue;
4419 		if (dev->subordinate)
4420 			pci_bus_unlock(dev->subordinate);
4421 		pci_dev_unlock(dev);
4422 	}
4423 }
4424 
4425 /* Return 1 on successful lock, 0 on contention */
4426 static int pci_slot_trylock(struct pci_slot *slot)
4427 {
4428 	struct pci_dev *dev;
4429 
4430 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4431 		if (!dev->slot || dev->slot != slot)
4432 			continue;
4433 		if (!pci_dev_trylock(dev))
4434 			goto unlock;
4435 		if (dev->subordinate) {
4436 			if (!pci_bus_trylock(dev->subordinate)) {
4437 				pci_dev_unlock(dev);
4438 				goto unlock;
4439 			}
4440 		}
4441 	}
4442 	return 1;
4443 
4444 unlock:
4445 	list_for_each_entry_continue_reverse(dev,
4446 					     &slot->bus->devices, bus_list) {
4447 		if (!dev->slot || dev->slot != slot)
4448 			continue;
4449 		if (dev->subordinate)
4450 			pci_bus_unlock(dev->subordinate);
4451 		pci_dev_unlock(dev);
4452 	}
4453 	return 0;
4454 }
4455 
4456 /* Save and disable devices from the top of the tree down */
4457 static void pci_bus_save_and_disable(struct pci_bus *bus)
4458 {
4459 	struct pci_dev *dev;
4460 
4461 	list_for_each_entry(dev, &bus->devices, bus_list) {
4462 		pci_dev_save_and_disable(dev);
4463 		if (dev->subordinate)
4464 			pci_bus_save_and_disable(dev->subordinate);
4465 	}
4466 }
4467 
4468 /*
4469  * Restore devices from top of the tree down - parent bridges need to be
4470  * restored before we can get to subordinate devices.
4471  */
4472 static void pci_bus_restore(struct pci_bus *bus)
4473 {
4474 	struct pci_dev *dev;
4475 
4476 	list_for_each_entry(dev, &bus->devices, bus_list) {
4477 		pci_dev_restore(dev);
4478 		if (dev->subordinate)
4479 			pci_bus_restore(dev->subordinate);
4480 	}
4481 }
4482 
4483 /* Save and disable devices from the top of the tree down */
4484 static void pci_slot_save_and_disable(struct pci_slot *slot)
4485 {
4486 	struct pci_dev *dev;
4487 
4488 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4489 		if (!dev->slot || dev->slot != slot)
4490 			continue;
4491 		pci_dev_save_and_disable(dev);
4492 		if (dev->subordinate)
4493 			pci_bus_save_and_disable(dev->subordinate);
4494 	}
4495 }
4496 
4497 /*
4498  * Restore devices from top of the tree down - parent bridges need to be
4499  * restored before we can get to subordinate devices.
4500  */
4501 static void pci_slot_restore(struct pci_slot *slot)
4502 {
4503 	struct pci_dev *dev;
4504 
4505 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4506 		if (!dev->slot || dev->slot != slot)
4507 			continue;
4508 		pci_dev_restore(dev);
4509 		if (dev->subordinate)
4510 			pci_bus_restore(dev->subordinate);
4511 	}
4512 }
4513 
4514 static int pci_slot_reset(struct pci_slot *slot, int probe)
4515 {
4516 	int rc;
4517 
4518 	if (!slot || !pci_slot_resetable(slot))
4519 		return -ENOTTY;
4520 
4521 	if (!probe)
4522 		pci_slot_lock(slot);
4523 
4524 	might_sleep();
4525 
4526 	rc = pci_reset_hotplug_slot(slot->hotplug, probe);
4527 
4528 	if (!probe)
4529 		pci_slot_unlock(slot);
4530 
4531 	return rc;
4532 }
4533 
4534 /**
4535  * pci_probe_reset_slot - probe whether a PCI slot can be reset
4536  * @slot: PCI slot to probe
4537  *
4538  * Return 0 if slot can be reset, negative if a slot reset is not supported.
4539  */
4540 int pci_probe_reset_slot(struct pci_slot *slot)
4541 {
4542 	return pci_slot_reset(slot, 1);
4543 }
4544 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
4545 
4546 /**
4547  * pci_reset_slot - reset a PCI slot
4548  * @slot: PCI slot to reset
4549  *
4550  * A PCI bus may host multiple slots, each slot may support a reset mechanism
4551  * independent of other slots.  For instance, some slots may support slot power
4552  * control.  In the case of a 1:1 bus to slot architecture, this function may
4553  * wrap the bus reset to avoid spurious slot related events such as hotplug.
4554  * Generally a slot reset should be attempted before a bus reset.  All of the
4555  * functions of the slot and any subordinate buses behind the slot are reset
4556  * through this function.  PCI config space of all devices in the slot and
4557  * behind the slot is saved before and restored after reset.
4558  *
4559  * Return 0 on success, non-zero on error.
4560  */
4561 int pci_reset_slot(struct pci_slot *slot)
4562 {
4563 	int rc;
4564 
4565 	rc = pci_slot_reset(slot, 1);
4566 	if (rc)
4567 		return rc;
4568 
4569 	pci_slot_save_and_disable(slot);
4570 
4571 	rc = pci_slot_reset(slot, 0);
4572 
4573 	pci_slot_restore(slot);
4574 
4575 	return rc;
4576 }
4577 EXPORT_SYMBOL_GPL(pci_reset_slot);
4578 
4579 /**
4580  * pci_try_reset_slot - Try to reset a PCI slot
4581  * @slot: PCI slot to reset
4582  *
4583  * Same as above, except return -EAGAIN if the slot cannot be locked.
4584  */
4585 int pci_try_reset_slot(struct pci_slot *slot)
4586 {
4587 	int rc;
4588 
4589 	rc = pci_slot_reset(slot, 1);
4590 	if (rc)
4591 		return rc;
4592 
4593 	pci_slot_save_and_disable(slot);
4594 
4595 	if (pci_slot_trylock(slot)) {
4596 		might_sleep();
4597 		rc = pci_reset_hotplug_slot(slot->hotplug, 0);
4598 		pci_slot_unlock(slot);
4599 	} else
4600 		rc = -EAGAIN;
4601 
4602 	pci_slot_restore(slot);
4603 
4604 	return rc;
4605 }
4606 EXPORT_SYMBOL_GPL(pci_try_reset_slot);
4607 
4608 static int pci_bus_reset(struct pci_bus *bus, int probe)
4609 {
4610 	if (!bus->self || !pci_bus_resetable(bus))
4611 		return -ENOTTY;
4612 
4613 	if (probe)
4614 		return 0;
4615 
4616 	pci_bus_lock(bus);
4617 
4618 	might_sleep();
4619 
4620 	pci_reset_bridge_secondary_bus(bus->self);
4621 
4622 	pci_bus_unlock(bus);
4623 
4624 	return 0;
4625 }
4626 
4627 /**
4628  * pci_probe_reset_bus - probe whether a PCI bus can be reset
4629  * @bus: PCI bus to probe
4630  *
4631  * Return 0 if bus can be reset, negative if a bus reset is not supported.
4632  */
4633 int pci_probe_reset_bus(struct pci_bus *bus)
4634 {
4635 	return pci_bus_reset(bus, 1);
4636 }
4637 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
4638 
4639 /**
4640  * pci_reset_bus - reset a PCI bus
4641  * @bus: top level PCI bus to reset
4642  *
4643  * Do a bus reset on the given bus and any subordinate buses, saving
4644  * and restoring state of all devices.
4645  *
4646  * Return 0 on success, non-zero on error.
4647  */
4648 int pci_reset_bus(struct pci_bus *bus)
4649 {
4650 	int rc;
4651 
4652 	rc = pci_bus_reset(bus, 1);
4653 	if (rc)
4654 		return rc;
4655 
4656 	pci_bus_save_and_disable(bus);
4657 
4658 	rc = pci_bus_reset(bus, 0);
4659 
4660 	pci_bus_restore(bus);
4661 
4662 	return rc;
4663 }
4664 EXPORT_SYMBOL_GPL(pci_reset_bus);
4665 
4666 /**
4667  * pci_try_reset_bus - Try to reset a PCI bus
4668  * @bus: top level PCI bus to reset
4669  *
4670  * Same as above, except return -EAGAIN if the bus cannot be locked.
4671  */
4672 int pci_try_reset_bus(struct pci_bus *bus)
4673 {
4674 	int rc;
4675 
4676 	rc = pci_bus_reset(bus, 1);
4677 	if (rc)
4678 		return rc;
4679 
4680 	pci_bus_save_and_disable(bus);
4681 
4682 	if (pci_bus_trylock(bus)) {
4683 		might_sleep();
4684 		pci_reset_bridge_secondary_bus(bus->self);
4685 		pci_bus_unlock(bus);
4686 	} else
4687 		rc = -EAGAIN;
4688 
4689 	pci_bus_restore(bus);
4690 
4691 	return rc;
4692 }
4693 EXPORT_SYMBOL_GPL(pci_try_reset_bus);
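
/*
 * Illustrative usage (sketch): when resetting everything below a bridge
 * on behalf of a user, the "try" variant turns lock contention into
 * -EAGAIN instead of blocking:
 *
 *	if (pci_probe_reset_bus(bus))
 *		return -ENOTTY;		// no reset method available
 *	rc = pci_try_reset_bus(bus);
 *	if (rc == -EAGAIN)
 *		...			// a device lock was contended; retry later
 */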
4694 
4695 /**
4696  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
4697  * @dev: PCI device to query
4698  *
4699  * Returns mmrbc: maximum designed memory read count in bytes
4700  *    or appropriate error value.
4701  */
4702 int pcix_get_max_mmrbc(struct pci_dev *dev)
4703 {
4704 	int cap;
4705 	u32 stat;
4706 
4707 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4708 	if (!cap)
4709 		return -EINVAL;
4710 
4711 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4712 		return -EINVAL;
4713 
4714 	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
4715 }
4716 EXPORT_SYMBOL(pcix_get_max_mmrbc);
4717 
4718 /**
4719  * pcix_get_mmrbc - get PCI-X maximum memory read byte count
4720  * @dev: PCI device to query
4721  *
4722  * Returns mmrbc: maximum memory read count in bytes
4723  *    or appropriate error value.
4724  */
4725 int pcix_get_mmrbc(struct pci_dev *dev)
4726 {
4727 	int cap;
4728 	u16 cmd;
4729 
4730 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4731 	if (!cap)
4732 		return -EINVAL;
4733 
4734 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4735 		return -EINVAL;
4736 
4737 	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
4738 }
4739 EXPORT_SYMBOL(pcix_get_mmrbc);
4740 
4741 /**
4742  * pcix_set_mmrbc - set PCI-X maximum memory read byte count
4743  * @dev: PCI device to configure
4744  * @mmrbc: maximum memory read count in bytes
4745  *    valid values are 512, 1024, 2048, 4096
4746  *
4747  * If possible, sets the maximum memory read byte count; some bridges have
4748  * errata that prevent this.
4749  */
4750 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
4751 {
4752 	int cap;
4753 	u32 stat, v, o;
4754 	u16 cmd;
4755 
4756 	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
4757 		return -EINVAL;
4758 
4759 	v = ffs(mmrbc) - 10;
4760 
4761 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4762 	if (!cap)
4763 		return -EINVAL;
4764 
4765 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4766 		return -EINVAL;
4767 
4768 	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
4769 		return -E2BIG;
4770 
4771 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4772 		return -EINVAL;
4773 
4774 	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
4775 	if (o != v) {
4776 		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
4777 			return -EIO;
4778 
4779 		cmd &= ~PCI_X_CMD_MAX_READ;
4780 		cmd |= v << 2;
4781 		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
4782 			return -EIO;
4783 	}
4784 	return 0;
4785 }
4786 EXPORT_SYMBOL(pcix_set_mmrbc);
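
/*
 * Illustrative usage (sketch): a PCI-X driver would clamp its preferred
 * read byte count to what the device was designed for before programming
 * it (error handling elided):
 *
 *	int max = pcix_get_max_mmrbc(pdev);
 *
 *	if (max > 0)
 *		pcix_set_mmrbc(pdev, min(max, 2048));
 */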
4787 
4788 /**
4789  * pcie_get_readrq - get PCI Express read request size
4790  * @dev: PCI device to query
4791  *
4792  * Returns maximum memory read request in bytes
4793  *    or appropriate error value.
4794  */
4795 int pcie_get_readrq(struct pci_dev *dev)
4796 {
4797 	u16 ctl;
4798 
4799 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
4800 
4801 	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4802 }
4803 EXPORT_SYMBOL(pcie_get_readrq);
4804 
4805 /**
4806  * pcie_set_readrq - set PCI Express maximum memory read request
4807  * @dev: PCI device to configure
4808  * @rq: maximum memory read count in bytes
4809  *    valid values are 128, 256, 512, 1024, 2048, 4096
4810  *
4811  * If possible, sets the maximum memory read request in bytes.
4812  */
4813 int pcie_set_readrq(struct pci_dev *dev, int rq)
4814 {
4815 	u16 v;
4816 
4817 	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
4818 		return -EINVAL;
4819 
4820 	/*
4821 	 * If using the "performance" PCIe config, we clamp the
4822 	 * read rq size to the max packet size to prevent the
4823 	 * host bridge generating requests larger than we can
4824 	 * cope with
4825 	 */
4826 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
4827 		int mps = pcie_get_mps(dev);
4828 
4829 		if (mps < rq)
4830 			rq = mps;
4831 	}
4832 
4833 	v = (ffs(rq) - 8) << 12;
4834 
4835 	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
4836 						  PCI_EXP_DEVCTL_READRQ, v);
4837 }
4838 EXPORT_SYMBOL(pcie_set_readrq);
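
/*
 * Illustrative usage (sketch): drivers occasionally raise MRRS for
 * throughput; the value must be a power of two between 128 and 4096:
 *
 *	if (pcie_get_readrq(pdev) < 512)
 *		pcie_set_readrq(pdev, 512);
 */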
4839 
4840 /**
4841  * pcie_get_mps - get PCI Express maximum payload size
4842  * @dev: PCI device to query
4843  *
4844  * Returns maximum payload size in bytes
4845  */
4846 int pcie_get_mps(struct pci_dev *dev)
4847 {
4848 	u16 ctl;
4849 
4850 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
4851 
4852 	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4853 }
4854 EXPORT_SYMBOL(pcie_get_mps);
4855 
4856 /**
4857  * pcie_set_mps - set PCI Express maximum payload size
4858  * @dev: PCI device to configure
4859  * @mps: maximum payload size in bytes
4860  *    valid values are 128, 256, 512, 1024, 2048, 4096
4861  *
4862  * If possible, sets the maximum payload size.
4863  */
4864 int pcie_set_mps(struct pci_dev *dev, int mps)
4865 {
4866 	u16 v;
4867 
4868 	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
4869 		return -EINVAL;
4870 
4871 	v = ffs(mps) - 8;
4872 	if (v > dev->pcie_mpss)
4873 		return -EINVAL;
4874 	v <<= 5;
4875 
4876 	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
4877 						  PCI_EXP_DEVCTL_PAYLOAD, v);
4878 }
4879 EXPORT_SYMBOL(pcie_set_mps);
4880 
4881 /**
4882  * pcie_get_minimum_link - determine minimum link settings of a PCI device
4883  * @dev: PCI device to query
4884  * @speed: storage for minimum speed
4885  * @width: storage for minimum width
4886  *
4887  * This function will walk up the PCI device chain and determine the minimum
4888  * link width and speed of the device.
4889  */
4890 int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
4891 			  enum pcie_link_width *width)
4892 {
4893 	int ret;
4894 
4895 	*speed = PCI_SPEED_UNKNOWN;
4896 	*width = PCIE_LNK_WIDTH_UNKNOWN;
4897 
4898 	while (dev) {
4899 		u16 lnksta;
4900 		enum pci_bus_speed next_speed;
4901 		enum pcie_link_width next_width;
4902 
4903 		ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
4904 		if (ret)
4905 			return ret;
4906 
4907 		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
4908 		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
4909 			PCI_EXP_LNKSTA_NLW_SHIFT;
4910 
4911 		if (next_speed < *speed)
4912 			*speed = next_speed;
4913 
4914 		if (next_width < *width)
4915 			*width = next_width;
4916 
4917 		dev = dev->bus->self;
4918 	}
4919 
4920 	return 0;
4921 }
4922 EXPORT_SYMBOL(pcie_get_minimum_link);
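
/*
 * Illustrative usage (sketch): a bandwidth-hungry device can warn when
 * the path to the root port bottlenecks it (thresholds hypothetical):
 *
 *	enum pci_bus_speed speed;
 *	enum pcie_link_width width;
 *
 *	if (!pcie_get_minimum_link(pdev, &speed, &width) &&
 *	    (speed < PCIE_SPEED_8_0GT || width < PCIE_LNK_X8))
 *		dev_warn(&pdev->dev, "PCIe link is a bottleneck\n");
 */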
4923 
4924 /**
4925  * pci_select_bars - Make BAR mask from the type of resource
4926  * @dev: the PCI device for which BAR mask is made
4927  * @flags: resource type mask to be selected
4928  *
4929  * This helper routine makes a BAR mask from the resource type.
4930  */
4931 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
4932 {
4933 	int i, bars = 0;
4934 	for (i = 0; i < PCI_NUM_RESOURCES; i++)
4935 		if (pci_resource_flags(dev, i) & flags)
4936 			bars |= (1 << i);
4937 	return bars;
4938 }
4939 EXPORT_SYMBOL(pci_select_bars);
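
/*
 * Illustrative usage (sketch): combine with
 * pci_request_selected_regions() to claim only the memory BARs of a
 * device ("foo" is a hypothetical driver name):
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	if (pci_request_selected_regions(pdev, bars, "foo"))
 *		return -EBUSY;
 */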
4940 
4941 /* Some architectures require additional programming to enable VGA */
4942 static arch_set_vga_state_t arch_set_vga_state;
4943 
4944 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
4945 {
4946 	arch_set_vga_state = func;	/* NULL disables */
4947 }
4948 
4949 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
4950 				  unsigned int command_bits, u32 flags)
4951 {
4952 	if (arch_set_vga_state)
4953 		return arch_set_vga_state(dev, decode, command_bits,
4954 						flags);
4955 	return 0;
4956 }
4957 
4958 /**
4959  * pci_set_vga_state - set VGA decode state on device and parents if requested
4960  * @dev: the PCI device
4961  * @decode: true = enable decoding, false = disable decoding
4962  * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
4963  * @flags: traverse ancestors and change bridges
4964  * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
4965  */
4966 int pci_set_vga_state(struct pci_dev *dev, bool decode,
4967 		      unsigned int command_bits, u32 flags)
4968 {
4969 	struct pci_bus *bus;
4970 	struct pci_dev *bridge;
4971 	u16 cmd;
4972 	int rc;
4973 
4974 	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
4975 
4976 	/* ARCH specific VGA enables */
4977 	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
4978 	if (rc)
4979 		return rc;
4980 
4981 	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
4982 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
4983 		if (decode)
4984 			cmd |= command_bits;
4985 		else
4986 			cmd &= ~command_bits;
4987 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4988 	}
4989 
4990 	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
4991 		return 0;
4992 
4993 	bus = dev->bus;
4994 	while (bus) {
4995 		bridge = bus->self;
4996 		if (bridge) {
4997 			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
4998 					     &cmd);
4999 			if (decode)
5000 				cmd |= PCI_BRIDGE_CTL_VGA;
5001 			else
5002 				cmd &= ~PCI_BRIDGE_CTL_VGA;
5003 			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
5004 					      cmd);
5005 		}
5006 		bus = bus->parent;
5007 	}
5008 	return 0;
5009 }
5010 
5011 /**
5012  * pci_add_dma_alias - Add a DMA devfn alias for a device
5013  * @dev: the PCI device for which alias is added
5014  * @devfn: alias slot and function
5015  *
5016  * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask.
5017  * It should be called early, preferably as a PCI header fixup quirk.
5018  */
5019 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
5020 {
5021 	if (!dev->dma_alias_mask)
5022 		dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX),
5023 					      sizeof(long), GFP_KERNEL);
5024 	if (!dev->dma_alias_mask) {
5025 		dev_warn(&dev->dev, "Unable to allocate DMA alias mask\n");
5026 		return;
5027 	}
5028 
5029 	set_bit(devfn, dev->dma_alias_mask);
5030 	dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
5031 		 PCI_SLOT(devfn), PCI_FUNC(devfn));
5032 }
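
/*
 * Illustrative usage (sketch): a header fixup quirk for a multi-function
 * device whose extra functions issue DMA with function 0's requester ID
 * (the vendor/device IDs below are hypothetical):
 *
 *	static void quirk_dma_func0_alias(struct pci_dev *dev)
 *	{
 *		if (PCI_FUNC(dev->devfn) != 0)
 *			pci_add_dma_alias(dev,
 *					  PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_dma_func0_alias);
 */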
5033 
5034 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
5035 {
5036 	return (dev1->dma_alias_mask &&
5037 		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
5038 	       (dev2->dma_alias_mask &&
5039 		test_bit(dev1->devfn, dev2->dma_alias_mask));
5040 }
5041 
5042 bool pci_device_is_present(struct pci_dev *pdev)
5043 {
5044 	u32 v;
5045 
5046 	if (pci_dev_is_disconnected(pdev))
5047 		return false;
5048 	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
5049 }
5050 EXPORT_SYMBOL_GPL(pci_device_is_present);
5051 
5052 void pci_ignore_hotplug(struct pci_dev *dev)
5053 {
5054 	struct pci_dev *bridge = dev->bus->self;
5055 
5056 	dev->ignore_hotplug = 1;
5057 	/* Propagate the "ignore hotplug" setting to the parent bridge. */
5058 	if (bridge)
5059 		bridge->ignore_hotplug = 1;
5060 }
5061 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
5062 
5063 resource_size_t __weak pcibios_default_alignment(void)
5064 {
5065 	return 0;
5066 }
5067 
5068 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
5069 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
5070 static DEFINE_SPINLOCK(resource_alignment_lock);
5071 
5072 /**
5073  * pci_specified_resource_alignment - get resource alignment specified by user.
5074  * @dev: the PCI device to get the alignment for
5075  * @resize: whether or not to change resources' size when reassigning alignment
5076  *
5077  * RETURNS: Resource alignment if it is specified.
5078  *          Zero if it is not specified.
5079  */
5080 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
5081 							bool *resize)
5082 {
5083 	int seg, bus, slot, func, align_order, count;
5084 	unsigned short vendor, device, subsystem_vendor, subsystem_device;
5085 	resource_size_t align = pcibios_default_alignment();
5086 	char *p;
5087 
5088 	spin_lock(&resource_alignment_lock);
5089 	p = resource_alignment_param;
5090 	if (!*p && !align)
5091 		goto out;
5092 	if (pci_has_flag(PCI_PROBE_ONLY)) {
5093 		align = 0;
5094 		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
5095 		goto out;
5096 	}
5097 
5098 	while (*p) {
5099 		count = 0;
5100 		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
5101 							p[count] == '@') {
5102 			p += count + 1;
5103 		} else {
5104 			align_order = -1;
5105 		}
5106 		if (strncmp(p, "pci:", 4) == 0) {
5107 			/* PCI vendor/device (subvendor/subdevice) IDs are specified */
5108 			p += 4;
5109 			if (sscanf(p, "%hx:%hx:%hx:%hx%n",
5110 				&vendor, &device, &subsystem_vendor, &subsystem_device, &count) != 4) {
5111 				if (sscanf(p, "%hx:%hx%n", &vendor, &device, &count) != 2) {
5112 					printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: pci:%s\n",
5113 						p);
5114 					break;
5115 				}
5116 				subsystem_vendor = subsystem_device = 0;
5117 			}
5118 			p += count;
5119 			if ((!vendor || (vendor == dev->vendor)) &&
5120 				(!device || (device == dev->device)) &&
5121 				(!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) &&
5122 				(!subsystem_device || (subsystem_device == dev->subsystem_device))) {
5123 				*resize = true;
5124 				if (align_order == -1)
5125 					align = PAGE_SIZE;
5126 				else
5127 					align = 1 << align_order;
5128 				/* Found */
5129 				break;
5130 			}
5131 		}
5132 		else {
5133 			if (sscanf(p, "%x:%x:%x.%x%n",
5134 				&seg, &bus, &slot, &func, &count) != 4) {
5135 				seg = 0;
5136 				if (sscanf(p, "%x:%x.%x%n",
5137 						&bus, &slot, &func, &count) != 3) {
5138 					/* Invalid format */
5139 					printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
5140 						p);
5141 					break;
5142 				}
5143 			}
5144 			p += count;
5145 			if (seg == pci_domain_nr(dev->bus) &&
5146 				bus == dev->bus->number &&
5147 				slot == PCI_SLOT(dev->devfn) &&
5148 				func == PCI_FUNC(dev->devfn)) {
5149 				*resize = true;
5150 				if (align_order == -1)
5151 					align = PAGE_SIZE;
5152 				else
5153 					align = 1 << align_order;
5154 				/* Found */
5155 				break;
5156 			}
5157 		}
5158 		if (*p != ';' && *p != ',') {
5159 			/* End of param or invalid format */
5160 			break;
5161 		}
5162 		p++;
5163 	}
5164 out:
5165 	spin_unlock(&resource_alignment_lock);
5166 	return align;
5167 }
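
/*
 * For reference, a sketch of the accepted parameter forms parsed above
 * (the IDs and addresses are hypothetical):
 *
 *	pci=resource_alignment=12@0000:00:02.0	align that device to 2^12
 *	pci=resource_alignment=pci:8086:1234	match by vendor:device ID
 *
 * Without the "<order>@" prefix the alignment defaults to PAGE_SIZE;
 * multiple entries are separated by ';' or ','.
 */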
5168 
5169 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
5170 					   resource_size_t align, bool resize)
5171 {
5172 	struct resource *r = &dev->resource[bar];
5173 	resource_size_t size;
5174 
5175 	if (!(r->flags & IORESOURCE_MEM))
5176 		return;
5177 
5178 	if (r->flags & IORESOURCE_PCI_FIXED) {
5179 		dev_info(&dev->dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
5180 			 bar, r, (unsigned long long)align);
5181 		return;
5182 	}
5183 
5184 	size = resource_size(r);
5185 	if (size >= align)
5186 		return;
5187 
5188 	/*
5189 	 * Increase the alignment of the resource.  There are two ways we
5190 	 * can do this:
5191 	 *
5192 	 * 1) Increase the size of the resource.  BARs are aligned on their
5193 	 *    size, so when we reallocate space for this resource, we'll
5194 	 *    allocate it with the larger alignment.  This also prevents
5195 	 *    assignment of any other BARs inside the alignment region, so
5196 	 *    if we're requesting page alignment, this means no other BARs
5197 	 *    will share the page.
5198 	 *
5199 	 *    The disadvantage is that this makes the resource larger than
5200 	 *    the hardware BAR, which may break drivers that compute things
5201 	 *    based on the resource size, e.g., to find registers at a
5202 	 *    fixed offset before the end of the BAR.
5203 	 *
5204 	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
5205 	 *    set r->start to the desired alignment.  By itself this
5206 	 *    doesn't prevent other BARs being put inside the alignment
5207 	 *    region, but if we realign *every* resource of every device in
5208 	 *    the system, none of them will share an alignment region.
5209 	 *
5210 	 * When the user has requested alignment for only some devices via
5211 	 * the "pci=resource_alignment" argument, "resize" is true and we
5212 	 * use the first method.  Otherwise we assume we're aligning all
5213 	 * devices and we use the second.
5214 	 */
5215 
5216 	dev_info(&dev->dev, "BAR%d %pR: requesting alignment to %#llx\n",
5217 		 bar, r, (unsigned long long)align);
5218 
5219 	if (resize) {
5220 		r->start = 0;
5221 		r->end = align - 1;
5222 	} else {
5223 		r->flags &= ~IORESOURCE_SIZEALIGN;
5224 		r->flags |= IORESOURCE_STARTALIGN;
5225 		r->start = align;
5226 		r->end = r->start + size - 1;
5227 	}
5228 	r->flags |= IORESOURCE_UNSET;
5229 }
5230 
5231 /*
5232  * This function disables memory decoding and releases memory resources
5233  * of the device specified by the kernel's boot parameter 'pci=resource_alignment='.
5234  * It also rounds up the size to the specified alignment.
5235  * Later on, the kernel will assign the page-aligned memory resource back
5236  * to the device.
5237  */
5238 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
5239 {
5240 	int i;
5241 	struct resource *r;
5242 	resource_size_t align;
5243 	u16 command;
5244 	bool resize = false;
5245 
5246 	/*
5247 	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
5248 	 * 3.4.1.11.  Their resources are allocated from the space
5249 	 * described by the VF BARx register in the PF's SR-IOV capability.
5250 	 * We can't influence their alignment here.
5251 	 */
5252 	if (dev->is_virtfn)
5253 		return;
5254 
5255 	/* check if specified PCI is target device to reassign */
5256 	align = pci_specified_resource_alignment(dev, &resize);
5257 	if (!align)
5258 		return;
5259 
5260 	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
5261 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
5262 		dev_warn(&dev->dev,
5263 			"Can't reassign resources to host bridge.\n");
5264 		return;
5265 	}
5266 
5267 	dev_info(&dev->dev,
5268 		"Disabling memory decoding and releasing memory resources.\n");
5269 	pci_read_config_word(dev, PCI_COMMAND, &command);
5270 	command &= ~PCI_COMMAND_MEMORY;
5271 	pci_write_config_word(dev, PCI_COMMAND, command);
5272 
5273 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
5274 		pci_request_resource_alignment(dev, i, align, resize);
5275 
5276 	/*
5277 	 * Need to disable the bridge's resource window to allow
5278 	 * the kernel to reassign a new resource window
5279 	 * later on.
5280 	 */
5281 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
5282 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
5283 		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
5284 			r = &dev->resource[i];
5285 			if (!(r->flags & IORESOURCE_MEM))
5286 				continue;
5287 			r->flags |= IORESOURCE_UNSET;
5288 			r->end = resource_size(r) - 1;
5289 			r->start = 0;
5290 		}
5291 		pci_disable_bridge_window(dev);
5292 	}
5293 }
5294 
5295 static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
5296 {
5297 	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
5298 		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
5299 	spin_lock(&resource_alignment_lock);
5300 	strncpy(resource_alignment_param, buf, count);
5301 	resource_alignment_param[count] = '\0';
5302 	spin_unlock(&resource_alignment_lock);
5303 	return count;
5304 }
5305 
5306 static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
5307 {
5308 	size_t count;
5309 	spin_lock(&resource_alignment_lock);
5310 	count = snprintf(buf, size, "%s", resource_alignment_param);
5311 	spin_unlock(&resource_alignment_lock);
5312 	return count;
5313 }
5314 
5315 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
5316 {
5317 	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
5318 }
5319 
5320 static ssize_t pci_resource_alignment_store(struct bus_type *bus,
5321 					const char *buf, size_t count)
5322 {
5323 	return pci_set_resource_alignment_param(buf, count);
5324 }
5325 
5326 static BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
5327 					pci_resource_alignment_store);
5328 
5329 static int __init pci_resource_alignment_sysfs_init(void)
5330 {
5331 	return bus_create_file(&pci_bus_type,
5332 					&bus_attr_resource_alignment);
5333 }
5334 late_initcall(pci_resource_alignment_sysfs_init);
5335 
5336 static void pci_no_domains(void)
5337 {
5338 #ifdef CONFIG_PCI_DOMAINS
5339 	pci_domains_supported = 0;
5340 #endif
5341 }
5342 
5343 #ifdef CONFIG_PCI_DOMAINS
5344 static atomic_t __domain_nr = ATOMIC_INIT(-1);
5345 
5346 int pci_get_new_domain_nr(void)
5347 {
5348 	return atomic_inc_return(&__domain_nr);
5349 }
5350 
5351 #ifdef CONFIG_PCI_DOMAINS_GENERIC
5352 static int of_pci_bus_find_domain_nr(struct device *parent)
5353 {
5354 	static int use_dt_domains = -1;
5355 	int domain = -1;
5356 
5357 	if (parent)
5358 		domain = of_get_pci_domain_nr(parent->of_node);
5359 	/*
5360 	 * Check DT domain and use_dt_domains values.
5361 	 *
5362 	 * If DT domain property is valid (domain >= 0) and
5363 	 * use_dt_domains != 0, the DT assignment is valid since this means
5364 	 * we have not previously allocated a domain number by using
5365 	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
5366 	 * 1, to indicate that we have just assigned a domain number from
5367 	 * DT.
5368 	 *
5369 	 * If the DT domain property value is not valid (i.e. domain < 0), and we
5370 	 * have not previously assigned a domain number from DT
5371 	 * (use_dt_domains != 1) we should assign a domain number by
5372 	 * using the:
5373 	 *
5374 	 * pci_get_new_domain_nr()
5375 	 *
5376 	 * API and update the use_dt_domains value to keep track of method we
5377 	 * are using to assign domain numbers (use_dt_domains = 0).
5378 	 *
5379 	 * All other combinations imply we have a platform that is trying
5380 	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
5381 	 * which is a recipe for domain mishandling and it is prevented by
5382 	 * invalidating the domain value (domain = -1) and printing a
5383 	 * corresponding error.
5384 	 */
5385 	if (domain >= 0 && use_dt_domains) {
5386 		use_dt_domains = 1;
5387 	} else if (domain < 0 && use_dt_domains != 1) {
5388 		use_dt_domains = 0;
5389 		domain = pci_get_new_domain_nr();
5390 	} else {
5391 		dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n",
5392 			parent->of_node->full_name);
5393 		domain = -1;
5394 	}
5395 
5396 	return domain;
5397 }
5398 
5399 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
5400 {
5401 	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
5402 			       acpi_pci_bus_find_domain_nr(bus);
5403 }
5404 #endif
5405 #endif
5406 
5407 /**
5408  * pci_ext_cfg_avail - can we access extended PCI config space?
5409  *
5410  * Returns 1 if we can access PCI extended config space (offsets
5411  * greater than 0xff). This is the default implementation. Architecture
5412  * implementations can override this.
5413  */
5414 int __weak pci_ext_cfg_avail(void)
5415 {
5416 	return 1;
5417 }
5418 
5419 void __weak pci_fixup_cardbus(struct pci_bus *bus)
5420 {
5421 }
5422 EXPORT_SYMBOL(pci_fixup_cardbus);
5423 
5424 static int __init pci_setup(char *str)
5425 {
5426 	while (str) {
5427 		char *k = strchr(str, ',');
5428 		if (k)
5429 			*k++ = 0;
5430 		if (*str && (str = pcibios_setup(str)) && *str) {
5431 			if (!strcmp(str, "nomsi")) {
5432 				pci_no_msi();
5433 			} else if (!strcmp(str, "noaer")) {
5434 				pci_no_aer();
5435 			} else if (!strncmp(str, "realloc=", 8)) {
5436 				pci_realloc_get_opt(str + 8);
5437 			} else if (!strncmp(str, "realloc", 7)) {
5438 				pci_realloc_get_opt("on");
5439 			} else if (!strcmp(str, "nodomains")) {
5440 				pci_no_domains();
5441 			} else if (!strncmp(str, "noari", 5)) {
5442 				pcie_ari_disabled = true;
5443 			} else if (!strncmp(str, "cbiosize=", 9)) {
5444 				pci_cardbus_io_size = memparse(str + 9, &str);
5445 			} else if (!strncmp(str, "cbmemsize=", 10)) {
5446 				pci_cardbus_mem_size = memparse(str + 10, &str);
5447 			} else if (!strncmp(str, "resource_alignment=", 19)) {
5448 				pci_set_resource_alignment_param(str + 19,
5449 							strlen(str + 19));
5450 			} else if (!strncmp(str, "ecrc=", 5)) {
5451 				pcie_ecrc_get_policy(str + 5);
5452 			} else if (!strncmp(str, "hpiosize=", 9)) {
5453 				pci_hotplug_io_size = memparse(str + 9, &str);
5454 			} else if (!strncmp(str, "hpmemsize=", 10)) {
5455 				pci_hotplug_mem_size = memparse(str + 10, &str);
5456 			} else if (!strncmp(str, "hpbussize=", 10)) {
5457 				pci_hotplug_bus_size =
5458 					simple_strtoul(str + 10, &str, 0);
5459 				if (pci_hotplug_bus_size > 0xff)
5460 					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
5461 			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
5462 				pcie_bus_config = PCIE_BUS_TUNE_OFF;
5463 			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
5464 				pcie_bus_config = PCIE_BUS_SAFE;
5465 			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
5466 				pcie_bus_config = PCIE_BUS_PERFORMANCE;
5467 			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
5468 				pcie_bus_config = PCIE_BUS_PEER2PEER;
5469 			} else if (!strncmp(str, "pcie_scan_all", 13)) {
5470 				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
5471 			} else {
5472 				printk(KERN_ERR "PCI: Unknown option `%s'\n",
5473 						str);
5474 			}
5475 		}
5476 		str = k;
5477 	}
5478 	return 0;
5479 }
5480 early_param("pci", pci_setup);
5481