xref: /openbmc/linux/drivers/pci/pci.c (revision ce932d0c5589e9766e089c22c66890dfc48fbd94)
/*
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm/setup.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/*
 * The default CLS is used if the arch didn't set CLS explicitly and not
 * all PCI devices agree on the same value.  The arch can override either
 * the default or the actual value as it sees fit.  Note that this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_nocache(pci_resource_start(pdev, bar),
				     pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif
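
/*
 * Example (illustrative sketch, not part of the original file): a driver's
 * probe path might map its first memory BAR with pci_ioremap_bar() instead
 * of open-coding ioremap_nocache().  The "foo" names are hypothetical.
 *
 *	static int foo_map(struct pci_dev *pdev)
 *	{
 *		void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *
 *		if (!regs)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 * The BAR must be a memory resource; for an I/O BAR the function warns and
 * returns NULL, as the check above shows.
 */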

#if 0
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if (n > max)
			max = n;
	}
	return max;
}

#endif  /*  0  */

#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
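
/*
 * Example (illustrative sketch, not part of the original file): locating
 * the power management capability and reading its PMC register; error
 * handling is elided and "pm"/"pmc" are local names.
 *
 *	int pm;
 *	u16 pmc;
 *
 *	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
 *	if (pm)
 *		pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
 *
 * This mirrors what pci_pm_init() later in this file does for real.
 */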

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
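
/*
 * Example (illustrative sketch, not part of the original file): testing
 * whether a device advertises Advanced Error Reporting before touching
 * its AER registers.
 *
 *	int aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
 *
 *	if (aer) {
 *		u32 mask;
 *
 *		pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &mask);
 *	}
 *
 * PCI_ERR_UNCOR_MASK is the uncorrectable error mask register offset from
 * include/linux/pci_regs.h.
 */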

/**
 * pci_bus_find_ext_capability - find an extended capability
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_ext_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
				int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (pci_bus_read_config_dword(bus, devfn, pos, &header)
	    != PCIBIOS_SUCCESSFUL)
		return 0;
	if (header == 0xffffffff || header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_bus_read_config_dword(bus, devfn, pos, &header)
		    != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);
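
/*
 * Example (illustrative sketch, not part of the original file): walking all
 * MSI-mapping Hypertransport capabilities with an explicit bound, as the
 * "NB" above recommends.  "guard" is a hypothetical local name.
 *
 *	int pos, guard = PCI_FIND_CAP_TTL;
 *
 *	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
 *	while (pos && guard--) {
 *		... inspect the capability at "pos" ...
 *		pos = pci_find_next_ht_capability(dev, pos,
 *						  HT_CAPTYPE_MSI_MAPPING);
 *	}
 */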

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure containing the resources to be searched
 * @res: child resource record for which parent is sought
 *
 *  For the given resource region of the given device, return the resource
 *  region of the parent bus which the given region is contained in, or
 *  from which it should be allocated.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;
		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
		if (!best)
			best = r;
	}
	return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;
	/*
	 * Validate the current state: we can enter D0 from any state,
	 * but any other target state must be at least as deep as the
	 * state the device is currently in.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Mandatory power management transition delays; see
	 * PCI PM 1.1, section 5.6.1, table 18.
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	} else {
		error = -ENODEV;
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	}

	return error;
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0)
		pci_platform_power_transition(dev, PCI_D0);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	return state >= PCI_D0 ?
			pci_platform_power_transition(dev, state) : -EINVAL;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	__pci_start_power_transition(dev, state);

	/*
	 * This device is quirked never to be put into D3, so honor the
	 * quirk and bail out.
	 */
	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;
	/*
	 * When aspm_policy is "powersave" this call ensures
	 * that ASPM is configured.
	 */
	if (!error && dev->bus->self)
		pcie_aspm_powersave_config_link(dev->bus->self);

	return error;
}
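
/*
 * Example (illustrative sketch, not part of the original file): a legacy
 * suspend handler putting its device into D3hot after saving state.
 * "foo_suspend" is a hypothetical driver callback.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_disable_device(pdev);
 *		return pci_set_power_state(pdev, PCI_D3hot);
 *	}
 */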

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to the suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);
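
/*
 * Example (illustrative sketch, not part of the original file): instead of
 * hard-coding D3hot, a legacy suspend handler can let the platform pick
 * the target state.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		return pci_set_power_state(pdev,
 *					   pci_choose_state(pdev, state));
 *	}
 */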

#define PCI_EXP_SAVE_REGS	7

#define pcie_cap_has_devctl(type, flags)	1
#define pcie_cap_has_lnkctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_ENDPOINT ||	\
		  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
		   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags)			\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_RC_EC))
#define pcie_cap_has_devctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_lnkctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_sltctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)

static struct pci_cap_saved_state *pci_find_saved_cap(
	struct pci_dev *pci_dev, char cap)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *pos;

	hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		dev_dbg(&pdev->dev, "restoring config space at offset "
			"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
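
/*
 * Example (illustrative sketch, not part of the original file):
 * pci_save_state() and pci_restore_state() are normally used as a pair
 * around an operation that clobbers the device's configuration.
 *
 *	pci_save_state(pdev);
 *	... device loses its configuration (suspend, reset, ...) ...
 *	pci_restore_state(pdev);
 *
 * Note that pci_restore_state() is a no-op unless state was actually
 * saved, and it clears dev->state_saved, so one save satisfies exactly
 * one restore.
 */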

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
1052 {
1053 	struct pci_saved_state *state;
1054 	struct pci_cap_saved_state *tmp;
1055 	struct pci_cap_saved_data *cap;
1056 	struct hlist_node *pos;
1057 	size_t size;
1058 
1059 	if (!dev->state_saved)
1060 		return NULL;
1061 
1062 	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1063 
1064 	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
1065 		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1066 
1067 	state = kzalloc(size, GFP_KERNEL);
1068 	if (!state)
1069 		return NULL;
1070 
1071 	memcpy(state->config_space, dev->saved_config_space,
1072 	       sizeof(state->config_space));
1073 
1074 	cap = state->cap;
1075 	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
1076 		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1077 		memcpy(cap, &tmp->cap, len);
1078 		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1079 	}
1080 	/* Empty cap_save terminates list */
1081 
1082 	return state;
1083 }
1084 EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = pci_find_saved_cap(dev, cap->cap_nr);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
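
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * that must stash a device's saved state across a destructive operation
 * (for instance, a reset performed by another owner) can use the
 * opaque-state API.
 *
 *	struct pci_saved_state *state;
 *
 *	pci_save_state(pdev);
 *	state = pci_store_saved_state(pdev);
 *	... something clobbers the saved state or the device ...
 *	pci_load_and_free_saved_state(pdev, &state);
 *	pci_restore_state(pdev);
 */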

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code; write a proper resume handler and use it
 * instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}

static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	/* walk the resources, skipping only the SR-IOV ones */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable Memory resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O and memory. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 *
 *  Note we don't actually enable the device many times if we call
 *  this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
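
/*
 * Example (illustrative sketch, not part of the original file): the
 * canonical enable sequence in a probe routine, with error unwinding.
 * "foo" is a hypothetical driver name.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int err;
 *
 *		err = pci_enable_device(pdev);
 *		if (err)
 *			return err;
 *		err = pci_request_regions(pdev, "foo");
 *		if (err)
 *			goto err_disable;
 *		pci_set_master(pdev);
 *		return 0;
 *
 *	err_disable:
 *		pci_disable_device(pdev);
 *		return err;
 *	}
 */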

/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
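
/*
 * Example (illustrative sketch, not part of the original file): with the
 * managed variant, the explicit unwind of pci_enable_device() failures
 * disappears; everything is released automatically on driver detach.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int err;
 *
 *		err = pcim_enable_device(pdev);
 *		if (err)
 *			return err;
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */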

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
							enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	if (!list_empty(&pci_pme_list)) {
		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
			if (pme_dev->dev->pme_poll) {
				pci_pme_wakeup(pme_dev->dev, NULL);
			} else {
				list_del(&pme_dev->list);
				kfree(pme_dev);
			}
		}
		if (!list_empty(&pci_pme_list))
			schedule_delayed_work(&pci_pme_work,
					      msecs_to_jiffies(PME_TIMEOUT));
	}
	mutex_unlock(&pci_pme_list_mutex);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep. The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set. The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev)
				goto out;
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				schedule_delayed_work(&pci_pme_work,
						      msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

out:
	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = runtime ? platform_pci_run_wake(dev, true) :
					platform_pci_sleep_wake(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);
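
/*
 * Example (illustrative sketch, not part of the original file): a suspend
 * path arming wake-up when the user has enabled it, via the
 * pci_enable_wake() wrapper (which, in this kernel, calls
 * __pci_enable_wake() with @runtime false).
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_power_t target = pci_choose_state(pdev, state);
 *
 *		pci_save_state(pdev);
 *		pci_enable_wake(pdev, target,
 *				device_may_wakeup(&pdev->dev));
 *		return pci_set_power_state(pdev, target);
 *	}
 */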

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns an error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}

/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}
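
/*
 * Example (illustrative sketch, not part of the original file): bus-level
 * or platform glue code typically pairs pci_prepare_to_sleep() with
 * pci_back_from_sleep() (below) around a system sleep transition.
 *
 *	error = pci_prepare_to_sleep(pdev);
 *	... system sleeps and wakes ...
 *	error = pci_back_from_sleep(pdev);
 */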

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		__pci_enable_wake(dev, target_state, true, false);

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device supports
 * PME and one of its upstream bridges can generate wake-up events.
 */
1801 bool pci_dev_run_wake(struct pci_dev *dev)
1802 {
1803 	struct pci_bus *bus = dev->bus;
1804 
1805 	if (device_run_wake(&dev->dev))
1806 		return true;
1807 
1808 	if (!dev->pme_support)
1809 		return false;
1810 
1811 	while (bus->parent) {
1812 		struct pci_dev *bridge = bus->self;
1813 
1814 		if (device_run_wake(&bridge->dev))
1815 			return true;
1816 
1817 		bus = bus->parent;
1818 	}
1819 
1820 	/* We have reached the root bus. */
1821 	if (bus->bridge)
1822 		return device_run_wake(bus->bridge);
1823 
1824 	return false;
1825 }
1826 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1827 
1828 /**
1829  * pci_pm_init - Initialize PM functions of given PCI device
1830  * @dev: PCI device to handle.
1831  */
1832 void pci_pm_init(struct pci_dev *dev)
1833 {
1834 	int pm;
1835 	u16 pmc;
1836 
1837 	pm_runtime_forbid(&dev->dev);
1838 	device_enable_async_suspend(&dev->dev);
1839 	dev->wakeup_prepared = false;
1840 
1841 	dev->pm_cap = 0;
1842 
1843 	/* find PCI PM capability in list */
1844 	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1845 	if (!pm)
1846 		return;
1847 	/* Check device's ability to generate PME# */
1848 	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
1849 
1850 	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1851 		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1852 			pmc & PCI_PM_CAP_VER_MASK);
1853 		return;
1854 	}
1855 
1856 	dev->pm_cap = pm;
1857 	dev->d3_delay = PCI_PM_D3_WAIT;
1858 
1859 	dev->d1_support = false;
1860 	dev->d2_support = false;
1861 	if (!pci_no_d1d2(dev)) {
1862 		if (pmc & PCI_PM_CAP_D1)
1863 			dev->d1_support = true;
1864 		if (pmc & PCI_PM_CAP_D2)
1865 			dev->d2_support = true;
1866 
1867 		if (dev->d1_support || dev->d2_support)
1868 			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
1869 				   dev->d1_support ? " D1" : "",
1870 				   dev->d2_support ? " D2" : "");
1871 	}
1872 
1873 	pmc &= PCI_PM_CAP_PME_MASK;
1874 	if (pmc) {
1875 		dev_printk(KERN_DEBUG, &dev->dev,
1876 			 "PME# supported from%s%s%s%s%s\n",
1877 			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1878 			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1879 			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1880 			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1881 			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1882 		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
1883 		dev->pme_poll = true;
1884 		/*
1885 		 * Make device's PM flags reflect the wake-up capability, but
1886 		 * let the user space enable it to wake up the system as needed.
1887 		 */
1888 		device_set_wakeup_capable(&dev->dev, true);
1889 		/* Disable the PME# generation functionality */
1890 		pci_pme_active(dev, false);
1891 	} else {
1892 		dev->pme_support = 0;
1893 	}
1894 }
1895 
1896 /**
1897  * platform_pci_wakeup_init - init platform wakeup if present
1898  * @dev: PCI device
1899  *
1900  * Some devices don't have PCI PM caps but can still generate wakeup
1901  * events through platform methods (like ACPI events).  If @dev supports
1902  * platform wakeup events, set the device flag to indicate as much.  This
1903  * may be redundant if the device also supports PCI PM caps, but double
1904  * initialization should be safe in that case.
1905  */
1906 void platform_pci_wakeup_init(struct pci_dev *dev)
1907 {
1908 	if (!platform_pci_can_wakeup(dev))
1909 		return;
1910 
1911 	device_set_wakeup_capable(&dev->dev, true);
1912 	platform_pci_sleep_wake(dev, false);
1913 }
1914 
1915 static void pci_add_saved_cap(struct pci_dev *pci_dev,
1916 	struct pci_cap_saved_state *new_cap)
1917 {
1918 	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
1919 }
1920 
1921 /**
1922  * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
1923  * @dev: the PCI device
1924  * @cap: the capability to allocate the buffer for
1925  * @size: requested size of the buffer
1926  */
1927 static int pci_add_cap_save_buffer(
1928 	struct pci_dev *dev, char cap, unsigned int size)
1929 {
1930 	int pos;
1931 	struct pci_cap_saved_state *save_state;
1932 
1933 	pos = pci_find_capability(dev, cap);
1934 	if (pos <= 0)
1935 		return 0;
1936 
1937 	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1938 	if (!save_state)
1939 		return -ENOMEM;
1940 
1941 	save_state->cap.cap_nr = cap;
1942 	save_state->cap.size = size;
1943 	pci_add_saved_cap(dev, save_state);
1944 
1945 	return 0;
1946 }
1947 
1948 /**
1949  * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
1950  * @dev: the PCI device
1951  */
1952 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1953 {
1954 	int error;
1955 
1956 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1957 					PCI_EXP_SAVE_REGS * sizeof(u16));
1958 	if (error)
1959 		dev_err(&dev->dev,
1960 			"unable to preallocate PCI Express save buffer\n");
1961 
1962 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1963 	if (error)
1964 		dev_err(&dev->dev,
1965 			"unable to preallocate PCI-X save buffer\n");
1966 }
1967 
1968 void pci_free_cap_save_buffers(struct pci_dev *dev)
1969 {
1970 	struct pci_cap_saved_state *tmp;
1971 	struct hlist_node *pos, *n;
1972 
1973 	hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
1974 		kfree(tmp);
1975 }
1976 
1977 /**
1978  * pci_enable_ari - enable ARI forwarding if hardware supports it
1979  * @dev: the PCI device
1980  */
1981 void pci_enable_ari(struct pci_dev *dev)
1982 {
1983 	int pos;
1984 	u32 cap;
1985 	u16 flags, ctrl;
1986 	struct pci_dev *bridge;
1987 
1988 	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
1989 		return;
1990 
1991 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1992 	if (!pos)
1993 		return;
1994 
1995 	bridge = dev->bus->self;
1996 	if (!bridge || !pci_is_pcie(bridge))
1997 		return;
1998 
1999 	pos = pci_pcie_cap(bridge);
2000 	if (!pos)
2001 		return;
2002 
2003 	/* ARI is a PCIe v2 feature */
2004 	pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
2005 	if ((flags & PCI_EXP_FLAGS_VERS) < 2)
2006 		return;
2007 
2008 	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
2009 	if (!(cap & PCI_EXP_DEVCAP2_ARI))
2010 		return;
2011 
2012 	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
2013 	ctrl |= PCI_EXP_DEVCTL2_ARI;
2014 	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
2015 
2016 	bridge->ari_enabled = 1;
2017 }
2018 
2019 /**
2020  * pci_enable_ido - enable ID-based ordering on a device
2021  * @dev: the PCI device
2022  * @type: which types of IDO to enable
2023  *
2024  * Enable ID-based ordering on @dev.  @type can contain the bits
2025  * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
2026  * which types of transactions are allowed to be re-ordered.
2027  */
2028 void pci_enable_ido(struct pci_dev *dev, unsigned long type)
2029 {
2030 	int pos;
2031 	u16 ctrl;
2032 
2033 	pos = pci_pcie_cap(dev);
2034 	if (!pos)
2035 		return;
2036 
2037 	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2038 	if (type & PCI_EXP_IDO_REQUEST)
2039 		ctrl |= PCI_EXP_IDO_REQ_EN;
2040 	if (type & PCI_EXP_IDO_COMPLETION)
2041 		ctrl |= PCI_EXP_IDO_CMP_EN;
2042 	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2043 }
2044 EXPORT_SYMBOL(pci_enable_ido);
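
/*
 * Usage sketch (hypothetical device): a driver whose DMA streams carry
 * no ordering dependencies between them might enable IDO for both
 * transaction types at probe time:
 *
 *	pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
 *
 * and call pci_disable_ido() with the same mask on remove.
 */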
2045 
2046 /**
2047  * pci_disable_ido - disable ID-based ordering on a device
2048  * @dev: the PCI device
2049  * @type: which types of IDO to disable
2050  */
2051 void pci_disable_ido(struct pci_dev *dev, unsigned long type)
2052 {
2053 	int pos;
2054 	u16 ctrl;
2055 
2056 	if (!pci_is_pcie(dev))
2057 		return;
2058 
2059 	pos = pci_pcie_cap(dev);
2060 	if (!pos)
2061 		return;
2062 
2063 	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2064 	if (type & PCI_EXP_IDO_REQUEST)
2065 		ctrl &= ~PCI_EXP_IDO_REQ_EN;
2066 	if (type & PCI_EXP_IDO_COMPLETION)
2067 		ctrl &= ~PCI_EXP_IDO_CMP_EN;
2068 	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2069 }
2070 EXPORT_SYMBOL(pci_disable_ido);
2071 
2072 /**
2073  * pci_enable_obff - enable optimized buffer flush/fill
2074  * @dev: PCI device
2075  * @type: type of signaling to use
2076  *
2077  * Try to enable @type OBFF signaling on @dev.  It will try using WAKE#
2078  * signaling if possible, falling back to message signaling only if
2079  * WAKE# isn't supported.  @type should indicate whether the PCIe link
2080  * be brought out of L0s or L1 to send the message.  It should be either
2081  * %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_OBFF_SIGNAL_L0.
2082  *
2083  * If your device can benefit from receiving all messages, even at the
2084  * power cost of bringing the link back up from a low power state, use
2085  * %PCI_EXP_OBFF_SIGNAL_ALWAYS.  Otherwise, use %PCI_OBFF_SIGNAL_L0 (the
2086  * preferred type).
2087  *
2088  * RETURNS:
2089  * Zero on success, appropriate error number on failure.
2090  */
2091 int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2092 {
2093 	int pos;
2094 	u32 cap;
2095 	u16 ctrl;
2096 	int ret;
2097 
2098 	if (!pci_is_pcie(dev))
2099 		return -ENOTSUPP;
2100 
2101 	pos = pci_pcie_cap(dev);
2102 	if (!pos)
2103 		return -ENOTSUPP;
2104 
2105 	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2106 	if (!(cap & PCI_EXP_OBFF_MASK))
2107 		return -ENOTSUPP; /* no OBFF support at all */
2108 
2109 	/* Make sure the topology supports OBFF as well */
2110 	if (dev->bus->self) {
2111 		ret = pci_enable_obff(dev->bus->self, type);
2112 		if (ret)
2113 			return ret;
2114 	}
2115 
2116 	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2117 	if (cap & PCI_EXP_OBFF_WAKE)
2118 		ctrl |= PCI_EXP_OBFF_WAKE_EN;
2119 	else {
2120 		switch (type) {
2121 		case PCI_EXP_OBFF_SIGNAL_L0:
2122 			if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2123 				ctrl |= PCI_EXP_OBFF_MSGA_EN;
2124 			break;
2125 		case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2126 			ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2127 			ctrl |= PCI_EXP_OBFF_MSGB_EN;
2128 			break;
2129 		default:
2130 			WARN(1, "bad OBFF signal type\n");
2131 			return -ENOTSUPP;
2132 		}
2133 	}
2134 	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2135 
2136 	return 0;
2137 }
2138 EXPORT_SYMBOL(pci_enable_obff);
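
/*
 * Usage sketch (hypothetical): a device that does not need every OBFF
 * hint would typically ask for the cheaper L0 variant and treat failure
 * as non-fatal:
 *
 *	if (pci_enable_obff(pdev, PCI_EXP_OBFF_SIGNAL_L0))
 *		dev_info(&pdev->dev, "OBFF not available\n");
 */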
2139 
2140 /**
2141  * pci_disable_obff - disable optimized buffer flush/fill
2142  * @dev: PCI device
2143  *
2144  * Disable OBFF on @dev.
2145  */
2146 void pci_disable_obff(struct pci_dev *dev)
2147 {
2148 	int pos;
2149 	u16 ctrl;
2150 
2151 	if (!pci_is_pcie(dev))
2152 		return;
2153 
2154 	pos = pci_pcie_cap(dev);
2155 	if (!pos)
2156 		return;
2157 
2158 	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2159 	ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2160 	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2161 }
2162 EXPORT_SYMBOL(pci_disable_obff);
2163 
2164 /**
2165  * pci_ltr_supported - check whether a device supports LTR
2166  * @dev: PCI device
2167  *
2168  * RETURNS:
2169  * True if @dev supports latency tolerance reporting, false otherwise.
2170  */
2171 bool pci_ltr_supported(struct pci_dev *dev)
2172 {
2173 	int pos;
2174 	u32 cap;
2175 
2176 	if (!pci_is_pcie(dev))
2177 		return false;
2178 
2179 	pos = pci_pcie_cap(dev);
2180 	if (!pos)
2181 		return false;
2182 
2183 	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2184 
2185 	return cap & PCI_EXP_DEVCAP2_LTR;
2186 }
2187 EXPORT_SYMBOL(pci_ltr_supported);
2188 
2189 /**
2190  * pci_enable_ltr - enable latency tolerance reporting
2191  * @dev: PCI device
2192  *
2193  * Enable LTR on @dev if possible, which means enabling it first on
2194  * upstream ports.
2195  *
2196  * RETURNS:
2197  * Zero on success, errno on failure.
2198  */
2199 int pci_enable_ltr(struct pci_dev *dev)
2200 {
2201 	int pos;
2202 	u16 ctrl;
2203 	int ret;
2204 
2205 	if (!pci_ltr_supported(dev))
2206 		return -ENOTSUPP;
2207 
2208 	pos = pci_pcie_cap(dev);
2209 	if (!pos)
2210 		return -ENOTSUPP;
2211 
2212 	/* Only primary function can enable/disable LTR */
2213 	if (PCI_FUNC(dev->devfn) != 0)
2214 		return -EINVAL;
2215 
2216 	/* Enable upstream ports first */
2217 	if (dev->bus->self) {
2218 		ret = pci_enable_ltr(dev->bus->self);
2219 		if (ret)
2220 			return ret;
2221 	}
2222 
2223 	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2224 	ctrl |= PCI_EXP_LTR_EN;
2225 	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2226 
2227 	return 0;
2228 }
2229 EXPORT_SYMBOL(pci_enable_ltr);
2230 
2231 /**
2232  * pci_disable_ltr - disable latency tolerance reporting
2233  * @dev: PCI device
2234  */
2235 void pci_disable_ltr(struct pci_dev *dev)
2236 {
2237 	int pos;
2238 	u16 ctrl;
2239 
2240 	if (!pci_ltr_supported(dev))
2241 		return;
2242 
2243 	pos = pci_pcie_cap(dev);
2244 	if (!pos)
2245 		return;
2246 
2247 	/* Only primary function can enable/disable LTR */
2248 	if (PCI_FUNC(dev->devfn) != 0)
2249 		return;
2250 
2251 	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2252 	ctrl &= ~PCI_EXP_LTR_EN;
2253 	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2254 }
2255 EXPORT_SYMBOL(pci_disable_ltr);
2256 
2257 static int __pci_ltr_scale(int *val)
2258 {
2259 	int scale = 0;
2260 
2261 	while (*val > 1023) {
2262 		*val = (*val + 31) / 32;
2263 		scale++;
2264 	}
2265 	return scale;
2266 }
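
/*
 * Worked example of the conversion above: the LTR registers encode a
 * latency as value * 32^scale with value <= 1023 (PCI_LTR_VALUE_MASK).
 * For a request of 70000 ns: (70000 + 31) / 32 = 2188 (scale 1), then
 * (2188 + 31) / 32 = 69 (scale 2), giving value 69, scale 2, i.e.
 * 69 * 1024 = 70656 ns, the requested latency rounded up.
 */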
2267 
2268 /**
2269  * pci_set_ltr - set LTR latency values
2270  * @dev: PCI device
2271  * @snoop_lat_ns: snoop latency in nanoseconds
2272  * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2273  *
2274  * Figure out the scale and set the LTR values accordingly.
2275  */
2276 int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2277 {
2278 	int pos, ret, snoop_scale, nosnoop_scale;
2279 	u16 val;
2280 
2281 	if (!pci_ltr_supported(dev))
2282 		return -ENOTSUPP;
2283 
2284 	snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2285 	nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2286 
2287 	if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2288 	    nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2289 		return -EINVAL;
2290 
2291 	if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2292 	    (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2293 		return -EINVAL;
2294 
2295 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2296 	if (!pos)
2297 		return -ENOTSUPP;
2298 
2299 	val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2300 	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2301 	if (ret)
2302 		return -EIO;
2303 
2304 	val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2305 	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2306 	if (ret)
2307 		return -EIO;
2308 
2309 	return 0;
2310 }
2311 EXPORT_SYMBOL(pci_set_ltr);
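
/*
 * Usage sketch (hypothetical latency figures): a driver that can absorb
 * roughly 70 us of memory access latency for both snooped and
 * non-snooped traffic could advertise that once LTR is enabled:
 *
 *	if (!pci_enable_ltr(pdev))
 *		pci_set_ltr(pdev, 70000, 70000);
 */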
2312 
2313 static int pci_acs_enable;
2314 
2315 /**
2316  * pci_request_acs - ask for ACS to be enabled if supported
2317  */
2318 void pci_request_acs(void)
2319 {
2320 	pci_acs_enable = 1;
2321 }
2322 
2323 /**
2324  * pci_enable_acs - enable ACS if hardware support it
2325  * @dev: the PCI device
2326  */
2327 void pci_enable_acs(struct pci_dev *dev)
2328 {
2329 	int pos;
2330 	u16 cap;
2331 	u16 ctrl;
2332 
2333 	if (!pci_acs_enable)
2334 		return;
2335 
2336 	if (!pci_is_pcie(dev))
2337 		return;
2338 
2339 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2340 	if (!pos)
2341 		return;
2342 
2343 	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2344 	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2345 
2346 	/* Source Validation */
2347 	ctrl |= (cap & PCI_ACS_SV);
2348 
2349 	/* P2P Request Redirect */
2350 	ctrl |= (cap & PCI_ACS_RR);
2351 
2352 	/* P2P Completion Redirect */
2353 	ctrl |= (cap & PCI_ACS_CR);
2354 
2355 	/* Upstream Forwarding */
2356 	ctrl |= (cap & PCI_ACS_UF);
2357 
2358 	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2359 }
2360 
2361 /**
2362  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2363  * @dev: the PCI device
2364  * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2365  *
2366  * Perform INTx swizzling for a device behind one level of bridge.  This is
2367  * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2368  * behind bridges on add-in cards.  For devices with ARI enabled, the slot
2369  * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2370  * the PCI Express Base Specification, Revision 2.1)
2371  */
2372 u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
2373 {
2374 	int slot;
2375 
2376 	if (pci_ari_enabled(dev->bus))
2377 		slot = 0;
2378 	else
2379 		slot = PCI_SLOT(dev->devfn);
2380 
2381 	return (((pin - 1) + slot) % 4) + 1;
2382 }
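
/*
 * Worked example of the swizzle above: a device in slot 2 asserting
 * INTB (pin == 2) behind one bridge yields (((2 - 1) + 2) % 4) + 1 = 4,
 * i.e. the interrupt appears as INTD on the bridge's primary side.
 */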
2383 
2384 int
2385 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2386 {
2387 	u8 pin;
2388 
2389 	pin = dev->pin;
2390 	if (!pin)
2391 		return -1;
2392 
2393 	while (!pci_is_root_bus(dev->bus)) {
2394 		pin = pci_swizzle_interrupt_pin(dev, pin);
2395 		dev = dev->bus->self;
2396 	}
2397 	*bridge = dev;
2398 	return pin;
2399 }
2400 
2401 /**
2402  * pci_common_swizzle - swizzle INTx all the way to root bridge
2403  * @dev: the PCI device
2404  * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2405  *
2406  * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
2407  * bridges all the way up to a PCI root bus.
2408  */
2409 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2410 {
2411 	u8 pin = *pinp;
2412 
2413 	while (!pci_is_root_bus(dev->bus)) {
2414 		pin = pci_swizzle_interrupt_pin(dev, pin);
2415 		dev = dev->bus->self;
2416 	}
2417 	*pinp = pin;
2418 	return PCI_SLOT(dev->devfn);
2419 }
2420 
2421 /**
2422  *	pci_release_region - Release a PCI BAR
2423  *	@pdev: PCI device whose resources were previously reserved by pci_request_region
2424  *	@bar: BAR to release
2425  *
2426  *	Releases the PCI I/O and memory resources previously reserved by a
2427  *	successful call to pci_request_region.  Call this function only
2428  *	after all use of the PCI regions has ceased.
2429  */
2430 void pci_release_region(struct pci_dev *pdev, int bar)
2431 {
2432 	struct pci_devres *dr;
2433 
2434 	if (pci_resource_len(pdev, bar) == 0)
2435 		return;
2436 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2437 		release_region(pci_resource_start(pdev, bar),
2438 				pci_resource_len(pdev, bar));
2439 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2440 		release_mem_region(pci_resource_start(pdev, bar),
2441 				pci_resource_len(pdev, bar));
2442 
2443 	dr = find_pci_dr(pdev);
2444 	if (dr)
2445 		dr->region_mask &= ~(1 << bar);
2446 }
2447 
2448 /**
2449  *	__pci_request_region - Reserve PCI I/O and memory resource
2450  *	@pdev: PCI device whose resources are to be reserved
2451  *	@bar: BAR to be reserved
2452  *	@res_name: Name to be associated with resource.
2453  *	@exclusive: whether the region access is exclusive or not
2454  *
2455  *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2456  *	being reserved by owner @res_name.  Do not access any
2457  *	address inside the PCI regions unless this call returns
2458  *	successfully.
2459  *
2460  *	If @exclusive is set, then the region is marked so that userspace
2461  *	is explicitly not allowed to map the resource via /dev/mem or
2462  * 	sysfs MMIO access.
2463  *
2464  *	Returns 0 on success, or %EBUSY on error.  A warning
2465  *	message is also printed on failure.
2466  */
2467 static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2468 									int exclusive)
2469 {
2470 	struct pci_devres *dr;
2471 
2472 	if (pci_resource_len(pdev, bar) == 0)
2473 		return 0;
2474 
2475 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2476 		if (!request_region(pci_resource_start(pdev, bar),
2477 			    pci_resource_len(pdev, bar), res_name))
2478 			goto err_out;
2479 	}
2480 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
2481 		if (!__request_mem_region(pci_resource_start(pdev, bar),
2482 					pci_resource_len(pdev, bar), res_name,
2483 					exclusive))
2484 			goto err_out;
2485 	}
2486 
2487 	dr = find_pci_dr(pdev);
2488 	if (dr)
2489 		dr->region_mask |= 1 << bar;
2490 
2491 	return 0;
2492 
2493 err_out:
2494 	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
2495 		 &pdev->resource[bar]);
2496 	return -EBUSY;
2497 }
2498 
2499 /**
2500  *	pci_request_region - Reserve PCI I/O and memory resource
2501  *	@pdev: PCI device whose resources are to be reserved
2502  *	@bar: BAR to be reserved
2503  *	@res_name: Name to be associated with resource
2504  *
2505  *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2506  *	being reserved by owner @res_name.  Do not access any
2507  *	address inside the PCI regions unless this call returns
2508  *	successfully.
2509  *
2510  *	Returns 0 on success, or %EBUSY on error.  A warning
2511  *	message is also printed on failure.
2512  */
2513 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2514 {
2515 	return __pci_request_region(pdev, bar, res_name, 0);
2516 }
2517 
2518 /**
2519  *	pci_request_region_exclusive - Reserve PCI I/O and memory resource
2520  *	@pdev: PCI device whose resources are to be reserved
2521  *	@bar: BAR to be reserved
2522  *	@res_name: Name to be associated with resource.
2523  *
2524  *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2525  *	being reserved by owner @res_name.  Do not access any
2526  *	address inside the PCI regions unless this call returns
2527  *	successfully.
2528  *
2529  *	Returns 0 on success, or %EBUSY on error.  A warning
2530  *	message is also printed on failure.
2531  *
2532  *	The key difference that _exclusive makes is that userspace is
2533  *	explicitly not allowed to map the resource via /dev/mem or
2534  * 	sysfs.
2535  */
2536 int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2537 {
2538 	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2539 }
2540 /**
2541  * pci_release_selected_regions - Release selected PCI I/O and memory resources
2542  * @pdev: PCI device whose resources were previously reserved
2543  * @bars: Bitmask of BARs to be released
2544  *
2545  * Release selected PCI I/O and memory resources previously reserved.
2546  * Call this function only after all use of the PCI regions has ceased.
2547  */
2548 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2549 {
2550 	int i;
2551 
2552 	for (i = 0; i < 6; i++)
2553 		if (bars & (1 << i))
2554 			pci_release_region(pdev, i);
2555 }
2556 
2557 int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2558 				 const char *res_name, int excl)
2559 {
2560 	int i;
2561 
2562 	for (i = 0; i < 6; i++)
2563 		if (bars & (1 << i))
2564 			if (__pci_request_region(pdev, i, res_name, excl))
2565 				goto err_out;
2566 	return 0;
2567 
2568 err_out:
2569 	while (--i >= 0)
2570 		if (bars & (1 << i))
2571 			pci_release_region(pdev, i);
2572 
2573 	return -EBUSY;
2574 }
2575 
2576 
2577 /**
2578  * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2579  * @pdev: PCI device whose resources are to be reserved
2580  * @bars: Bitmask of BARs to be requested
2581  * @res_name: Name to be associated with resource
2582  */
2583 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2584 				 const char *res_name)
2585 {
2586 	return __pci_request_selected_regions(pdev, bars, res_name, 0);
2587 }
2588 
2589 int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2590 				 int bars, const char *res_name)
2591 {
2592 	return __pci_request_selected_regions(pdev, bars, res_name,
2593 			IORESOURCE_EXCLUSIVE);
2594 }
2595 
2596 /**
2597  *	pci_release_regions - Release reserved PCI I/O and memory resources
2598  *	@pdev: PCI device whose resources were previously reserved by pci_request_regions
2599  *
2600  *	Releases all PCI I/O and memory resources previously reserved by a
2601  *	successful call to pci_request_regions.  Call this function only
2602  *	after all use of the PCI regions has ceased.
2603  */
2604 
2605 void pci_release_regions(struct pci_dev *pdev)
2606 {
2607 	pci_release_selected_regions(pdev, (1 << 6) - 1);
2608 }
2609 
2610 /**
2611  *	pci_request_regions - Reserve PCI I/O and memory resources
2612  *	@pdev: PCI device whose resources are to be reserved
2613  *	@res_name: Name to be associated with resource.
2614  *
2615  *	Mark all PCI regions associated with PCI device @pdev as
2616  *	being reserved by owner @res_name.  Do not access any
2617  *	address inside the PCI regions unless this call returns
2618  *	successfully.
2619  *
2620  *	Returns 0 on success, or %EBUSY on error.  A warning
2621  *	message is also printed on failure.
2622  */
2623 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2624 {
2625 	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2626 }
2627 
2628 /**
2629  *	pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2630  *	@pdev: PCI device whose resources are to be reserved
2631  *	@res_name: Name to be associated with resource.
2632  *
2633  *	Mark all PCI regions associated with PCI device @pdev as
2634  *	being reserved by owner @res_name.  Do not access any
2635  *	address inside the PCI regions unless this call returns
2636  *	successfully.
2637  *
2638  *	pci_request_regions_exclusive() will mark the region so that
2639  * 	/dev/mem and the sysfs MMIO access will not be allowed.
2640  *
2641  *	Returns 0 on success, or %EBUSY on error.  A warning
2642  *	message is also printed on failure.
2643  */
2644 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2645 {
2646 	return pci_request_selected_regions_exclusive(pdev,
2647 					((1 << 6) - 1), res_name);
2648 }
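
/*
 * Probe-time sketch (hypothetical driver "foo"): the usual pattern is
 * to enable the device, claim all of its regions under the driver's
 * name, and unwind in reverse order on failure:
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int err = pci_enable_device(pdev);
 *
 *		if (err)
 *			return err;
 *		err = pci_request_regions(pdev, "foo");
 *		if (err)
 *			pci_disable_device(pdev);
 *		return err;
 *	}
 */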
2649 
2650 static void __pci_set_master(struct pci_dev *dev, bool enable)
2651 {
2652 	u16 old_cmd, cmd;
2653 
2654 	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2655 	if (enable)
2656 		cmd = old_cmd | PCI_COMMAND_MASTER;
2657 	else
2658 		cmd = old_cmd & ~PCI_COMMAND_MASTER;
2659 	if (cmd != old_cmd) {
2660 		dev_dbg(&dev->dev, "%s bus mastering\n",
2661 			enable ? "enabling" : "disabling");
2662 		pci_write_config_word(dev, PCI_COMMAND, cmd);
2663 	}
2664 	dev->is_busmaster = enable;
2665 }
2666 
2667 /**
2668  * pcibios_set_master - enable PCI bus-mastering for device dev
2669  * @dev: the PCI device to enable
2670  *
2671  * Enables PCI bus-mastering for the device.  This is the default
2672  * implementation.  Architecture specific implementations can override
2673  * this if necessary.
2674  */
2675 void __weak pcibios_set_master(struct pci_dev *dev)
2676 {
2677 	u8 lat;
2678 
2679 	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2680 	if (pci_is_pcie(dev))
2681 		return;
2682 
2683 	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2684 	if (lat < 16)
2685 		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2686 	else if (lat > pcibios_max_latency)
2687 		lat = pcibios_max_latency;
2688 	else
2689 		return;
2690 	dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
2691 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2692 }
2693 
2694 /**
2695  * pci_set_master - enables bus-mastering for device dev
2696  * @dev: the PCI device to enable
2697  *
2698  * Enables bus-mastering on the device and calls pcibios_set_master()
2699  * to do the needed arch specific settings.
2700  */
2701 void pci_set_master(struct pci_dev *dev)
2702 {
2703 	__pci_set_master(dev, true);
2704 	pcibios_set_master(dev);
2705 }
2706 
2707 /**
2708  * pci_clear_master - disables bus-mastering for device dev
2709  * @dev: the PCI device to disable
2710  */
2711 void pci_clear_master(struct pci_dev *dev)
2712 {
2713 	__pci_set_master(dev, false);
2714 }
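
/*
 * Usage sketch: a DMA-capable driver typically calls pci_set_master()
 * after enabling the device and claiming its regions, and undoes it
 * before the device is released (hypothetical fragment):
 *
 *	pci_set_master(pdev);
 *	... program and run DMA ...
 *	pci_clear_master(pdev);
 */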
2715 
2716 /**
2717  * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2718  * @dev: the PCI device for which MWI is to be enabled
2719  *
2720  * Helper function for pci_set_mwi.
2721  * Originally copied from drivers/net/acenic.c.
2722  * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2723  *
2724  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2725  */
2726 int pci_set_cacheline_size(struct pci_dev *dev)
2727 {
2728 	u8 cacheline_size;
2729 
2730 	if (!pci_cache_line_size)
2731 		return -EINVAL;
2732 
2733 	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2734 	   equal to or a multiple of the right value. */
2735 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2736 	if (cacheline_size >= pci_cache_line_size &&
2737 	    (cacheline_size % pci_cache_line_size) == 0)
2738 		return 0;
2739 
2740 	/* Write the correct value. */
2741 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2742 	/* Read it back. */
2743 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2744 	if (cacheline_size == pci_cache_line_size)
2745 		return 0;
2746 
2747 	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2748 		   "supported\n", pci_cache_line_size << 2);
2749 
2750 	return -EINVAL;
2751 }
2752 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2753 
2754 #ifdef PCI_DISABLE_MWI
2755 int pci_set_mwi(struct pci_dev *dev)
2756 {
2757 	return 0;
2758 }
2759 
2760 int pci_try_set_mwi(struct pci_dev *dev)
2761 {
2762 	return 0;
2763 }
2764 
2765 void pci_clear_mwi(struct pci_dev *dev)
2766 {
2767 }
2768 
2769 #else
2770 
2771 /**
2772  * pci_set_mwi - enables memory-write-invalidate PCI transaction
2773  * @dev: the PCI device for which MWI is enabled
2774  *
2775  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2776  *
2777  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2778  */
2779 int
2780 pci_set_mwi(struct pci_dev *dev)
2781 {
2782 	int rc;
2783 	u16 cmd;
2784 
2785 	rc = pci_set_cacheline_size(dev);
2786 	if (rc)
2787 		return rc;
2788 
2789 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2790 	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
2791 		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2792 		cmd |= PCI_COMMAND_INVALIDATE;
2793 		pci_write_config_word(dev, PCI_COMMAND, cmd);
2794 	}
2795 
2796 	return 0;
2797 }
2798 
2799 /**
2800  * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2801  * @dev: the PCI device for which MWI is enabled
2802  *
2803  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2804  * Callers are not required to check the return value.
2805  *
2806  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2807  */
2808 int pci_try_set_mwi(struct pci_dev *dev)
2809 {
2810 	int rc = pci_set_mwi(dev);
2811 	return rc;
2812 }
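
/*
 * Usage note: because MWI is purely an optimization, drivers normally
 * call the _try_ variant at probe time and ignore the result
 * (hypothetical fragment):
 *
 *	pci_try_set_mwi(pdev);
 *
 * Failure is harmless here; the device simply runs without MWI.
 */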
2813 
2814 /**
2815  * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2816  * @dev: the PCI device to disable
2817  *
2818  * Disables PCI Memory-Write-Invalidate transaction on the device
2819  */
2820 void
2821 pci_clear_mwi(struct pci_dev *dev)
2822 {
2823 	u16 cmd;
2824 
2825 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2826 	if (cmd & PCI_COMMAND_INVALIDATE) {
2827 		cmd &= ~PCI_COMMAND_INVALIDATE;
2828 		pci_write_config_word(dev, PCI_COMMAND, cmd);
2829 	}
2830 }
2831 #endif /* ! PCI_DISABLE_MWI */
2832 
2833 /**
2834  * pci_intx - enables/disables PCI INTx for device dev
2835  * @pdev: the PCI device to operate on
2836  * @enable: boolean: whether to enable or disable PCI INTx
2837  *
2838  * Enables/disables PCI INTx for device dev
2839  */
2840 void
2841 pci_intx(struct pci_dev *pdev, int enable)
2842 {
2843 	u16 pci_command, new;
2844 
2845 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2846 
2847 	if (enable) {
2848 		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2849 	} else {
2850 		new = pci_command | PCI_COMMAND_INTX_DISABLE;
2851 	}
2852 
2853 	if (new != pci_command) {
2854 		struct pci_devres *dr;
2855 
2856 		pci_write_config_word(pdev, PCI_COMMAND, new);
2857 
2858 		dr = find_pci_dr(pdev);
2859 		if (dr && !dr->restore_intx) {
2860 			dr->restore_intx = 1;
2861 			dr->orig_intx = !enable;
2862 		}
2863 	}
2864 }
2865 
2866 /**
2867  * pci_intx_mask_supported - probe for INTx masking support
2868  * @dev: the PCI device to operate on
2869  *
2870  * Check if the device dev supports INTx masking via the config space
2871  * command word.
2872  */
2873 bool pci_intx_mask_supported(struct pci_dev *dev)
2874 {
2875 	bool mask_supported = false;
2876 	u16 orig, new;
2877 
2878 	pci_cfg_access_lock(dev);
2879 
2880 	pci_read_config_word(dev, PCI_COMMAND, &orig);
2881 	pci_write_config_word(dev, PCI_COMMAND,
2882 			      orig ^ PCI_COMMAND_INTX_DISABLE);
2883 	pci_read_config_word(dev, PCI_COMMAND, &new);
2884 
2885 	/*
2886 	 * There's no way to protect against hardware bugs or detect them
2887 	 * reliably, but as long as we know what the value should be, let's
2888 	 * go ahead and check it.
2889 	 */
2890 	if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
2891 		dev_err(&dev->dev, "Command register changed from "
2892 			"0x%x to 0x%x: driver or hardware bug?\n", orig, new);
2893 	} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
2894 		mask_supported = true;
2895 		pci_write_config_word(dev, PCI_COMMAND, orig);
2896 	}
2897 
2898 	pci_cfg_access_unlock(dev);
2899 	return mask_supported;
2900 }
2901 EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2902 
2903 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
2904 {
2905 	struct pci_bus *bus = dev->bus;
2906 	bool mask_updated = true;
2907 	u32 cmd_status_dword;
2908 	u16 origcmd, newcmd;
2909 	unsigned long flags;
2910 	bool irq_pending;
2911 
2912 	/*
2913 	 * We do a single dword read to retrieve both command and status.
2914 	 * Document assumptions that make this possible.
2915 	 */
2916 	BUILD_BUG_ON(PCI_COMMAND % 4);
2917 	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
2918 
2919 	raw_spin_lock_irqsave(&pci_lock, flags);
2920 
2921 	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
2922 
2923 	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
2924 
2925 	/*
2926 	 * Check interrupt status register to see whether our device
2927 	 * triggered the interrupt (when masking) or the next IRQ is
2928 	 * already pending (when unmasking).
2929 	 */
2930 	if (mask != irq_pending) {
2931 		mask_updated = false;
2932 		goto done;
2933 	}
2934 
2935 	origcmd = cmd_status_dword;
2936 	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
2937 	if (mask)
2938 		newcmd |= PCI_COMMAND_INTX_DISABLE;
2939 	if (newcmd != origcmd)
2940 		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
2941 
2942 done:
2943 	raw_spin_unlock_irqrestore(&pci_lock, flags);
2944 
2945 	return mask_updated;
2946 }
2947 
2948 /**
2949  * pci_check_and_mask_intx - mask INTx on pending interrupt
2950  * @dev: the PCI device to operate on
2951  *
2952  * Check if the device dev has its INTx line asserted, mask it and
2953  * return true in that case. False is returned if no interrupt was
2954  * pending.
2955  */
2956 bool pci_check_and_mask_intx(struct pci_dev *dev)
2957 {
2958 	return pci_check_and_set_intx_mask(dev, true);
2959 }
2960 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
2961 
2962 /**
2963  * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
2964  * @dev: the PCI device to operate on
2965  *
2966  * Check if the device dev has its INTx line asserted, unmask it if not
2967  * and return true. False is returned and the mask remains active if
2968  * there was still an interrupt pending.
2969  */
2970 bool pci_check_and_unmask_intx(struct pci_dev *dev)
2971 {
2972 	return pci_check_and_set_intx_mask(dev, false);
2973 }
2974 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
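
/*
 * Usage sketch (hypothetical handler, registered via
 * request_threaded_irq() and assuming pci_intx_mask_supported()
 * returned true at probe time): the two helpers above allow a
 * race-free mask-and-defer scheme on a shared INTx line.
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct pci_dev *pdev = data;
 *
 *		if (!pci_check_and_mask_intx(pdev))
 *			return IRQ_NONE;
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 * The threaded handler services the device and then calls
 * pci_check_and_unmask_intx() to re-enable the line.
 */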
2975 
2976 /**
2977  * pci_msi_off - disables any MSI or MSI-X capabilities
2978  * @dev: the PCI device to operate on
2979  *
2980  * If you want to use MSI, see pci_enable_msi() and friends.
2981  * This is a lower-level primitive that allows us to disable
2982  * MSI operation at the device level.
2983  */
2984 void pci_msi_off(struct pci_dev *dev)
2985 {
2986 	int pos;
2987 	u16 control;
2988 
2989 	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2990 	if (pos) {
2991 		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
2992 		control &= ~PCI_MSI_FLAGS_ENABLE;
2993 		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
2994 	}
2995 	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
2996 	if (pos) {
2997 		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
2998 		control &= ~PCI_MSIX_FLAGS_ENABLE;
2999 		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3000 	}
3001 }
3002 EXPORT_SYMBOL_GPL(pci_msi_off);
3003 
3004 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3005 {
3006 	return dma_set_max_seg_size(&dev->dev, size);
3007 }
3008 EXPORT_SYMBOL(pci_set_dma_max_seg_size);
3009 
3010 int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3011 {
3012 	return dma_set_seg_boundary(&dev->dev, mask);
3013 }
3014 EXPORT_SYMBOL(pci_set_dma_seg_boundary);
3015 
3016 static int pcie_flr(struct pci_dev *dev, int probe)
3017 {
3018 	int i;
3019 	int pos;
3020 	u32 cap;
3021 	u16 status, control;
3022 
3023 	pos = pci_pcie_cap(dev);
3024 	if (!pos)
3025 		return -ENOTTY;
3026 
3027 	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
3028 	if (!(cap & PCI_EXP_DEVCAP_FLR))
3029 		return -ENOTTY;
3030 
3031 	if (probe)
3032 		return 0;
3033 
3034 	/* Wait for the Transaction Pending bit to clear */
3035 	for (i = 0; i < 4; i++) {
3036 		if (i)
3037 			msleep((1 << (i - 1)) * 100);
3038 
3039 		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
3040 		if (!(status & PCI_EXP_DEVSTA_TRPND))
3041 			goto clear;
3042 	}
3043 
3044 	dev_err(&dev->dev, "transaction is not cleared; "
3045 			"proceeding with reset anyway\n");
3046 
3047 clear:
3048 	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
3049 	control |= PCI_EXP_DEVCTL_BCR_FLR;
3050 	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
3051 
3052 	msleep(100);
3053 
3054 	return 0;
3055 }
3056 
3057 static int pci_af_flr(struct pci_dev *dev, int probe)
3058 {
3059 	int i;
3060 	int pos;
3061 	u8 cap;
3062 	u8 status;
3063 
3064 	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3065 	if (!pos)
3066 		return -ENOTTY;
3067 
3068 	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
3069 	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3070 		return -ENOTTY;
3071 
3072 	if (probe)
3073 		return 0;
3074 
3075 	/* Wait for the Transaction Pending bit to clear */
3076 	for (i = 0; i < 4; i++) {
3077 		if (i)
3078 			msleep((1 << (i - 1)) * 100);
3079 
3080 		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
3081 		if (!(status & PCI_AF_STATUS_TP))
3082 			goto clear;
3083 	}
3084 
3085 	dev_err(&dev->dev, "transaction is not cleared; "
3086 			"proceeding with reset anyway\n");
3087 
3088 clear:
3089 	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
3090 	msleep(100);
3091 
3092 	return 0;
3093 }
3094 
3095 /**
3096  * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3097  * @dev: Device to reset.
3098  * @probe: If set, only check if the device can be reset this way.
3099  *
3100  * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3101  * unset, it will be reinitialized internally when going from PCI_D3hot to
3102  * PCI_D0.  If that's the case and the device is not in a low-power state
3103  * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3104  *
3105  * NOTE: This causes the caller to sleep for twice the device power transition
3106  * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3107  * by default (i.e. unless the @dev's d3_delay field has a different value).
3108  * Moreover, only devices in D0 can be reset by this function.
3109  */
3110 static int pci_pm_reset(struct pci_dev *dev, int probe)
3111 {
3112 	u16 csr;
3113 
3114 	if (!dev->pm_cap)
3115 		return -ENOTTY;
3116 
3117 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3118 	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3119 		return -ENOTTY;
3120 
3121 	if (probe)
3122 		return 0;
3123 
3124 	if (dev->current_state != PCI_D0)
3125 		return -EINVAL;
3126 
3127 	csr &= ~PCI_PM_CTRL_STATE_MASK;
3128 	csr |= PCI_D3hot;
3129 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3130 	pci_dev_d3_sleep(dev);
3131 
3132 	csr &= ~PCI_PM_CTRL_STATE_MASK;
3133 	csr |= PCI_D0;
3134 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3135 	pci_dev_d3_sleep(dev);
3136 
3137 	return 0;
3138 }
3139 
3140 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3141 {
3142 	u16 ctrl;
3143 	struct pci_dev *pdev;
3144 
3145 	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
3146 		return -ENOTTY;
3147 
3148 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3149 		if (pdev != dev)
3150 			return -ENOTTY;
3151 
3152 	if (probe)
3153 		return 0;
3154 
3155 	pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
3156 	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3157 	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3158 	msleep(100);
3159 
3160 	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3161 	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3162 	msleep(100);
3163 
3164 	return 0;
3165 }
3166 
3167 static int pci_dev_reset(struct pci_dev *dev, int probe)
3168 {
3169 	int rc;
3170 
3171 	might_sleep();
3172 
3173 	if (!probe) {
3174 		pci_cfg_access_lock(dev);
3175 		/* block PM suspend, driver probe, etc. */
3176 		device_lock(&dev->dev);
3177 	}
3178 
3179 	rc = pci_dev_specific_reset(dev, probe);
3180 	if (rc != -ENOTTY)
3181 		goto done;
3182 
3183 	rc = pcie_flr(dev, probe);
3184 	if (rc != -ENOTTY)
3185 		goto done;
3186 
3187 	rc = pci_af_flr(dev, probe);
3188 	if (rc != -ENOTTY)
3189 		goto done;
3190 
3191 	rc = pci_pm_reset(dev, probe);
3192 	if (rc != -ENOTTY)
3193 		goto done;
3194 
3195 	rc = pci_parent_bus_reset(dev, probe);
3196 done:
3197 	if (!probe) {
3198 		device_unlock(&dev->dev);
3199 		pci_cfg_access_unlock(dev);
3200 	}
3201 
3202 	return rc;
3203 }
3204 
3205 /**
3206  * __pci_reset_function - reset a PCI device function
3207  * @dev: PCI device to reset
3208  *
3209  * Some devices allow an individual function to be reset without affecting
3210  * other functions in the same device.  The PCI device must be responsive
3211  * to PCI config space in order to use this function.
3212  *
3213  * The device function is presumed to be unused when this function is called.
3214  * Resetting the device will make the contents of PCI configuration space
3215  * random, so any caller of this must be prepared to reinitialise the
3216  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3217  * etc.
3218  *
3219  * Returns 0 if the device function was successfully reset or negative if the
3220  * device doesn't support resetting a single function.
3221  */
3222 int __pci_reset_function(struct pci_dev *dev)
3223 {
3224 	return pci_dev_reset(dev, 0);
3225 }
3226 EXPORT_SYMBOL_GPL(__pci_reset_function);
3227 
3228 /**
3229  * __pci_reset_function_locked - reset a PCI device function while holding
3230  * the @dev mutex lock.
3231  * @dev: PCI device to reset
3232  *
3233  * Some devices allow an individual function to be reset without affecting
3234  * other functions in the same device.  The PCI device must be responsive
3235  * to PCI config space in order to use this function.
3236  *
3237  * The device function is presumed to be unused and the caller is holding
3238  * the device mutex lock when this function is called.
3239  * Resetting the device will make the contents of PCI configuration space
3240  * random, so any caller of this must be prepared to reinitialise the
3241  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3242  * etc.
3243  *
3244  * Returns 0 if the device function was successfully reset or negative if the
3245  * device doesn't support resetting a single function.
3246  */
3247 int __pci_reset_function_locked(struct pci_dev *dev)
3248 {
3249 	return pci_dev_reset(dev, 1);
3250 }
3251 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3252 
3253 /**
3254  * pci_probe_reset_function - check whether the device can be safely reset
3255  * @dev: PCI device to reset
3256  *
3257  * Some devices allow an individual function to be reset without affecting
3258  * other functions in the same device.  The PCI device must be responsive
3259  * to PCI config space in order to use this function.
3260  *
3261  * Returns 0 if the device function can be reset or negative if the
3262  * device doesn't support resetting a single function.
3263  */
3264 int pci_probe_reset_function(struct pci_dev *dev)
3265 {
3266 	return pci_dev_reset(dev, 1);
3267 }
3268 
3269 /**
3270  * pci_reset_function - quiesce and reset a PCI device function
3271  * @dev: PCI device to reset
3272  *
3273  * Some devices allow an individual function to be reset without affecting
3274  * other functions in the same device.  The PCI device must be responsive
3275  * to PCI config space in order to use this function.
3276  *
3277  * This function does not just reset the PCI portion of a device, but
3278  * clears all the state associated with the device.  This function differs
3279  * from __pci_reset_function in that it saves and restores device state
3280  * over the reset.
3281  *
3282  * Returns 0 if the device function was successfully reset or negative if the
3283  * device doesn't support resetting a single function.
3284  */
3285 int pci_reset_function(struct pci_dev *dev)
3286 {
3287 	int rc;
3288 
3289 	rc = pci_dev_reset(dev, 1);
3290 	if (rc)
3291 		return rc;
3292 
3293 	pci_save_state(dev);
3294 
3295 	/*
3296 	 * both INTx and MSI are disabled after the Interrupt Disable bit
3297 	 * is set and the Bus Master bit is cleared.
3298 	 */
3299 	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3300 
3301 	rc = pci_dev_reset(dev, 0);
3302 
3303 	pci_restore_state(dev);
3304 
3305 	return rc;
3306 }
3307 EXPORT_SYMBOL_GPL(pci_reset_function);
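
/*
 * Usage sketch (hypothetical): code that hands a quiesced function to a
 * new owner (a device assignment backend, for instance) can rely on the
 * save/restore behaviour described above:
 *
 *	if (pci_reset_function(pdev))
 *		dev_warn(&pdev->dev, "function reset not supported\n");
 */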
3308 
3309 /**
3310  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3311  * @dev: PCI device to query
3312  *
3313  * Returns mmrbc: maximum designed memory read count in bytes
3314  *    or appropriate error value.
3315  */
3316 int pcix_get_max_mmrbc(struct pci_dev *dev)
3317 {
3318 	int cap;
3319 	u32 stat;
3320 
3321 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3322 	if (!cap)
3323 		return -EINVAL;
3324 
3325 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3326 		return -EINVAL;
3327 
3328 	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
3329 }
3330 EXPORT_SYMBOL(pcix_get_max_mmrbc);
3331 
3332 /**
3333  * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3334  * @dev: PCI device to query
3335  *
3336  * Returns mmrbc: maximum memory read count in bytes
3337  *    or appropriate error value.
3338  */
3339 int pcix_get_mmrbc(struct pci_dev *dev)
3340 {
3341 	int cap;
3342 	u16 cmd;
3343 
3344 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3345 	if (!cap)
3346 		return -EINVAL;
3347 
3348 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3349 		return -EINVAL;
3350 
3351 	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
3352 }
3353 EXPORT_SYMBOL(pcix_get_mmrbc);
3354 
3355 /**
3356  * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3357  * @dev: PCI device to query
3358  * @mmrbc: maximum memory read count in bytes
3359  *    valid values are 512, 1024, 2048, 4096
3360  *
3361  * If possible sets the maximum memory read byte count; some bridges have errata
3362  * that prevent this.
3363  */
3364 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3365 {
3366 	int cap;
3367 	u32 stat, v, o;
3368 	u16 cmd;
3369 
3370 	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
3371 		return -EINVAL;
3372 
3373 	v = ffs(mmrbc) - 10;
3374 
3375 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3376 	if (!cap)
3377 		return -EINVAL;
3378 
3379 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3380 		return -EINVAL;
3381 
3382 	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3383 		return -E2BIG;
3384 
3385 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3386 		return -EINVAL;
3387 
3388 	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3389 	if (o != v) {
3390 		if (v > o && dev->bus &&
3391 		   (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3392 			return -EIO;
3393 
3394 		cmd &= ~PCI_X_CMD_MAX_READ;
3395 		cmd |= v << 2;
3396 		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3397 			return -EIO;
3398 	}
3399 	return 0;
3400 }
3401 EXPORT_SYMBOL(pcix_set_mmrbc);
3402 
3403 /**
3404  * pcie_get_readrq - get PCI Express read request size
3405  * @dev: PCI device to query
3406  *
3407  * Returns maximum memory read request in bytes
3408  *    or appropriate error value.
3409  */
3410 int pcie_get_readrq(struct pci_dev *dev)
3411 {
3412 	int ret, cap;
3413 	u16 ctl;
3414 
3415 	cap = pci_pcie_cap(dev);
3416 	if (!cap)
3417 		return -EINVAL;
3418 
3419 	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3420 	if (!ret)
3421 		ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3422 
3423 	return ret;
3424 }
3425 EXPORT_SYMBOL(pcie_get_readrq);
3426 
3427 /**
3428  * pcie_set_readrq - set PCI Express maximum memory read request
3429  * @dev: PCI device to query
3430  * @rq: maximum memory read count in bytes
3431  *    valid values are 128, 256, 512, 1024, 2048, 4096
3432  *
3433  * If possible sets maximum memory read request in bytes
3434  */
3435 int pcie_set_readrq(struct pci_dev *dev, int rq)
3436 {
3437 	int cap, err = -EINVAL;
3438 	u16 ctl, v;
3439 
3440 	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
3441 		goto out;
3442 
3443 	cap = pci_pcie_cap(dev);
3444 	if (!cap)
3445 		goto out;
3446 
3447 	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3448 	if (err)
3449 		goto out;
3450 	/*
3451 	 * If using the "performance" PCIe config, we clamp the
3452 	 * read rq size to the max packet size to prevent the
3453 	 * host bridge generating requests larger than we can
3454 	 * cope with
3455 	 */
3456 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3457 		int mps = pcie_get_mps(dev);
3458 
3459 		if (mps < 0)
3460 			return mps;
3461 		if (mps < rq)
3462 			rq = mps;
3463 	}
3464 
3465 	v = (ffs(rq) - 8) << 12;
3466 
3467 	if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
3468 		ctl &= ~PCI_EXP_DEVCTL_READRQ;
3469 		ctl |= v;
3470 		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3471 	}
3472 
3473 out:
3474 	return err;
3475 }
3476 EXPORT_SYMBOL(pcie_set_readrq);
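
/*
 * Usage sketch (hypothetical): a driver issuing large DMA reads may ask
 * for the biggest read request size and quietly keep the default if the
 * value is clamped or rejected:
 *
 *	if (pcie_set_readrq(pdev, 4096))
 *		dev_dbg(&pdev->dev, "keeping MRRS of %d\n",
 *			pcie_get_readrq(pdev));
 */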
3477 
3478 /**
3479  * pcie_get_mps - get PCI Express maximum payload size
3480  * @dev: PCI device to query
3481  *
3482  * Returns maximum payload size in bytes
3483  *    or appropriate error value.
3484  */
3485 int pcie_get_mps(struct pci_dev *dev)
3486 {
3487 	int ret, cap;
3488 	u16 ctl;
3489 
3490 	cap = pci_pcie_cap(dev);
3491 	if (!cap)
3492 		return -EINVAL;
3493 
3494 	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3495 	if (!ret)
3496 		ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3497 
3498 	return ret;
3499 }
3500 
3501 /**
3502  * pcie_set_mps - set PCI Express maximum payload size
3503  * @dev: PCI device to query
3504  * @mps: maximum payload size in bytes
3505  *    valid values are 128, 256, 512, 1024, 2048, 4096
3506  *
3507  * If possible sets maximum payload size
3508  */
3509 int pcie_set_mps(struct pci_dev *dev, int mps)
3510 {
3511 	int cap, err = -EINVAL;
3512 	u16 ctl, v;
3513 
3514 	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3515 		goto out;
3516 
3517 	v = ffs(mps) - 8;
3518 	if (v > dev->pcie_mpss)
3519 		goto out;
3520 	v <<= 5;
3521 
3522 	cap = pci_pcie_cap(dev);
3523 	if (!cap)
3524 		goto out;
3525 
3526 	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3527 	if (err)
3528 		goto out;
3529 
3530 	if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3531 		ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3532 		ctl |= v;
3533 		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3534 	}
3535 out:
3536 	return err;
3537 }
3538 
3539 /**
3540  * pci_select_bars - Make BAR mask from the type of resource
3541  * @dev: the PCI device for which BAR mask is made
3542  * @flags: resource type mask to be selected
3543  *
3544  * This helper routine makes a BAR mask from the type of resource.
3545  */
3546 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3547 {
3548 	int i, bars = 0;
3549 	for (i = 0; i < PCI_NUM_RESOURCES; i++)
3550 		if (pci_resource_flags(dev, i) & flags)
3551 			bars |= (1 << i);
3552 	return bars;
3553 }
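
/*
 * Usage sketch (hypothetical): pci_select_bars() pairs naturally with
 * pci_request_selected_regions() when a driver only touches one kind of
 * resource:
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *	int err = pci_request_selected_regions(pdev, bars, "foo");
 */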
3554 
3555 /**
3556  * pci_resource_bar - get position of the BAR associated with a resource
3557  * @dev: the PCI device
3558  * @resno: the resource number
3559  * @type: the BAR type to be filled in
3560  *
3561  * Returns BAR position in config space, or 0 if the BAR is invalid.
3562  */
3563 int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3564 {
3565 	int reg;
3566 
3567 	if (resno < PCI_ROM_RESOURCE) {
3568 		*type = pci_bar_unknown;
3569 		return PCI_BASE_ADDRESS_0 + 4 * resno;
3570 	} else if (resno == PCI_ROM_RESOURCE) {
3571 		*type = pci_bar_mem32;
3572 		return dev->rom_base_reg;
3573 	} else if (resno < PCI_BRIDGE_RESOURCES) {
3574 		/* device specific resource */
3575 		reg = pci_iov_resource_bar(dev, resno, type);
3576 		if (reg)
3577 			return reg;
3578 	}
3579 
3580 	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
3581 	return 0;
3582 }
3583 
3584 /* Some architectures require additional programming to enable VGA */
3585 static arch_set_vga_state_t arch_set_vga_state;
3586 
3587 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3588 {
3589 	arch_set_vga_state = func;	/* NULL disables */
3590 }
3591 
3592 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3593 		      unsigned int command_bits, u32 flags)
3594 {
3595 	if (arch_set_vga_state)
3596 		return arch_set_vga_state(dev, decode, command_bits,
3597 						flags);
3598 	return 0;
3599 }
3600 
3601 /**
3602  * pci_set_vga_state - set VGA decode state on device and parents if requested
3603  * @dev: the PCI device
3604  * @decode: true = enable decoding, false = disable decoding
3605  * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3606  * @flags: traverse ancestors and change bridges
3607  * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
3608  */
3609 int pci_set_vga_state(struct pci_dev *dev, bool decode,
3610 		      unsigned int command_bits, u32 flags)
3611 {
3612 	struct pci_bus *bus;
3613 	struct pci_dev *bridge;
3614 	u16 cmd;
3615 	int rc;
3616 
3617 	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
3618 
3619 	/* ARCH specific VGA enables */
3620 	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3621 	if (rc)
3622 		return rc;
3623 
3624 	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3625 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
3626 		if (decode)
3627 			cmd |= command_bits;
3628 		else
3629 			cmd &= ~command_bits;
3630 		pci_write_config_word(dev, PCI_COMMAND, cmd);
3631 	}
3632 
3633 	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3634 		return 0;
3635 
3636 	bus = dev->bus;
3637 	while (bus) {
3638 		bridge = bus->self;
3639 		if (bridge) {
3640 			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3641 					     &cmd);
3642 			if (decode)
3643 				cmd |= PCI_BRIDGE_CTL_VGA;
3644 			else
3645 				cmd &= ~PCI_BRIDGE_CTL_VGA;
3646 			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3647 					      cmd);
3648 		}
3649 		bus = bus->parent;
3650 	}
3651 	return 0;
3652 }
3653 
3654 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3655 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
3656 static DEFINE_SPINLOCK(resource_alignment_lock);
3657 
3658 /**
3659  * pci_specified_resource_alignment - get resource alignment specified by user.
3660  * @dev: the PCI device to get
3661  *
3662  * RETURNS: Resource alignment if it is specified.
3663  *          Zero if it is not specified.
3664  */
3665 resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
3666 {
3667 	int seg, bus, slot, func, align_order, count;
3668 	resource_size_t align = 0;
3669 	char *p;
3670 
3671 	spin_lock(&resource_alignment_lock);
3672 	p = resource_alignment_param;
3673 	while (*p) {
3674 		count = 0;
3675 		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
3676 							p[count] == '@') {
3677 			p += count + 1;
3678 		} else {
3679 			align_order = -1;
3680 		}
3681 		if (sscanf(p, "%x:%x:%x.%x%n",
3682 			&seg, &bus, &slot, &func, &count) != 4) {
3683 			seg = 0;
3684 			if (sscanf(p, "%x:%x.%x%n",
3685 					&bus, &slot, &func, &count) != 3) {
3686 				/* Invalid format */
3687 				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
3688 					p);
3689 				break;
3690 			}
3691 		}
3692 		p += count;
3693 		if (seg == pci_domain_nr(dev->bus) &&
3694 			bus == dev->bus->number &&
3695 			slot == PCI_SLOT(dev->devfn) &&
3696 			func == PCI_FUNC(dev->devfn)) {
3697 			if (align_order == -1) {
3698 				align = PAGE_SIZE;
3699 			} else {
3700 				align = (resource_size_t)1 << align_order;
3701 			}
3702 			/* Found */
3703 			break;
3704 		}
3705 		if (*p != ';' && *p != ',') {
3706 			/* End of param or invalid format */
3707 			break;
3708 		}
3709 		p++;
3710 	}
3711 	spin_unlock(&resource_alignment_lock);
3712 	return align;
3713 }
3714 
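/*
 * For reference (derived from the parser above): each entry has the
 * form [<order>@][<domain>:]<bus>:<slot>.<func>, with entries separated
 * by ';' or ','.  When <order> is omitted, PAGE_SIZE alignment is used.
 * For example:
 *
 *	pci=resource_alignment=20@0000:01:00.0;02:00.0
 *
 * requests 2^20 (1 MB) alignment for 0000:01:00.0 and page alignment
 * for device 02:00.0 in domain 0.
 */
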
3715 /**
3716  * pci_is_reassigndev - check if specified PCI device is a reassignment target
3717  * @dev: the PCI device to check
3718  *
3719  * RETURNS: non-zero if the PCI device is a target device to reassign,
3720  *          zero otherwise.
3721  */
3722 int pci_is_reassigndev(struct pci_dev *dev)
3723 {
3724 	return (pci_specified_resource_alignment(dev) != 0);
3725 }
3726 
3727 /*
3728  * This function disables memory decoding and releases memory resources
3729  * of the device specified by the kernel boot parameter 'pci=resource_alignment='.
3730  * It also rounds up resource sizes to the specified alignment.
3731  * Later on, the kernel will assign page-aligned memory resource back
3732  * to the device.
3733  */
3734 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
3735 {
3736 	int i;
3737 	struct resource *r;
3738 	resource_size_t align, size;
3739 	u16 command;
3740 
3741 	if (!pci_is_reassigndev(dev))
3742 		return;
3743 
3744 	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
3745 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
3746 		dev_warn(&dev->dev,
3747 			"Can't reassign resources to host bridge.\n");
3748 		return;
3749 	}
3750 
3751 	dev_info(&dev->dev,
3752 		"Disabling memory decoding and releasing memory resources.\n");
3753 	pci_read_config_word(dev, PCI_COMMAND, &command);
3754 	command &= ~PCI_COMMAND_MEMORY;
3755 	pci_write_config_word(dev, PCI_COMMAND, command);
3756 
3757 	align = pci_specified_resource_alignment(dev);
3758 	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
3759 		r = &dev->resource[i];
3760 		if (!(r->flags & IORESOURCE_MEM))
3761 			continue;
3762 		size = resource_size(r);
3763 		if (size < align) {
3764 			size = align;
3765 			dev_info(&dev->dev,
3766 				"Rounding up size of resource #%d to %#llx.\n",
3767 				i, (unsigned long long)size);
3768 		}
3769 		r->end = size - 1;
3770 		r->start = 0;
3771 	}
3772 	/*
3773 	 * Need to disable the bridge's resource window so that the
3774 	 * kernel can reassign a new window later on.
3775 	 */
3776 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
3777 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
3778 		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
3779 			r = &dev->resource[i];
3780 			if (!(r->flags & IORESOURCE_MEM))
3781 				continue;
3782 			r->end = resource_size(r) - 1;
3783 			r->start = 0;
3784 		}
3785 		pci_disable_bridge_window(dev);
3786 	}
3787 }
3788 
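/*
 * Worked example (illustrative): with "pci=resource_alignment=20@..."
 * matching this device, align is 2^20 = 0x100000.  A 4K memory BAR
 * (resource_size() == 0x1000) is rounded up, leaving start == 0 and
 * end == 0xfffff, so the allocator later assigns a 1 MB-aligned,
 * 1 MB-sized window for it.
 */
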
3789 ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3790 {
3791 	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3792 		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3793 	spin_lock(&resource_alignment_lock);
3794 	strncpy(resource_alignment_param, buf, count);
3795 	resource_alignment_param[count] = '\0';
3796 	spin_unlock(&resource_alignment_lock);
3797 	return count;
3798 }
3799 
3800 ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3801 {
3802 	size_t count;
3803 	spin_lock(&resource_alignment_lock);
3804 	count = snprintf(buf, size, "%s", resource_alignment_param);
3805 	spin_unlock(&resource_alignment_lock);
3806 	return count;
3807 }
3808 
3809 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3810 {
3811 	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3812 }
3813 
3814 static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3815 					const char *buf, size_t count)
3816 {
3817 	return pci_set_resource_alignment_param(buf, count);
3818 }
3819 
3820 BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3821 					pci_resource_alignment_store);
3822 
3823 static int __init pci_resource_alignment_sysfs_init(void)
3824 {
3825 	return bus_create_file(&pci_bus_type,
3826 					&bus_attr_resource_alignment);
3827 }
3828 
3829 late_initcall(pci_resource_alignment_sysfs_init);
3830 
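/*
 * Illustrative usage (not part of the original file): BUS_ATTR() above
 * creates /sys/bus/pci/resource_alignment, so the parameter can also be
 * inspected and updated at run time, e.g. from a shell:
 *
 *	echo "20@0000:01:00.0" > /sys/bus/pci/resource_alignment
 *	cat /sys/bus/pci/resource_alignment
 */
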
3831 static void __devinit pci_no_domains(void)
3832 {
3833 #ifdef CONFIG_PCI_DOMAINS
3834 	pci_domains_supported = 0;
3835 #endif
3836 }
3837 
3838 /**
3839  * pci_ext_cfg_avail - can we access extended PCI config space?
3840  * @dev: The PCI device of the root bridge.
3841  *
3842  * Returns 1 if we can access PCI extended config space (offsets
3843  * greater than 0xff). This is the default implementation. Architecture
3844  * implementations can override this.
3845  */
3846 int __weak pci_ext_cfg_avail(struct pci_dev *dev)
3847 {
3848 	return 1;
3849 }
3850 
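/*
 * Illustrative sketch (not part of the original file): because
 * pci_ext_cfg_avail() is weak, an architecture can supply a strong
 * definition, e.g. to refuse extended config space when no suitable
 * MMCONFIG region covers the device.  mmconfig_covers() is a
 * hypothetical helper.
 */
#if 0
int pci_ext_cfg_avail(struct pci_dev *dev)
{
	return mmconfig_covers(dev) ? 1 : 0;
}
#endif
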
3851 void __weak pci_fixup_cardbus(struct pci_bus *bus)
3852 {
3853 }
3854 EXPORT_SYMBOL(pci_fixup_cardbus);
3855 
3856 static int __init pci_setup(char *str)
3857 {
3858 	while (str) {
3859 		char *k = strchr(str, ',');
3860 		if (k)
3861 			*k++ = 0;
3862 		if (*str && (str = pcibios_setup(str)) && *str) {
3863 			if (!strcmp(str, "nomsi")) {
3864 				pci_no_msi();
3865 			} else if (!strcmp(str, "noaer")) {
3866 				pci_no_aer();
3867 			} else if (!strncmp(str, "realloc=", 8)) {
3868 				pci_realloc_get_opt(str + 8);
3869 			} else if (!strncmp(str, "realloc", 7)) {
3870 				pci_realloc_get_opt("on");
3871 			} else if (!strcmp(str, "nodomains")) {
3872 				pci_no_domains();
3873 			} else if (!strncmp(str, "noari", 5)) {
3874 				pcie_ari_disabled = true;
3875 			} else if (!strncmp(str, "cbiosize=", 9)) {
3876 				pci_cardbus_io_size = memparse(str + 9, &str);
3877 			} else if (!strncmp(str, "cbmemsize=", 10)) {
3878 				pci_cardbus_mem_size = memparse(str + 10, &str);
3879 			} else if (!strncmp(str, "resource_alignment=", 19)) {
3880 				pci_set_resource_alignment_param(str + 19,
3881 							strlen(str + 19));
3882 			} else if (!strncmp(str, "ecrc=", 5)) {
3883 				pcie_ecrc_get_policy(str + 5);
3884 			} else if (!strncmp(str, "hpiosize=", 9)) {
3885 				pci_hotplug_io_size = memparse(str + 9, &str);
3886 			} else if (!strncmp(str, "hpmemsize=", 10)) {
3887 				pci_hotplug_mem_size = memparse(str + 10, &str);
3888 			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
3889 				pcie_bus_config = PCIE_BUS_TUNE_OFF;
3890 			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
3891 				pcie_bus_config = PCIE_BUS_SAFE;
3892 			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
3893 				pcie_bus_config = PCIE_BUS_PERFORMANCE;
3894 			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
3895 				pcie_bus_config = PCIE_BUS_PEER2PEER;
3896 			} else {
3897 				printk(KERN_ERR "PCI: Unknown option `%s'\n",
3898 						str);
3899 			}
3900 		}
3901 		str = k;
3902 	}
3903 	return 0;
3904 }
3905 early_param("pci", pci_setup);
3906 
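/*
 * For reference: the options parsed above combine on a single "pci="
 * parameter, comma-separated, e.g.:
 *
 *	pci=nomsi,hpmemsize=8M,resource_alignment=20@0000:01:00.0
 *
 * disables MSI, reserves 8 MB of memory space per hotplug bridge, and
 * requests 1 MB alignment for device 0000:01:00.0.  Note that because
 * the string is split on ',' here first, multiple resource_alignment
 * entries on the command line must be separated by ';'.
 */
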
3907 EXPORT_SYMBOL(pci_reenable_device);
3908 EXPORT_SYMBOL(pci_enable_device_io);
3909 EXPORT_SYMBOL(pci_enable_device_mem);
3910 EXPORT_SYMBOL(pci_enable_device);
3911 EXPORT_SYMBOL(pcim_enable_device);
3912 EXPORT_SYMBOL(pcim_pin_device);
3913 EXPORT_SYMBOL(pci_disable_device);
3914 EXPORT_SYMBOL(pci_find_capability);
3915 EXPORT_SYMBOL(pci_bus_find_capability);
3916 EXPORT_SYMBOL(pci_release_regions);
3917 EXPORT_SYMBOL(pci_request_regions);
3918 EXPORT_SYMBOL(pci_request_regions_exclusive);
3919 EXPORT_SYMBOL(pci_release_region);
3920 EXPORT_SYMBOL(pci_request_region);
3921 EXPORT_SYMBOL(pci_request_region_exclusive);
3922 EXPORT_SYMBOL(pci_release_selected_regions);
3923 EXPORT_SYMBOL(pci_request_selected_regions);
3924 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3925 EXPORT_SYMBOL(pci_set_master);
3926 EXPORT_SYMBOL(pci_clear_master);
3927 EXPORT_SYMBOL(pci_set_mwi);
3928 EXPORT_SYMBOL(pci_try_set_mwi);
3929 EXPORT_SYMBOL(pci_clear_mwi);
3930 EXPORT_SYMBOL_GPL(pci_intx);
3931 EXPORT_SYMBOL(pci_assign_resource);
3932 EXPORT_SYMBOL(pci_find_parent_resource);
3933 EXPORT_SYMBOL(pci_select_bars);
3934 
3935 EXPORT_SYMBOL(pci_set_power_state);
3936 EXPORT_SYMBOL(pci_save_state);
3937 EXPORT_SYMBOL(pci_restore_state);
3938 EXPORT_SYMBOL(pci_pme_capable);
3939 EXPORT_SYMBOL(pci_pme_active);
3940 EXPORT_SYMBOL(pci_wake_from_d3);
3941 EXPORT_SYMBOL(pci_target_state);
3942 EXPORT_SYMBOL(pci_prepare_to_sleep);
3943 EXPORT_SYMBOL(pci_back_from_sleep);
3944 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
3945