xref: /openbmc/linux/drivers/pci/pci.c (revision 89a74ecccd1f78e51faf6287e5c0e93a92ac096e)
1 /*
2  *	PCI Bus Services, see include/linux/pci.h for further explanation.
3  *
4  *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
5  *	David Mosberger-Tang
6  *
7  *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/delay.h>
12 #include <linux/init.h>
13 #include <linux/pci.h>
14 #include <linux/pm.h>
15 #include <linux/module.h>
16 #include <linux/spinlock.h>
17 #include <linux/string.h>
18 #include <linux/log2.h>
19 #include <linux/pci-aspm.h>
20 #include <linux/pm_wakeup.h>
21 #include <linux/interrupt.h>
22 #include <linux/device.h>
23 #include <linux/pm_runtime.h>
24 #include <asm/setup.h>
25 #include "pci.h"
26 
27 const char *pci_power_names[] = {
28 	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
29 };
30 EXPORT_SYMBOL_GPL(pci_power_names);
31 
32 int isa_dma_bridge_buggy;
33 EXPORT_SYMBOL(isa_dma_bridge_buggy);
34 
35 int pci_pci_problems;
36 EXPORT_SYMBOL(pci_pci_problems);
37 
38 unsigned int pci_pm_d3_delay;
39 
40 static void pci_dev_d3_sleep(struct pci_dev *dev)
41 {
42 	unsigned int delay = dev->d3_delay;
43 
44 	if (delay < pci_pm_d3_delay)
45 		delay = pci_pm_d3_delay;
46 
47 	msleep(delay);
48 }
49 
50 #ifdef CONFIG_PCI_DOMAINS
51 int pci_domains_supported = 1;
52 #endif
53 
54 #define DEFAULT_CARDBUS_IO_SIZE		(256)
55 #define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
56 /* pci=cbmemsize=nnM,cbiosize=nn can override this */
57 unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
58 unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
59 
60 #define DEFAULT_HOTPLUG_IO_SIZE		(256)
61 #define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
62 /* pci=hpmemsize=nnM,hpiosize=nn can override this */
63 unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
64 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
65 
66 /*
67  * The default CLS is used if the arch didn't set CLS explicitly and not
68  * all PCI devices agree on the same value.  The arch can override either
69  * the dfl or the actual value as it sees fit.  Don't forget this is
70  * measured in 32-bit words, not bytes.
71  */
72 u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
73 u8 pci_cache_line_size;
74 
75 /**
76  * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
77  * @bus: pointer to PCI bus structure to search
78  *
79  * Given a PCI bus, returns the highest PCI bus number present in the set
80  * including the given PCI bus and its list of child PCI buses.
81  */
82 unsigned char pci_bus_max_busnr(struct pci_bus *bus)
83 {
84 	struct list_head *tmp;
85 	unsigned char max, n;
86 
87 	max = bus->subordinate;
88 	list_for_each(tmp, &bus->children) {
89 		n = pci_bus_max_busnr(pci_bus_b(tmp));
90 		if (n > max)
91 			max = n;
92 	}
93 	return max;
94 }
95 EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
96 
97 #ifdef CONFIG_HAS_IOMEM
98 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
99 {
100 	/*
101 	 * Make sure the BAR is actually a memory resource, not an IO resource
102 	 */
103 	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
104 		WARN_ON(1);
105 		return NULL;
106 	}
107 	return ioremap_nocache(pci_resource_start(pdev, bar),
108 				     pci_resource_len(pdev, bar));
109 }
110 EXPORT_SYMBOL_GPL(pci_ioremap_bar);
111 #endif
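
/*
 * Usage sketch (editorial addition, not part of the original file): a
 * driver's probe() might map its first memory BAR with pci_ioremap_bar().
 * The "foo" names below are hypothetical.
 */
#if 0
static int foo_map_regs(struct pci_dev *pdev)
{
	void __iomem *regs = pci_ioremap_bar(pdev, 0);

	if (!regs)
		return -ENOMEM;
	/* ... access registers via readl()/writel(); iounmap() on teardown */
	return 0;
}
#endif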
112 
113 #if 0
114 /**
115  * pci_max_busnr - returns maximum PCI bus number
116  *
117  * Returns the highest PCI bus number present in the system global list of
118  * PCI buses.
119  */
120 unsigned char __devinit
121 pci_max_busnr(void)
122 {
123 	struct pci_bus *bus = NULL;
124 	unsigned char max, n;
125 
126 	max = 0;
127 	while ((bus = pci_find_next_bus(bus)) != NULL) {
128 		n = pci_bus_max_busnr(bus);
129 		if (n > max)
130 			max = n;
131 	}
132 	return max;
133 }
134 
135 #endif  /*  0  */
136 
137 #define PCI_FIND_CAP_TTL	48
138 
139 static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
140 				   u8 pos, int cap, int *ttl)
141 {
142 	u8 id;
143 
144 	while ((*ttl)--) {
145 		pci_bus_read_config_byte(bus, devfn, pos, &pos);
146 		if (pos < 0x40)
147 			break;
148 		pos &= ~3;
149 		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
150 					 &id);
151 		if (id == 0xff)
152 			break;
153 		if (id == cap)
154 			return pos;
155 		pos += PCI_CAP_LIST_NEXT;
156 	}
157 	return 0;
158 }
159 
160 static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
161 			       u8 pos, int cap)
162 {
163 	int ttl = PCI_FIND_CAP_TTL;
164 
165 	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
166 }
167 
168 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
169 {
170 	return __pci_find_next_cap(dev->bus, dev->devfn,
171 				   pos + PCI_CAP_LIST_NEXT, cap);
172 }
173 EXPORT_SYMBOL_GPL(pci_find_next_capability);
174 
175 static int __pci_bus_find_cap_start(struct pci_bus *bus,
176 				    unsigned int devfn, u8 hdr_type)
177 {
178 	u16 status;
179 
180 	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
181 	if (!(status & PCI_STATUS_CAP_LIST))
182 		return 0;
183 
184 	switch (hdr_type) {
185 	case PCI_HEADER_TYPE_NORMAL:
186 	case PCI_HEADER_TYPE_BRIDGE:
187 		return PCI_CAPABILITY_LIST;
188 	case PCI_HEADER_TYPE_CARDBUS:
189 		return PCI_CB_CAPABILITY_LIST;
190 	default:
191 		return 0;
192 	}
193 
194 	return 0;
195 }
196 
197 /**
198  * pci_find_capability - query for devices' capabilities
199  * @dev: PCI device to query
200  * @cap: capability code
201  *
202  * Tell if a device supports a given PCI capability.
203  * Returns the address of the requested capability structure within the
204  * device's PCI configuration space or 0 in case the device does not
205  * support it.  Possible values for @cap:
206  *
207  *  %PCI_CAP_ID_PM           Power Management
208  *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
209  *  %PCI_CAP_ID_VPD          Vital Product Data
210  *  %PCI_CAP_ID_SLOTID       Slot Identification
211  *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
212  *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
213  *  %PCI_CAP_ID_PCIX         PCI-X
214  *  %PCI_CAP_ID_EXP          PCI Express
215  */
216 int pci_find_capability(struct pci_dev *dev, int cap)
217 {
218 	int pos;
219 
220 	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
221 	if (pos)
222 		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
223 
224 	return pos;
225 }
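
/*
 * Usage sketch (editorial addition, not part of the original file):
 * finding the first capability of a given type and then walking any
 * further instances with pci_find_next_capability().  The "foo" name
 * is hypothetical.
 */
#if 0
static void foo_list_ht_caps(struct pci_dev *pdev)
{
	int pos = pci_find_capability(pdev, PCI_CAP_ID_HT);

	while (pos) {
		dev_info(&pdev->dev, "HT capability at %#x\n", pos);
		pos = pci_find_next_capability(pdev, pos, PCI_CAP_ID_HT);
	}
}
#endif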
226 
227 /**
228  * pci_bus_find_capability - query for devices' capabilities
229  * @bus:   the PCI bus to query
230  * @devfn: PCI device to query
231  * @cap:   capability code
232  *
233  * Like pci_find_capability() but works for pci devices that do not have a
234  * pci_dev structure set up yet.
235  *
236  * Returns the address of the requested capability structure within the
237  * device's PCI configuration space or 0 in case the device does not
238  * support it.
239  */
240 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
241 {
242 	int pos;
243 	u8 hdr_type;
244 
245 	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
246 
247 	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
248 	if (pos)
249 		pos = __pci_find_next_cap(bus, devfn, pos, cap);
250 
251 	return pos;
252 }
253 
254 /**
255  * pci_find_ext_capability - Find an extended capability
256  * @dev: PCI device to query
257  * @cap: capability code
258  *
259  * Returns the address of the requested extended capability structure
260  * within the device's PCI configuration space or 0 if the device does
261  * not support it.  Possible values for @cap:
262  *
263  *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
264  *  %PCI_EXT_CAP_ID_VC		Virtual Channel
265  *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
266  *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
267  */
268 int pci_find_ext_capability(struct pci_dev *dev, int cap)
269 {
270 	u32 header;
271 	int ttl;
272 	int pos = PCI_CFG_SPACE_SIZE;
273 
274 	/* minimum 8 bytes per capability */
275 	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
276 
277 	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
278 		return 0;
279 
280 	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
281 		return 0;
282 
283 	/*
284 	 * If we have no capabilities, this is indicated by cap ID,
285 	 * cap version and next pointer all being 0.
286 	 */
287 	if (header == 0)
288 		return 0;
289 
290 	while (ttl-- > 0) {
291 		if (PCI_EXT_CAP_ID(header) == cap)
292 			return pos;
293 
294 		pos = PCI_EXT_CAP_NEXT(header);
295 		if (pos < PCI_CFG_SPACE_SIZE)
296 			break;
297 
298 		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
299 			break;
300 	}
301 
302 	return 0;
303 }
304 EXPORT_SYMBOL_GPL(pci_find_ext_capability);
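
/*
 * Usage sketch (editorial addition, not part of the original file):
 * probing for the Advanced Error Reporting extended capability.  The
 * "foo" name is hypothetical.
 */
#if 0
static bool foo_has_aer(struct pci_dev *pdev)
{
	return pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR) != 0;
}
#endif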
305 
306 static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
307 {
308 	int rc, ttl = PCI_FIND_CAP_TTL;
309 	u8 cap, mask;
310 
311 	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
312 		mask = HT_3BIT_CAP_MASK;
313 	else
314 		mask = HT_5BIT_CAP_MASK;
315 
316 	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
317 				      PCI_CAP_ID_HT, &ttl);
318 	while (pos) {
319 		rc = pci_read_config_byte(dev, pos + 3, &cap);
320 		if (rc != PCIBIOS_SUCCESSFUL)
321 			return 0;
322 
323 		if ((cap & mask) == ht_cap)
324 			return pos;
325 
326 		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
327 					      pos + PCI_CAP_LIST_NEXT,
328 					      PCI_CAP_ID_HT, &ttl);
329 	}
330 
331 	return 0;
332 }
333 /**
334  * pci_find_next_ht_capability - query a device's Hypertransport capabilities
335  * @dev: PCI device to query
336  * @pos: Position from which to continue searching
337  * @ht_cap: Hypertransport capability code
338  *
339  * To be used in conjunction with pci_find_ht_capability() to search for
340  * all capabilities matching @ht_cap. @pos should always be a value returned
341  * from pci_find_ht_capability().
342  *
343  * NB. To be 100% safe against broken PCI devices, the caller should take
344  * steps to avoid an infinite loop.
345  */
346 int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
347 {
348 	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
349 }
350 EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
351 
352 /**
353  * pci_find_ht_capability - query a device's Hypertransport capabilities
354  * @dev: PCI device to query
355  * @ht_cap: Hypertransport capability code
356  *
357  * Tell if a device supports a given Hypertransport capability.
358  * Returns an address within the device's PCI configuration space
359  * or 0 in case the device does not support the requested capability.
360  * The address points to the PCI capability, of type PCI_CAP_ID_HT,
361  * which has a Hypertransport capability matching @ht_cap.
362  */
363 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
364 {
365 	int pos;
366 
367 	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
368 	if (pos)
369 		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
370 
371 	return pos;
372 }
373 EXPORT_SYMBOL_GPL(pci_find_ht_capability);
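
/*
 * Usage sketch (editorial addition, not part of the original file):
 * walking all HyperTransport capabilities of one type while bounding the
 * loop, per the NB above.  The "foo" name is hypothetical.
 */
#if 0
static int foo_count_ht_msi_caps(struct pci_dev *pdev)
{
	int count = 0, ttl = 48;	/* guard against broken devices */
	int pos = pci_find_ht_capability(pdev, HT_CAPTYPE_MSI_MAPPING);

	while (pos && ttl--) {
		count++;
		pos = pci_find_next_ht_capability(pdev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
	return count;
}
#endif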
374 
375 /**
376  * pci_find_parent_resource - return resource region of parent bus of given region
377  * @dev: PCI device structure contains resources to be searched
378  * @res: child resource record for which parent is sought
379  *
380  *  For a given resource region of a given device, return the resource
381  *  region of the parent bus that the given region is contained in, or
382  *  where it should be allocated from.
383  */
384 struct resource *
385 pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
386 {
387 	const struct pci_bus *bus = dev->bus;
388 	int i;
389 	struct resource *best = NULL, *r;
390 
391 	pci_bus_for_each_resource(bus, r, i) {
392 		if (!r)
393 			continue;
394 		if (res->start && !(res->start >= r->start && res->end <= r->end))
395 			continue;	/* Not contained */
396 		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
397 			continue;	/* Wrong type */
398 		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
399 			return r;	/* Exact match */
400 		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
401 		if (r->flags & IORESOURCE_PREFETCH)
402 			continue;
403 		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
404 		if (!best)
405 			best = r;
406 	}
407 	return best;
408 }
409 
410 /**
411  * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
412  * @dev: PCI device to have its BARs restored
413  *
414  * Restore the BAR values for a given device, so as to make it
415  * accessible by its driver.
416  */
417 static void
418 pci_restore_bars(struct pci_dev *dev)
419 {
420 	int i;
421 
422 	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
423 		pci_update_resource(dev, i);
424 }
425 
426 static struct pci_platform_pm_ops *pci_platform_pm;
427 
428 int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
429 {
430 	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
431 	    || !ops->sleep_wake || !ops->can_wakeup)
432 		return -EINVAL;
433 	pci_platform_pm = ops;
434 	return 0;
435 }
436 
437 static inline bool platform_pci_power_manageable(struct pci_dev *dev)
438 {
439 	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
440 }
441 
442 static inline int platform_pci_set_power_state(struct pci_dev *dev,
443                                                 pci_power_t t)
444 {
445 	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
446 }
447 
448 static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
449 {
450 	return pci_platform_pm ?
451 			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
452 }
453 
454 static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
455 {
456 	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
457 }
458 
459 static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
460 {
461 	return pci_platform_pm ?
462 			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
463 }
464 
465 static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
466 {
467 	return pci_platform_pm ?
468 			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
469 }
470 
471 /**
472  * pci_raw_set_power_state - Use PCI PM registers to set the power state of
473  *                           given PCI device
474  * @dev: PCI device to handle.
475  * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
476  *
477  * RETURN VALUE:
478  * -EINVAL if the requested state is invalid.
479  * -EIO if device does not support PCI PM or its PM capabilities register has a
480  * wrong version, or device doesn't support the requested state.
481  * 0 if device already is in the requested state.
482  * 0 if device's power state has been successfully changed.
483  */
484 static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
485 {
486 	u16 pmcsr;
487 	bool need_restore = false;
488 
489 	/* Check if we're already there */
490 	if (dev->current_state == state)
491 		return 0;
492 
493 	if (!dev->pm_cap)
494 		return -EIO;
495 
496 	if (state < PCI_D0 || state > PCI_D3hot)
497 		return -EINVAL;
498 
499 	/* Validate current state:
500 	 * Can enter D0 from any state, but we can only go deeper
501 	 * to sleep if we're already in a low power state
502 	 */
503 	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
504 	    && dev->current_state > state) {
505 		dev_err(&dev->dev, "invalid power transition "
506 			"(from state %d to %d)\n", dev->current_state, state);
507 		return -EINVAL;
508 	}
509 
510 	/* check if this device supports the desired state */
511 	if ((state == PCI_D1 && !dev->d1_support)
512 	   || (state == PCI_D2 && !dev->d2_support))
513 		return -EIO;
514 
515 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
516 
517 	/* If we're (effectively) in D3, force entire word to 0.
518 	 * This doesn't affect PME_Status, disables PME_En, and
519 	 * sets PowerState to 0.
520 	 */
521 	switch (dev->current_state) {
522 	case PCI_D0:
523 	case PCI_D1:
524 	case PCI_D2:
525 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
526 		pmcsr |= state;
527 		break;
528 	case PCI_D3hot:
529 	case PCI_D3cold:
530 	case PCI_UNKNOWN: /* Boot-up */
531 		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
532 		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
533 			need_restore = true;
534 		/* Fall-through: force to D0 */
535 	default:
536 		pmcsr = 0;
537 		break;
538 	}
539 
540 	/* enter specified state */
541 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
542 
543 	/* Mandatory power management transition delays */
544 	/* see PCI PM 1.1 5.6.1 table 18 */
545 	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
546 		pci_dev_d3_sleep(dev);
547 	else if (state == PCI_D2 || dev->current_state == PCI_D2)
548 		udelay(PCI_PM_D2_DELAY);
549 
550 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
551 	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
552 	if (dev->current_state != state && printk_ratelimit())
553 		dev_info(&dev->dev, "Refused to change power state, "
554 			"currently in D%d\n", dev->current_state);
555 
556 	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
557 	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
558 	 * from D3hot to D0 _may_ perform an internal reset, thereby
559 	 * going to "D0 Uninitialized" rather than "D0 Initialized".
560 	 * For example, at least some versions of the 3c905B and the
561 	 * 3c556B exhibit this behaviour.
562 	 *
563 	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
564 	 * devices in a D3hot state at boot.  Consequently, we need to
565 	 * restore at least the BARs so that the device will be
566 	 * accessible to its driver.
567 	 */
568 	if (need_restore)
569 		pci_restore_bars(dev);
570 
571 	if (dev->bus->self)
572 		pcie_aspm_pm_state_change(dev->bus->self);
573 
574 	return 0;
575 }
576 
577 /**
578  * pci_update_current_state - Read PCI power state of given device from its
579  *                            PCI PM registers and cache it
580  * @dev: PCI device to handle.
581  * @state: State to cache in case the device doesn't have the PM capability
582  */
583 void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
584 {
585 	if (dev->pm_cap) {
586 		u16 pmcsr;
587 
588 		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
589 		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
590 	} else {
591 		dev->current_state = state;
592 	}
593 }
594 
595 /**
596  * pci_platform_power_transition - Use platform to change device power state
597  * @dev: PCI device to handle.
598  * @state: State to put the device into.
599  */
600 static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
601 {
602 	int error;
603 
604 	if (platform_pci_power_manageable(dev)) {
605 		error = platform_pci_set_power_state(dev, state);
606 		if (!error)
607 			pci_update_current_state(dev, state);
608 	} else {
609 		error = -ENODEV;
610 		/* Fall back to PCI_D0 if native PM is not supported */
611 		if (!dev->pm_cap)
612 			dev->current_state = PCI_D0;
613 	}
614 
615 	return error;
616 }
617 
618 /**
619  * __pci_start_power_transition - Start power transition of a PCI device
620  * @dev: PCI device to handle.
621  * @state: State to put the device into.
622  */
623 static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
624 {
625 	if (state == PCI_D0)
626 		pci_platform_power_transition(dev, PCI_D0);
627 }
628 
629 /**
630  * __pci_complete_power_transition - Complete power transition of a PCI device
631  * @dev: PCI device to handle.
632  * @state: State to put the device into.
633  *
634  * This function should not be called directly by device drivers.
635  */
636 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
637 {
638 	return state > PCI_D0 ?
639 			pci_platform_power_transition(dev, state) : -EINVAL;
640 }
641 EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
642 
643 /**
644  * pci_set_power_state - Set the power state of a PCI device
645  * @dev: PCI device to handle.
646  * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
647  *
648  * Transition a device to a new power state, using the platform firmware and/or
649  * the device's PCI PM registers.
650  *
651  * RETURN VALUE:
652  * -EINVAL if the requested state is invalid.
653  * -EIO if device does not support PCI PM or its PM capabilities register has a
654  * wrong version, or device doesn't support the requested state.
655  * 0 if device already is in the requested state.
656  * 0 if device's power state has been successfully changed.
657  */
658 int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
659 {
660 	int error;
661 
662 	/* bound the state we're entering */
663 	if (state > PCI_D3hot)
664 		state = PCI_D3hot;
665 	else if (state < PCI_D0)
666 		state = PCI_D0;
667 	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
668 		/*
669 		 * If the device or the parent bridge do not support PCI PM,
670 		 * ignore the request if we're doing anything other than putting
671 		 * it into D0 (which would only happen on boot).
672 		 */
673 		return 0;
674 
675 	/* Check if we're already there */
676 	if (dev->current_state == state)
677 		return 0;
678 
679 	__pci_start_power_transition(dev, state);
680 
681 	/* This device is quirked not to be put into D3, so
682 	   don't put it in D3 */
683 	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
684 		return 0;
685 
686 	error = pci_raw_set_power_state(dev, state);
687 
688 	if (!__pci_complete_power_transition(dev, state))
689 		error = 0;
690 
691 	return error;
692 }
693 
694 /**
695  * pci_choose_state - Choose the power state of a PCI device
696  * @dev: PCI device to be suspended
697  * @state: target sleep state for the whole system. This is the value
698  *	that is passed to the suspend() function.
699  *
700  * Returns PCI power state suitable for given device and given system
701  * message.
702  */
703 
704 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
705 {
706 	pci_power_t ret;
707 
708 	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
709 		return PCI_D0;
710 
711 	ret = platform_pci_choose_state(dev);
712 	if (ret != PCI_POWER_ERROR)
713 		return ret;
714 
715 	switch (state.event) {
716 	case PM_EVENT_ON:
717 		return PCI_D0;
718 	case PM_EVENT_FREEZE:
719 	case PM_EVENT_PRETHAW:
720 		/* REVISIT both freeze and pre-thaw "should" use D0 */
721 	case PM_EVENT_SUSPEND:
722 	case PM_EVENT_HIBERNATE:
723 		return PCI_D3hot;
724 	default:
725 		dev_info(&dev->dev, "unrecognized suspend event %d\n",
726 			 state.event);
727 		BUG();
728 	}
729 	return PCI_D0;
730 }
731 
732 EXPORT_SYMBOL(pci_choose_state);
733 
734 #define PCI_EXP_SAVE_REGS	7
735 
736 #define pcie_cap_has_devctl(type, flags)	1
737 #define pcie_cap_has_lnkctl(type, flags)		\
738 		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
739 		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
740 		  type == PCI_EXP_TYPE_ENDPOINT ||	\
741 		  type == PCI_EXP_TYPE_LEG_END))
742 #define pcie_cap_has_sltctl(type, flags)		\
743 		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
744 		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
745 		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
746 		   (flags & PCI_EXP_FLAGS_SLOT))))
747 #define pcie_cap_has_rtctl(type, flags)			\
748 		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
749 		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
750 		  type == PCI_EXP_TYPE_RC_EC))
751 #define pcie_cap_has_devctl2(type, flags)		\
752 		((flags & PCI_EXP_FLAGS_VERS) > 1)
753 #define pcie_cap_has_lnkctl2(type, flags)		\
754 		((flags & PCI_EXP_FLAGS_VERS) > 1)
755 #define pcie_cap_has_sltctl2(type, flags)		\
756 		((flags & PCI_EXP_FLAGS_VERS) > 1)
757 
758 static int pci_save_pcie_state(struct pci_dev *dev)
759 {
760 	int pos, i = 0;
761 	struct pci_cap_saved_state *save_state;
762 	u16 *cap;
763 	u16 flags;
764 
765 	pos = pci_pcie_cap(dev);
766 	if (!pos)
767 		return 0;
768 
769 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
770 	if (!save_state) {
771 		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
772 		return -ENOMEM;
773 	}
774 	cap = (u16 *)&save_state->data[0];
775 
776 	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
777 
778 	if (pcie_cap_has_devctl(dev->pcie_type, flags))
779 		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
780 	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
781 		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
782 	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
783 		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
784 	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
785 		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
786 	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
787 		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
788 	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
789 		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
790 	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
791 		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
792 
793 	return 0;
794 }
795 
796 static void pci_restore_pcie_state(struct pci_dev *dev)
797 {
798 	int i = 0, pos;
799 	struct pci_cap_saved_state *save_state;
800 	u16 *cap;
801 	u16 flags;
802 
803 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
804 	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
805 	if (!save_state || pos <= 0)
806 		return;
807 	cap = (u16 *)&save_state->data[0];
808 
809 	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
810 
811 	if (pcie_cap_has_devctl(dev->pcie_type, flags))
812 		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
813 	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
814 		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
815 	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
816 		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
817 	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
818 		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
819 	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
820 		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
821 	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
822 		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
823 	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
824 		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
825 }
826 
827 
828 static int pci_save_pcix_state(struct pci_dev *dev)
829 {
830 	int pos;
831 	struct pci_cap_saved_state *save_state;
832 
833 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
834 	if (pos <= 0)
835 		return 0;
836 
837 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
838 	if (!save_state) {
839 		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
840 		return -ENOMEM;
841 	}
842 
843 	pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->data);
844 
845 	return 0;
846 }
847 
848 static void pci_restore_pcix_state(struct pci_dev *dev)
849 {
850 	int i = 0, pos;
851 	struct pci_cap_saved_state *save_state;
852 	u16 *cap;
853 
854 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
855 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
856 	if (!save_state || pos <= 0)
857 		return;
858 	cap = (u16 *)&save_state->data[0];
859 
860 	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
861 }
862 
863 
864 /**
865  * pci_save_state - save the PCI configuration space of a device before suspending
866  * @dev: PCI device that we're dealing with
867  */
868 int
869 pci_save_state(struct pci_dev *dev)
870 {
871 	int i;
872 	/* XXX: 100% dword access ok here? */
873 	for (i = 0; i < 16; i++)
874 		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
875 	dev->state_saved = true;
876 	if ((i = pci_save_pcie_state(dev)) != 0)
877 		return i;
878 	if ((i = pci_save_pcix_state(dev)) != 0)
879 		return i;
880 	return 0;
881 }
882 
883 /**
884  * pci_restore_state - Restore the saved state of a PCI device
885  * @dev: PCI device that we're dealing with
886  */
887 int
888 pci_restore_state(struct pci_dev *dev)
889 {
890 	int i;
891 	u32 val;
892 
893 	if (!dev->state_saved)
894 		return 0;
895 
896 	/* PCI Express registers must be restored first */
897 	pci_restore_pcie_state(dev);
898 
899 	/*
900 	 * The Base Address register should be programmed before the command
901 	 * register(s)
902 	 */
903 	for (i = 15; i >= 0; i--) {
904 		pci_read_config_dword(dev, i * 4, &val);
905 		if (val != dev->saved_config_space[i]) {
906 			dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
907 				"space at offset %#x (was %#x, writing %#x)\n",
908 				i, val, (int)dev->saved_config_space[i]);
909 			pci_write_config_dword(dev, i * 4,
910 				dev->saved_config_space[i]);
911 		}
912 	}
913 	pci_restore_pcix_state(dev);
914 	pci_restore_msi_state(dev);
915 	pci_restore_iov_state(dev);
916 
917 	dev->state_saved = false;
918 
919 	return 0;
920 }
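
/*
 * Usage sketch (editorial addition, not part of the original file): the
 * legacy resume counterpart, restoring config space once the device is
 * back in D0.  The "foo" name is hypothetical.
 */
#if 0
static int foo_legacy_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}
#endif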
921 
922 static int do_pci_enable_device(struct pci_dev *dev, int bars)
923 {
924 	int err;
925 
926 	err = pci_set_power_state(dev, PCI_D0);
927 	if (err < 0 && err != -EIO)
928 		return err;
929 	err = pcibios_enable_device(dev, bars);
930 	if (err < 0)
931 		return err;
932 	pci_fixup_device(pci_fixup_enable, dev);
933 
934 	return 0;
935 }
936 
937 /**
938  * pci_reenable_device - Resume abandoned device
939  * @dev: PCI device to be resumed
940  *
941  *  Note this function is a backend of pci_default_resume and is not supposed
942  *  to be called by normal code; write a proper resume handler and use it instead.
943  */
944 int pci_reenable_device(struct pci_dev *dev)
945 {
946 	if (pci_is_enabled(dev))
947 		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
948 	return 0;
949 }
950 
951 static int __pci_enable_device_flags(struct pci_dev *dev,
952 				     resource_size_t flags)
953 {
954 	int err;
955 	int i, bars = 0;
956 
957 	if (atomic_add_return(1, &dev->enable_cnt) > 1)
958 		return 0;		/* already enabled */
959 
960 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
961 		if (dev->resource[i].flags & flags)
962 			bars |= (1 << i);
963 
964 	err = do_pci_enable_device(dev, bars);
965 	if (err < 0)
966 		atomic_dec(&dev->enable_cnt);
967 	return err;
968 }
969 
970 /**
971  * pci_enable_device_io - Initialize a device for use with IO space
972  * @dev: PCI device to be initialized
973  *
974  *  Initialize device before it's used by a driver. Ask low-level code
975  *  to enable I/O resources. Wake up the device if it was suspended.
976  *  Beware, this function can fail.
977  */
978 int pci_enable_device_io(struct pci_dev *dev)
979 {
980 	return __pci_enable_device_flags(dev, IORESOURCE_IO);
981 }
982 
983 /**
984  * pci_enable_device_mem - Initialize a device for use with Memory space
985  * @dev: PCI device to be initialized
986  *
987  *  Initialize device before it's used by a driver. Ask low-level code
988  *  to enable Memory resources. Wake up the device if it was suspended.
989  *  Beware, this function can fail.
990  */
991 int pci_enable_device_mem(struct pci_dev *dev)
992 {
993 	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
994 }
995 
996 /**
997  * pci_enable_device - Initialize device before it's used by a driver.
998  * @dev: PCI device to be initialized
999  *
1000  *  Initialize device before it's used by a driver. Ask low-level code
1001  *  to enable I/O and memory. Wake up the device if it was suspended.
1002  *  Beware, this function can fail.
1003  *
1004  *  Note we don't actually enable the device multiple times if we call
1005  *  this function repeatedly (we just increment the count).
1006  */
1007 int pci_enable_device(struct pci_dev *dev)
1008 {
1009 	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1010 }
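
/*
 * Usage sketch (editorial addition, not part of the original file): a
 * minimal probe() built on pci_enable_device(), undone by
 * pci_disable_device() in remove().  The "foo" name is hypothetical.
 */
#if 0
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err = pci_enable_device(pdev);

	if (err)
		return err;
	/* ... request regions, map BARs, register the device ... */
	return 0;
}
#endif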
1011 
1012 /*
1013  * Managed PCI resources.  This manages device on/off, intx/msi/msix
1014  * on/off and BAR regions.  pci_dev itself records msi/msix status, so
1015  * there's no need to track it separately.  pci_devres is initialized
1016  * when a device is enabled using the managed PCI device enable interface.
1017  */
1018 struct pci_devres {
1019 	unsigned int enabled:1;
1020 	unsigned int pinned:1;
1021 	unsigned int orig_intx:1;
1022 	unsigned int restore_intx:1;
1023 	u32 region_mask;
1024 };
1025 
1026 static void pcim_release(struct device *gendev, void *res)
1027 {
1028 	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
1029 	struct pci_devres *this = res;
1030 	int i;
1031 
1032 	if (dev->msi_enabled)
1033 		pci_disable_msi(dev);
1034 	if (dev->msix_enabled)
1035 		pci_disable_msix(dev);
1036 
1037 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1038 		if (this->region_mask & (1 << i))
1039 			pci_release_region(dev, i);
1040 
1041 	if (this->restore_intx)
1042 		pci_intx(dev, this->orig_intx);
1043 
1044 	if (this->enabled && !this->pinned)
1045 		pci_disable_device(dev);
1046 }
1047 
1048 static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
1049 {
1050 	struct pci_devres *dr, *new_dr;
1051 
1052 	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1053 	if (dr)
1054 		return dr;
1055 
1056 	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1057 	if (!new_dr)
1058 		return NULL;
1059 	return devres_get(&pdev->dev, new_dr, NULL, NULL);
1060 }
1061 
1062 static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
1063 {
1064 	if (pci_is_managed(pdev))
1065 		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1066 	return NULL;
1067 }
1068 
1069 /**
1070  * pcim_enable_device - Managed pci_enable_device()
1071  * @pdev: PCI device to be initialized
1072  *
1073  * Managed pci_enable_device().
1074  */
1075 int pcim_enable_device(struct pci_dev *pdev)
1076 {
1077 	struct pci_devres *dr;
1078 	int rc;
1079 
1080 	dr = get_pci_dr(pdev);
1081 	if (unlikely(!dr))
1082 		return -ENOMEM;
1083 	if (dr->enabled)
1084 		return 0;
1085 
1086 	rc = pci_enable_device(pdev);
1087 	if (!rc) {
1088 		pdev->is_managed = 1;
1089 		dr->enabled = 1;
1090 	}
1091 	return rc;
1092 }
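
/*
 * Usage sketch (editorial addition, not part of the original file): with
 * pcim_enable_device() the disable happens automatically on driver
 * detach, so remove() needs no pci_disable_device().  Hypothetical name.
 */
#if 0
static int foo_managed_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	int err = pcim_enable_device(pdev);

	if (err)
		return err;
	/* devres will disable the device when the driver detaches */
	return 0;
}
#endif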
1093 
1094 /**
1095  * pcim_pin_device - Pin managed PCI device
1096  * @pdev: PCI device to pin
1097  *
1098  * Pin managed PCI device @pdev.  A pinned device won't be disabled on
1099  * driver detach.  @pdev must have been enabled with
1100  * pcim_enable_device().
1101  */
1102 void pcim_pin_device(struct pci_dev *pdev)
1103 {
1104 	struct pci_devres *dr;
1105 
1106 	dr = find_pci_dr(pdev);
1107 	WARN_ON(!dr || !dr->enabled);
1108 	if (dr)
1109 		dr->pinned = 1;
1110 }
1111 
1112 /**
1113  * pcibios_disable_device - disable arch specific PCI resources for device dev
1114  * @dev: the PCI device to disable
1115  *
1116  * Disables architecture specific PCI resources for the device. This
1117  * is the default implementation. Architecture implementations can
1118  * override this.
1119  */
1120 void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
1121 
1122 static void do_pci_disable_device(struct pci_dev *dev)
1123 {
1124 	u16 pci_command;
1125 
1126 	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1127 	if (pci_command & PCI_COMMAND_MASTER) {
1128 		pci_command &= ~PCI_COMMAND_MASTER;
1129 		pci_write_config_word(dev, PCI_COMMAND, pci_command);
1130 	}
1131 
1132 	pcibios_disable_device(dev);
1133 }
1134 
1135 /**
1136  * pci_disable_enabled_device - Disable device without updating enable_cnt
1137  * @dev: PCI device to disable
1138  *
1139  * NOTE: This function is a backend of PCI power management routines and is
1140  * not supposed to be called by drivers.
1141  */
1142 void pci_disable_enabled_device(struct pci_dev *dev)
1143 {
1144 	if (pci_is_enabled(dev))
1145 		do_pci_disable_device(dev);
1146 }
1147 
1148 /**
1149  * pci_disable_device - Disable PCI device after use
1150  * @dev: PCI device to be disabled
1151  *
1152  * Signal to the system that the PCI device is not in use by the system
1153  * anymore.  This only involves disabling PCI bus-mastering, if active.
1154  *
1155  * Note we don't actually disable the device until all callers of
1156  * pci_enable_device() have called pci_disable_device().
1157  */
1158 void
1159 pci_disable_device(struct pci_dev *dev)
1160 {
1161 	struct pci_devres *dr;
1162 
1163 	dr = find_pci_dr(dev);
1164 	if (dr)
1165 		dr->enabled = 0;
1166 
1167 	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
1168 		return;
1169 
1170 	do_pci_disable_device(dev);
1171 
1172 	dev->is_busmaster = 0;
1173 }
1174 
1175 /**
1176  * pcibios_set_pcie_reset_state - set reset state for device dev
1177  * @dev: the PCIe device to reset
1178  * @state: Reset state to enter into
1179  *
1180  *
1181  * Sets the PCIe reset state for the device. This is the default
1182  * implementation. Architecture implementations can override this.
1183  */
1184 int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
1185 							enum pcie_reset_state state)
1186 {
1187 	return -EINVAL;
1188 }
1189 
1190 /**
1191  * pci_set_pcie_reset_state - set reset state for device dev
1192  * @dev: the PCIe device to reset
1193  * @state: Reset state to enter into
1194  *
1195  *
1196  * Sets the PCI reset state for the device.
1197  */
1198 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1199 {
1200 	return pcibios_set_pcie_reset_state(dev, state);
1201 }
1202 
1203 /**
1204  * pci_check_pme_status - Check if given device has generated PME.
1205  * @dev: Device to check.
1206  *
1207  * Check the PME status of the device and if set, clear it and clear PME enable
1208  * (if set).  Return 'true' if PME status and PME enable were both set or
1209  * 'false' otherwise.
1210  */
1211 bool pci_check_pme_status(struct pci_dev *dev)
1212 {
1213 	int pmcsr_pos;
1214 	u16 pmcsr;
1215 	bool ret = false;
1216 
1217 	if (!dev->pm_cap)
1218 		return false;
1219 
1220 	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1221 	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1222 	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1223 		return false;
1224 
1225 	/* Clear PME status. */
1226 	pmcsr |= PCI_PM_CTRL_PME_STATUS;
1227 	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1228 		/* Disable PME to avoid interrupt flood. */
1229 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1230 		ret = true;
1231 	}
1232 
1233 	pci_write_config_word(dev, pmcsr_pos, pmcsr);
1234 
1235 	return ret;
1236 }
1237 
1238 /**
1239  * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1240  * @dev: Device to handle.
1241  * @ign: Ignored.
1242  *
1243  * Check if @dev has generated PME and queue a resume request for it in that
1244  * case.
1245  */
1246 static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
1247 {
1248 	if (pci_check_pme_status(dev))
1249 		pm_request_resume(&dev->dev);
1250 	return 0;
1251 }
1252 
1253 /**
1254  * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1255  * @bus: Top bus of the subtree to walk.
1256  */
1257 void pci_pme_wakeup_bus(struct pci_bus *bus)
1258 {
1259 	if (bus)
1260 		pci_walk_bus(bus, pci_pme_wakeup, NULL);
1261 }
1262 
1263 /**
1264  * pci_pme_capable - check the capability of PCI device to generate PME#
1265  * @dev: PCI device to handle.
1266  * @state: PCI state from which device will issue PME#.
1267  */
1268 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1269 {
1270 	if (!dev->pm_cap)
1271 		return false;
1272 
1273 	return !!(dev->pme_support & (1 << state));
1274 }
1275 
1276 /**
1277  * pci_pme_active - enable or disable PCI device's PME# function
1278  * @dev: PCI device to handle.
1279  * @enable: 'true' to enable PME# generation; 'false' to disable it.
1280  *
1281  * The caller must verify that the device is capable of generating PME# before
1282  * calling this function with @enable equal to 'true'.
1283  */
1284 void pci_pme_active(struct pci_dev *dev, bool enable)
1285 {
1286 	u16 pmcsr;
1287 
1288 	if (!dev->pm_cap)
1289 		return;
1290 
1291 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1292 	/* Clear PME_Status by writing 1 to it and enable PME# */
1293 	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1294 	if (!enable)
1295 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1296 
1297 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1298 
1299 	dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
1300 			enable ? "enabled" : "disabled");
1301 }
1302 
1303 /**
1304  * __pci_enable_wake - enable PCI device as wakeup event source
1305  * @dev: PCI device affected
1306  * @state: PCI state from which device will issue wakeup events
1307  * @runtime: True if the events are to be generated at run time
1308  * @enable: True to enable event generation; false to disable
1309  *
1310  * This enables the device as a wakeup event source, or disables it.
1311  * When such events involve platform-specific hooks, those hooks are
1312  * called automatically by this routine.
1313  *
1314  * Devices with legacy power management (no standard PCI PM capabilities)
1315  * always require such platform hooks.
1316  *
1317  * RETURN VALUE:
1318  * 0 is returned on success
1319  * -EINVAL is returned if device is not supposed to wake up the system
1320  * Error code depending on the platform is returned if both the platform and
1321  * the native mechanism fail to enable the generation of wake-up events
1322  */
1323 int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1324 		      bool runtime, bool enable)
1325 {
1326 	int ret = 0;
1327 
1328 	if (enable && !runtime && !device_may_wakeup(&dev->dev))
1329 		return -EINVAL;
1330 
1331 	/* Don't do the same thing twice in a row for one device. */
1332 	if (!!enable == !!dev->wakeup_prepared)
1333 		return 0;
1334 
1335 	/*
1336 	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1337 	 * Anderson we should be doing PME# wake enable followed by ACPI wake
1338 	 * enable.  To disable wake-up we call the platform first, for symmetry.
1339 	 */
1340 
1341 	if (enable) {
1342 		int error;
1343 
1344 		if (pci_pme_capable(dev, state))
1345 			pci_pme_active(dev, true);
1346 		else
1347 			ret = 1;
1348 		error = runtime ? platform_pci_run_wake(dev, true) :
1349 					platform_pci_sleep_wake(dev, true);
1350 		if (ret)
1351 			ret = error;
1352 		if (!ret)
1353 			dev->wakeup_prepared = true;
1354 	} else {
1355 		if (runtime)
1356 			platform_pci_run_wake(dev, false);
1357 		else
1358 			platform_pci_sleep_wake(dev, false);
1359 		pci_pme_active(dev, false);
1360 		dev->wakeup_prepared = false;
1361 	}
1362 
1363 	return ret;
1364 }
1365 EXPORT_SYMBOL(__pci_enable_wake);
1366 
1367 /**
1368  * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1369  * @dev: PCI device to prepare
1370  * @enable: True to enable wake-up event generation; false to disable
1371  *
1372  * Many drivers want the device to wake up the system from D3_hot or D3_cold
1373  * and this function allows them to set that up cleanly - pci_enable_wake()
1374  * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1375  * ordering constraints.
1376  *
1377  * This function only returns an error code if the device is not capable of
1378  * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1379  * enable wake-up power for it.
1380  */
1381 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1382 {
1383 	return pci_pme_capable(dev, PCI_D3cold) ?
1384 			pci_enable_wake(dev, PCI_D3cold, enable) :
1385 			pci_enable_wake(dev, PCI_D3hot, enable);
1386 }
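
/*
 * Usage sketch (editorial addition, not part of the original file): e.g.
 * a network driver propagating a Wake-on-LAN setting.  The "foo" name is
 * hypothetical.
 */
#if 0
static void foo_set_wol(struct pci_dev *pdev, bool wol)
{
	if (pci_wake_from_d3(pdev, wol))
		dev_warn(&pdev->dev, "failed to set up wake-up from D3\n");
}
#endif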
1387 
1388 /**
1389  * pci_target_state - find an appropriate low power state for a given PCI dev
1390  * @dev: PCI device
1391  *
1392  * Use underlying platform code to find a supported low power state for @dev.
1393  * If the platform can't manage @dev, return the deepest state from which it
1394  * can generate wake events, based on any available PME info.
1395  */
1396 pci_power_t pci_target_state(struct pci_dev *dev)
1397 {
1398 	pci_power_t target_state = PCI_D3hot;
1399 
1400 	if (platform_pci_power_manageable(dev)) {
1401 		/*
1402 		 * Call the platform to choose the target state of the device
1403 		 * and enable wake-up from this state if supported.
1404 		 */
1405 		pci_power_t state = platform_pci_choose_state(dev);
1406 
1407 		switch (state) {
1408 		case PCI_POWER_ERROR:
1409 		case PCI_UNKNOWN:
1410 			break;
1411 		case PCI_D1:
1412 		case PCI_D2:
1413 			if (pci_no_d1d2(dev))
1414 				break;
1415 		default:
1416 			target_state = state;
1417 		}
1418 	} else if (!dev->pm_cap) {
1419 		target_state = PCI_D0;
1420 	} else if (device_may_wakeup(&dev->dev)) {
1421 		/*
1422 		 * Find the deepest state from which the device can generate
1423 		 * wake-up events, make it the target state and enable device
1424 		 * to generate PME#.
1425 		 */
1426 		if (dev->pme_support) {
1427 			while (target_state
1428 			      && !(dev->pme_support & (1 << target_state)))
1429 				target_state--;
1430 		}
1431 	}
1432 
1433 	return target_state;
1434 }
1435 
1436 /**
1437  * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1438  * @dev: Device to handle.
1439  *
1440  * Choose the power state appropriate for the device depending on whether
1441  * it can wake up the system and/or is power manageable by the platform
1442  * (PCI_D3hot is the default) and put the device into that state.
1443  */
1444 int pci_prepare_to_sleep(struct pci_dev *dev)
1445 {
1446 	pci_power_t target_state = pci_target_state(dev);
1447 	int error;
1448 
1449 	if (target_state == PCI_POWER_ERROR)
1450 		return -EIO;
1451 
1452 	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1453 
1454 	error = pci_set_power_state(dev, target_state);
1455 
1456 	if (error)
1457 		pci_enable_wake(dev, target_state, false);
1458 
1459 	return error;
1460 }
1461 
1462 /**
1463  * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
1464  * @dev: Device to handle.
1465  *
1466  * Disable the device's system wake-up capability and put it into D0.
1467  */
1468 int pci_back_from_sleep(struct pci_dev *dev)
1469 {
1470 	pci_enable_wake(dev, PCI_D0, false);
1471 	return pci_set_power_state(dev, PCI_D0);
1472 }
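
/*
 * Usage sketch (editorial addition, not part of the original file): a
 * suspend/resume pair built on pci_prepare_to_sleep() and
 * pci_back_from_sleep().  The "foo" names are hypothetical.
 */
#if 0
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	return pci_prepare_to_sleep(pdev);
}

static int foo_resume(struct pci_dev *pdev)
{
	pci_back_from_sleep(pdev);
	return pci_restore_state(pdev);
}
#endif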
1473 
1474 /**
1475  * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1476  * @dev: PCI device being suspended.
1477  *
1478  * Prepare @dev to generate wake-up events at run time and put it into a low
1479  * power state.
1480  */
1481 int pci_finish_runtime_suspend(struct pci_dev *dev)
1482 {
1483 	pci_power_t target_state = pci_target_state(dev);
1484 	int error;
1485 
1486 	if (target_state == PCI_POWER_ERROR)
1487 		return -EIO;
1488 
1489 	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1490 
1491 	error = pci_set_power_state(dev, target_state);
1492 
1493 	if (error)
1494 		__pci_enable_wake(dev, target_state, true, false);
1495 
1496 	return error;
1497 }
1498 
1499 /**
1500  * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1501  * @dev: Device to check.
1502  *
1503  * Return true if the device itself is capable of generating wake-up events
1504  * (through the platform or using the native PCIe PME) or if the device supports
1505  * PME and one of its upstream bridges can generate wake-up events.
1506  */
1507 bool pci_dev_run_wake(struct pci_dev *dev)
1508 {
1509 	struct pci_bus *bus = dev->bus;
1510 
1511 	if (device_run_wake(&dev->dev))
1512 		return true;
1513 
1514 	if (!dev->pme_support)
1515 		return false;
1516 
1517 	while (bus->parent) {
1518 		struct pci_dev *bridge = bus->self;
1519 
1520 		if (device_run_wake(&bridge->dev))
1521 			return true;
1522 
1523 		bus = bus->parent;
1524 	}
1525 
1526 	/* We have reached the root bus. */
1527 	if (bus->bridge)
1528 		return device_run_wake(bus->bridge);
1529 
1530 	return false;
1531 }
1532 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1533 
1534 /**
1535  * pci_pm_init - Initialize PM functions of given PCI device
1536  * @dev: PCI device to handle.
1537  */
1538 void pci_pm_init(struct pci_dev *dev)
1539 {
1540 	int pm;
1541 	u16 pmc;
1542 
1543 	dev->wakeup_prepared = false;
1544 	dev->pm_cap = 0;
1545 
1546 	/* find PCI PM capability in list */
1547 	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1548 	if (!pm)
1549 		return;
1550 	/* Check device's ability to generate PME# */
1551 	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
1552 
1553 	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1554 		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1555 			pmc & PCI_PM_CAP_VER_MASK);
1556 		return;
1557 	}
1558 
1559 	dev->pm_cap = pm;
1560 	dev->d3_delay = PCI_PM_D3_WAIT;
1561 
1562 	dev->d1_support = false;
1563 	dev->d2_support = false;
1564 	if (!pci_no_d1d2(dev)) {
1565 		if (pmc & PCI_PM_CAP_D1)
1566 			dev->d1_support = true;
1567 		if (pmc & PCI_PM_CAP_D2)
1568 			dev->d2_support = true;
1569 
1570 		if (dev->d1_support || dev->d2_support)
1571 			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
1572 				   dev->d1_support ? " D1" : "",
1573 				   dev->d2_support ? " D2" : "");
1574 	}
1575 
1576 	pmc &= PCI_PM_CAP_PME_MASK;
1577 	if (pmc) {
1578 		dev_printk(KERN_DEBUG, &dev->dev,
1579 			 "PME# supported from%s%s%s%s%s\n",
1580 			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1581 			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1582 			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1583 			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1584 			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1585 		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
1586 		/*
1587 		 * Make device's PM flags reflect the wake-up capability, but
1588 		 * let the user space enable it to wake up the system as needed.
1589 		 */
1590 		device_set_wakeup_capable(&dev->dev, true);
1591 		device_set_wakeup_enable(&dev->dev, false);
1592 		/* Disable the PME# generation functionality */
1593 		pci_pme_active(dev, false);
1594 	} else {
1595 		dev->pme_support = 0;
1596 	}
1597 }
1598 
1599 /**
1600  * platform_pci_wakeup_init - init platform wakeup if present
1601  * @dev: PCI device
1602  *
1603  * Some devices don't have PCI PM caps but can still generate wakeup
1604  * events through platform methods (like ACPI events).  If @dev supports
1605  * platform wakeup events, set the device flag to indicate as much.  This
1606  * may be redundant if the device also supports PCI PM caps, but double
1607  * initialization should be safe in that case.
1608  */
1609 void platform_pci_wakeup_init(struct pci_dev *dev)
1610 {
1611 	if (!platform_pci_can_wakeup(dev))
1612 		return;
1613 
1614 	device_set_wakeup_capable(&dev->dev, true);
1615 	device_set_wakeup_enable(&dev->dev, false);
1616 	platform_pci_sleep_wake(dev, false);
1617 }
1618 
1619 /**
1620  * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
1621  * @dev: the PCI device
1622  * @cap: the capability to allocate the buffer for
1623  * @size: requested size of the buffer
1624  */
1625 static int pci_add_cap_save_buffer(
1626 	struct pci_dev *dev, char cap, unsigned int size)
1627 {
1628 	int pos;
1629 	struct pci_cap_saved_state *save_state;
1630 
1631 	pos = pci_find_capability(dev, cap);
1632 	if (pos <= 0)
1633 		return 0;
1634 
1635 	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1636 	if (!save_state)
1637 		return -ENOMEM;
1638 
1639 	save_state->cap_nr = cap;
1640 	pci_add_saved_cap(dev, save_state);
1641 
1642 	return 0;
1643 }
1644 
1645 /**
1646  * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
1647  * @dev: the PCI device
1648  */
1649 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1650 {
1651 	int error;
1652 
1653 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1654 					PCI_EXP_SAVE_REGS * sizeof(u16));
1655 	if (error)
1656 		dev_err(&dev->dev,
1657 			"unable to preallocate PCI Express save buffer\n");
1658 
1659 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1660 	if (error)
1661 		dev_err(&dev->dev,
1662 			"unable to preallocate PCI-X save buffer\n");
1663 }
1664 
1665 /**
1666  * pci_enable_ari - enable ARI forwarding if hardware supports it
1667  * @dev: the PCI device
1668  */
1669 void pci_enable_ari(struct pci_dev *dev)
1670 {
1671 	int pos;
1672 	u32 cap;
1673 	u16 ctrl;
1674 	struct pci_dev *bridge;
1675 
1676 	if (!pci_is_pcie(dev) || dev->devfn)
1677 		return;
1678 
1679 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1680 	if (!pos)
1681 		return;
1682 
1683 	bridge = dev->bus->self;
1684 	if (!bridge || !pci_is_pcie(bridge))
1685 		return;
1686 
1687 	pos = pci_pcie_cap(bridge);
1688 	if (!pos)
1689 		return;
1690 
1691 	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
1692 	if (!(cap & PCI_EXP_DEVCAP2_ARI))
1693 		return;
1694 
1695 	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
1696 	ctrl |= PCI_EXP_DEVCTL2_ARI;
1697 	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
1698 
1699 	bridge->ari_enabled = 1;
1700 }
1701 
1702 static int pci_acs_enable;
1703 
1704 /**
1705  * pci_request_acs - ask for ACS to be enabled if supported
1706  */
1707 void pci_request_acs(void)
1708 {
1709 	pci_acs_enable = 1;
1710 }
1711 
1712 /**
1713  * pci_enable_acs - enable ACS if hardware supports it
1714  * @dev: the PCI device
1715  */
1716 void pci_enable_acs(struct pci_dev *dev)
1717 {
1718 	int pos;
1719 	u16 cap;
1720 	u16 ctrl;
1721 
1722 	if (!pci_acs_enable)
1723 		return;
1724 
1725 	if (!pci_is_pcie(dev))
1726 		return;
1727 
1728 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
1729 	if (!pos)
1730 		return;
1731 
1732 	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
1733 	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
1734 
1735 	/* Source Validation */
1736 	ctrl |= (cap & PCI_ACS_SV);
1737 
1738 	/* P2P Request Redirect */
1739 	ctrl |= (cap & PCI_ACS_RR);
1740 
1741 	/* P2P Completion Redirect */
1742 	ctrl |= (cap & PCI_ACS_CR);
1743 
1744 	/* Upstream Forwarding */
1745 	ctrl |= (cap & PCI_ACS_UF);
1746 
1747 	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
1748 }
1749 
1750 /**
1751  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
1752  * @dev: the PCI device
1753  * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
1754  *
1755  * Perform INTx swizzling for a device behind one level of bridge.  This is
1756  * required by section 9.1 of the PCI-to-PCI bridge specification for devices
1757  * behind bridges on add-in cards.  For devices with ARI enabled, the slot
1758  * number is always 0 (see the Implementation Note in section 2.2.8.1 of
1759  * the PCI Express Base Specification, Revision 2.1)
1760  */
1761 u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
1762 {
1763 	int slot;
1764 
1765 	if (pci_ari_enabled(dev->bus))
1766 		slot = 0;
1767 	else
1768 		slot = PCI_SLOT(dev->devfn);
1769 
1770 	return (((pin - 1) + slot) % 4) + 1;
1771 }
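
/*
 * Worked example (editorial addition, not part of the original file):
 * a device in slot 2 behind a bridge asserting INTB (pin == 2) maps to
 * the bridge's INTD:  (((2 - 1) + 2) % 4) + 1 == 4.
 */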
1772 
1773 int
1774 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
1775 {
1776 	u8 pin;
1777 
1778 	pin = dev->pin;
1779 	if (!pin)
1780 		return -1;
1781 
1782 	while (!pci_is_root_bus(dev->bus)) {
1783 		pin = pci_swizzle_interrupt_pin(dev, pin);
1784 		dev = dev->bus->self;
1785 	}
1786 	*bridge = dev;
1787 	return pin;
1788 }
1789 
1790 /**
1791  * pci_common_swizzle - swizzle INTx all the way to root bridge
1792  * @dev: the PCI device
1793  * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
1794  *
1795  * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
1796  * bridges all the way up to a PCI root bus.
1797  */
1798 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
1799 {
1800 	u8 pin = *pinp;
1801 
1802 	while (!pci_is_root_bus(dev->bus)) {
1803 		pin = pci_swizzle_interrupt_pin(dev, pin);
1804 		dev = dev->bus->self;
1805 	}
1806 	*pinp = pin;
1807 	return PCI_SLOT(dev->devfn);
1808 }
1809 
1810 /**
1811  *	pci_release_region - Release a PCI BAR
1812  *	@pdev: PCI device whose resources were previously reserved by pci_request_region
1813  *	@bar: BAR to release
1814  *
1815  *	Releases the PCI I/O and memory resources previously reserved by a
1816  *	successful call to pci_request_region.  Call this function only
1817  *	after all use of the PCI regions has ceased.
1818  */
1819 void pci_release_region(struct pci_dev *pdev, int bar)
1820 {
1821 	struct pci_devres *dr;
1822 
1823 	if (pci_resource_len(pdev, bar) == 0)
1824 		return;
1825 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
1826 		release_region(pci_resource_start(pdev, bar),
1827 				pci_resource_len(pdev, bar));
1828 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
1829 		release_mem_region(pci_resource_start(pdev, bar),
1830 				pci_resource_len(pdev, bar));
1831 
1832 	dr = find_pci_dr(pdev);
1833 	if (dr)
1834 		dr->region_mask &= ~(1 << bar);
1835 }
1836 
1837 /**
1838  *	__pci_request_region - Reserve PCI I/O and memory resource
1839  *	@pdev: PCI device whose resources are to be reserved
1840  *	@bar: BAR to be reserved
1841  *	@res_name: Name to be associated with resource.
1842  *	@exclusive: whether the region access is exclusive or not
1843  *
1844  *	Mark the PCI region associated with PCI device @pdev BAR @bar as
1845  *	being reserved by owner @res_name.  Do not access any
1846  *	address inside the PCI regions unless this call returns
1847  *	successfully.
1848  *
1849  *	If @exclusive is set, then the region is marked so that userspace
1850  *	is explicitly not allowed to map the resource via /dev/mem or
1851  * 	sysfs MMIO access.
1852  *
1853  *	Returns 0 on success, or %EBUSY on error.  A warning
1854  *	message is also printed on failure.
1855  */
1856 static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
1857 									int exclusive)
1858 {
1859 	struct pci_devres *dr;
1860 
1861 	if (pci_resource_len(pdev, bar) == 0)
1862 		return 0;
1863 
1864 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
1865 		if (!request_region(pci_resource_start(pdev, bar),
1866 			    pci_resource_len(pdev, bar), res_name))
1867 			goto err_out;
1868 	}
1869 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
1870 		if (!__request_mem_region(pci_resource_start(pdev, bar),
1871 					pci_resource_len(pdev, bar), res_name,
1872 					exclusive))
1873 			goto err_out;
1874 	}
1875 
1876 	dr = find_pci_dr(pdev);
1877 	if (dr)
1878 		dr->region_mask |= 1 << bar;
1879 
1880 	return 0;
1881 
1882 err_out:
1883 	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
1884 		 &pdev->resource[bar]);
1885 	return -EBUSY;
1886 }
1887 
1888 /**
1889  *	pci_request_region - Reserve PCI I/O and memory resource
1890  *	@pdev: PCI device whose resources are to be reserved
1891  *	@bar: BAR to be reserved
1892  *	@res_name: Name to be associated with resource
1893  *
1894  *	Mark the PCI region associated with PCI device @pdev BAR @bar as
1895  *	being reserved by owner @res_name.  Do not access any
1896  *	address inside the PCI regions unless this call returns
1897  *	successfully.
1898  *
1899  *	Returns 0 on success, or %EBUSY on error.  A warning
1900  *	message is also printed on failure.
1901  */
1902 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
1903 {
1904 	return __pci_request_region(pdev, bar, res_name, 0);
1905 }
1906 
1907 /**
1908  *	pci_request_region_exclusive - Reserve PCI I/O and memory resource
1909  *	@pdev: PCI device whose resources are to be reserved
1910  *	@bar: BAR to be reserved
1911  *	@res_name: Name to be associated with resource.
1912  *
1913  *	Mark the PCI region associated with PCI device @pdev BAR @bar as
1914  *	being reserved by owner @res_name.  Do not access any
1915  *	address inside the PCI regions unless this call returns
1916  *	successfully.
1917  *
1918  *	Returns 0 on success, or %EBUSY on error.  A warning
1919  *	message is also printed on failure.
1920  *
1921  *	The key difference that _exclusive makes is that userspace is
1922  *	explicitly not allowed to map the resource via /dev/mem or
1923  * 	sysfs.
1924  */
1925 int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
1926 {
1927 	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
1928 }
1929 /**
1930  * pci_release_selected_regions - Release selected PCI I/O and memory resources
1931  * @pdev: PCI device whose resources were previously reserved
1932  * @bars: Bitmask of BARs to be released
1933  *
1934  * Release selected PCI I/O and memory resources previously reserved.
1935  * Call this function only after all use of the PCI regions has ceased.
1936  */
1937 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
1938 {
1939 	int i;
1940 
1941 	for (i = 0; i < 6; i++)
1942 		if (bars & (1 << i))
1943 			pci_release_region(pdev, i);
1944 }
1945 
1946 int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
1947 				 const char *res_name, int excl)
1948 {
1949 	int i;
1950 
1951 	for (i = 0; i < 6; i++)
1952 		if (bars & (1 << i))
1953 			if (__pci_request_region(pdev, i, res_name, excl))
1954 				goto err_out;
1955 	return 0;
1956 
1957 err_out:
1958 	while (--i >= 0)
1959 		if (bars & (1 << i))
1960 			pci_release_region(pdev, i);
1961 
1962 	return -EBUSY;
1963 }
1964 
1965 
1966 /**
1967  * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
1968  * @pdev: PCI device whose resources are to be reserved
1969  * @bars: Bitmask of BARs to be requested
1970  * @res_name: Name to be associated with resource
1971  */
1972 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
1973 				 const char *res_name)
1974 {
1975 	return __pci_request_selected_regions(pdev, bars, res_name, 0);
1976 }
1977 
1978 int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
1979 				 int bars, const char *res_name)
1980 {
1981 	return __pci_request_selected_regions(pdev, bars, res_name,
1982 			IORESOURCE_EXCLUSIVE);
1983 }
1984 
1985 /**
1986  *	pci_release_regions - Release reserved PCI I/O and memory resources
1987  *	@pdev: PCI device whose resources were previously reserved by pci_request_regions
1988  *
1989  *	Releases all PCI I/O and memory resources previously reserved by a
1990  *	successful call to pci_request_regions.  Call this function only
1991  *	after all use of the PCI regions has ceased.
1992  */
1994 void pci_release_regions(struct pci_dev *pdev)
1995 {
1996 	pci_release_selected_regions(pdev, (1 << 6) - 1);
1997 }
1998 
1999 /**
2000  *	pci_request_regions - Reserve PCI I/O and memory resources
2001  *	@pdev: PCI device whose resources are to be reserved
2002  *	@res_name: Name to be associated with resource.
2003  *
2004  *	Mark all PCI regions associated with PCI device @pdev as
2005  *	being reserved by owner @res_name.  Do not access any
2006  *	address inside the PCI regions unless this call returns
2007  *	successfully.
2008  *
2009  *	Returns 0 on success, or %EBUSY on error.  A warning
2010  *	message is also printed on failure.
2011  */
2012 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2013 {
2014 	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2015 }
2016 
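/*
 * Usage sketch: a driver claims every BAR in probe() and releases them in
 * remove().  The foo_* names are illustrative only.
 */
#if 0
static int __devinit foo_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, "foo");
	if (rc) {
		pci_disable_device(pdev);
		return rc;
	}

	/* ... ioremap BARs, register the device, etc. ... */
	return 0;
}

static void __devexit foo_remove(struct pci_dev *pdev)
{
	/* undo in reverse order */
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
#endif
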
2017 /**
2018  *	pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2019  *	@pdev: PCI device whose resources are to be reserved
2020  *	@res_name: Name to be associated with resource.
2021  *
2022  *	Mark all PCI regions associated with PCI device @pdev as
2023  *	being reserved by owner @res_name.  Do not access any
2024  *	address inside the PCI regions unless this call returns
2025  *	successfully.
2026  *
2027  *	pci_request_regions_exclusive() will mark the region so that
2028  * 	/dev/mem and the sysfs MMIO access will not be allowed.
2029  *
2030  *	Returns 0 on success, or %EBUSY on error.  A warning
2031  *	message is also printed on failure.
2032  */
2033 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2034 {
2035 	return pci_request_selected_regions_exclusive(pdev,
2036 					((1 << 6) - 1), res_name);
2037 }
2038 
2039 static void __pci_set_master(struct pci_dev *dev, bool enable)
2040 {
2041 	u16 old_cmd, cmd;
2042 
2043 	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2044 	if (enable)
2045 		cmd = old_cmd | PCI_COMMAND_MASTER;
2046 	else
2047 		cmd = old_cmd & ~PCI_COMMAND_MASTER;
2048 	if (cmd != old_cmd) {
2049 		dev_dbg(&dev->dev, "%s bus mastering\n",
2050 			enable ? "enabling" : "disabling");
2051 		pci_write_config_word(dev, PCI_COMMAND, cmd);
2052 	}
2053 	dev->is_busmaster = enable;
2054 }
2055 
2056 /**
2057  * pci_set_master - enables bus-mastering for device dev
2058  * @dev: the PCI device to enable
2059  *
2060  * Enables bus-mastering on the device and calls pcibios_set_master()
2061  * to do the needed arch specific settings.
2062  */
2063 void pci_set_master(struct pci_dev *dev)
2064 {
2065 	__pci_set_master(dev, true);
2066 	pcibios_set_master(dev);
2067 }
2068 
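/*
 * Usage sketch: bus mastering is normally enabled right after the device
 * itself, before any DMA is set up:
 *
 *	rc = pci_enable_device(pdev);
 *	if (rc)
 *		return rc;
 *	pci_set_master(pdev);
 */
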
2069 /**
2070  * pci_clear_master - disables bus-mastering for device dev
2071  * @dev: the PCI device to disable
2072  */
2073 void pci_clear_master(struct pci_dev *dev)
2074 {
2075 	__pci_set_master(dev, false);
2076 }
2077 
2078 /**
2079  * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2080  * @dev: the PCI device for which MWI is to be enabled
2081  *
2082  * Helper function for pci_set_mwi.
2083  * Originally copied from drivers/net/acenic.c.
2084  * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2085  *
2086  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2087  */
2088 int pci_set_cacheline_size(struct pci_dev *dev)
2089 {
2090 	u8 cacheline_size;
2091 
2092 	if (!pci_cache_line_size)
2093 		return -EINVAL;
2094 
2095 	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2096 	   equal to or multiple of the right value. */
2097 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2098 	if (cacheline_size >= pci_cache_line_size &&
2099 	    (cacheline_size % pci_cache_line_size) == 0)
2100 		return 0;
2101 
2102 	/* Write the correct value. */
2103 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2104 	/* Read it back. */
2105 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2106 	if (cacheline_size == pci_cache_line_size)
2107 		return 0;
2108 
2109 	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2110 		   "supported\n", pci_cache_line_size << 2);
2111 
2112 	return -EINVAL;
2113 }
2114 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2115 
2116 #ifdef PCI_DISABLE_MWI
2117 int pci_set_mwi(struct pci_dev *dev)
2118 {
2119 	return 0;
2120 }
2121 
2122 int pci_try_set_mwi(struct pci_dev *dev)
2123 {
2124 	return 0;
2125 }
2126 
2127 void pci_clear_mwi(struct pci_dev *dev)
2128 {
2129 }
2130 
2131 #else
2132 
2133 /**
2134  * pci_set_mwi - enables memory-write-invalidate PCI transaction
2135  * @dev: the PCI device for which MWI is enabled
2136  *
2137  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2138  *
2139  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2140  */
2141 int
2142 pci_set_mwi(struct pci_dev *dev)
2143 {
2144 	int rc;
2145 	u16 cmd;
2146 
2147 	rc = pci_set_cacheline_size(dev);
2148 	if (rc)
2149 		return rc;
2150 
2151 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2152 	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
2153 		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2154 		cmd |= PCI_COMMAND_INVALIDATE;
2155 		pci_write_config_word(dev, PCI_COMMAND, cmd);
2156 	}
2157 
2158 	return 0;
2159 }
2160 
2161 /**
2162  * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2163  * @dev: the PCI device for which MWI is enabled
2164  *
2165  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2166  * Callers are not required to check the return value.
2167  *
2168  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2169  */
2170 int pci_try_set_mwi(struct pci_dev *dev)
2171 {
2172 	int rc = pci_set_mwi(dev);
2173 	return rc;
2174 }
2175 
2176 /**
2177  * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2178  * @dev: the PCI device to disable
2179  *
2180  * Disables PCI Memory-Write-Invalidate transaction on the device
2181  */
2182 void
2183 pci_clear_mwi(struct pci_dev *dev)
2184 {
2185 	u16 cmd;
2186 
2187 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2188 	if (cmd & PCI_COMMAND_INVALIDATE) {
2189 		cmd &= ~PCI_COMMAND_INVALIDATE;
2190 		pci_write_config_word(dev, PCI_COMMAND, cmd);
2191 	}
2192 }
2193 #endif /* ! PCI_DISABLE_MWI */
2194 
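/*
 * Usage sketch: MWI is a pure optimization, so callers normally use the
 * _try_ variant and carry on regardless of the result:
 *
 *	pci_try_set_mwi(pdev);	(no error check required)
 */
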
2195 /**
2196  * pci_intx - enables/disables PCI INTx for device dev
2197  * @pdev: the PCI device to operate on
2198  * @enable: boolean: whether to enable or disable PCI INTx
2199  *
2200  * Enables/disables PCI INTx for device dev
2201  */
2202 void
2203 pci_intx(struct pci_dev *pdev, int enable)
2204 {
2205 	u16 pci_command, new;
2206 
2207 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2208 
2209 	if (enable) {
2210 		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2211 	} else {
2212 		new = pci_command | PCI_COMMAND_INTX_DISABLE;
2213 	}
2214 
2215 	if (new != pci_command) {
2216 		struct pci_devres *dr;
2217 
2218 		pci_write_config_word(pdev, PCI_COMMAND, new);
2219 
2220 		dr = find_pci_dr(pdev);
2221 		if (dr && !dr->restore_intx) {
2222 			dr->restore_intx = 1;
2223 			dr->orig_intx = !enable;
2224 		}
2225 	}
2226 }
2227 
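/*
 * Usage sketch: the MSI core uses this primitive to mask the legacy
 * interrupt while MSI is in use, roughly pci_intx(pdev, 0) when enabling
 * MSI and pci_intx(pdev, 1) when falling back to INTx.
 */
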
2228 /**
2229  * pci_msi_off - disables any MSI or MSI-X capabilities
2230  * @dev: the PCI device to operate on
2231  *
2232  * If you want to use MSI, see pci_enable_msi() and friends.
2233  * This is a lower-level primitive that allows us to disable
2234  * MSI operation at the device level.
2235  */
2236 void pci_msi_off(struct pci_dev *dev)
2237 {
2238 	int pos;
2239 	u16 control;
2240 
2241 	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2242 	if (pos) {
2243 		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
2244 		control &= ~PCI_MSI_FLAGS_ENABLE;
2245 		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
2246 	}
2247 	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
2248 	if (pos) {
2249 		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
2250 		control &= ~PCI_MSIX_FLAGS_ENABLE;
2251 		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
2252 	}
2253 }
2254 
2255 #ifndef HAVE_ARCH_PCI_SET_DMA_MASK
2256 /*
2257  * These can be overridden by arch-specific implementations
2258  */
2259 int
2260 pci_set_dma_mask(struct pci_dev *dev, u64 mask)
2261 {
2262 	if (!pci_dma_supported(dev, mask))
2263 		return -EIO;
2264 
2265 	dev->dma_mask = mask;
2266 	dev_dbg(&dev->dev, "using %dbit DMA mask\n", fls64(mask));
2267 
2268 	return 0;
2269 }
2270 
2271 int
2272 pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
2273 {
2274 	if (!pci_dma_supported(dev, mask))
2275 		return -EIO;
2276 
2277 	dev->dev.coherent_dma_mask = mask;
2278 	dev_dbg(&dev->dev, "using %dbit consistent DMA mask\n", fls64(mask));
2279 
2280 	return 0;
2281 }
2282 #endif
2283 
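/*
 * Usage sketch: the common pattern is to ask for a 64-bit DMA mask and
 * fall back to 32 bits.  DMA_BIT_MASK() comes from <linux/dma-mapping.h>;
 * foo_set_dma() is an illustrative name.
 */
#if 0
static int foo_set_dma(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 0;	/* full 64-bit DMA */

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
		return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));

	return -EIO;		/* no usable mask */
}
#endif
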
2284 #ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE
2285 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
2286 {
2287 	return dma_set_max_seg_size(&dev->dev, size);
2288 }
2289 EXPORT_SYMBOL(pci_set_dma_max_seg_size);
2290 #endif
2291 
2292 #ifndef HAVE_ARCH_PCI_SET_DMA_SEGMENT_BOUNDARY
2293 int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
2294 {
2295 	return dma_set_seg_boundary(&dev->dev, mask);
2296 }
2297 EXPORT_SYMBOL(pci_set_dma_seg_boundary);
2298 #endif
2299 
2300 static int pcie_flr(struct pci_dev *dev, int probe)
2301 {
2302 	int i;
2303 	int pos;
2304 	u32 cap;
2305 	u16 status, control;
2306 
2307 	pos = pci_pcie_cap(dev);
2308 	if (!pos)
2309 		return -ENOTTY;
2310 
2311 	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
2312 	if (!(cap & PCI_EXP_DEVCAP_FLR))
2313 		return -ENOTTY;
2314 
2315 	if (probe)
2316 		return 0;
2317 
2318 	/* Wait for the Transaction Pending bit to clear */
2319 	for (i = 0; i < 4; i++) {
2320 		if (i)
2321 			msleep((1 << (i - 1)) * 100);
2322 
2323 		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
2324 		if (!(status & PCI_EXP_DEVSTA_TRPND))
2325 			goto clear;
2326 	}
2327 
2328 	dev_err(&dev->dev, "transaction is not cleared; "
2329 			"proceeding with reset anyway\n");
2330 
2331 clear:
2332 	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
2333 	control |= PCI_EXP_DEVCTL_BCR_FLR;
2334 	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
2335 
2336 	msleep(100);
2337 
2338 	return 0;
2339 }
2340 
2341 static int pci_af_flr(struct pci_dev *dev, int probe)
2342 {
2343 	int i;
2344 	int pos;
2345 	u8 cap;
2346 	u8 status;
2347 
2348 	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
2349 	if (!pos)
2350 		return -ENOTTY;
2351 
2352 	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
2353 	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
2354 		return -ENOTTY;
2355 
2356 	if (probe)
2357 		return 0;
2358 
2359 	/* Wait for the Transaction Pending bit to clear */
2360 	for (i = 0; i < 4; i++) {
2361 		if (i)
2362 			msleep((1 << (i - 1)) * 100);
2363 
2364 		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
2365 		if (!(status & PCI_AF_STATUS_TP))
2366 			goto clear;
2367 	}
2368 
2369 	dev_err(&dev->dev, "transaction is not cleared; "
2370 			"proceeding with reset anyway\n");
2371 
2372 clear:
2373 	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
2374 	msleep(100);
2375 
2376 	return 0;
2377 }
2378 
2379 static int pci_pm_reset(struct pci_dev *dev, int probe)
2380 {
2381 	u16 csr;
2382 
2383 	if (!dev->pm_cap)
2384 		return -ENOTTY;
2385 
2386 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
2387 	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
2388 		return -ENOTTY;
2389 
2390 	if (probe)
2391 		return 0;
2392 
2393 	if (dev->current_state != PCI_D0)
2394 		return -EINVAL;
2395 
2396 	csr &= ~PCI_PM_CTRL_STATE_MASK;
2397 	csr |= PCI_D3hot;
2398 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2399 	pci_dev_d3_sleep(dev);
2400 
2401 	csr &= ~PCI_PM_CTRL_STATE_MASK;
2402 	csr |= PCI_D0;
2403 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2404 	pci_dev_d3_sleep(dev);
2405 
2406 	return 0;
2407 }
2408 
2409 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
2410 {
2411 	u16 ctrl;
2412 	struct pci_dev *pdev;
2413 
2414 	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
2415 		return -ENOTTY;
2416 
2417 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
2418 		if (pdev != dev)
2419 			return -ENOTTY;
2420 
2421 	if (probe)
2422 		return 0;
2423 
2424 	pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
2425 	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
2426 	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2427 	msleep(100);
2428 
2429 	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
2430 	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2431 	msleep(100);
2432 
2433 	return 0;
2434 }
2435 
2436 static int pci_dev_reset(struct pci_dev *dev, int probe)
2437 {
2438 	int rc;
2439 
2440 	might_sleep();
2441 
2442 	if (!probe) {
2443 		pci_block_user_cfg_access(dev);
2444 		/* block PM suspend, driver probe, etc. */
2445 		down(&dev->dev.sem);
2446 	}
2447 
2448 	rc = pci_dev_specific_reset(dev, probe);
2449 	if (rc != -ENOTTY)
2450 		goto done;
2451 
2452 	rc = pcie_flr(dev, probe);
2453 	if (rc != -ENOTTY)
2454 		goto done;
2455 
2456 	rc = pci_af_flr(dev, probe);
2457 	if (rc != -ENOTTY)
2458 		goto done;
2459 
2460 	rc = pci_pm_reset(dev, probe);
2461 	if (rc != -ENOTTY)
2462 		goto done;
2463 
2464 	rc = pci_parent_bus_reset(dev, probe);
2465 done:
2466 	if (!probe) {
2467 		up(&dev->dev.sem);
2468 		pci_unblock_user_cfg_access(dev);
2469 	}
2470 
2471 	return rc;
2472 }
2473 
2474 /**
2475  * __pci_reset_function - reset a PCI device function
2476  * @dev: PCI device to reset
2477  *
2478  * Some devices allow an individual function to be reset without affecting
2479  * other functions in the same device.  The PCI device must be responsive
2480  * to PCI config space in order to use this function.
2481  *
2482  * The device function is presumed to be unused when this function is called.
2483  * Resetting the device will make the contents of PCI configuration space
2484  * random, so any caller of this must be prepared to reinitialise the
2485  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
2486  * etc.
2487  *
2488  * Returns 0 if the device function was successfully reset or negative if the
2489  * device doesn't support resetting a single function.
2490  */
2491 int __pci_reset_function(struct pci_dev *dev)
2492 {
2493 	return pci_dev_reset(dev, 0);
2494 }
2495 EXPORT_SYMBOL_GPL(__pci_reset_function);
2496 
2497 /**
2498  * pci_probe_reset_function - check whether the device can be safely reset
2499  * @dev: PCI device to reset
2500  *
2501  * Some devices allow an individual function to be reset without affecting
2502  * other functions in the same device.  The PCI device must be responsive
2503  * to PCI config space in order to use this function.
2504  *
2505  * Returns 0 if the device function can be reset or negative if the
2506  * device doesn't support resetting a single function.
2507  */
2508 int pci_probe_reset_function(struct pci_dev *dev)
2509 {
2510 	return pci_dev_reset(dev, 1);
2511 }
2512 
2513 /**
2514  * pci_reset_function - quiesce and reset a PCI device function
2515  * @dev: PCI device to reset
2516  *
2517  * Some devices allow an individual function to be reset without affecting
2518  * other functions in the same device.  The PCI device must be responsive
2519  * to PCI config space in order to use this function.
2520  *
2521  * This function does not just reset the PCI portion of a device, but
2522  * clears all the state associated with the device.  This function differs
2523  * from __pci_reset_function in that it saves and restores device state
2524  * over the reset.
2525  *
2526  * Returns 0 if the device function was successfully reset or negative if the
2527  * device doesn't support resetting a single function.
2528  */
2529 int pci_reset_function(struct pci_dev *dev)
2530 {
2531 	int rc;
2532 
2533 	rc = pci_dev_reset(dev, 1);
2534 	if (rc)
2535 		return rc;
2536 
2537 	pci_save_state(dev);
2538 
2539 	/*
2540 	 * both INTx and MSI are disabled after the Interrupt Disable bit
2541 	 * is set and the Bus Master bit is cleared.
2542 	 */
2543 	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
2544 
2545 	rc = pci_dev_reset(dev, 0);
2546 
2547 	pci_restore_state(dev);
2548 
2549 	return rc;
2550 }
2551 EXPORT_SYMBOL_GPL(pci_reset_function);
2552 
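/*
 * Usage sketch: a driver that has stopped all traffic to its device can
 * ask for a function-level reset; pci_reset_function() saves and restores
 * config space around the reset.  foo_recover() is an illustrative name.
 */
#if 0
static int foo_recover(struct pci_dev *pdev)
{
	int rc;

	rc = pci_reset_function(pdev);
	if (rc)
		dev_err(&pdev->dev, "function reset failed: %d\n", rc);
	return rc;
}
#endif
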
2553 /**
2554  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
2555  * @dev: PCI device to query
2556  *
2557  * Returns mmrbc: maximum designed memory read count in bytes
2558  *    or appropriate error value.
2559  */
2560 int pcix_get_max_mmrbc(struct pci_dev *dev)
2561 {
2562 	int err, cap;
2563 	u32 stat;
2564 
2565 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2566 	if (!cap)
2567 		return -EINVAL;
2568 
2569 	err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
2570 	if (err)
2571 		return -EINVAL;
2572 
2573 	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
2574 }
2575 EXPORT_SYMBOL(pcix_get_max_mmrbc);
2576 
2577 /**
2578  * pcix_get_mmrbc - get PCI-X maximum memory read byte count
2579  * @dev: PCI device to query
2580  *
2581  * Returns mmrbc: maximum memory read count in bytes
2582  *    or appropriate error value.
2583  */
2584 int pcix_get_mmrbc(struct pci_dev *dev)
2585 {
2586 	int cap;
2587 	u16 cmd;
2588 
2589 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2590 	if (!cap)
2591 		return -EINVAL;
2592 
2593 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
2594 		return -EINVAL;
2595 
2596 	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
2597 }
2599 EXPORT_SYMBOL(pcix_get_mmrbc);
2600 
2601 /**
2602  * pcix_set_mmrbc - set PCI-X maximum memory read byte count
2603  * @dev: PCI device to query
2604  * @mmrbc: maximum memory read count in bytes
2605  *    valid values are 512, 1024, 2048, 4096
2606  *
2607  * If possible, sets the maximum memory read byte count; some bridges have
2608  * errata that prevent this.
2609  */
2610 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
2611 {
2612 	int cap;
2613 	u32 stat, v, o;
2614 	u16 cmd;
2615 
2616 	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
2617 		return -EINVAL;
2618 
2619 	v = ffs(mmrbc) - 10;
2620 
2621 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2622 	if (!cap)
2623 		return -EINVAL;
2624 
2625 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
2626 		return -EINVAL;
2627 
2628 	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
2629 		return -E2BIG;
2630 
2631 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
2632 		return -EINVAL;
2633 
2634 	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
2635 	if (o != v) {
2636 		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
2637 			return -EIO;
2638 
2639 		cmd &= ~PCI_X_CMD_MAX_READ;
2640 		cmd |= v << 2;
2641 		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
2642 			return -EIO;
2643 	}
2644 	return 0;
2645 }
2648 EXPORT_SYMBOL(pcix_set_mmrbc);
2649 
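/*
 * Usage sketch: clamp the requested MMRBC to what the device was designed
 * for before programming it.  foo_tune_pcix() is an illustrative name.
 */
#if 0
static void foo_tune_pcix(struct pci_dev *pdev)
{
	int max = pcix_get_max_mmrbc(pdev);

	if (max > 0)
		pcix_set_mmrbc(pdev, min(max, 4096));
}
#endif
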
2650 /**
2651  * pcie_get_readrq - get PCI Express read request size
2652  * @dev: PCI device to query
2653  *
2654  * Returns maximum memory read request in bytes
2655  *    or appropriate error value.
2656  */
2657 int pcie_get_readrq(struct pci_dev *dev)
2658 {
2659 	int ret, cap;
2660 	u16 ctl;
2661 
2662 	cap = pci_pcie_cap(dev);
2663 	if (!cap)
2664 		return -EINVAL;
2665 
2666 	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
2667 	if (!ret)
2668 		ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
2669 
2670 	return ret;
2671 }
2672 EXPORT_SYMBOL(pcie_get_readrq);
2673 
2674 /**
2675  * pcie_set_readrq - set PCI Express maximum memory read request
2676  * @dev: PCI device to query
2677  * @rq: maximum memory read count in bytes
2678  *    valid values are 128, 256, 512, 1024, 2048, 4096
2679  *
2680  * If possible, sets the maximum read byte count.
2681  */
2682 int pcie_set_readrq(struct pci_dev *dev, int rq)
2683 {
2684 	int cap, err = -EINVAL;
2685 	u16 ctl, v;
2686 
2687 	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
2688 		goto out;
2689 
2690 	v = (ffs(rq) - 8) << 12;
2691 
2692 	cap = pci_pcie_cap(dev);
2693 	if (!cap)
2694 		goto out;
2695 
2696 	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
2697 	if (err)
2698 		goto out;
2699 
2700 	if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
2701 		ctl &= ~PCI_EXP_DEVCTL_READRQ;
2702 		ctl |= v;
2703 		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
2704 	}
2705 
2706 out:
2707 	return err;
2708 }
2709 EXPORT_SYMBOL(pcie_set_readrq);
2710 
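/*
 * Usage sketch: some high-throughput drivers raise the read request size,
 * checking the result since not every function has a PCIe capability:
 *
 *	if (pcie_set_readrq(pdev, 4096))
 *		dev_warn(&pdev->dev, "could not set readrq\n");
 */
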
2711 /**
2712  * pci_select_bars - Make BAR mask from the type of resource
2713  * @dev: the PCI device for which BAR mask is made
2714  * @flags: resource type mask to be selected
2715  *
2716  * This helper routine makes a BAR mask from the given resource type.
2717  */
2718 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
2719 {
2720 	int i, bars = 0;
2721 	for (i = 0; i < PCI_NUM_RESOURCES; i++)
2722 		if (pci_resource_flags(dev, i) & flags)
2723 			bars |= (1 << i);
2724 	return bars;
2725 }
2726 
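/*
 * Usage sketch: request only the memory BARs of a device, leaving any I/O
 * port BARs alone.  foo_request_mem_bars() is an illustrative name.
 */
#if 0
static int foo_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "foo");
}
#endif
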
2727 /**
2728  * pci_resource_bar - get position of the BAR associated with a resource
2729  * @dev: the PCI device
2730  * @resno: the resource number
2731  * @type: the BAR type to be filled in
2732  *
2733  * Returns BAR position in config space, or 0 if the BAR is invalid.
2734  */
2735 int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
2736 {
2737 	int reg;
2738 
2739 	if (resno < PCI_ROM_RESOURCE) {
2740 		*type = pci_bar_unknown;
2741 		return PCI_BASE_ADDRESS_0 + 4 * resno;
2742 	} else if (resno == PCI_ROM_RESOURCE) {
2743 		*type = pci_bar_mem32;
2744 		return dev->rom_base_reg;
2745 	} else if (resno < PCI_BRIDGE_RESOURCES) {
2746 		/* device specific resource */
2747 		reg = pci_iov_resource_bar(dev, resno, type);
2748 		if (reg)
2749 			return reg;
2750 	}
2751 
2752 	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
2753 	return 0;
2754 }
2755 
2756 /**
2757  * pci_set_vga_state - set VGA decode state on device and parents if requested
2758  * @dev: the PCI device
2759  * @decode: true = enable decoding, false = disable decoding
2760  * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
2761  * @change_bridge: traverse ancestors and change bridges
2762  */
2763 int pci_set_vga_state(struct pci_dev *dev, bool decode,
2764 		      unsigned int command_bits, bool change_bridge)
2765 {
2766 	struct pci_bus *bus;
2767 	struct pci_dev *bridge;
2768 	u16 cmd;
2769 
2770 	WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
2771 
2772 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2773 	if (decode)
2774 		cmd |= command_bits;
2775 	else
2776 		cmd &= ~command_bits;
2777 	pci_write_config_word(dev, PCI_COMMAND, cmd);
2778 
2779 	if (!change_bridge)
2780 		return 0;
2781 
2782 	bus = dev->bus;
2783 	while (bus) {
2784 		bridge = bus->self;
2785 		if (bridge) {
2786 			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
2787 					     &cmd);
2788 			if (decode)
2789 				cmd |= PCI_BRIDGE_CTL_VGA;
2790 			else
2791 				cmd &= ~PCI_BRIDGE_CTL_VGA;
2792 			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
2793 					      cmd);
2794 		}
2795 		bus = bus->parent;
2796 	}
2797 	return 0;
2798 }
2799 
2800 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
2801 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
2802 static DEFINE_SPINLOCK(resource_alignment_lock);
2803 
2804 /**
2805  * pci_specified_resource_alignment - get resource alignment specified by user.
2806  * @dev: the PCI device to query
2807  *
2808  * RETURNS: Resource alignment if it is specified.
2809  *          Zero if it is not specified.
2810  */
2811 resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
2812 {
2813 	int seg, bus, slot, func, align_order, count;
2814 	resource_size_t align = 0;
2815 	char *p;
2816 
2817 	spin_lock(&resource_alignment_lock);
2818 	p = resource_alignment_param;
2819 	while (*p) {
2820 		count = 0;
2821 		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
2822 							p[count] == '@') {
2823 			p += count + 1;
2824 		} else {
2825 			align_order = -1;
2826 		}
2827 		if (sscanf(p, "%x:%x:%x.%x%n",
2828 			&seg, &bus, &slot, &func, &count) != 4) {
2829 			seg = 0;
2830 			if (sscanf(p, "%x:%x.%x%n",
2831 					&bus, &slot, &func, &count) != 3) {
2832 				/* Invalid format */
2833 				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
2834 					p);
2835 				break;
2836 			}
2837 		}
2838 		p += count;
2839 		if (seg == pci_domain_nr(dev->bus) &&
2840 			bus == dev->bus->number &&
2841 			slot == PCI_SLOT(dev->devfn) &&
2842 			func == PCI_FUNC(dev->devfn)) {
2843 			if (align_order == -1) {
2844 				align = PAGE_SIZE;
2845 			} else {
2846 				align = 1 << align_order;
2847 			}
2848 			/* Found */
2849 			break;
2850 		}
2851 		if (*p != ';' && *p != ',') {
2852 			/* End of param or invalid format */
2853 			break;
2854 		}
2855 		p++;
2856 	}
2857 	spin_unlock(&resource_alignment_lock);
2858 	return align;
2859 }
2860 
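/*
 * The parameter parsed above comes from the kernel command line, e.g.
 *
 *	pci=resource_alignment=20@08:01.0;12@00:1f.2
 *
 * i.e. "[<order>@][<domain>:]<bus>:<slot>.<func>" entries separated by
 * ';' or ','; when <order> is omitted the alignment defaults to PAGE_SIZE.
 */
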
2861 /**
2862  * pci_is_reassigndev - check if the specified PCI device is a reassignment target
2863  * @dev: the PCI device to check
2864  *
2865  * RETURNS: non-zero if the PCI device is a target device to reassign,
2866  *          zero otherwise.
2867  */
2868 int pci_is_reassigndev(struct pci_dev *dev)
2869 {
2870 	return (pci_specified_resource_alignment(dev) != 0);
2871 }
2872 
2873 ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
2874 {
2875 	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
2876 		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
2877 	spin_lock(&resource_alignment_lock);
2878 	strncpy(resource_alignment_param, buf, count);
2879 	resource_alignment_param[count] = '\0';
2880 	spin_unlock(&resource_alignment_lock);
2881 	return count;
2882 }
2883 
2884 ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
2885 {
2886 	size_t count;
2887 	spin_lock(&resource_alignment_lock);
2888 	count = snprintf(buf, size, "%s", resource_alignment_param);
2889 	spin_unlock(&resource_alignment_lock);
2890 	return count;
2891 }
2892 
2893 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
2894 {
2895 	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
2896 }
2897 
2898 static ssize_t pci_resource_alignment_store(struct bus_type *bus,
2899 					const char *buf, size_t count)
2900 {
2901 	return pci_set_resource_alignment_param(buf, count);
2902 }
2903 
2904 BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
2905 					pci_resource_alignment_store);
2906 
2907 static int __init pci_resource_alignment_sysfs_init(void)
2908 {
2909 	return bus_create_file(&pci_bus_type,
2910 					&bus_attr_resource_alignment);
2911 }
2912 
2913 late_initcall(pci_resource_alignment_sysfs_init);
2914 
2915 static void __devinit pci_no_domains(void)
2916 {
2917 #ifdef CONFIG_PCI_DOMAINS
2918 	pci_domains_supported = 0;
2919 #endif
2920 }
2921 
2922 /**
2923  * pci_ext_cfg_avail - can we access extended PCI config space?
2924  * @dev: The PCI device of the root bridge.
2925  *
2926  * Returns 1 if we can access PCI extended config space (offsets
2927  * greater than 0xff). This is the default implementation. Architecture
2928  * implementations can override this.
2929  */
2930 int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
2931 {
2932 	return 1;
2933 }
2934 
2935 void __weak pci_fixup_cardbus(struct pci_bus *bus)
2936 {
2937 }
2938 EXPORT_SYMBOL(pci_fixup_cardbus);
2939 
2940 static int __init pci_setup(char *str)
2941 {
2942 	while (str) {
2943 		char *k = strchr(str, ',');
2944 		if (k)
2945 			*k++ = 0;
2946 		if (*str && (str = pcibios_setup(str)) && *str) {
2947 			if (!strcmp(str, "nomsi")) {
2948 				pci_no_msi();
2949 			} else if (!strcmp(str, "noaer")) {
2950 				pci_no_aer();
2951 			} else if (!strcmp(str, "nodomains")) {
2952 				pci_no_domains();
2953 			} else if (!strncmp(str, "cbiosize=", 9)) {
2954 				pci_cardbus_io_size = memparse(str + 9, &str);
2955 			} else if (!strncmp(str, "cbmemsize=", 10)) {
2956 				pci_cardbus_mem_size = memparse(str + 10, &str);
2957 			} else if (!strncmp(str, "resource_alignment=", 19)) {
2958 				pci_set_resource_alignment_param(str + 19,
2959 							strlen(str + 19));
2960 			} else if (!strncmp(str, "ecrc=", 5)) {
2961 				pcie_ecrc_get_policy(str + 5);
2962 			} else if (!strncmp(str, "hpiosize=", 9)) {
2963 				pci_hotplug_io_size = memparse(str + 9, &str);
2964 			} else if (!strncmp(str, "hpmemsize=", 10)) {
2965 				pci_hotplug_mem_size = memparse(str + 10, &str);
2966 			} else {
2967 				printk(KERN_ERR "PCI: Unknown option `%s'\n",
2968 						str);
2969 			}
2970 		}
2971 		str = k;
2972 	}
2973 	return 0;
2974 }
2975 early_param("pci", pci_setup);
2976 
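/*
 * Example command line combining the options handled above:
 *
 *	pci=nomsi,cbmemsize=32M,hpiosize=4K
 */
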
2977 EXPORT_SYMBOL(pci_reenable_device);
2978 EXPORT_SYMBOL(pci_enable_device_io);
2979 EXPORT_SYMBOL(pci_enable_device_mem);
2980 EXPORT_SYMBOL(pci_enable_device);
2981 EXPORT_SYMBOL(pcim_enable_device);
2982 EXPORT_SYMBOL(pcim_pin_device);
2983 EXPORT_SYMBOL(pci_disable_device);
2984 EXPORT_SYMBOL(pci_find_capability);
2985 EXPORT_SYMBOL(pci_bus_find_capability);
2986 EXPORT_SYMBOL(pci_release_regions);
2987 EXPORT_SYMBOL(pci_request_regions);
2988 EXPORT_SYMBOL(pci_request_regions_exclusive);
2989 EXPORT_SYMBOL(pci_release_region);
2990 EXPORT_SYMBOL(pci_request_region);
2991 EXPORT_SYMBOL(pci_request_region_exclusive);
2992 EXPORT_SYMBOL(pci_release_selected_regions);
2993 EXPORT_SYMBOL(pci_request_selected_regions);
2994 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
2995 EXPORT_SYMBOL(pci_set_master);
2996 EXPORT_SYMBOL(pci_clear_master);
2997 EXPORT_SYMBOL(pci_set_mwi);
2998 EXPORT_SYMBOL(pci_try_set_mwi);
2999 EXPORT_SYMBOL(pci_clear_mwi);
3000 EXPORT_SYMBOL_GPL(pci_intx);
3001 EXPORT_SYMBOL(pci_set_dma_mask);
3002 EXPORT_SYMBOL(pci_set_consistent_dma_mask);
3003 EXPORT_SYMBOL(pci_assign_resource);
3004 EXPORT_SYMBOL(pci_find_parent_resource);
3005 EXPORT_SYMBOL(pci_select_bars);
3006 
3007 EXPORT_SYMBOL(pci_set_power_state);
3008 EXPORT_SYMBOL(pci_save_state);
3009 EXPORT_SYMBOL(pci_restore_state);
3010 EXPORT_SYMBOL(pci_pme_capable);
3011 EXPORT_SYMBOL(pci_pme_active);
3012 EXPORT_SYMBOL(pci_wake_from_d3);
3013 EXPORT_SYMBOL(pci_target_state);
3014 EXPORT_SYMBOL(pci_prepare_to_sleep);
3015 EXPORT_SYMBOL(pci_back_from_sleep);
3016 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
3017 
3018