xref: /openbmc/linux/drivers/pci/pci.c (revision 0bf49ffb)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * PCI Bus Services, see include/linux/pci.h for further explanation.
4  *
5  * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
6  * David Mosberger-Tang
7  *
8  * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
9  */
10 
11 #include <linux/acpi.h>
12 #include <linux/kernel.h>
13 #include <linux/delay.h>
14 #include <linux/dmi.h>
15 #include <linux/init.h>
16 #include <linux/msi.h>
17 #include <linux/of.h>
18 #include <linux/pci.h>
19 #include <linux/pm.h>
20 #include <linux/slab.h>
21 #include <linux/module.h>
22 #include <linux/spinlock.h>
23 #include <linux/string.h>
24 #include <linux/log2.h>
25 #include <linux/logic_pio.h>
26 #include <linux/pm_wakeup.h>
27 #include <linux/interrupt.h>
28 #include <linux/device.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/pci_hotplug.h>
31 #include <linux/vmalloc.h>
32 #include <asm/dma.h>
33 #include <linux/aer.h>
34 #include "pci.h"
35 
36 DEFINE_MUTEX(pci_slot_mutex);
37 
38 const char *pci_power_names[] = {
39 	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
40 };
41 EXPORT_SYMBOL_GPL(pci_power_names);
42 
43 int isa_dma_bridge_buggy;
44 EXPORT_SYMBOL(isa_dma_bridge_buggy);
45 
46 int pci_pci_problems;
47 EXPORT_SYMBOL(pci_pci_problems);
48 
49 unsigned int pci_pm_d3hot_delay;
50 
51 static void pci_pme_list_scan(struct work_struct *work);
52 
53 static LIST_HEAD(pci_pme_list);
54 static DEFINE_MUTEX(pci_pme_list_mutex);
55 static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
56 
57 struct pci_pme_device {
58 	struct list_head list;
59 	struct pci_dev *dev;
60 };
61 
62 #define PME_TIMEOUT 1000 /* How long between PME checks */
63 
64 static void pci_dev_d3_sleep(struct pci_dev *dev)
65 {
66 	unsigned int delay = dev->d3hot_delay;
67 
68 	if (delay < pci_pm_d3hot_delay)
69 		delay = pci_pm_d3hot_delay;
70 
71 	if (delay)
72 		msleep(delay);
73 }
74 
75 #ifdef CONFIG_PCI_DOMAINS
76 int pci_domains_supported = 1;
77 #endif
78 
79 #define DEFAULT_CARDBUS_IO_SIZE		(256)
80 #define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
81 /* pci=cbmemsize=nnM,cbiosize=nn can override this */
82 unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
83 unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
84 
85 #define DEFAULT_HOTPLUG_IO_SIZE		(256)
86 #define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
87 #define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
88 /* hpiosize=nn can override this */
89 unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
90 /*
91  * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
92  * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
93  * pci=hpmemsize=nnM overrides both
94  */
95 unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
96 unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;
97 
98 #define DEFAULT_HOTPLUG_BUS_SIZE	1
99 unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
100 
101 
102 /* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
103 #ifdef CONFIG_PCIE_BUS_TUNE_OFF
104 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
105 #elif defined CONFIG_PCIE_BUS_SAFE
106 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
107 #elif defined CONFIG_PCIE_BUS_PERFORMANCE
108 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
109 #elif defined CONFIG_PCIE_BUS_PEER2PEER
110 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
111 #else
112 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
113 #endif
114 
115 /*
116  * The default CLS is used if arch didn't set CLS explicitly and not
117  * all pci devices agree on the same value.  Arch can override either
118  * the dfl or actual value as it sees fit.  Don't forget this is
119  * measured in 32-bit words, not bytes.
120  */
121 u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
122 u8 pci_cache_line_size;
123 
124 /*
125  * If we set up a device for bus mastering, we need to check the latency
126  * timer as certain BIOSes forget to set it properly.
127  */
128 unsigned int pcibios_max_latency = 255;
129 
130 /* If set, the PCIe ARI capability will not be used. */
131 static bool pcie_ari_disabled;
132 
133 /* If set, the PCIe ATS capability will not be used. */
134 static bool pcie_ats_disabled;
135 
136 /* If set, the PCI config space of each device is printed during boot. */
137 bool pci_early_dump;
138 
139 bool pci_ats_disabled(void)
140 {
141 	return pcie_ats_disabled;
142 }
143 EXPORT_SYMBOL_GPL(pci_ats_disabled);
144 
145 /* Disable bridge_d3 for all PCIe ports */
146 static bool pci_bridge_d3_disable;
147 /* Force bridge_d3 for all PCIe ports */
148 static bool pci_bridge_d3_force;
149 
150 static int __init pcie_port_pm_setup(char *str)
151 {
152 	if (!strcmp(str, "off"))
153 		pci_bridge_d3_disable = true;
154 	else if (!strcmp(str, "force"))
155 		pci_bridge_d3_force = true;
156 	return 1;
157 }
158 __setup("pcie_port_pm=", pcie_port_pm_setup);
159 
160 /* Time to wait after a reset for device to become responsive */
161 #define PCIE_RESET_READY_POLL_MS 60000
162 
163 /**
164  * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
165  * @bus: pointer to PCI bus structure to search
166  *
167  * Given a PCI bus, returns the highest PCI bus number present in the set
168  * including the given PCI bus and its list of child PCI buses.
169  */
170 unsigned char pci_bus_max_busnr(struct pci_bus *bus)
171 {
172 	struct pci_bus *tmp;
173 	unsigned char max, n;
174 
175 	max = bus->busn_res.end;
176 	list_for_each_entry(tmp, &bus->children, node) {
177 		n = pci_bus_max_busnr(tmp);
178 		if (n > max)
179 			max = n;
180 	}
181 	return max;
182 }
183 EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
184 
185 /**
186  * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
187  * @pdev: the PCI device
188  *
189  * Returns error bits set in PCI_STATUS and clears them.
190  */
191 int pci_status_get_and_clear_errors(struct pci_dev *pdev)
192 {
193 	u16 status;
194 	int ret;
195 
196 	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
197 	if (ret != PCIBIOS_SUCCESSFUL)
198 		return -EIO;
199 
200 	status &= PCI_STATUS_ERROR_BITS;
201 	if (status)
202 		pci_write_config_word(pdev, PCI_STATUS, status);
203 
204 	return status;
205 }
206 EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
207 
208 #ifdef CONFIG_HAS_IOMEM
209 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
210 {
211 	struct resource *res = &pdev->resource[bar];
212 
213 	/*
214 	 * Make sure the BAR is actually a memory resource, not an IO resource
215 	 */
216 	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
217 		pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
218 		return NULL;
219 	}
220 	return ioremap(res->start, resource_size(res));
221 }
222 EXPORT_SYMBOL_GPL(pci_ioremap_bar);
223 
224 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
225 {
226 	/*
227 	 * Make sure the BAR is actually a memory resource, not an IO resource
228 	 */
229 	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
230 		WARN_ON(1);
231 		return NULL;
232 	}
233 	return ioremap_wc(pci_resource_start(pdev, bar),
234 			  pci_resource_len(pdev, bar));
235 }
236 EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
237 #endif
238 
239 /**
240  * pci_dev_str_match_path - test if a path string matches a device
241  * @dev: the PCI device to test
242  * @path: string to match the device against
243  * @endptr: pointer to the string after the match
244  *
245  * Test if a string (typically from a kernel parameter) formatted as a
246  * path of device/function addresses matches a PCI device. The string must
247  * be of the form:
248  *
249  *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
250  *
251  * A path for a device can be obtained using 'lspci -t'.  Using a path
252  * is more robust against bus renumbering than using only a single bus,
253  * device and function address.
254  *
255  * Returns 1 if the string matches the device, 0 if it does not and
256  * a negative error code if it fails to parse the string.
257  */
258 static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
259 				  const char **endptr)
260 {
261 	int ret;
262 	int seg, bus, slot, func;
263 	char *wpath, *p;
264 	char end;
265 
266 	*endptr = strchrnul(path, ';');
267 
268 	wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
269 	if (!wpath)
270 		return -ENOMEM;
271 
272 	while (1) {
273 		p = strrchr(wpath, '/');
274 		if (!p)
275 			break;
276 		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
277 		if (ret != 2) {
278 			ret = -EINVAL;
279 			goto free_and_exit;
280 		}
281 
282 		if (dev->devfn != PCI_DEVFN(slot, func)) {
283 			ret = 0;
284 			goto free_and_exit;
285 		}
286 
287 		/*
288 		 * Note: we don't need to get a reference to the upstream
289 		 * bridge because we hold a reference to the top level
290 		 * device which should hold a reference to the bridge,
291 		 * and so on.
292 		 */
293 		dev = pci_upstream_bridge(dev);
294 		if (!dev) {
295 			ret = 0;
296 			goto free_and_exit;
297 		}
298 
299 		*p = 0;
300 	}
301 
302 	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
303 		     &func, &end);
304 	if (ret != 4) {
305 		seg = 0;
306 		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
307 		if (ret != 3) {
308 			ret = -EINVAL;
309 			goto free_and_exit;
310 		}
311 	}
312 
313 	ret = (seg == pci_domain_nr(dev->bus) &&
314 	       bus == dev->bus->number &&
315 	       dev->devfn == PCI_DEVFN(slot, func));
316 
317 free_and_exit:
318 	kfree(wpath);
319 	return ret;
320 }
321 
322 /**
323  * pci_dev_str_match - test if a string matches a device
324  * @dev: the PCI device to test
325  * @p: string to match the device against
326  * @endptr: pointer to the string after the match
327  *
328  * Test if a string (typically from a kernel parameter) matches a specified
329  * PCI device. The string may be of one of the following formats:
330  *
331  *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
332  *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
333  *
334  * The first format specifies a PCI bus/device/function address which
335  * may change if new hardware is inserted, if motherboard firmware changes,
336  * or due to changes caused in kernel parameters. If the domain is
337  * left unspecified, it is taken to be 0.  In order to be robust against
338  * bus renumbering issues, a path of PCI device/function numbers may be used
339  * to address the specific device.  The path for a device can be determined
340  * through the use of 'lspci -t'.
341  *
342  * The second format matches devices using IDs in the configuration
343  * space which may match multiple devices in the system. A value of 0
344  * for any field will match all devices. (Note: this differs from
345  * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
346  * legacy reasons and convenience so users don't have to specify
347  * FFFFFFFFs on the command line.)
348  *
349  * Returns 1 if the string matches the device, 0 if it does not and
350  * a negative error code if the string cannot be parsed.
351  */
352 static int pci_dev_str_match(struct pci_dev *dev, const char *p,
353 			     const char **endptr)
354 {
355 	int ret;
356 	int count;
357 	unsigned short vendor, device, subsystem_vendor, subsystem_device;
358 
359 	if (strncmp(p, "pci:", 4) == 0) {
360 		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
361 		p += 4;
362 		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
363 			     &subsystem_vendor, &subsystem_device, &count);
364 		if (ret != 4) {
365 			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
366 			if (ret != 2)
367 				return -EINVAL;
368 
369 			subsystem_vendor = 0;
370 			subsystem_device = 0;
371 		}
372 
373 		p += count;
374 
375 		if ((!vendor || vendor == dev->vendor) &&
376 		    (!device || device == dev->device) &&
377 		    (!subsystem_vendor ||
378 			    subsystem_vendor == dev->subsystem_vendor) &&
379 		    (!subsystem_device ||
380 			    subsystem_device == dev->subsystem_device))
381 			goto found;
382 	} else {
383 		/*
384 		 * PCI Bus, Device, Function IDs are specified
385 		 * (optionally, may include a path of devfns following it)
386 		 */
387 		ret = pci_dev_str_match_path(dev, p, &p);
388 		if (ret < 0)
389 			return ret;
390 		else if (ret)
391 			goto found;
392 	}
393 
394 	*endptr = p;
395 	return 0;
396 
397 found:
398 	*endptr = p;
399 	return 1;
400 }
401 
402 static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
403 				  u8 pos, int cap, int *ttl)
404 {
405 	u8 id;
406 	u16 ent;
407 
408 	pci_bus_read_config_byte(bus, devfn, pos, &pos);
409 
410 	while ((*ttl)--) {
411 		if (pos < 0x40)
412 			break;
413 		pos &= ~3;
414 		pci_bus_read_config_word(bus, devfn, pos, &ent);
415 
416 		id = ent & 0xff;
417 		if (id == 0xff)
418 			break;
419 		if (id == cap)
420 			return pos;
421 		pos = (ent >> 8);
422 	}
423 	return 0;
424 }
425 
426 static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
427 			      u8 pos, int cap)
428 {
429 	int ttl = PCI_FIND_CAP_TTL;
430 
431 	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
432 }
433 
434 u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
435 {
436 	return __pci_find_next_cap(dev->bus, dev->devfn,
437 				   pos + PCI_CAP_LIST_NEXT, cap);
438 }
439 EXPORT_SYMBOL_GPL(pci_find_next_capability);
440 
441 static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
442 				    unsigned int devfn, u8 hdr_type)
443 {
444 	u16 status;
445 
446 	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
447 	if (!(status & PCI_STATUS_CAP_LIST))
448 		return 0;
449 
450 	switch (hdr_type) {
451 	case PCI_HEADER_TYPE_NORMAL:
452 	case PCI_HEADER_TYPE_BRIDGE:
453 		return PCI_CAPABILITY_LIST;
454 	case PCI_HEADER_TYPE_CARDBUS:
455 		return PCI_CB_CAPABILITY_LIST;
456 	}
457 
458 	return 0;
459 }
460 
461 /**
462  * pci_find_capability - query for devices' capabilities
463  * @dev: PCI device to query
464  * @cap: capability code
465  *
466  * Tell if a device supports a given PCI capability.
467  * Returns the address of the requested capability structure within the
468  * device's PCI configuration space or 0 in case the device does not
469  * support it.  Possible values for @cap include:
470  *
471  *  %PCI_CAP_ID_PM           Power Management
472  *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
473  *  %PCI_CAP_ID_VPD          Vital Product Data
474  *  %PCI_CAP_ID_SLOTID       Slot Identification
475  *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
476  *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
477  *  %PCI_CAP_ID_PCIX         PCI-X
478  *  %PCI_CAP_ID_EXP          PCI Express
479  */
480 u8 pci_find_capability(struct pci_dev *dev, int cap)
481 {
482 	u8 pos;
483 
484 	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
485 	if (pos)
486 		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
487 
488 	return pos;
489 }
490 EXPORT_SYMBOL(pci_find_capability);
491 
492 /**
493  * pci_bus_find_capability - query for devices' capabilities
494  * @bus: the PCI bus to query
495  * @devfn: PCI device to query
496  * @cap: capability code
497  *
498  * Like pci_find_capability() but works for PCI devices that do not have a
499  * pci_dev structure set up yet.
500  *
501  * Returns the address of the requested capability structure within the
502  * device's PCI configuration space or 0 in case the device does not
503  * support it.
504  */
505 u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
506 {
507 	u8 hdr_type, pos;
508 
509 	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
510 
511 	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
512 	if (pos)
513 		pos = __pci_find_next_cap(bus, devfn, pos, cap);
514 
515 	return pos;
516 }
517 EXPORT_SYMBOL(pci_bus_find_capability);
518 
519 /**
520  * pci_find_next_ext_capability - Find an extended capability
521  * @dev: PCI device to query
522  * @start: address at which to start looking (0 to start at beginning of list)
523  * @cap: capability code
524  *
525  * Returns the address of the next matching extended capability structure
526  * within the device's PCI configuration space or 0 if the device does
527  * not support it.  Some capabilities can occur several times, e.g., the
528  * vendor-specific capability, and this provides a way to find them all.
529  */
530 u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
531 {
532 	u32 header;
533 	int ttl;
534 	u16 pos = PCI_CFG_SPACE_SIZE;
535 
536 	/* minimum 8 bytes per capability */
537 	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
538 
539 	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
540 		return 0;
541 
542 	if (start)
543 		pos = start;
544 
545 	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
546 		return 0;
547 
548 	/*
549 	 * If we have no capabilities, this is indicated by cap ID,
550 	 * cap version and next pointer all being 0.
551 	 */
552 	if (header == 0)
553 		return 0;
554 
555 	while (ttl-- > 0) {
556 		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
557 			return pos;
558 
559 		pos = PCI_EXT_CAP_NEXT(header);
560 		if (pos < PCI_CFG_SPACE_SIZE)
561 			break;
562 
563 		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
564 			break;
565 	}
566 
567 	return 0;
568 }
569 EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
570 
571 /**
572  * pci_find_ext_capability - Find an extended capability
573  * @dev: PCI device to query
574  * @cap: capability code
575  *
576  * Returns the address of the requested extended capability structure
577  * within the device's PCI configuration space or 0 if the device does
578  * not support it.  Possible values for @cap include:
579  *
580  *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
581  *  %PCI_EXT_CAP_ID_VC		Virtual Channel
582  *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
583  *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
584  */
585 u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
586 {
587 	return pci_find_next_ext_capability(dev, 0, cap);
588 }
589 EXPORT_SYMBOL_GPL(pci_find_ext_capability);
590 
591 /**
592  * pci_get_dsn - Read and return the 8-byte Device Serial Number
593  * @dev: PCI device to query
594  *
595  * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
596  * Number.
597  *
598  * Returns the DSN, or zero if the capability does not exist.
599  */
600 u64 pci_get_dsn(struct pci_dev *dev)
601 {
602 	u32 dword;
603 	u64 dsn;
604 	int pos;
605 
606 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
607 	if (!pos)
608 		return 0;
609 
610 	/*
611 	 * The Device Serial Number is two dwords offset 4 bytes from the
612 	 * capability position. The specification says that the first dword is
613 	 * the lower half, and the second dword is the upper half.
614 	 */
615 	pos += 4;
616 	pci_read_config_dword(dev, pos, &dword);
617 	dsn = (u64)dword;
618 	pci_read_config_dword(dev, pos + 4, &dword);
619 	dsn |= ((u64)dword) << 32;
620 
621 	return dsn;
622 }
623 EXPORT_SYMBOL_GPL(pci_get_dsn);
624 
625 static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
626 {
627 	int rc, ttl = PCI_FIND_CAP_TTL;
628 	u8 cap, mask;
629 
630 	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
631 		mask = HT_3BIT_CAP_MASK;
632 	else
633 		mask = HT_5BIT_CAP_MASK;
634 
635 	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
636 				      PCI_CAP_ID_HT, &ttl);
637 	while (pos) {
638 		rc = pci_read_config_byte(dev, pos + 3, &cap);
639 		if (rc != PCIBIOS_SUCCESSFUL)
640 			return 0;
641 
642 		if ((cap & mask) == ht_cap)
643 			return pos;
644 
645 		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
646 					      pos + PCI_CAP_LIST_NEXT,
647 					      PCI_CAP_ID_HT, &ttl);
648 	}
649 
650 	return 0;
651 }
652 
653 /**
654  * pci_find_next_ht_capability - query a device's HyperTransport capabilities
655  * @dev: PCI device to query
656  * @pos: Position from which to continue searching
657  * @ht_cap: HyperTransport capability code
658  *
659  * To be used in conjunction with pci_find_ht_capability() to search for
660  * all capabilities matching @ht_cap. @pos should always be a value returned
661  * from pci_find_ht_capability().
662  *
663  * NB. To be 100% safe against broken PCI devices, the caller should take
664  * steps to avoid an infinite loop.
665  */
666 u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
667 {
668 	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
669 }
670 EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
671 
672 /**
673  * pci_find_ht_capability - query a device's HyperTransport capabilities
674  * @dev: PCI device to query
675  * @ht_cap: HyperTransport capability code
676  *
677  * Tell if a device supports a given HyperTransport capability.
678  * Returns an address within the device's PCI configuration space
679  * or 0 in case the device does not support the request capability.
680  * The address points to the PCI capability, of type PCI_CAP_ID_HT,
681  * which has a HyperTransport capability matching @ht_cap.
682  */
683 u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
684 {
685 	u8 pos;
686 
687 	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
688 	if (pos)
689 		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
690 
691 	return pos;
692 }
693 EXPORT_SYMBOL_GPL(pci_find_ht_capability);
694 
695 /**
696  * pci_find_parent_resource - return resource region of parent bus of given
697  *			      region
698  * @dev: PCI device structure contains resources to be searched
699  * @res: child resource record for which parent is sought
700  *
701  * For given resource region of given device, return the resource region of
702  * parent bus the given region is contained in.
703  */
704 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
705 					  struct resource *res)
706 {
707 	const struct pci_bus *bus = dev->bus;
708 	struct resource *r;
709 	int i;
710 
711 	pci_bus_for_each_resource(bus, r, i) {
712 		if (!r)
713 			continue;
714 		if (resource_contains(r, res)) {
715 
716 			/*
717 			 * If the window is prefetchable but the BAR is
718 			 * not, the allocator made a mistake.
719 			 */
720 			if (r->flags & IORESOURCE_PREFETCH &&
721 			    !(res->flags & IORESOURCE_PREFETCH))
722 				return NULL;
723 
724 			/*
725 			 * If we're below a transparent bridge, there may
726 			 * be both a positively-decoded aperture and a
727 			 * subtractively-decoded region that contain the BAR.
728 			 * We want the positively-decoded one, so this depends
729 			 * on pci_bus_for_each_resource() giving us those
730 			 * first.
731 			 */
732 			return r;
733 		}
734 	}
735 	return NULL;
736 }
737 EXPORT_SYMBOL(pci_find_parent_resource);
738 
739 /**
740  * pci_find_resource - Return matching PCI device resource
741  * @dev: PCI device to query
742  * @res: Resource to look for
743  *
744  * Goes over standard PCI resources (BARs) and checks if the given resource
745  * is partially or fully contained in any of them. In that case the
746  * matching resource is returned, %NULL otherwise.
747  */
748 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
749 {
750 	int i;
751 
752 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
753 		struct resource *r = &dev->resource[i];
754 
755 		if (r->start && resource_contains(r, res))
756 			return r;
757 	}
758 
759 	return NULL;
760 }
761 EXPORT_SYMBOL(pci_find_resource);
762 
763 /**
764  * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
765  * @dev: the PCI device to operate on
766  * @pos: config space offset of status word
767  * @mask: mask of bit(s) to care about in status word
768  *
769  * Return 1 when mask bit(s) in status word clear, 0 otherwise.
770  */
771 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
772 {
773 	int i;
774 
775 	/* Wait for Transaction Pending bit clean */
776 	for (i = 0; i < 4; i++) {
777 		u16 status;
778 		if (i)
779 			msleep((1 << (i - 1)) * 100);
780 
781 		pci_read_config_word(dev, pos, &status);
782 		if (!(status & mask))
783 			return 1;
784 	}
785 
786 	return 0;
787 }
788 
789 static int pci_acs_enable;
790 
791 /**
792  * pci_request_acs - ask for ACS to be enabled if supported
793  */
794 void pci_request_acs(void)
795 {
796 	pci_acs_enable = 1;
797 }
798 
799 static const char *disable_acs_redir_param;
800 
801 /**
802  * pci_disable_acs_redir - disable ACS redirect capabilities
803  * @dev: the PCI device
804  *
805  * For only devices specified in the disable_acs_redir parameter.
806  */
807 static void pci_disable_acs_redir(struct pci_dev *dev)
808 {
809 	int ret = 0;
810 	const char *p;
811 	int pos;
812 	u16 ctrl;
813 
814 	if (!disable_acs_redir_param)
815 		return;
816 
817 	p = disable_acs_redir_param;
818 	while (*p) {
819 		ret = pci_dev_str_match(dev, p, &p);
820 		if (ret < 0) {
821 			pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
822 				     disable_acs_redir_param);
823 
824 			break;
825 		} else if (ret == 1) {
826 			/* Found a match */
827 			break;
828 		}
829 
830 		if (*p != ';' && *p != ',') {
831 			/* End of param or invalid format */
832 			break;
833 		}
834 		p++;
835 	}
836 
837 	if (ret != 1)
838 		return;
839 
840 	if (!pci_dev_specific_disable_acs_redir(dev))
841 		return;
842 
843 	pos = dev->acs_cap;
844 	if (!pos) {
845 		pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
846 		return;
847 	}
848 
849 	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
850 
851 	/* P2P Request & Completion Redirect */
852 	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
853 
854 	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
855 
856 	pci_info(dev, "disabled ACS redirect\n");
857 }
858 
859 /**
860  * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
861  * @dev: the PCI device
862  */
863 static void pci_std_enable_acs(struct pci_dev *dev)
864 {
865 	int pos;
866 	u16 cap;
867 	u16 ctrl;
868 
869 	pos = dev->acs_cap;
870 	if (!pos)
871 		return;
872 
873 	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
874 	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
875 
876 	/* Source Validation */
877 	ctrl |= (cap & PCI_ACS_SV);
878 
879 	/* P2P Request Redirect */
880 	ctrl |= (cap & PCI_ACS_RR);
881 
882 	/* P2P Completion Redirect */
883 	ctrl |= (cap & PCI_ACS_CR);
884 
885 	/* Upstream Forwarding */
886 	ctrl |= (cap & PCI_ACS_UF);
887 
888 	/* Enable Translation Blocking for external devices */
889 	if (dev->external_facing || dev->untrusted)
890 		ctrl |= (cap & PCI_ACS_TB);
891 
892 	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
893 }
894 
895 /**
896  * pci_enable_acs - enable ACS if hardware support it
897  * @dev: the PCI device
898  */
899 static void pci_enable_acs(struct pci_dev *dev)
900 {
901 	if (!pci_acs_enable)
902 		goto disable_acs_redir;
903 
904 	if (!pci_dev_specific_enable_acs(dev))
905 		goto disable_acs_redir;
906 
907 	pci_std_enable_acs(dev);
908 
909 disable_acs_redir:
910 	/*
911 	 * Note: pci_disable_acs_redir() must be called even if ACS was not
912 	 * enabled by the kernel because it may have been enabled by
913 	 * platform firmware.  So if we are told to disable it, we should
914 	 * always disable it after setting the kernel's default
915 	 * preferences.
916 	 */
917 	pci_disable_acs_redir(dev);
918 }
919 
920 /**
921  * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
922  * @dev: PCI device to have its BARs restored
923  *
924  * Restore the BAR values for a given device, so as to make it
925  * accessible by its driver.
926  */
927 static void pci_restore_bars(struct pci_dev *dev)
928 {
929 	int i;
930 
931 	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
932 		pci_update_resource(dev, i);
933 }
934 
935 static const struct pci_platform_pm_ops *pci_platform_pm;
936 
937 int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
938 {
939 	if (!ops->is_manageable || !ops->set_state  || !ops->get_state ||
940 	    !ops->choose_state  || !ops->set_wakeup || !ops->need_resume)
941 		return -EINVAL;
942 	pci_platform_pm = ops;
943 	return 0;
944 }
945 
946 static inline bool platform_pci_power_manageable(struct pci_dev *dev)
947 {
948 	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
949 }
950 
951 static inline int platform_pci_set_power_state(struct pci_dev *dev,
952 					       pci_power_t t)
953 {
954 	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
955 }
956 
957 static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
958 {
959 	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
960 }
961 
962 static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
963 {
964 	if (pci_platform_pm && pci_platform_pm->refresh_state)
965 		pci_platform_pm->refresh_state(dev);
966 }
967 
968 static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
969 {
970 	return pci_platform_pm ?
971 			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
972 }
973 
974 static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
975 {
976 	return pci_platform_pm ?
977 			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
978 }
979 
980 static inline bool platform_pci_need_resume(struct pci_dev *dev)
981 {
982 	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
983 }
984 
985 static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
986 {
987 	if (pci_platform_pm && pci_platform_pm->bridge_d3)
988 		return pci_platform_pm->bridge_d3(dev);
989 	return false;
990 }
991 
992 /**
993  * pci_raw_set_power_state - Use PCI PM registers to set the power state of
994  *			     given PCI device
995  * @dev: PCI device to handle.
996  * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
997  *
998  * RETURN VALUE:
999  * -EINVAL if the requested state is invalid.
1000  * -EIO if device does not support PCI PM or its PM capabilities register has a
1001  * wrong version, or device doesn't support the requested state.
1002  * 0 if device already is in the requested state.
1003  * 0 if device's power state has been successfully changed.
1004  */
1005 static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
1006 {
1007 	u16 pmcsr;
1008 	bool need_restore = false;
1009 
1010 	/* Check if we're already there */
1011 	if (dev->current_state == state)
1012 		return 0;
1013 
1014 	if (!dev->pm_cap)
1015 		return -EIO;
1016 
1017 	if (state < PCI_D0 || state > PCI_D3hot)
1018 		return -EINVAL;
1019 
1020 	/*
1021 	 * Validate transition: We can enter D0 from any state, but if
1022 	 * we're already in a low-power state, we can only go deeper.  E.g.,
1023 	 * we can go from D1 to D3, but we can't go directly from D3 to D1;
1024 	 * we'd have to go from D3 to D0, then to D1.
1025 	 */
1026 	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
1027 	    && dev->current_state > state) {
1028 		pci_err(dev, "invalid power transition (from %s to %s)\n",
1029 			pci_power_name(dev->current_state),
1030 			pci_power_name(state));
1031 		return -EINVAL;
1032 	}
1033 
1034 	/* Check if this device supports the desired state */
1035 	if ((state == PCI_D1 && !dev->d1_support)
1036 	   || (state == PCI_D2 && !dev->d2_support))
1037 		return -EIO;
1038 
1039 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1040 	if (pmcsr == (u16) ~0) {
1041 		pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
1042 			pci_power_name(dev->current_state),
1043 			pci_power_name(state));
1044 		return -EIO;
1045 	}
1046 
1047 	/*
1048 	 * If we're (effectively) in D3, force entire word to 0.
1049 	 * This doesn't affect PME_Status, disables PME_En, and
1050 	 * sets PowerState to 0.
1051 	 */
1052 	switch (dev->current_state) {
1053 	case PCI_D0:
1054 	case PCI_D1:
1055 	case PCI_D2:
1056 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1057 		pmcsr |= state;
1058 		break;
1059 	case PCI_D3hot:
1060 	case PCI_D3cold:
1061 	case PCI_UNKNOWN: /* Boot-up */
1062 		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
1063 		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
1064 			need_restore = true;
1065 		fallthrough;	/* force to D0 */
1066 	default:
1067 		pmcsr = 0;
1068 		break;
1069 	}
1070 
1071 	/* Enter specified state */
1072 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1073 
1074 	/*
1075 	 * Mandatory power management transition delays; see PCI PM 1.1
1076 	 * 5.6.1 table 18
1077 	 */
1078 	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
1079 		pci_dev_d3_sleep(dev);
1080 	else if (state == PCI_D2 || dev->current_state == PCI_D2)
1081 		udelay(PCI_PM_D2_DELAY);
1082 
1083 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1084 	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1085 	if (dev->current_state != state)
1086 		pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
1087 			 pci_power_name(dev->current_state),
1088 			 pci_power_name(state));
1089 
1090 	/*
1091 	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
1092 	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
1093 	 * from D3hot to D0 _may_ perform an internal reset, thereby
1094 	 * going to "D0 Uninitialized" rather than "D0 Initialized".
1095 	 * For example, at least some versions of the 3c905B and the
1096 	 * 3c556B exhibit this behaviour.
1097 	 *
1098 	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
1099 	 * devices in a D3hot state at boot.  Consequently, we need to
1100 	 * restore at least the BARs so that the device will be
1101 	 * accessible to its driver.
1102 	 */
1103 	if (need_restore)
1104 		pci_restore_bars(dev);
1105 
1106 	if (dev->bus->self)
1107 		pcie_aspm_pm_state_change(dev->bus->self);
1108 
1109 	return 0;
1110 }
1111 
1112 /**
1113  * pci_update_current_state - Read power state of given device and cache it
1114  * @dev: PCI device to handle.
1115  * @state: State to cache in case the device doesn't have the PM capability
1116  *
1117  * The power state is read from the PMCSR register, which however is
1118  * inaccessible in D3cold.  The platform firmware is therefore queried first
1119  * to detect accessibility of the register.  In case the platform firmware
1120  * reports an incorrect state or the device isn't power manageable by the
1121  * platform at all, we try to detect D3cold by testing accessibility of the
1122  * vendor ID in config space.
1123  */
1124 void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
1125 {
1126 	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
1127 	    !pci_device_is_present(dev)) {
1128 		dev->current_state = PCI_D3cold;
1129 	} else if (dev->pm_cap) {
1130 		u16 pmcsr;
1131 
1132 		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1133 		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1134 	} else {
1135 		dev->current_state = state;
1136 	}
1137 }
1138 
1139 /**
1140  * pci_refresh_power_state - Refresh the given device's power state data
1141  * @dev: Target PCI device.
1142  *
1143  * Ask the platform to refresh the devices power state information and invoke
1144  * pci_update_current_state() to update its current PCI power state.
1145  */
1146 void pci_refresh_power_state(struct pci_dev *dev)
1147 {
1148 	if (platform_pci_power_manageable(dev))
1149 		platform_pci_refresh_power_state(dev);
1150 
1151 	pci_update_current_state(dev, dev->current_state);
1152 }
1153 
1154 /**
1155  * pci_platform_power_transition - Use platform to change device power state
1156  * @dev: PCI device to handle.
1157  * @state: State to put the device into.
1158  */
1159 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
1160 {
1161 	int error;
1162 
1163 	if (platform_pci_power_manageable(dev)) {
1164 		error = platform_pci_set_power_state(dev, state);
1165 		if (!error)
1166 			pci_update_current_state(dev, state);
1167 	} else
1168 		error = -ENODEV;
1169 
1170 	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
1171 		dev->current_state = PCI_D0;
1172 
1173 	return error;
1174 }
1175 EXPORT_SYMBOL_GPL(pci_platform_power_transition);
1176 
1177 static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
1178 {
1179 	pm_request_resume(&pci_dev->dev);
1180 	return 0;
1181 }
1182 
1183 /**
1184  * pci_resume_bus - Walk given bus and runtime resume devices on it
1185  * @bus: Top bus of the subtree to walk.
1186  */
1187 void pci_resume_bus(struct pci_bus *bus)
1188 {
1189 	if (bus)
1190 		pci_walk_bus(bus, pci_resume_one, NULL);
1191 }
1192 
1193 static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
1194 {
1195 	int delay = 1;
1196 	u32 id;
1197 
1198 	/*
1199 	 * After reset, the device should not silently discard config
1200 	 * requests, but it may still indicate that it needs more time by
1201 	 * responding to them with CRS completions.  The Root Port will
1202 	 * generally synthesize ~0 data to complete the read (except when
1203 	 * CRS SV is enabled and the read was for the Vendor ID; in that
1204 	 * case it synthesizes 0x0001 data).
1205 	 *
1206 	 * Wait for the device to return a non-CRS completion.  Read the
1207 	 * Command register instead of Vendor ID so we don't have to
1208 	 * contend with the CRS SV value.
1209 	 */
1210 	pci_read_config_dword(dev, PCI_COMMAND, &id);
1211 	while (id == ~0) {
1212 		if (delay > timeout) {
1213 			pci_warn(dev, "not ready %dms after %s; giving up\n",
1214 				 delay - 1, reset_type);
1215 			return -ENOTTY;
1216 		}
1217 
1218 		if (delay > 1000)
1219 			pci_info(dev, "not ready %dms after %s; waiting\n",
1220 				 delay - 1, reset_type);
1221 
1222 		msleep(delay);
1223 		delay *= 2;
1224 		pci_read_config_dword(dev, PCI_COMMAND, &id);
1225 	}
1226 
1227 	if (delay > 1000)
1228 		pci_info(dev, "ready %dms after %s\n", delay - 1,
1229 			 reset_type);
1230 
1231 	return 0;
1232 }
1233 
1234 /**
1235  * pci_power_up - Put the given device into D0
1236  * @dev: PCI device to power up
1237  */
1238 int pci_power_up(struct pci_dev *dev)
1239 {
1240 	pci_platform_power_transition(dev, PCI_D0);
1241 
1242 	/*
1243 	 * Mandatory power management transition delays are handled in
1244 	 * pci_pm_resume_noirq() and pci_pm_runtime_resume() of the
1245 	 * corresponding bridge.
1246 	 */
1247 	if (dev->runtime_d3cold) {
1248 		/*
1249 		 * When powering on a bridge from D3cold, the whole hierarchy
1250 		 * may be powered on into D0uninitialized state, resume them to
1251 		 * give them a chance to suspend again
1252 		 */
1253 		pci_resume_bus(dev->subordinate);
1254 	}
1255 
1256 	return pci_raw_set_power_state(dev, PCI_D0);
1257 }
1258 
1259 /**
1260  * __pci_dev_set_current_state - Set current state of a PCI device
1261  * @dev: Device to handle
1262  * @data: pointer to state to be set
1263  */
1264 static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
1265 {
1266 	pci_power_t state = *(pci_power_t *)data;
1267 
1268 	dev->current_state = state;
1269 	return 0;
1270 }
1271 
1272 /**
1273  * pci_bus_set_current_state - Walk given bus and set current state of devices
1274  * @bus: Top bus of the subtree to walk.
1275  * @state: state to be set
1276  */
1277 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
1278 {
1279 	if (bus)
1280 		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
1281 }
1282 
1283 /**
1284  * pci_set_power_state - Set the power state of a PCI device
1285  * @dev: PCI device to handle.
1286  * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
1287  *
1288  * Transition a device to a new power state, using the platform firmware and/or
1289  * the device's PCI PM registers.
1290  *
1291  * RETURN VALUE:
1292  * -EINVAL if the requested state is invalid.
1293  * -EIO if device does not support PCI PM or its PM capabilities register has a
1294  * wrong version, or device doesn't support the requested state.
1295  * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
1296  * 0 if device already is in the requested state.
1297  * 0 if the transition is to D3 but D3 is not supported.
1298  * 0 if device's power state has been successfully changed.
1299  */
1300 int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1301 {
1302 	int error;
1303 
1304 	/* Bound the state we're entering */
1305 	if (state > PCI_D3cold)
1306 		state = PCI_D3cold;
1307 	else if (state < PCI_D0)
1308 		state = PCI_D0;
1309 	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
1310 
1311 		/*
1312 		 * If the device or the parent bridge do not support PCI
1313 		 * PM, ignore the request if we're doing anything other
1314 		 * than putting it into D0 (which would only happen on
1315 		 * boot).
1316 		 */
1317 		return 0;
1318 
1319 	/* Check if we're already there */
1320 	if (dev->current_state == state)
1321 		return 0;
1322 
1323 	if (state == PCI_D0)
1324 		return pci_power_up(dev);
1325 
1326 	/*
1327 	 * This device is quirked not to be put into D3, so don't put it in
1328 	 * D3
1329 	 */
1330 	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
1331 		return 0;
1332 
1333 	/*
1334 	 * To put device in D3cold, we put device into D3hot in native
1335 	 * way, then put device into D3cold with platform ops
1336 	 */
1337 	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
1338 					PCI_D3hot : state);
1339 
1340 	if (pci_platform_power_transition(dev, state))
1341 		return error;
1342 
1343 	/* Powering off a bridge may power off the whole hierarchy */
1344 	if (state == PCI_D3cold)
1345 		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
1346 
1347 	return 0;
1348 }
1349 EXPORT_SYMBOL(pci_set_power_state);
1350 
1351 /**
1352  * pci_choose_state - Choose the power state of a PCI device
1353  * @dev: PCI device to be suspended
1354  * @state: target sleep state for the whole system. This is the value
1355  *	   that is passed to suspend() function.
1356  *
1357  * Returns PCI power state suitable for given device and given system
1358  * message.
1359  */
1360 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
1361 {
1362 	pci_power_t ret;
1363 
1364 	if (!dev->pm_cap)
1365 		return PCI_D0;
1366 
1367 	ret = platform_pci_choose_state(dev);
1368 	if (ret != PCI_POWER_ERROR)
1369 		return ret;
1370 
1371 	switch (state.event) {
1372 	case PM_EVENT_ON:
1373 		return PCI_D0;
1374 	case PM_EVENT_FREEZE:
1375 	case PM_EVENT_PRETHAW:
1376 		/* REVISIT both freeze and pre-thaw "should" use D0 */
1377 	case PM_EVENT_SUSPEND:
1378 	case PM_EVENT_HIBERNATE:
1379 		return PCI_D3hot;
1380 	default:
1381 		pci_info(dev, "unrecognized suspend event %d\n",
1382 			 state.event);
1383 		BUG();
1384 	}
1385 	return PCI_D0;
1386 }
1387 EXPORT_SYMBOL(pci_choose_state);
1388 
1389 #define PCI_EXP_SAVE_REGS	7
1390 
1391 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
1392 						       u16 cap, bool extended)
1393 {
1394 	struct pci_cap_saved_state *tmp;
1395 
1396 	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
1397 		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
1398 			return tmp;
1399 	}
1400 	return NULL;
1401 }
1402 
1403 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
1404 {
1405 	return _pci_find_saved_cap(dev, cap, false);
1406 }
1407 
1408 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
1409 {
1410 	return _pci_find_saved_cap(dev, cap, true);
1411 }
1412 
1413 static int pci_save_pcie_state(struct pci_dev *dev)
1414 {
1415 	int i = 0;
1416 	struct pci_cap_saved_state *save_state;
1417 	u16 *cap;
1418 
1419 	if (!pci_is_pcie(dev))
1420 		return 0;
1421 
1422 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1423 	if (!save_state) {
1424 		pci_err(dev, "buffer not found in %s\n", __func__);
1425 		return -ENOMEM;
1426 	}
1427 
1428 	cap = (u16 *)&save_state->cap.data[0];
1429 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
1430 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
1431 	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
1432 	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
1433 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
1434 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
1435 	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
1436 
1437 	return 0;
1438 }
1439 
1440 static void pci_restore_pcie_state(struct pci_dev *dev)
1441 {
1442 	int i = 0;
1443 	struct pci_cap_saved_state *save_state;
1444 	u16 *cap;
1445 
1446 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1447 	if (!save_state)
1448 		return;
1449 
1450 	cap = (u16 *)&save_state->cap.data[0];
1451 	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
1452 	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
1453 	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
1454 	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
1455 	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
1456 	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
1457 	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1458 }
1459 
1460 static int pci_save_pcix_state(struct pci_dev *dev)
1461 {
1462 	int pos;
1463 	struct pci_cap_saved_state *save_state;
1464 
1465 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1466 	if (!pos)
1467 		return 0;
1468 
1469 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1470 	if (!save_state) {
1471 		pci_err(dev, "buffer not found in %s\n", __func__);
1472 		return -ENOMEM;
1473 	}
1474 
1475 	pci_read_config_word(dev, pos + PCI_X_CMD,
1476 			     (u16 *)save_state->cap.data);
1477 
1478 	return 0;
1479 }
1480 
1481 static void pci_restore_pcix_state(struct pci_dev *dev)
1482 {
1483 	int i = 0, pos;
1484 	struct pci_cap_saved_state *save_state;
1485 	u16 *cap;
1486 
1487 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1488 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1489 	if (!save_state || !pos)
1490 		return;
1491 	cap = (u16 *)&save_state->cap.data[0];
1492 
1493 	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1494 }
1495 
1496 static void pci_save_ltr_state(struct pci_dev *dev)
1497 {
1498 	int ltr;
1499 	struct pci_cap_saved_state *save_state;
1500 	u16 *cap;
1501 
1502 	if (!pci_is_pcie(dev))
1503 		return;
1504 
1505 	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1506 	if (!ltr)
1507 		return;
1508 
1509 	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1510 	if (!save_state) {
1511 		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
1512 		return;
1513 	}
1514 
1515 	cap = (u16 *)&save_state->cap.data[0];
1516 	pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
1517 	pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
1518 }
1519 
1520 static void pci_restore_ltr_state(struct pci_dev *dev)
1521 {
1522 	struct pci_cap_saved_state *save_state;
1523 	int ltr;
1524 	u16 *cap;
1525 
1526 	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1527 	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1528 	if (!save_state || !ltr)
1529 		return;
1530 
1531 	cap = (u16 *)&save_state->cap.data[0];
1532 	pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
1533 	pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
1534 }
1535 
1536 /**
1537  * pci_save_state - save the PCI configuration space of a device before
1538  *		    suspending
1539  * @dev: PCI device that we're dealing with
1540  */
1541 int pci_save_state(struct pci_dev *dev)
1542 {
1543 	int i;
1544 	/* XXX: 100% dword access ok here? */
1545 	for (i = 0; i < 16; i++) {
1546 		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1547 		pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
1548 			i * 4, dev->saved_config_space[i]);
1549 	}
1550 	dev->state_saved = true;
1551 
1552 	i = pci_save_pcie_state(dev);
1553 	if (i != 0)
1554 		return i;
1555 
1556 	i = pci_save_pcix_state(dev);
1557 	if (i != 0)
1558 		return i;
1559 
1560 	pci_save_ltr_state(dev);
1561 	pci_save_dpc_state(dev);
1562 	pci_save_aer_state(dev);
1563 	pci_save_ptm_state(dev);
1564 	return pci_save_vc_state(dev);
1565 }
1566 EXPORT_SYMBOL(pci_save_state);
1567 
1568 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1569 				     u32 saved_val, int retry, bool force)
1570 {
1571 	u32 val;
1572 
1573 	pci_read_config_dword(pdev, offset, &val);
1574 	if (!force && val == saved_val)
1575 		return;
1576 
1577 	for (;;) {
1578 		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
1579 			offset, val, saved_val);
1580 		pci_write_config_dword(pdev, offset, saved_val);
1581 		if (retry-- <= 0)
1582 			return;
1583 
1584 		pci_read_config_dword(pdev, offset, &val);
1585 		if (val == saved_val)
1586 			return;
1587 
1588 		mdelay(1);
1589 	}
1590 }
1591 
1592 static void pci_restore_config_space_range(struct pci_dev *pdev,
1593 					   int start, int end, int retry,
1594 					   bool force)
1595 {
1596 	int index;
1597 
1598 	for (index = end; index >= start; index--)
1599 		pci_restore_config_dword(pdev, 4 * index,
1600 					 pdev->saved_config_space[index],
1601 					 retry, force);
1602 }
1603 
1604 static void pci_restore_config_space(struct pci_dev *pdev)
1605 {
1606 	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1607 		pci_restore_config_space_range(pdev, 10, 15, 0, false);
1608 		/* Restore BARs before the command register. */
1609 		pci_restore_config_space_range(pdev, 4, 9, 10, false);
1610 		pci_restore_config_space_range(pdev, 0, 3, 0, false);
1611 	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1612 		pci_restore_config_space_range(pdev, 12, 15, 0, false);
1613 
1614 		/*
1615 		 * Force rewriting of prefetch registers to avoid S3 resume
1616 		 * issues on Intel PCI bridges that occur when these
1617 		 * registers are not explicitly written.
1618 		 */
1619 		pci_restore_config_space_range(pdev, 9, 11, 0, true);
1620 		pci_restore_config_space_range(pdev, 0, 8, 0, false);
1621 	} else {
1622 		pci_restore_config_space_range(pdev, 0, 15, 0, false);
1623 	}
1624 }
1625 
1626 static void pci_restore_rebar_state(struct pci_dev *pdev)
1627 {
1628 	unsigned int pos, nbars, i;
1629 	u32 ctrl;
1630 
1631 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
1632 	if (!pos)
1633 		return;
1634 
1635 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1636 	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
1637 		    PCI_REBAR_CTRL_NBAR_SHIFT;
1638 
1639 	for (i = 0; i < nbars; i++, pos += 8) {
1640 		struct resource *res;
1641 		int bar_idx, size;
1642 
1643 		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1644 		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1645 		res = pdev->resource + bar_idx;
1646 		size = pci_rebar_bytes_to_size(resource_size(res));
1647 		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1648 		ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
1649 		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1650 	}
1651 }
1652 
1653 /**
1654  * pci_restore_state - Restore the saved state of a PCI device
1655  * @dev: PCI device that we're dealing with
1656  */
1657 void pci_restore_state(struct pci_dev *dev)
1658 {
1659 	if (!dev->state_saved)
1660 		return;
1661 
1662 	/*
1663 	 * Restore max latencies (in the LTR capability) before enabling
1664 	 * LTR itself (in the PCIe capability).
1665 	 */
1666 	pci_restore_ltr_state(dev);
1667 
1668 	pci_restore_pcie_state(dev);
1669 	pci_restore_pasid_state(dev);
1670 	pci_restore_pri_state(dev);
1671 	pci_restore_ats_state(dev);
1672 	pci_restore_vc_state(dev);
1673 	pci_restore_rebar_state(dev);
1674 	pci_restore_dpc_state(dev);
1675 	pci_restore_ptm_state(dev);
1676 
1677 	pci_aer_clear_status(dev);
1678 	pci_restore_aer_state(dev);
1679 
1680 	pci_restore_config_space(dev);
1681 
1682 	pci_restore_pcix_state(dev);
1683 	pci_restore_msi_state(dev);
1684 
1685 	/* Restore ACS and IOV configuration state */
1686 	pci_enable_acs(dev);
1687 	pci_restore_iov_state(dev);
1688 
1689 	dev->state_saved = false;
1690 }
1691 EXPORT_SYMBOL(pci_restore_state);
1692 
1693 struct pci_saved_state {
1694 	u32 config_space[16];
1695 	struct pci_cap_saved_data cap[];
1696 };
1697 
1698 /**
1699  * pci_store_saved_state - Allocate and return an opaque struct containing
1700  *			   the device saved state.
1701  * @dev: PCI device that we're dealing with
1702  *
1703  * Return NULL if no state or error.
1704  */
1705 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1706 {
1707 	struct pci_saved_state *state;
1708 	struct pci_cap_saved_state *tmp;
1709 	struct pci_cap_saved_data *cap;
1710 	size_t size;
1711 
1712 	if (!dev->state_saved)
1713 		return NULL;
1714 
1715 	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1716 
1717 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1718 		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1719 
1720 	state = kzalloc(size, GFP_KERNEL);
1721 	if (!state)
1722 		return NULL;
1723 
1724 	memcpy(state->config_space, dev->saved_config_space,
1725 	       sizeof(state->config_space));
1726 
1727 	cap = state->cap;
1728 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1729 		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1730 		memcpy(cap, &tmp->cap, len);
1731 		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1732 	}
1733 	/* Empty cap_save terminates list */
1734 
1735 	return state;
1736 }
1737 EXPORT_SYMBOL_GPL(pci_store_saved_state);
1738 
1739 /**
1740  * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1741  * @dev: PCI device that we're dealing with
1742  * @state: Saved state returned from pci_store_saved_state()
1743  */
1744 int pci_load_saved_state(struct pci_dev *dev,
1745 			 struct pci_saved_state *state)
1746 {
1747 	struct pci_cap_saved_data *cap;
1748 
1749 	dev->state_saved = false;
1750 
1751 	if (!state)
1752 		return 0;
1753 
1754 	memcpy(dev->saved_config_space, state->config_space,
1755 	       sizeof(state->config_space));
1756 
1757 	cap = state->cap;
1758 	while (cap->size) {
1759 		struct pci_cap_saved_state *tmp;
1760 
1761 		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1762 		if (!tmp || tmp->cap.size != cap->size)
1763 			return -EINVAL;
1764 
1765 		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1766 		cap = (struct pci_cap_saved_data *)((u8 *)cap +
1767 		       sizeof(struct pci_cap_saved_data) + cap->size);
1768 	}
1769 
1770 	dev->state_saved = true;
1771 	return 0;
1772 }
1773 EXPORT_SYMBOL_GPL(pci_load_saved_state);
1774 
1775 /**
1776  * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1777  *				   and free the memory allocated for it.
1778  * @dev: PCI device that we're dealing with
1779  * @state: Pointer to saved state returned from pci_store_saved_state()
1780  */
1781 int pci_load_and_free_saved_state(struct pci_dev *dev,
1782 				  struct pci_saved_state **state)
1783 {
1784 	int ret = pci_load_saved_state(dev, *state);
1785 	kfree(*state);
1786 	*state = NULL;
1787 	return ret;
1788 }
1789 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
1790 
1791 int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1792 {
1793 	return pci_enable_resources(dev, bars);
1794 }
1795 
1796 static int do_pci_enable_device(struct pci_dev *dev, int bars)
1797 {
1798 	int err;
1799 	struct pci_dev *bridge;
1800 	u16 cmd;
1801 	u8 pin;
1802 
1803 	err = pci_set_power_state(dev, PCI_D0);
1804 	if (err < 0 && err != -EIO)
1805 		return err;
1806 
1807 	bridge = pci_upstream_bridge(dev);
1808 	if (bridge)
1809 		pcie_aspm_powersave_config_link(bridge);
1810 
1811 	err = pcibios_enable_device(dev, bars);
1812 	if (err < 0)
1813 		return err;
1814 	pci_fixup_device(pci_fixup_enable, dev);
1815 
1816 	if (dev->msi_enabled || dev->msix_enabled)
1817 		return 0;
1818 
1819 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1820 	if (pin) {
1821 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
1822 		if (cmd & PCI_COMMAND_INTX_DISABLE)
1823 			pci_write_config_word(dev, PCI_COMMAND,
1824 					      cmd & ~PCI_COMMAND_INTX_DISABLE);
1825 	}
1826 
1827 	return 0;
1828 }
1829 
1830 /**
1831  * pci_reenable_device - Resume abandoned device
1832  * @dev: PCI device to be resumed
1833  *
1834  * NOTE: This function is a backend of pci_default_resume() and is not supposed
1835  * to be called by normal code, write proper resume handler and use it instead.
1836  */
1837 int pci_reenable_device(struct pci_dev *dev)
1838 {
1839 	if (pci_is_enabled(dev))
1840 		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1841 	return 0;
1842 }
1843 EXPORT_SYMBOL(pci_reenable_device);
1844 
1845 static void pci_enable_bridge(struct pci_dev *dev)
1846 {
1847 	struct pci_dev *bridge;
1848 	int retval;
1849 
1850 	bridge = pci_upstream_bridge(dev);
1851 	if (bridge)
1852 		pci_enable_bridge(bridge);
1853 
1854 	if (pci_is_enabled(dev)) {
1855 		if (!dev->is_busmaster)
1856 			pci_set_master(dev);
1857 		return;
1858 	}
1859 
1860 	retval = pci_enable_device(dev);
1861 	if (retval)
1862 		pci_err(dev, "Error enabling bridge (%d), continuing\n",
1863 			retval);
1864 	pci_set_master(dev);
1865 }
1866 
1867 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1868 {
1869 	struct pci_dev *bridge;
1870 	int err;
1871 	int i, bars = 0;
1872 
1873 	/*
1874 	 * Power state could be unknown at this point, either due to a fresh
1875 	 * boot or a device removal call.  So get the current power state
1876 	 * so that things like MSI message writing will behave as expected
1877 	 * (e.g. if the device really is in D0 at enable time).
1878 	 */
1879 	if (dev->pm_cap) {
1880 		u16 pmcsr;
1881 		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1882 		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1883 	}
1884 
1885 	if (atomic_inc_return(&dev->enable_cnt) > 1)
1886 		return 0;		/* already enabled */
1887 
1888 	bridge = pci_upstream_bridge(dev);
1889 	if (bridge)
1890 		pci_enable_bridge(bridge);
1891 
1892 	/* only skip sriov related */
1893 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1894 		if (dev->resource[i].flags & flags)
1895 			bars |= (1 << i);
1896 	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
1897 		if (dev->resource[i].flags & flags)
1898 			bars |= (1 << i);
1899 
1900 	err = do_pci_enable_device(dev, bars);
1901 	if (err < 0)
1902 		atomic_dec(&dev->enable_cnt);
1903 	return err;
1904 }
1905 
1906 /**
1907  * pci_enable_device_io - Initialize a device for use with IO space
1908  * @dev: PCI device to be initialized
1909  *
1910  * Initialize device before it's used by a driver. Ask low-level code
1911  * to enable I/O resources. Wake up the device if it was suspended.
1912  * Beware, this function can fail.
1913  */
1914 int pci_enable_device_io(struct pci_dev *dev)
1915 {
1916 	return pci_enable_device_flags(dev, IORESOURCE_IO);
1917 }
1918 EXPORT_SYMBOL(pci_enable_device_io);
1919 
1920 /**
1921  * pci_enable_device_mem - Initialize a device for use with Memory space
1922  * @dev: PCI device to be initialized
1923  *
1924  * Initialize device before it's used by a driver. Ask low-level code
1925  * to enable Memory resources. Wake up the device if it was suspended.
1926  * Beware, this function can fail.
1927  */
1928 int pci_enable_device_mem(struct pci_dev *dev)
1929 {
1930 	return pci_enable_device_flags(dev, IORESOURCE_MEM);
1931 }
1932 EXPORT_SYMBOL(pci_enable_device_mem);
1933 
1934 /**
1935  * pci_enable_device - Initialize device before it's used by a driver.
1936  * @dev: PCI device to be initialized
1937  *
1938  * Initialize device before it's used by a driver. Ask low-level code
1939  * to enable I/O and memory. Wake up the device if it was suspended.
1940  * Beware, this function can fail.
1941  *
1942  * Note we don't actually enable the device many times if we call
1943  * this function repeatedly (we just increment the count).
1944  */
1945 int pci_enable_device(struct pci_dev *dev)
1946 {
1947 	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1948 }
1949 EXPORT_SYMBOL(pci_enable_device);
1950 
1951 /*
1952  * Managed PCI resources.  This manages device on/off, INTx/MSI/MSI-X
1953  * on/off and BAR regions.  pci_dev itself records MSI/MSI-X status, so
1954  * there's no need to track it separately.  pci_devres is initialized
1955  * when a device is enabled using managed PCI device enable interface.
1956  */
1957 struct pci_devres {
1958 	unsigned int enabled:1;
1959 	unsigned int pinned:1;
1960 	unsigned int orig_intx:1;
1961 	unsigned int restore_intx:1;
1962 	unsigned int mwi:1;
1963 	u32 region_mask;
1964 };
1965 
1966 static void pcim_release(struct device *gendev, void *res)
1967 {
1968 	struct pci_dev *dev = to_pci_dev(gendev);
1969 	struct pci_devres *this = res;
1970 	int i;
1971 
1972 	if (dev->msi_enabled)
1973 		pci_disable_msi(dev);
1974 	if (dev->msix_enabled)
1975 		pci_disable_msix(dev);
1976 
1977 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1978 		if (this->region_mask & (1 << i))
1979 			pci_release_region(dev, i);
1980 
1981 	if (this->mwi)
1982 		pci_clear_mwi(dev);
1983 
1984 	if (this->restore_intx)
1985 		pci_intx(dev, this->orig_intx);
1986 
1987 	if (this->enabled && !this->pinned)
1988 		pci_disable_device(dev);
1989 }
1990 
1991 static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
1992 {
1993 	struct pci_devres *dr, *new_dr;
1994 
1995 	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1996 	if (dr)
1997 		return dr;
1998 
1999 	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
2000 	if (!new_dr)
2001 		return NULL;
2002 	return devres_get(&pdev->dev, new_dr, NULL, NULL);
2003 }
2004 
2005 static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
2006 {
2007 	if (pci_is_managed(pdev))
2008 		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
2009 	return NULL;
2010 }
2011 
2012 /**
2013  * pcim_enable_device - Managed pci_enable_device()
2014  * @pdev: PCI device to be initialized
2015  *
2016  * Managed pci_enable_device().
2017  */
2018 int pcim_enable_device(struct pci_dev *pdev)
2019 {
2020 	struct pci_devres *dr;
2021 	int rc;
2022 
2023 	dr = get_pci_dr(pdev);
2024 	if (unlikely(!dr))
2025 		return -ENOMEM;
2026 	if (dr->enabled)
2027 		return 0;
2028 
2029 	rc = pci_enable_device(pdev);
2030 	if (!rc) {
2031 		pdev->is_managed = 1;
2032 		dr->enabled = 1;
2033 	}
2034 	return rc;
2035 }
2036 EXPORT_SYMBOL(pcim_enable_device);
2037 
2038 /**
2039  * pcim_pin_device - Pin managed PCI device
2040  * @pdev: PCI device to pin
2041  *
2042  * Pin managed PCI device @pdev.  Pinned device won't be disabled on
2043  * driver detach.  @pdev must have been enabled with
2044  * pcim_enable_device().
2045  */
2046 void pcim_pin_device(struct pci_dev *pdev)
2047 {
2048 	struct pci_devres *dr;
2049 
2050 	dr = find_pci_dr(pdev);
2051 	WARN_ON(!dr || !dr->enabled);
2052 	if (dr)
2053 		dr->pinned = 1;
2054 }
2055 EXPORT_SYMBOL(pcim_pin_device);
2056 
2057 /*
2058  * pcibios_add_device - provide arch specific hooks when adding device dev
2059  * @dev: the PCI device being added
2060  *
2061  * Permits the platform to provide architecture specific functionality when
2062  * devices are added. This is the default implementation. Architecture
2063  * implementations can override this.
2064  */
2065 int __weak pcibios_add_device(struct pci_dev *dev)
2066 {
2067 	return 0;
2068 }
2069 
2070 /**
2071  * pcibios_release_device - provide arch specific hooks when releasing
2072  *			    device dev
2073  * @dev: the PCI device being released
2074  *
2075  * Permits the platform to provide architecture specific functionality when
2076  * devices are released. This is the default implementation. Architecture
2077  * implementations can override this.
2078  */
2079 void __weak pcibios_release_device(struct pci_dev *dev) {}
2080 
2081 /**
2082  * pcibios_disable_device - disable arch specific PCI resources for device dev
2083  * @dev: the PCI device to disable
2084  *
2085  * Disables architecture specific PCI resources for the device. This
2086  * is the default implementation. Architecture implementations can
2087  * override this.
2088  */
2089 void __weak pcibios_disable_device(struct pci_dev *dev) {}
2090 
2091 /**
2092  * pcibios_penalize_isa_irq - penalize an ISA IRQ
2093  * @irq: ISA IRQ to penalize
2094  * @active: IRQ active or not
2095  *
2096  * Permits the platform to provide architecture-specific functionality when
2097  * penalizing ISA IRQs. This is the default implementation. Architecture
2098  * implementations can override this.
2099  */
2100 void __weak pcibios_penalize_isa_irq(int irq, int active) {}
2101 
2102 static void do_pci_disable_device(struct pci_dev *dev)
2103 {
2104 	u16 pci_command;
2105 
2106 	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
2107 	if (pci_command & PCI_COMMAND_MASTER) {
2108 		pci_command &= ~PCI_COMMAND_MASTER;
2109 		pci_write_config_word(dev, PCI_COMMAND, pci_command);
2110 	}
2111 
2112 	pcibios_disable_device(dev);
2113 }
2114 
2115 /**
2116  * pci_disable_enabled_device - Disable device without updating enable_cnt
2117  * @dev: PCI device to disable
2118  *
2119  * NOTE: This function is a backend of PCI power management routines and is
2120  * not supposed to be called drivers.
2121  */
2122 void pci_disable_enabled_device(struct pci_dev *dev)
2123 {
2124 	if (pci_is_enabled(dev))
2125 		do_pci_disable_device(dev);
2126 }
2127 
2128 /**
2129  * pci_disable_device - Disable PCI device after use
2130  * @dev: PCI device to be disabled
2131  *
2132  * Signal to the system that the PCI device is not in use by the system
2133  * anymore.  This only involves disabling PCI bus-mastering, if active.
2134  *
2135  * Note we don't actually disable the device until all callers of
2136  * pci_enable_device() have called pci_disable_device().
2137  */
2138 void pci_disable_device(struct pci_dev *dev)
2139 {
2140 	struct pci_devres *dr;
2141 
2142 	dr = find_pci_dr(dev);
2143 	if (dr)
2144 		dr->enabled = 0;
2145 
2146 	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2147 		      "disabling already-disabled device");
2148 
2149 	if (atomic_dec_return(&dev->enable_cnt) != 0)
2150 		return;
2151 
2152 	do_pci_disable_device(dev);
2153 
2154 	dev->is_busmaster = 0;
2155 }
2156 EXPORT_SYMBOL(pci_disable_device);
2157 
2158 /**
2159  * pcibios_set_pcie_reset_state - set reset state for device dev
2160  * @dev: the PCIe device reset
2161  * @state: Reset state to enter into
2162  *
2163  * Set the PCIe reset state for the device. This is the default
2164  * implementation. Architecture implementations can override this.
2165  */
2166 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2167 					enum pcie_reset_state state)
2168 {
2169 	return -EINVAL;
2170 }
2171 
2172 /**
2173  * pci_set_pcie_reset_state - set reset state for device dev
2174  * @dev: the PCIe device reset
2175  * @state: Reset state to enter into
2176  *
2177  * Sets the PCI reset state for the device.
2178  */
2179 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2180 {
2181 	return pcibios_set_pcie_reset_state(dev, state);
2182 }
2183 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
2184 
2185 void pcie_clear_device_status(struct pci_dev *dev)
2186 {
2187 	u16 sta;
2188 
2189 	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
2190 	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
2191 }
2192 
2193 /**
2194  * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2195  * @dev: PCIe root port or event collector.
2196  */
2197 void pcie_clear_root_pme_status(struct pci_dev *dev)
2198 {
2199 	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2200 }
2201 
2202 /**
2203  * pci_check_pme_status - Check if given device has generated PME.
2204  * @dev: Device to check.
2205  *
2206  * Check the PME status of the device and if set, clear it and clear PME enable
2207  * (if set).  Return 'true' if PME status and PME enable were both set or
2208  * 'false' otherwise.
2209  */
2210 bool pci_check_pme_status(struct pci_dev *dev)
2211 {
2212 	int pmcsr_pos;
2213 	u16 pmcsr;
2214 	bool ret = false;
2215 
2216 	if (!dev->pm_cap)
2217 		return false;
2218 
2219 	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2220 	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2221 	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2222 		return false;
2223 
2224 	/* Clear PME status. */
2225 	pmcsr |= PCI_PM_CTRL_PME_STATUS;
2226 	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2227 		/* Disable PME to avoid interrupt flood. */
2228 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2229 		ret = true;
2230 	}
2231 
2232 	pci_write_config_word(dev, pmcsr_pos, pmcsr);
2233 
2234 	return ret;
2235 }
2236 
2237 /**
2238  * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2239  * @dev: Device to handle.
2240  * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2241  *
2242  * Check if @dev has generated PME and queue a resume request for it in that
2243  * case.
2244  */
2245 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2246 {
2247 	if (pme_poll_reset && dev->pme_poll)
2248 		dev->pme_poll = false;
2249 
2250 	if (pci_check_pme_status(dev)) {
2251 		pci_wakeup_event(dev);
2252 		pm_request_resume(&dev->dev);
2253 	}
2254 	return 0;
2255 }
2256 
2257 /**
2258  * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2259  * @bus: Top bus of the subtree to walk.
2260  */
2261 void pci_pme_wakeup_bus(struct pci_bus *bus)
2262 {
2263 	if (bus)
2264 		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2265 }
2266 
2267 
2268 /**
2269  * pci_pme_capable - check the capability of PCI device to generate PME#
2270  * @dev: PCI device to handle.
2271  * @state: PCI state from which device will issue PME#.
2272  */
2273 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2274 {
2275 	if (!dev->pm_cap)
2276 		return false;
2277 
2278 	return !!(dev->pme_support & (1 << state));
2279 }
2280 EXPORT_SYMBOL(pci_pme_capable);
2281 
2282 static void pci_pme_list_scan(struct work_struct *work)
2283 {
2284 	struct pci_pme_device *pme_dev, *n;
2285 
2286 	mutex_lock(&pci_pme_list_mutex);
2287 	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2288 		if (pme_dev->dev->pme_poll) {
2289 			struct pci_dev *bridge;
2290 
2291 			bridge = pme_dev->dev->bus->self;
2292 			/*
2293 			 * If bridge is in low power state, the
2294 			 * configuration space of subordinate devices
2295 			 * may be not accessible
2296 			 */
2297 			if (bridge && bridge->current_state != PCI_D0)
2298 				continue;
2299 			/*
2300 			 * If the device is in D3cold it should not be
2301 			 * polled either.
2302 			 */
2303 			if (pme_dev->dev->current_state == PCI_D3cold)
2304 				continue;
2305 
2306 			pci_pme_wakeup(pme_dev->dev, NULL);
2307 		} else {
2308 			list_del(&pme_dev->list);
2309 			kfree(pme_dev);
2310 		}
2311 	}
2312 	if (!list_empty(&pci_pme_list))
2313 		queue_delayed_work(system_freezable_wq, &pci_pme_work,
2314 				   msecs_to_jiffies(PME_TIMEOUT));
2315 	mutex_unlock(&pci_pme_list_mutex);
2316 }
2317 
2318 static void __pci_pme_active(struct pci_dev *dev, bool enable)
2319 {
2320 	u16 pmcsr;
2321 
2322 	if (!dev->pme_support)
2323 		return;
2324 
2325 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2326 	/* Clear PME_Status by writing 1 to it and enable PME# */
2327 	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2328 	if (!enable)
2329 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2330 
2331 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2332 }
2333 
2334 /**
2335  * pci_pme_restore - Restore PME configuration after config space restore.
2336  * @dev: PCI device to update.
2337  */
2338 void pci_pme_restore(struct pci_dev *dev)
2339 {
2340 	u16 pmcsr;
2341 
2342 	if (!dev->pme_support)
2343 		return;
2344 
2345 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2346 	if (dev->wakeup_prepared) {
2347 		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2348 		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2349 	} else {
2350 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2351 		pmcsr |= PCI_PM_CTRL_PME_STATUS;
2352 	}
2353 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2354 }
2355 
2356 /**
2357  * pci_pme_active - enable or disable PCI device's PME# function
2358  * @dev: PCI device to handle.
2359  * @enable: 'true' to enable PME# generation; 'false' to disable it.
2360  *
2361  * The caller must verify that the device is capable of generating PME# before
2362  * calling this function with @enable equal to 'true'.
2363  */
2364 void pci_pme_active(struct pci_dev *dev, bool enable)
2365 {
2366 	__pci_pme_active(dev, enable);
2367 
2368 	/*
2369 	 * PCI (as opposed to PCIe) PME requires that the device have
2370 	 * its PME# line hooked up correctly. Not all hardware vendors
2371 	 * do this, so the PME never gets delivered and the device
2372 	 * remains asleep. The easiest way around this is to
2373 	 * periodically walk the list of suspended devices and check
2374 	 * whether any have their PME flag set. The assumption is that
2375 	 * we'll wake up often enough anyway that this won't be a huge
2376 	 * hit, and the power savings from the devices will still be a
2377 	 * win.
2378 	 *
2379 	 * Although PCIe uses in-band PME message instead of PME# line
2380 	 * to report PME, PME does not work for some PCIe devices in
2381 	 * reality.  For example, there are devices that set their PME
2382 	 * status bits, but don't really bother to send a PME message;
2383 	 * there are PCI Express Root Ports that don't bother to
2384 	 * trigger interrupts when they receive PME messages from the
2385 	 * devices below.  So PME poll is used for PCIe devices too.
2386 	 */
2387 
2388 	if (dev->pme_poll) {
2389 		struct pci_pme_device *pme_dev;
2390 		if (enable) {
2391 			pme_dev = kmalloc(sizeof(struct pci_pme_device),
2392 					  GFP_KERNEL);
2393 			if (!pme_dev) {
2394 				pci_warn(dev, "can't enable PME#\n");
2395 				return;
2396 			}
2397 			pme_dev->dev = dev;
2398 			mutex_lock(&pci_pme_list_mutex);
2399 			list_add(&pme_dev->list, &pci_pme_list);
2400 			if (list_is_singular(&pci_pme_list))
2401 				queue_delayed_work(system_freezable_wq,
2402 						   &pci_pme_work,
2403 						   msecs_to_jiffies(PME_TIMEOUT));
2404 			mutex_unlock(&pci_pme_list_mutex);
2405 		} else {
2406 			mutex_lock(&pci_pme_list_mutex);
2407 			list_for_each_entry(pme_dev, &pci_pme_list, list) {
2408 				if (pme_dev->dev == dev) {
2409 					list_del(&pme_dev->list);
2410 					kfree(pme_dev);
2411 					break;
2412 				}
2413 			}
2414 			mutex_unlock(&pci_pme_list_mutex);
2415 		}
2416 	}
2417 
2418 	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2419 }
2420 EXPORT_SYMBOL(pci_pme_active);
2421 
2422 /**
2423  * __pci_enable_wake - enable PCI device as wakeup event source
2424  * @dev: PCI device affected
2425  * @state: PCI state from which device will issue wakeup events
2426  * @enable: True to enable event generation; false to disable
2427  *
2428  * This enables the device as a wakeup event source, or disables it.
2429  * When such events involves platform-specific hooks, those hooks are
2430  * called automatically by this routine.
2431  *
2432  * Devices with legacy power management (no standard PCI PM capabilities)
2433  * always require such platform hooks.
2434  *
2435  * RETURN VALUE:
2436  * 0 is returned on success
2437  * -EINVAL is returned if device is not supposed to wake up the system
2438  * Error code depending on the platform is returned if both the platform and
2439  * the native mechanism fail to enable the generation of wake-up events
2440  */
2441 static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2442 {
2443 	int ret = 0;
2444 
2445 	/*
2446 	 * Bridges that are not power-manageable directly only signal
2447 	 * wakeup on behalf of subordinate devices which is set up
2448 	 * elsewhere, so skip them. However, bridges that are
2449 	 * power-manageable may signal wakeup for themselves (for example,
2450 	 * on a hotplug event) and they need to be covered here.
2451 	 */
2452 	if (!pci_power_manageable(dev))
2453 		return 0;
2454 
2455 	/* Don't do the same thing twice in a row for one device. */
2456 	if (!!enable == !!dev->wakeup_prepared)
2457 		return 0;
2458 
2459 	/*
2460 	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2461 	 * Anderson we should be doing PME# wake enable followed by ACPI wake
2462 	 * enable.  To disable wake-up we call the platform first, for symmetry.
2463 	 */
2464 
2465 	if (enable) {
2466 		int error;
2467 
2468 		if (pci_pme_capable(dev, state))
2469 			pci_pme_active(dev, true);
2470 		else
2471 			ret = 1;
2472 		error = platform_pci_set_wakeup(dev, true);
2473 		if (ret)
2474 			ret = error;
2475 		if (!ret)
2476 			dev->wakeup_prepared = true;
2477 	} else {
2478 		platform_pci_set_wakeup(dev, false);
2479 		pci_pme_active(dev, false);
2480 		dev->wakeup_prepared = false;
2481 	}
2482 
2483 	return ret;
2484 }
2485 
2486 /**
2487  * pci_enable_wake - change wakeup settings for a PCI device
2488  * @pci_dev: Target device
2489  * @state: PCI state from which device will issue wakeup events
2490  * @enable: Whether or not to enable event generation
2491  *
2492  * If @enable is set, check device_may_wakeup() for the device before calling
2493  * __pci_enable_wake() for it.
2494  */
2495 int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2496 {
2497 	if (enable && !device_may_wakeup(&pci_dev->dev))
2498 		return -EINVAL;
2499 
2500 	return __pci_enable_wake(pci_dev, state, enable);
2501 }
2502 EXPORT_SYMBOL(pci_enable_wake);
2503 
2504 /**
2505  * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2506  * @dev: PCI device to prepare
2507  * @enable: True to enable wake-up event generation; false to disable
2508  *
2509  * Many drivers want the device to wake up the system from D3_hot or D3_cold
2510  * and this function allows them to set that up cleanly - pci_enable_wake()
2511  * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2512  * ordering constraints.
2513  *
2514  * This function only returns error code if the device is not allowed to wake
2515  * up the system from sleep or it is not capable of generating PME# from both
2516  * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2517  */
2518 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2519 {
2520 	return pci_pme_capable(dev, PCI_D3cold) ?
2521 			pci_enable_wake(dev, PCI_D3cold, enable) :
2522 			pci_enable_wake(dev, PCI_D3hot, enable);
2523 }
2524 EXPORT_SYMBOL(pci_wake_from_d3);
2525 
2526 /**
2527  * pci_target_state - find an appropriate low power state for a given PCI dev
2528  * @dev: PCI device
2529  * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2530  *
2531  * Use underlying platform code to find a supported low power state for @dev.
2532  * If the platform can't manage @dev, return the deepest state from which it
2533  * can generate wake events, based on any available PME info.
2534  */
2535 static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2536 {
2537 	pci_power_t target_state = PCI_D3hot;
2538 
2539 	if (platform_pci_power_manageable(dev)) {
2540 		/*
2541 		 * Call the platform to find the target state for the device.
2542 		 */
2543 		pci_power_t state = platform_pci_choose_state(dev);
2544 
2545 		switch (state) {
2546 		case PCI_POWER_ERROR:
2547 		case PCI_UNKNOWN:
2548 			break;
2549 		case PCI_D1:
2550 		case PCI_D2:
2551 			if (pci_no_d1d2(dev))
2552 				break;
2553 			fallthrough;
2554 		default:
2555 			target_state = state;
2556 		}
2557 
2558 		return target_state;
2559 	}
2560 
2561 	if (!dev->pm_cap)
2562 		target_state = PCI_D0;
2563 
2564 	/*
2565 	 * If the device is in D3cold even though it's not power-manageable by
2566 	 * the platform, it may have been powered down by non-standard means.
2567 	 * Best to let it slumber.
2568 	 */
2569 	if (dev->current_state == PCI_D3cold)
2570 		target_state = PCI_D3cold;
2571 
2572 	if (wakeup) {
2573 		/*
2574 		 * Find the deepest state from which the device can generate
2575 		 * PME#.
2576 		 */
2577 		if (dev->pme_support) {
2578 			while (target_state
2579 			      && !(dev->pme_support & (1 << target_state)))
2580 				target_state--;
2581 		}
2582 	}
2583 
2584 	return target_state;
2585 }
2586 
2587 /**
2588  * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2589  *			  into a sleep state
2590  * @dev: Device to handle.
2591  *
2592  * Choose the power state appropriate for the device depending on whether
2593  * it can wake up the system and/or is power manageable by the platform
2594  * (PCI_D3hot is the default) and put the device into that state.
2595  */
2596 int pci_prepare_to_sleep(struct pci_dev *dev)
2597 {
2598 	bool wakeup = device_may_wakeup(&dev->dev);
2599 	pci_power_t target_state = pci_target_state(dev, wakeup);
2600 	int error;
2601 
2602 	if (target_state == PCI_POWER_ERROR)
2603 		return -EIO;
2604 
2605 	/*
2606 	 * There are systems (for example, Intel mobile chips since Coffee
2607 	 * Lake) where the power drawn while suspended can be significantly
2608 	 * reduced by disabling PTM on PCIe root ports as this allows the
2609 	 * port to enter a lower-power PM state and the SoC to reach a
2610 	 * lower-power idle state as a whole.
2611 	 */
2612 	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2613 		pci_disable_ptm(dev);
2614 
2615 	pci_enable_wake(dev, target_state, wakeup);
2616 
2617 	error = pci_set_power_state(dev, target_state);
2618 
2619 	if (error) {
2620 		pci_enable_wake(dev, target_state, false);
2621 		pci_restore_ptm_state(dev);
2622 	}
2623 
2624 	return error;
2625 }
2626 EXPORT_SYMBOL(pci_prepare_to_sleep);
2627 
2628 /**
2629  * pci_back_from_sleep - turn PCI device on during system-wide transition
2630  *			 into working state
2631  * @dev: Device to handle.
2632  *
2633  * Disable device's system wake-up capability and put it into D0.
2634  */
2635 int pci_back_from_sleep(struct pci_dev *dev)
2636 {
2637 	pci_enable_wake(dev, PCI_D0, false);
2638 	return pci_set_power_state(dev, PCI_D0);
2639 }
2640 EXPORT_SYMBOL(pci_back_from_sleep);
2641 
2642 /**
2643  * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2644  * @dev: PCI device being suspended.
2645  *
2646  * Prepare @dev to generate wake-up events at run time and put it into a low
2647  * power state.
2648  */
2649 int pci_finish_runtime_suspend(struct pci_dev *dev)
2650 {
2651 	pci_power_t target_state;
2652 	int error;
2653 
2654 	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2655 	if (target_state == PCI_POWER_ERROR)
2656 		return -EIO;
2657 
2658 	dev->runtime_d3cold = target_state == PCI_D3cold;
2659 
2660 	/*
2661 	 * There are systems (for example, Intel mobile chips since Coffee
2662 	 * Lake) where the power drawn while suspended can be significantly
2663 	 * reduced by disabling PTM on PCIe root ports as this allows the
2664 	 * port to enter a lower-power PM state and the SoC to reach a
2665 	 * lower-power idle state as a whole.
2666 	 */
2667 	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2668 		pci_disable_ptm(dev);
2669 
2670 	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2671 
2672 	error = pci_set_power_state(dev, target_state);
2673 
2674 	if (error) {
2675 		pci_enable_wake(dev, target_state, false);
2676 		pci_restore_ptm_state(dev);
2677 		dev->runtime_d3cold = false;
2678 	}
2679 
2680 	return error;
2681 }
2682 
2683 /**
2684  * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2685  * @dev: Device to check.
2686  *
2687  * Return true if the device itself is capable of generating wake-up events
2688  * (through the platform or using the native PCIe PME) or if the device supports
2689  * PME and one of its upstream bridges can generate wake-up events.
2690  */
2691 bool pci_dev_run_wake(struct pci_dev *dev)
2692 {
2693 	struct pci_bus *bus = dev->bus;
2694 
2695 	if (!dev->pme_support)
2696 		return false;
2697 
2698 	/* PME-capable in principle, but not from the target power state */
2699 	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2700 		return false;
2701 
2702 	if (device_can_wakeup(&dev->dev))
2703 		return true;
2704 
2705 	while (bus->parent) {
2706 		struct pci_dev *bridge = bus->self;
2707 
2708 		if (device_can_wakeup(&bridge->dev))
2709 			return true;
2710 
2711 		bus = bus->parent;
2712 	}
2713 
2714 	/* We have reached the root bus. */
2715 	if (bus->bridge)
2716 		return device_can_wakeup(bus->bridge);
2717 
2718 	return false;
2719 }
2720 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2721 
2722 /**
2723  * pci_dev_need_resume - Check if it is necessary to resume the device.
2724  * @pci_dev: Device to check.
2725  *
2726  * Return 'true' if the device is not runtime-suspended or it has to be
2727  * reconfigured due to wakeup settings difference between system and runtime
2728  * suspend, or the current power state of it is not suitable for the upcoming
2729  * (system-wide) transition.
2730  */
2731 bool pci_dev_need_resume(struct pci_dev *pci_dev)
2732 {
2733 	struct device *dev = &pci_dev->dev;
2734 	pci_power_t target_state;
2735 
2736 	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2737 		return true;
2738 
2739 	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2740 
2741 	/*
2742 	 * If the earlier platform check has not triggered, D3cold is just power
2743 	 * removal on top of D3hot, so no need to resume the device in that
2744 	 * case.
2745 	 */
2746 	return target_state != pci_dev->current_state &&
2747 		target_state != PCI_D3cold &&
2748 		pci_dev->current_state != PCI_D3hot;
2749 }
2750 
2751 /**
2752  * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2753  * @pci_dev: Device to check.
2754  *
2755  * If the device is suspended and it is not configured for system wakeup,
2756  * disable PME for it to prevent it from waking up the system unnecessarily.
2757  *
2758  * Note that if the device's power state is D3cold and the platform check in
2759  * pci_dev_need_resume() has not triggered, the device's configuration need not
2760  * be changed.
2761  */
2762 void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2763 {
2764 	struct device *dev = &pci_dev->dev;
2765 
2766 	spin_lock_irq(&dev->power.lock);
2767 
2768 	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2769 	    pci_dev->current_state < PCI_D3cold)
2770 		__pci_pme_active(pci_dev, false);
2771 
2772 	spin_unlock_irq(&dev->power.lock);
2773 }
2774 
2775 /**
2776  * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2777  * @pci_dev: Device to handle.
2778  *
2779  * If the device is runtime suspended and wakeup-capable, enable PME for it as
2780  * it might have been disabled during the prepare phase of system suspend if
2781  * the device was not configured for system wakeup.
2782  */
2783 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2784 {
2785 	struct device *dev = &pci_dev->dev;
2786 
2787 	if (!pci_dev_run_wake(pci_dev))
2788 		return;
2789 
2790 	spin_lock_irq(&dev->power.lock);
2791 
2792 	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2793 		__pci_pme_active(pci_dev, true);
2794 
2795 	spin_unlock_irq(&dev->power.lock);
2796 }
2797 
2798 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2799 {
2800 	struct device *dev = &pdev->dev;
2801 	struct device *parent = dev->parent;
2802 
2803 	if (parent)
2804 		pm_runtime_get_sync(parent);
2805 	pm_runtime_get_noresume(dev);
2806 	/*
2807 	 * pdev->current_state is set to PCI_D3cold during suspending,
2808 	 * so wait until suspending completes
2809 	 */
2810 	pm_runtime_barrier(dev);
2811 	/*
2812 	 * Only need to resume devices in D3cold, because config
2813 	 * registers are still accessible for devices suspended but
2814 	 * not in D3cold.
2815 	 */
2816 	if (pdev->current_state == PCI_D3cold)
2817 		pm_runtime_resume(dev);
2818 }
2819 
2820 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2821 {
2822 	struct device *dev = &pdev->dev;
2823 	struct device *parent = dev->parent;
2824 
2825 	pm_runtime_put(dev);
2826 	if (parent)
2827 		pm_runtime_put_sync(parent);
2828 }
2829 
2830 static const struct dmi_system_id bridge_d3_blacklist[] = {
2831 #ifdef CONFIG_X86
2832 	{
2833 		/*
2834 		 * Gigabyte X299 root port is not marked as hotplug capable
2835 		 * which allows Linux to power manage it.  However, this
2836 		 * confuses the BIOS SMI handler so don't power manage root
2837 		 * ports on that system.
2838 		 */
2839 		.ident = "X299 DESIGNARE EX-CF",
2840 		.matches = {
2841 			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2842 			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2843 		},
2844 	},
2845 #endif
2846 	{ }
2847 };
2848 
2849 /**
2850  * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2851  * @bridge: Bridge to check
2852  *
2853  * This function checks if it is possible to move the bridge to D3.
2854  * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
2855  */
2856 bool pci_bridge_d3_possible(struct pci_dev *bridge)
2857 {
2858 	if (!pci_is_pcie(bridge))
2859 		return false;
2860 
2861 	switch (pci_pcie_type(bridge)) {
2862 	case PCI_EXP_TYPE_ROOT_PORT:
2863 	case PCI_EXP_TYPE_UPSTREAM:
2864 	case PCI_EXP_TYPE_DOWNSTREAM:
2865 		if (pci_bridge_d3_disable)
2866 			return false;
2867 
2868 		/*
2869 		 * Hotplug ports handled by firmware in System Management Mode
2870 		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
2871 		 */
2872 		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
2873 			return false;
2874 
2875 		if (pci_bridge_d3_force)
2876 			return true;
2877 
2878 		/* Even the oldest 2010 Thunderbolt controller supports D3. */
2879 		if (bridge->is_thunderbolt)
2880 			return true;
2881 
2882 		/* Platform might know better if the bridge supports D3 */
2883 		if (platform_pci_bridge_d3(bridge))
2884 			return true;
2885 
2886 		/*
2887 		 * Hotplug ports handled natively by the OS were not validated
2888 		 * by vendors for runtime D3 at least until 2018 because there
2889 		 * was no OS support.
2890 		 */
2891 		if (bridge->is_hotplug_bridge)
2892 			return false;
2893 
2894 		if (dmi_check_system(bridge_d3_blacklist))
2895 			return false;
2896 
2897 		/*
2898 		 * It should be safe to put PCIe ports from 2015 or newer
2899 		 * to D3.
2900 		 */
2901 		if (dmi_get_bios_year() >= 2015)
2902 			return true;
2903 		break;
2904 	}
2905 
2906 	return false;
2907 }
2908 
2909 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2910 {
2911 	bool *d3cold_ok = data;
2912 
2913 	if (/* The device needs to be allowed to go D3cold ... */
2914 	    dev->no_d3cold || !dev->d3cold_allowed ||
2915 
2916 	    /* ... and if it is wakeup capable to do so from D3cold. */
2917 	    (device_may_wakeup(&dev->dev) &&
2918 	     !pci_pme_capable(dev, PCI_D3cold)) ||
2919 
2920 	    /* If it is a bridge it must be allowed to go to D3. */
2921 	    !pci_power_manageable(dev))
2922 
2923 		*d3cold_ok = false;
2924 
2925 	return !*d3cold_ok;
2926 }
2927 
2928 /*
2929  * pci_bridge_d3_update - Update bridge D3 capabilities
2930  * @dev: PCI device which is changed
2931  *
2932  * Update upstream bridge PM capabilities accordingly depending on if the
2933  * device PM configuration was changed or the device is being removed.  The
2934  * change is also propagated upstream.
2935  */
2936 void pci_bridge_d3_update(struct pci_dev *dev)
2937 {
2938 	bool remove = !device_is_registered(&dev->dev);
2939 	struct pci_dev *bridge;
2940 	bool d3cold_ok = true;
2941 
2942 	bridge = pci_upstream_bridge(dev);
2943 	if (!bridge || !pci_bridge_d3_possible(bridge))
2944 		return;
2945 
2946 	/*
2947 	 * If D3 is currently allowed for the bridge, removing one of its
2948 	 * children won't change that.
2949 	 */
2950 	if (remove && bridge->bridge_d3)
2951 		return;
2952 
2953 	/*
2954 	 * If D3 is currently allowed for the bridge and a child is added or
2955 	 * changed, disallowance of D3 can only be caused by that child, so
2956 	 * we only need to check that single device, not any of its siblings.
2957 	 *
2958 	 * If D3 is currently not allowed for the bridge, checking the device
2959 	 * first may allow us to skip checking its siblings.
2960 	 */
2961 	if (!remove)
2962 		pci_dev_check_d3cold(dev, &d3cold_ok);
2963 
2964 	/*
2965 	 * If D3 is currently not allowed for the bridge, this may be caused
2966 	 * either by the device being changed/removed or any of its siblings,
2967 	 * so we need to go through all children to find out if one of them
2968 	 * continues to block D3.
2969 	 */
2970 	if (d3cold_ok && !bridge->bridge_d3)
2971 		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
2972 			     &d3cold_ok);
2973 
2974 	if (bridge->bridge_d3 != d3cold_ok) {
2975 		bridge->bridge_d3 = d3cold_ok;
2976 		/* Propagate change to upstream bridges */
2977 		pci_bridge_d3_update(bridge);
2978 	}
2979 }
2980 
2981 /**
2982  * pci_d3cold_enable - Enable D3cold for device
2983  * @dev: PCI device to handle
2984  *
2985  * This function can be used in drivers to enable D3cold from the device
2986  * they handle.  It also updates upstream PCI bridge PM capabilities
2987  * accordingly.
2988  */
2989 void pci_d3cold_enable(struct pci_dev *dev)
2990 {
2991 	if (dev->no_d3cold) {
2992 		dev->no_d3cold = false;
2993 		pci_bridge_d3_update(dev);
2994 	}
2995 }
2996 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
2997 
2998 /**
2999  * pci_d3cold_disable - Disable D3cold for device
3000  * @dev: PCI device to handle
3001  *
3002  * This function can be used in drivers to disable D3cold from the device
3003  * they handle.  It also updates upstream PCI bridge PM capabilities
3004  * accordingly.
3005  */
3006 void pci_d3cold_disable(struct pci_dev *dev)
3007 {
3008 	if (!dev->no_d3cold) {
3009 		dev->no_d3cold = true;
3010 		pci_bridge_d3_update(dev);
3011 	}
3012 }
3013 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
3014 
3015 /**
3016  * pci_pm_init - Initialize PM functions of given PCI device
3017  * @dev: PCI device to handle.
3018  */
3019 void pci_pm_init(struct pci_dev *dev)
3020 {
3021 	int pm;
3022 	u16 status;
3023 	u16 pmc;
3024 
3025 	pm_runtime_forbid(&dev->dev);
3026 	pm_runtime_set_active(&dev->dev);
3027 	pm_runtime_enable(&dev->dev);
3028 	device_enable_async_suspend(&dev->dev);
3029 	dev->wakeup_prepared = false;
3030 
3031 	dev->pm_cap = 0;
3032 	dev->pme_support = 0;
3033 
3034 	/* find PCI PM capability in list */
3035 	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3036 	if (!pm)
3037 		return;
3038 	/* Check device's ability to generate PME# */
3039 	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3040 
3041 	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3042 		pci_err(dev, "unsupported PM cap regs version (%u)\n",
3043 			pmc & PCI_PM_CAP_VER_MASK);
3044 		return;
3045 	}
3046 
3047 	dev->pm_cap = pm;
3048 	dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3049 	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3050 	dev->bridge_d3 = pci_bridge_d3_possible(dev);
3051 	dev->d3cold_allowed = true;
3052 
3053 	dev->d1_support = false;
3054 	dev->d2_support = false;
3055 	if (!pci_no_d1d2(dev)) {
3056 		if (pmc & PCI_PM_CAP_D1)
3057 			dev->d1_support = true;
3058 		if (pmc & PCI_PM_CAP_D2)
3059 			dev->d2_support = true;
3060 
3061 		if (dev->d1_support || dev->d2_support)
3062 			pci_info(dev, "supports%s%s\n",
3063 				   dev->d1_support ? " D1" : "",
3064 				   dev->d2_support ? " D2" : "");
3065 	}
3066 
3067 	pmc &= PCI_PM_CAP_PME_MASK;
3068 	if (pmc) {
3069 		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3070 			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3071 			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3072 			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3073 			 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3074 			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3075 		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
3076 		dev->pme_poll = true;
3077 		/*
3078 		 * Make device's PM flags reflect the wake-up capability, but
3079 		 * let the user space enable it to wake up the system as needed.
3080 		 */
3081 		device_set_wakeup_capable(&dev->dev, true);
3082 		/* Disable the PME# generation functionality */
3083 		pci_pme_active(dev, false);
3084 	}
3085 
3086 	pci_read_config_word(dev, PCI_STATUS, &status);
3087 	if (status & PCI_STATUS_IMM_READY)
3088 		dev->imm_ready = 1;
3089 }
3090 
3091 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3092 {
3093 	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3094 
3095 	switch (prop) {
3096 	case PCI_EA_P_MEM:
3097 	case PCI_EA_P_VF_MEM:
3098 		flags |= IORESOURCE_MEM;
3099 		break;
3100 	case PCI_EA_P_MEM_PREFETCH:
3101 	case PCI_EA_P_VF_MEM_PREFETCH:
3102 		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3103 		break;
3104 	case PCI_EA_P_IO:
3105 		flags |= IORESOURCE_IO;
3106 		break;
3107 	default:
3108 		return 0;
3109 	}
3110 
3111 	return flags;
3112 }
3113 
3114 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3115 					    u8 prop)
3116 {
3117 	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3118 		return &dev->resource[bei];
3119 #ifdef CONFIG_PCI_IOV
3120 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3121 		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3122 		return &dev->resource[PCI_IOV_RESOURCES +
3123 				      bei - PCI_EA_BEI_VF_BAR0];
3124 #endif
3125 	else if (bei == PCI_EA_BEI_ROM)
3126 		return &dev->resource[PCI_ROM_RESOURCE];
3127 	else
3128 		return NULL;
3129 }
3130 
3131 /* Read an Enhanced Allocation (EA) entry */
3132 static int pci_ea_read(struct pci_dev *dev, int offset)
3133 {
3134 	struct resource *res;
3135 	int ent_size, ent_offset = offset;
3136 	resource_size_t start, end;
3137 	unsigned long flags;
3138 	u32 dw0, bei, base, max_offset;
3139 	u8 prop;
3140 	bool support_64 = (sizeof(resource_size_t) >= 8);
3141 
3142 	pci_read_config_dword(dev, ent_offset, &dw0);
3143 	ent_offset += 4;
3144 
3145 	/* Entry size field indicates DWORDs after 1st */
3146 	ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
3147 
3148 	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
3149 		goto out;
3150 
3151 	bei = (dw0 & PCI_EA_BEI) >> 4;
3152 	prop = (dw0 & PCI_EA_PP) >> 8;
3153 
3154 	/*
3155 	 * If the Property is in the reserved range, try the Secondary
3156 	 * Property instead.
3157 	 */
3158 	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3159 		prop = (dw0 & PCI_EA_SP) >> 16;
3160 	if (prop > PCI_EA_P_BRIDGE_IO)
3161 		goto out;
3162 
3163 	res = pci_ea_get_resource(dev, bei, prop);
3164 	if (!res) {
3165 		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3166 		goto out;
3167 	}
3168 
3169 	flags = pci_ea_flags(dev, prop);
3170 	if (!flags) {
3171 		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3172 		goto out;
3173 	}
3174 
3175 	/* Read Base */
3176 	pci_read_config_dword(dev, ent_offset, &base);
3177 	start = (base & PCI_EA_FIELD_MASK);
3178 	ent_offset += 4;
3179 
3180 	/* Read MaxOffset */
3181 	pci_read_config_dword(dev, ent_offset, &max_offset);
3182 	ent_offset += 4;
3183 
3184 	/* Read Base MSBs (if 64-bit entry) */
3185 	if (base & PCI_EA_IS_64) {
3186 		u32 base_upper;
3187 
3188 		pci_read_config_dword(dev, ent_offset, &base_upper);
3189 		ent_offset += 4;
3190 
3191 		flags |= IORESOURCE_MEM_64;
3192 
3193 		/* entry starts above 32-bit boundary, can't use */
3194 		if (!support_64 && base_upper)
3195 			goto out;
3196 
3197 		if (support_64)
3198 			start |= ((u64)base_upper << 32);
3199 	}
3200 
3201 	end = start + (max_offset | 0x03);
3202 
3203 	/* Read MaxOffset MSBs (if 64-bit entry) */
3204 	if (max_offset & PCI_EA_IS_64) {
3205 		u32 max_offset_upper;
3206 
3207 		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3208 		ent_offset += 4;
3209 
3210 		flags |= IORESOURCE_MEM_64;
3211 
3212 		/* entry too big, can't use */
3213 		if (!support_64 && max_offset_upper)
3214 			goto out;
3215 
3216 		if (support_64)
3217 			end += ((u64)max_offset_upper << 32);
3218 	}
3219 
3220 	if (end < start) {
3221 		pci_err(dev, "EA Entry crosses address boundary\n");
3222 		goto out;
3223 	}
3224 
3225 	if (ent_size != ent_offset - offset) {
3226 		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3227 			ent_size, ent_offset - offset);
3228 		goto out;
3229 	}
3230 
3231 	res->name = pci_name(dev);
3232 	res->start = start;
3233 	res->end = end;
3234 	res->flags = flags;
3235 
3236 	if (bei <= PCI_EA_BEI_BAR5)
3237 		pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3238 			   bei, res, prop);
3239 	else if (bei == PCI_EA_BEI_ROM)
3240 		pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3241 			   res, prop);
3242 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3243 		pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3244 			   bei - PCI_EA_BEI_VF_BAR0, res, prop);
3245 	else
3246 		pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3247 			   bei, res, prop);
3248 
3249 out:
3250 	return offset + ent_size;
3251 }
3252 
3253 /* Enhanced Allocation Initialization */
3254 void pci_ea_init(struct pci_dev *dev)
3255 {
3256 	int ea;
3257 	u8 num_ent;
3258 	int offset;
3259 	int i;
3260 
3261 	/* find PCI EA capability in list */
3262 	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3263 	if (!ea)
3264 		return;
3265 
3266 	/* determine the number of entries */
3267 	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3268 					&num_ent);
3269 	num_ent &= PCI_EA_NUM_ENT_MASK;
3270 
3271 	offset = ea + PCI_EA_FIRST_ENT;
3272 
3273 	/* Skip DWORD 2 for type 1 functions */
3274 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3275 		offset += 4;
3276 
3277 	/* parse each EA entry */
3278 	for (i = 0; i < num_ent; ++i)
3279 		offset = pci_ea_read(dev, offset);
3280 }
3281 
3282 static void pci_add_saved_cap(struct pci_dev *pci_dev,
3283 	struct pci_cap_saved_state *new_cap)
3284 {
3285 	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3286 }
3287 
3288 /**
3289  * _pci_add_cap_save_buffer - allocate buffer for saving given
3290  *			      capability registers
3291  * @dev: the PCI device
3292  * @cap: the capability to allocate the buffer for
3293  * @extended: Standard or Extended capability ID
3294  * @size: requested size of the buffer
3295  */
3296 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3297 				    bool extended, unsigned int size)
3298 {
3299 	int pos;
3300 	struct pci_cap_saved_state *save_state;
3301 
3302 	if (extended)
3303 		pos = pci_find_ext_capability(dev, cap);
3304 	else
3305 		pos = pci_find_capability(dev, cap);
3306 
3307 	if (!pos)
3308 		return 0;
3309 
3310 	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3311 	if (!save_state)
3312 		return -ENOMEM;
3313 
3314 	save_state->cap.cap_nr = cap;
3315 	save_state->cap.cap_extended = extended;
3316 	save_state->cap.size = size;
3317 	pci_add_saved_cap(dev, save_state);
3318 
3319 	return 0;
3320 }
3321 
3322 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3323 {
3324 	return _pci_add_cap_save_buffer(dev, cap, false, size);
3325 }
3326 
3327 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3328 {
3329 	return _pci_add_cap_save_buffer(dev, cap, true, size);
3330 }
3331 
3332 /**
3333  * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3334  * @dev: the PCI device
3335  */
3336 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3337 {
3338 	int error;
3339 
3340 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3341 					PCI_EXP_SAVE_REGS * sizeof(u16));
3342 	if (error)
3343 		pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3344 
3345 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3346 	if (error)
3347 		pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3348 
3349 	error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3350 					    2 * sizeof(u16));
3351 	if (error)
3352 		pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3353 
3354 	pci_allocate_vc_save_buffers(dev);
3355 }
3356 
3357 void pci_free_cap_save_buffers(struct pci_dev *dev)
3358 {
3359 	struct pci_cap_saved_state *tmp;
3360 	struct hlist_node *n;
3361 
3362 	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3363 		kfree(tmp);
3364 }
3365 
3366 /**
3367  * pci_configure_ari - enable or disable ARI forwarding
3368  * @dev: the PCI device
3369  *
3370  * If @dev and its upstream bridge both support ARI, enable ARI in the
3371  * bridge.  Otherwise, disable ARI in the bridge.
3372  */
3373 void pci_configure_ari(struct pci_dev *dev)
3374 {
3375 	u32 cap;
3376 	struct pci_dev *bridge;
3377 
3378 	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3379 		return;
3380 
3381 	bridge = dev->bus->self;
3382 	if (!bridge)
3383 		return;
3384 
3385 	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3386 	if (!(cap & PCI_EXP_DEVCAP2_ARI))
3387 		return;
3388 
3389 	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3390 		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3391 					 PCI_EXP_DEVCTL2_ARI);
3392 		bridge->ari_enabled = 1;
3393 	} else {
3394 		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3395 					   PCI_EXP_DEVCTL2_ARI);
3396 		bridge->ari_enabled = 0;
3397 	}
3398 }
3399 
3400 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3401 {
3402 	int pos;
3403 	u16 cap, ctrl;
3404 
3405 	pos = pdev->acs_cap;
3406 	if (!pos)
3407 		return false;
3408 
3409 	/*
3410 	 * Except for egress control, capabilities are either required
3411 	 * or only required if controllable.  Features missing from the
3412 	 * capability field can therefore be assumed as hard-wired enabled.
3413 	 */
3414 	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3415 	acs_flags &= (cap | PCI_ACS_EC);
3416 
3417 	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3418 	return (ctrl & acs_flags) == acs_flags;
3419 }
3420 
3421 /**
3422  * pci_acs_enabled - test ACS against required flags for a given device
3423  * @pdev: device to test
3424  * @acs_flags: required PCI ACS flags
3425  *
3426  * Return true if the device supports the provided flags.  Automatically
3427  * filters out flags that are not implemented on multifunction devices.
3428  *
3429  * Note that this interface checks the effective ACS capabilities of the
3430  * device rather than the actual capabilities.  For instance, most single
3431  * function endpoints are not required to support ACS because they have no
3432  * opportunity for peer-to-peer access.  We therefore return 'true'
3433  * regardless of whether the device exposes an ACS capability.  This makes
3434  * it much easier for callers of this function to ignore the actual type
3435  * or topology of the device when testing ACS support.
3436  */
3437 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3438 {
3439 	int ret;
3440 
3441 	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3442 	if (ret >= 0)
3443 		return ret > 0;
3444 
3445 	/*
3446 	 * Conventional PCI and PCI-X devices never support ACS, either
3447 	 * effectively or actually.  The shared bus topology implies that
3448 	 * any device on the bus can receive or snoop DMA.
3449 	 */
3450 	if (!pci_is_pcie(pdev))
3451 		return false;
3452 
3453 	switch (pci_pcie_type(pdev)) {
3454 	/*
3455 	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3456 	 * but since their primary interface is PCI/X, we conservatively
3457 	 * handle them as we would a non-PCIe device.
3458 	 */
3459 	case PCI_EXP_TYPE_PCIE_BRIDGE:
3460 	/*
3461 	 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
3462 	 * applicable... must never implement an ACS Extended Capability...".
3463 	 * This seems arbitrary, but we take a conservative interpretation
3464 	 * of this statement.
3465 	 */
3466 	case PCI_EXP_TYPE_PCI_BRIDGE:
3467 	case PCI_EXP_TYPE_RC_EC:
3468 		return false;
3469 	/*
3470 	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3471 	 * implement ACS in order to indicate their peer-to-peer capabilities,
3472 	 * regardless of whether they are single- or multi-function devices.
3473 	 */
3474 	case PCI_EXP_TYPE_DOWNSTREAM:
3475 	case PCI_EXP_TYPE_ROOT_PORT:
3476 		return pci_acs_flags_enabled(pdev, acs_flags);
3477 	/*
3478 	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3479 	 * implemented by the remaining PCIe types to indicate peer-to-peer
3480 	 * capabilities, but only when they are part of a multifunction
3481 	 * device.  The footnote for section 6.12 indicates the specific
3482 	 * PCIe types included here.
3483 	 */
3484 	case PCI_EXP_TYPE_ENDPOINT:
3485 	case PCI_EXP_TYPE_UPSTREAM:
3486 	case PCI_EXP_TYPE_LEG_END:
3487 	case PCI_EXP_TYPE_RC_END:
3488 		if (!pdev->multifunction)
3489 			break;
3490 
3491 		return pci_acs_flags_enabled(pdev, acs_flags);
3492 	}
3493 
3494 	/*
3495 	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3496 	 * to single function devices with the exception of downstream ports.
3497 	 */
3498 	return true;
3499 }
3500 
3501 /**
3502  * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3503  * @start: starting downstream device
3504  * @end: ending upstream device or NULL to search to the root bus
3505  * @acs_flags: required flags
3506  *
3507  * Walk up a device tree from start to end testing PCI ACS support.  If
3508  * any step along the way does not support the required flags, return false.
3509  */
3510 bool pci_acs_path_enabled(struct pci_dev *start,
3511 			  struct pci_dev *end, u16 acs_flags)
3512 {
3513 	struct pci_dev *pdev, *parent = start;
3514 
3515 	do {
3516 		pdev = parent;
3517 
3518 		if (!pci_acs_enabled(pdev, acs_flags))
3519 			return false;
3520 
3521 		if (pci_is_root_bus(pdev->bus))
3522 			return (end == NULL);
3523 
3524 		parent = pdev->bus->self;
3525 	} while (pdev != end);
3526 
3527 	return true;
3528 }
3529 
3530 /**
3531  * pci_acs_init - Initialize ACS if hardware supports it
3532  * @dev: the PCI device
3533  */
3534 void pci_acs_init(struct pci_dev *dev)
3535 {
3536 	dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3537 
3538 	/*
3539 	 * Attempt to enable ACS regardless of capability because some Root
3540 	 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
3541 	 * the standard ACS capability but still support ACS via those
3542 	 * quirks.
3543 	 */
3544 	pci_enable_acs(dev);
3545 }
3546 
3547 /**
3548  * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3549  * @pdev: PCI device
3550  * @bar: BAR to find
3551  *
3552  * Helper to find the position of the ctrl register for a BAR.
3553  * Returns -ENOTSUPP if resizable BARs are not supported at all.
3554  * Returns -ENOENT if no ctrl register for the BAR could be found.
3555  */
3556 static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3557 {
3558 	unsigned int pos, nbars, i;
3559 	u32 ctrl;
3560 
3561 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3562 	if (!pos)
3563 		return -ENOTSUPP;
3564 
3565 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3566 	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3567 		    PCI_REBAR_CTRL_NBAR_SHIFT;
3568 
3569 	for (i = 0; i < nbars; i++, pos += 8) {
3570 		int bar_idx;
3571 
3572 		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3573 		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3574 		if (bar_idx == bar)
3575 			return pos;
3576 	}
3577 
3578 	return -ENOENT;
3579 }
3580 
3581 /**
3582  * pci_rebar_get_possible_sizes - get possible sizes for BAR
3583  * @pdev: PCI device
3584  * @bar: BAR to query
3585  *
3586  * Get the possible sizes of a resizable BAR as bitmask defined in the spec
3587  * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3588  */
3589 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3590 {
3591 	int pos;
3592 	u32 cap;
3593 
3594 	pos = pci_rebar_find_pos(pdev, bar);
3595 	if (pos < 0)
3596 		return 0;
3597 
3598 	pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3599 	cap &= PCI_REBAR_CAP_SIZES;
3600 
3601 	/* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
3602 	if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3603 	    bar == 0 && cap == 0x7000)
3604 		cap = 0x3f000;
3605 
3606 	return cap >> 4;
3607 }
3608 EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
3609 
3610 /**
3611  * pci_rebar_get_current_size - get the current size of a BAR
3612  * @pdev: PCI device
3613  * @bar: BAR to set size to
3614  *
3615  * Read the size of a BAR from the resizable BAR config.
3616  * Returns size if found or negative error code.
3617  */
3618 int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3619 {
3620 	int pos;
3621 	u32 ctrl;
3622 
3623 	pos = pci_rebar_find_pos(pdev, bar);
3624 	if (pos < 0)
3625 		return pos;
3626 
3627 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3628 	return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3629 }
3630 
3631 /**
3632  * pci_rebar_set_size - set a new size for a BAR
3633  * @pdev: PCI device
3634  * @bar: BAR to set size to
3635  * @size: new size as defined in the spec (0=1MB, 19=512GB)
3636  *
3637  * Set the new size of a BAR as defined in the spec.
3638  * Returns zero if resizing was successful, error code otherwise.
3639  */
3640 int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3641 {
3642 	int pos;
3643 	u32 ctrl;
3644 
3645 	pos = pci_rebar_find_pos(pdev, bar);
3646 	if (pos < 0)
3647 		return pos;
3648 
3649 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3650 	ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3651 	ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3652 	pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3653 	return 0;
3654 }
3655 
3656 /**
3657  * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3658  * @dev: the PCI device
3659  * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3660  *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
3661  *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
3662  *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
3663  *
3664  * Return 0 if all upstream bridges support AtomicOp routing, egress
3665  * blocking is disabled on all upstream ports, and the root port supports
3666  * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3667  * AtomicOp completion), or negative otherwise.
3668  */
3669 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3670 {
3671 	struct pci_bus *bus = dev->bus;
3672 	struct pci_dev *bridge;
3673 	u32 cap, ctl2;
3674 
3675 	if (!pci_is_pcie(dev))
3676 		return -EINVAL;
3677 
3678 	/*
3679 	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3680 	 * AtomicOp requesters.  For now, we only support endpoints as
3681 	 * requesters and root ports as completers.  No endpoints as
3682 	 * completers, and no peer-to-peer.
3683 	 */
3684 
3685 	switch (pci_pcie_type(dev)) {
3686 	case PCI_EXP_TYPE_ENDPOINT:
3687 	case PCI_EXP_TYPE_LEG_END:
3688 	case PCI_EXP_TYPE_RC_END:
3689 		break;
3690 	default:
3691 		return -EINVAL;
3692 	}
3693 
3694 	while (bus->parent) {
3695 		bridge = bus->self;
3696 
3697 		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3698 
3699 		switch (pci_pcie_type(bridge)) {
3700 		/* Ensure switch ports support AtomicOp routing */
3701 		case PCI_EXP_TYPE_UPSTREAM:
3702 		case PCI_EXP_TYPE_DOWNSTREAM:
3703 			if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3704 				return -EINVAL;
3705 			break;
3706 
3707 		/* Ensure root port supports all the sizes we care about */
3708 		case PCI_EXP_TYPE_ROOT_PORT:
3709 			if ((cap & cap_mask) != cap_mask)
3710 				return -EINVAL;
3711 			break;
3712 		}
3713 
3714 		/* Ensure upstream ports don't block AtomicOps on egress */
3715 		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3716 			pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3717 						   &ctl2);
3718 			if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3719 				return -EINVAL;
3720 		}
3721 
3722 		bus = bus->parent;
3723 	}
3724 
3725 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3726 				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3727 	return 0;
3728 }
3729 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
3730 
3731 /**
3732  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
3733  * @dev: the PCI device
3734  * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3735  *
3736  * Perform INTx swizzling for a device behind one level of bridge.  This is
3737  * required by section 9.1 of the PCI-to-PCI bridge specification for devices
3738  * behind bridges on add-in cards.  For devices with ARI enabled, the slot
3739  * number is always 0 (see the Implementation Note in section 2.2.8.1 of
3740  * the PCI Express Base Specification, Revision 2.1)
3741  */
3742 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3743 {
3744 	int slot;
3745 
3746 	if (pci_ari_enabled(dev->bus))
3747 		slot = 0;
3748 	else
3749 		slot = PCI_SLOT(dev->devfn);
3750 
3751 	return (((pin - 1) + slot) % 4) + 1;
3752 }
3753 
3754 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3755 {
3756 	u8 pin;
3757 
3758 	pin = dev->pin;
3759 	if (!pin)
3760 		return -1;
3761 
3762 	while (!pci_is_root_bus(dev->bus)) {
3763 		pin = pci_swizzle_interrupt_pin(dev, pin);
3764 		dev = dev->bus->self;
3765 	}
3766 	*bridge = dev;
3767 	return pin;
3768 }
3769 
3770 /**
3771  * pci_common_swizzle - swizzle INTx all the way to root bridge
3772  * @dev: the PCI device
3773  * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTD, 4=INTD)
3774  *
3775  * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
3776  * bridges all the way up to a PCI root bus.
3777  */
3778 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3779 {
3780 	u8 pin = *pinp;
3781 
3782 	while (!pci_is_root_bus(dev->bus)) {
3783 		pin = pci_swizzle_interrupt_pin(dev, pin);
3784 		dev = dev->bus->self;
3785 	}
3786 	*pinp = pin;
3787 	return PCI_SLOT(dev->devfn);
3788 }
3789 EXPORT_SYMBOL_GPL(pci_common_swizzle);
3790 
3791 /**
3792  * pci_release_region - Release a PCI bar
3793  * @pdev: PCI device whose resources were previously reserved by
3794  *	  pci_request_region()
3795  * @bar: BAR to release
3796  *
3797  * Releases the PCI I/O and memory resources previously reserved by a
3798  * successful call to pci_request_region().  Call this function only
3799  * after all use of the PCI regions has ceased.
3800  */
3801 void pci_release_region(struct pci_dev *pdev, int bar)
3802 {
3803 	struct pci_devres *dr;
3804 
3805 	if (pci_resource_len(pdev, bar) == 0)
3806 		return;
3807 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3808 		release_region(pci_resource_start(pdev, bar),
3809 				pci_resource_len(pdev, bar));
3810 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3811 		release_mem_region(pci_resource_start(pdev, bar),
3812 				pci_resource_len(pdev, bar));
3813 
3814 	dr = find_pci_dr(pdev);
3815 	if (dr)
3816 		dr->region_mask &= ~(1 << bar);
3817 }
3818 EXPORT_SYMBOL(pci_release_region);
3819 
3820 /**
3821  * __pci_request_region - Reserved PCI I/O and memory resource
3822  * @pdev: PCI device whose resources are to be reserved
3823  * @bar: BAR to be reserved
3824  * @res_name: Name to be associated with resource.
3825  * @exclusive: whether the region access is exclusive or not
3826  *
3827  * Mark the PCI region associated with PCI device @pdev BAR @bar as
3828  * being reserved by owner @res_name.  Do not access any
3829  * address inside the PCI regions unless this call returns
3830  * successfully.
3831  *
3832  * If @exclusive is set, then the region is marked so that userspace
3833  * is explicitly not allowed to map the resource via /dev/mem or
3834  * sysfs MMIO access.
3835  *
3836  * Returns 0 on success, or %EBUSY on error.  A warning
3837  * message is also printed on failure.
3838  */
3839 static int __pci_request_region(struct pci_dev *pdev, int bar,
3840 				const char *res_name, int exclusive)
3841 {
3842 	struct pci_devres *dr;
3843 
3844 	if (pci_resource_len(pdev, bar) == 0)
3845 		return 0;
3846 
3847 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3848 		if (!request_region(pci_resource_start(pdev, bar),
3849 			    pci_resource_len(pdev, bar), res_name))
3850 			goto err_out;
3851 	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3852 		if (!__request_mem_region(pci_resource_start(pdev, bar),
3853 					pci_resource_len(pdev, bar), res_name,
3854 					exclusive))
3855 			goto err_out;
3856 	}
3857 
3858 	dr = find_pci_dr(pdev);
3859 	if (dr)
3860 		dr->region_mask |= 1 << bar;
3861 
3862 	return 0;
3863 
3864 err_out:
3865 	pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3866 		 &pdev->resource[bar]);
3867 	return -EBUSY;
3868 }
3869 
3870 /**
3871  * pci_request_region - Reserve PCI I/O and memory resource
3872  * @pdev: PCI device whose resources are to be reserved
3873  * @bar: BAR to be reserved
3874  * @res_name: Name to be associated with resource
3875  *
3876  * Mark the PCI region associated with PCI device @pdev BAR @bar as
3877  * being reserved by owner @res_name.  Do not access any
3878  * address inside the PCI regions unless this call returns
3879  * successfully.
3880  *
3881  * Returns 0 on success, or %EBUSY on error.  A warning
3882  * message is also printed on failure.
3883  */
3884 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3885 {
3886 	return __pci_request_region(pdev, bar, res_name, 0);
3887 }
3888 EXPORT_SYMBOL(pci_request_region);
3889 
3890 /**
3891  * pci_release_selected_regions - Release selected PCI I/O and memory resources
3892  * @pdev: PCI device whose resources were previously reserved
3893  * @bars: Bitmask of BARs to be released
3894  *
3895  * Release selected PCI I/O and memory resources previously reserved.
3896  * Call this function only after all use of the PCI regions has ceased.
3897  */
3898 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3899 {
3900 	int i;
3901 
3902 	for (i = 0; i < PCI_STD_NUM_BARS; i++)
3903 		if (bars & (1 << i))
3904 			pci_release_region(pdev, i);
3905 }
3906 EXPORT_SYMBOL(pci_release_selected_regions);
3907 
3908 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3909 					  const char *res_name, int excl)
3910 {
3911 	int i;
3912 
3913 	for (i = 0; i < PCI_STD_NUM_BARS; i++)
3914 		if (bars & (1 << i))
3915 			if (__pci_request_region(pdev, i, res_name, excl))
3916 				goto err_out;
3917 	return 0;
3918 
3919 err_out:
3920 	while (--i >= 0)
3921 		if (bars & (1 << i))
3922 			pci_release_region(pdev, i);
3923 
3924 	return -EBUSY;
3925 }
3926 
3927 
3928 /**
3929  * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
3930  * @pdev: PCI device whose resources are to be reserved
3931  * @bars: Bitmask of BARs to be requested
3932  * @res_name: Name to be associated with resource
3933  */
3934 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3935 				 const char *res_name)
3936 {
3937 	return __pci_request_selected_regions(pdev, bars, res_name, 0);
3938 }
3939 EXPORT_SYMBOL(pci_request_selected_regions);
3940 
3941 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3942 					   const char *res_name)
3943 {
3944 	return __pci_request_selected_regions(pdev, bars, res_name,
3945 			IORESOURCE_EXCLUSIVE);
3946 }
3947 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3948 
3949 /**
3950  * pci_release_regions - Release reserved PCI I/O and memory resources
3951  * @pdev: PCI device whose resources were previously reserved by
3952  *	  pci_request_regions()
3953  *
3954  * Releases all PCI I/O and memory resources previously reserved by a
3955  * successful call to pci_request_regions().  Call this function only
3956  * after all use of the PCI regions has ceased.
3957  */
3958 
3959 void pci_release_regions(struct pci_dev *pdev)
3960 {
3961 	pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
3962 }
3963 EXPORT_SYMBOL(pci_release_regions);
3964 
3965 /**
3966  * pci_request_regions - Reserve PCI I/O and memory resources
3967  * @pdev: PCI device whose resources are to be reserved
3968  * @res_name: Name to be associated with resource.
3969  *
3970  * Mark all PCI regions associated with PCI device @pdev as
3971  * being reserved by owner @res_name.  Do not access any
3972  * address inside the PCI regions unless this call returns
3973  * successfully.
3974  *
3975  * Returns 0 on success, or %EBUSY on error.  A warning
3976  * message is also printed on failure.
3977  */
3978 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3979 {
3980 	return pci_request_selected_regions(pdev,
3981 			((1 << PCI_STD_NUM_BARS) - 1), res_name);
3982 }
3983 EXPORT_SYMBOL(pci_request_regions);
3984 
3985 /**
3986  * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
3987  * @pdev: PCI device whose resources are to be reserved
3988  * @res_name: Name to be associated with resource.
3989  *
3990  * Mark all PCI regions associated with PCI device @pdev as being reserved
3991  * by owner @res_name.  Do not access any address inside the PCI regions
3992  * unless this call returns successfully.
3993  *
3994  * pci_request_regions_exclusive() will mark the region so that /dev/mem
3995  * and the sysfs MMIO access will not be allowed.
3996  *
3997  * Returns 0 on success, or %EBUSY on error.  A warning message is also
3998  * printed on failure.
3999  */
4000 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
4001 {
4002 	return pci_request_selected_regions_exclusive(pdev,
4003 				((1 << PCI_STD_NUM_BARS) - 1), res_name);
4004 }
4005 EXPORT_SYMBOL(pci_request_regions_exclusive);
4006 
4007 /*
4008  * Record the PCI IO range (expressed as CPU physical address + size).
4009  * Return a negative value if an error has occurred, zero otherwise
4010  */
4011 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
4012 			resource_size_t	size)
4013 {
4014 	int ret = 0;
4015 #ifdef PCI_IOBASE
4016 	struct logic_pio_hwaddr *range;
4017 
4018 	if (!size || addr + size < addr)
4019 		return -EINVAL;
4020 
4021 	range = kzalloc(sizeof(*range), GFP_ATOMIC);
4022 	if (!range)
4023 		return -ENOMEM;
4024 
4025 	range->fwnode = fwnode;
4026 	range->size = size;
4027 	range->hw_start = addr;
4028 	range->flags = LOGIC_PIO_CPU_MMIO;
4029 
4030 	ret = logic_pio_register_range(range);
4031 	if (ret)
4032 		kfree(range);
4033 
4034 	/* Ignore duplicates due to deferred probing */
4035 	if (ret == -EEXIST)
4036 		ret = 0;
4037 #endif
4038 
4039 	return ret;
4040 }
4041 
4042 phys_addr_t pci_pio_to_address(unsigned long pio)
4043 {
4044 	phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
4045 
4046 #ifdef PCI_IOBASE
4047 	if (pio >= MMIO_UPPER_LIMIT)
4048 		return address;
4049 
4050 	address = logic_pio_to_hwaddr(pio);
4051 #endif
4052 
4053 	return address;
4054 }
4055 
4056 unsigned long __weak pci_address_to_pio(phys_addr_t address)
4057 {
4058 #ifdef PCI_IOBASE
4059 	return logic_pio_trans_cpuaddr(address);
4060 #else
4061 	if (address > IO_SPACE_LIMIT)
4062 		return (unsigned long)-1;
4063 
4064 	return (unsigned long) address;
4065 #endif
4066 }
4067 
4068 /**
4069  * pci_remap_iospace - Remap the memory mapped I/O space
4070  * @res: Resource describing the I/O space
4071  * @phys_addr: physical address of range to be mapped
4072  *
4073  * Remap the memory mapped I/O space described by the @res and the CPU
4074  * physical address @phys_addr into virtual address space.  Only
4075  * architectures that have memory mapped IO functions defined (and the
4076  * PCI_IOBASE value defined) should call this function.
4077  */
4078 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4079 {
4080 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4081 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4082 
4083 	if (!(res->flags & IORESOURCE_IO))
4084 		return -EINVAL;
4085 
4086 	if (res->end > IO_SPACE_LIMIT)
4087 		return -EINVAL;
4088 
4089 	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4090 				  pgprot_device(PAGE_KERNEL));
4091 #else
4092 	/*
4093 	 * This architecture does not have memory mapped I/O space,
4094 	 * so this function should never be called
4095 	 */
4096 	WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4097 	return -ENODEV;
4098 #endif
4099 }
4100 EXPORT_SYMBOL(pci_remap_iospace);
4101 
4102 /**
4103  * pci_unmap_iospace - Unmap the memory mapped I/O space
4104  * @res: resource to be unmapped
4105  *
4106  * Unmap the CPU virtual address @res from virtual address space.  Only
4107  * architectures that have memory mapped IO functions defined (and the
4108  * PCI_IOBASE value defined) should call this function.
4109  */
4110 void pci_unmap_iospace(struct resource *res)
4111 {
4112 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4113 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4114 
4115 	unmap_kernel_range(vaddr, resource_size(res));
4116 #endif
4117 }
4118 EXPORT_SYMBOL(pci_unmap_iospace);
4119 
4120 static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4121 {
4122 	struct resource **res = ptr;
4123 
4124 	pci_unmap_iospace(*res);
4125 }
4126 
4127 /**
4128  * devm_pci_remap_iospace - Managed pci_remap_iospace()
4129  * @dev: Generic device to remap IO address for
4130  * @res: Resource describing the I/O space
4131  * @phys_addr: physical address of range to be mapped
4132  *
4133  * Managed pci_remap_iospace().  Map is automatically unmapped on driver
4134  * detach.
4135  */
4136 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4137 			   phys_addr_t phys_addr)
4138 {
4139 	const struct resource **ptr;
4140 	int error;
4141 
4142 	ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4143 	if (!ptr)
4144 		return -ENOMEM;
4145 
4146 	error = pci_remap_iospace(res, phys_addr);
4147 	if (error) {
4148 		devres_free(ptr);
4149 	} else	{
4150 		*ptr = res;
4151 		devres_add(dev, ptr);
4152 	}
4153 
4154 	return error;
4155 }
4156 EXPORT_SYMBOL(devm_pci_remap_iospace);
4157 
4158 /**
4159  * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
4160  * @dev: Generic device to remap IO address for
4161  * @offset: Resource address to map
4162  * @size: Size of map
4163  *
4164  * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
4165  * detach.
4166  */
4167 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4168 				      resource_size_t offset,
4169 				      resource_size_t size)
4170 {
4171 	void __iomem **ptr, *addr;
4172 
4173 	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4174 	if (!ptr)
4175 		return NULL;
4176 
4177 	addr = pci_remap_cfgspace(offset, size);
4178 	if (addr) {
4179 		*ptr = addr;
4180 		devres_add(dev, ptr);
4181 	} else
4182 		devres_free(ptr);
4183 
4184 	return addr;
4185 }
4186 EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4187 
4188 /**
4189  * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
4190  * @dev: generic device to handle the resource for
4191  * @res: configuration space resource to be handled
4192  *
4193  * Checks that a resource is a valid memory region, requests the memory
4194  * region and ioremaps with pci_remap_cfgspace() API that ensures the
4195  * proper PCI configuration space memory attributes are guaranteed.
4196  *
4197  * All operations are managed and will be undone on driver detach.
4198  *
4199  * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
4200  * on failure. Usage example::
4201  *
4202  *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4203  *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
4204  *	if (IS_ERR(base))
4205  *		return PTR_ERR(base);
4206  */
4207 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4208 					  struct resource *res)
4209 {
4210 	resource_size_t size;
4211 	const char *name;
4212 	void __iomem *dest_ptr;
4213 
4214 	BUG_ON(!dev);
4215 
4216 	if (!res || resource_type(res) != IORESOURCE_MEM) {
4217 		dev_err(dev, "invalid resource\n");
4218 		return IOMEM_ERR_PTR(-EINVAL);
4219 	}
4220 
4221 	size = resource_size(res);
4222 
4223 	if (res->name)
4224 		name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
4225 				      res->name);
4226 	else
4227 		name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
4228 	if (!name)
4229 		return IOMEM_ERR_PTR(-ENOMEM);
4230 
4231 	if (!devm_request_mem_region(dev, res->start, size, name)) {
4232 		dev_err(dev, "can't request region for resource %pR\n", res);
4233 		return IOMEM_ERR_PTR(-EBUSY);
4234 	}
4235 
4236 	dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4237 	if (!dest_ptr) {
4238 		dev_err(dev, "ioremap failed for resource %pR\n", res);
4239 		devm_release_mem_region(dev, res->start, size);
4240 		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4241 	}
4242 
4243 	return dest_ptr;
4244 }
4245 EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4246 
4247 static void __pci_set_master(struct pci_dev *dev, bool enable)
4248 {
4249 	u16 old_cmd, cmd;
4250 
4251 	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4252 	if (enable)
4253 		cmd = old_cmd | PCI_COMMAND_MASTER;
4254 	else
4255 		cmd = old_cmd & ~PCI_COMMAND_MASTER;
4256 	if (cmd != old_cmd) {
4257 		pci_dbg(dev, "%s bus mastering\n",
4258 			enable ? "enabling" : "disabling");
4259 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4260 	}
4261 	dev->is_busmaster = enable;
4262 }
4263 
4264 /**
4265  * pcibios_setup - process "pci=" kernel boot arguments
4266  * @str: string used to pass in "pci=" kernel boot arguments
4267  *
4268  * Process kernel boot arguments.  This is the default implementation.
4269  * Architecture specific implementations can override this as necessary.
4270  */
4271 char * __weak __init pcibios_setup(char *str)
4272 {
4273 	return str;
4274 }
4275 
4276 /**
4277  * pcibios_set_master - enable PCI bus-mastering for device dev
4278  * @dev: the PCI device to enable
4279  *
4280  * Enables PCI bus-mastering for the device.  This is the default
4281  * implementation.  Architecture specific implementations can override
4282  * this if necessary.
4283  */
4284 void __weak pcibios_set_master(struct pci_dev *dev)
4285 {
4286 	u8 lat;
4287 
4288 	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4289 	if (pci_is_pcie(dev))
4290 		return;
4291 
4292 	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4293 	if (lat < 16)
4294 		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4295 	else if (lat > pcibios_max_latency)
4296 		lat = pcibios_max_latency;
4297 	else
4298 		return;
4299 
4300 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4301 }
4302 
4303 /**
4304  * pci_set_master - enables bus-mastering for device dev
4305  * @dev: the PCI device to enable
4306  *
4307  * Enables bus-mastering on the device and calls pcibios_set_master()
4308  * to do the needed arch specific settings.
4309  */
4310 void pci_set_master(struct pci_dev *dev)
4311 {
4312 	__pci_set_master(dev, true);
4313 	pcibios_set_master(dev);
4314 }
4315 EXPORT_SYMBOL(pci_set_master);
4316 
4317 /**
4318  * pci_clear_master - disables bus-mastering for device dev
4319  * @dev: the PCI device to disable
4320  */
4321 void pci_clear_master(struct pci_dev *dev)
4322 {
4323 	__pci_set_master(dev, false);
4324 }
4325 EXPORT_SYMBOL(pci_clear_master);
4326 
4327 /**
4328  * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4329  * @dev: the PCI device for which MWI is to be enabled
4330  *
4331  * Helper function for pci_set_mwi.
4332  * Originally copied from drivers/net/acenic.c.
4333  * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4334  *
4335  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4336  */
4337 int pci_set_cacheline_size(struct pci_dev *dev)
4338 {
4339 	u8 cacheline_size;
4340 
4341 	if (!pci_cache_line_size)
4342 		return -EINVAL;
4343 
4344 	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4345 	   equal to or multiple of the right value. */
4346 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4347 	if (cacheline_size >= pci_cache_line_size &&
4348 	    (cacheline_size % pci_cache_line_size) == 0)
4349 		return 0;
4350 
4351 	/* Write the correct value. */
4352 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4353 	/* Read it back. */
4354 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4355 	if (cacheline_size == pci_cache_line_size)
4356 		return 0;
4357 
4358 	pci_dbg(dev, "cache line size of %d is not supported\n",
4359 		   pci_cache_line_size << 2);
4360 
4361 	return -EINVAL;
4362 }
4363 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4364 
4365 /**
4366  * pci_set_mwi - enables memory-write-invalidate PCI transaction
4367  * @dev: the PCI device for which MWI is enabled
4368  *
4369  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4370  *
4371  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4372  */
4373 int pci_set_mwi(struct pci_dev *dev)
4374 {
4375 #ifdef PCI_DISABLE_MWI
4376 	return 0;
4377 #else
4378 	int rc;
4379 	u16 cmd;
4380 
4381 	rc = pci_set_cacheline_size(dev);
4382 	if (rc)
4383 		return rc;
4384 
4385 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4386 	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4387 		pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4388 		cmd |= PCI_COMMAND_INVALIDATE;
4389 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4390 	}
4391 	return 0;
4392 #endif
4393 }
4394 EXPORT_SYMBOL(pci_set_mwi);
4395 
4396 /**
4397  * pcim_set_mwi - a device-managed pci_set_mwi()
4398  * @dev: the PCI device for which MWI is enabled
4399  *
4400  * Managed pci_set_mwi().
4401  *
4402  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4403  */
4404 int pcim_set_mwi(struct pci_dev *dev)
4405 {
4406 	struct pci_devres *dr;
4407 
4408 	dr = find_pci_dr(dev);
4409 	if (!dr)
4410 		return -ENOMEM;
4411 
4412 	dr->mwi = 1;
4413 	return pci_set_mwi(dev);
4414 }
4415 EXPORT_SYMBOL(pcim_set_mwi);
4416 
4417 /**
4418  * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4419  * @dev: the PCI device for which MWI is enabled
4420  *
4421  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4422  * Callers are not required to check the return value.
4423  *
4424  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4425  */
4426 int pci_try_set_mwi(struct pci_dev *dev)
4427 {
4428 #ifdef PCI_DISABLE_MWI
4429 	return 0;
4430 #else
4431 	return pci_set_mwi(dev);
4432 #endif
4433 }
4434 EXPORT_SYMBOL(pci_try_set_mwi);
4435 
4436 /**
4437  * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4438  * @dev: the PCI device to disable
4439  *
4440  * Disables PCI Memory-Write-Invalidate transaction on the device
4441  */
4442 void pci_clear_mwi(struct pci_dev *dev)
4443 {
4444 #ifndef PCI_DISABLE_MWI
4445 	u16 cmd;
4446 
4447 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4448 	if (cmd & PCI_COMMAND_INVALIDATE) {
4449 		cmd &= ~PCI_COMMAND_INVALIDATE;
4450 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4451 	}
4452 #endif
4453 }
4454 EXPORT_SYMBOL(pci_clear_mwi);
4455 
4456 /**
4457  * pci_intx - enables/disables PCI INTx for device dev
4458  * @pdev: the PCI device to operate on
4459  * @enable: boolean: whether to enable or disable PCI INTx
4460  *
4461  * Enables/disables PCI INTx for device @pdev
4462  */
4463 void pci_intx(struct pci_dev *pdev, int enable)
4464 {
4465 	u16 pci_command, new;
4466 
4467 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4468 
4469 	if (enable)
4470 		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4471 	else
4472 		new = pci_command | PCI_COMMAND_INTX_DISABLE;
4473 
4474 	if (new != pci_command) {
4475 		struct pci_devres *dr;
4476 
4477 		pci_write_config_word(pdev, PCI_COMMAND, new);
4478 
4479 		dr = find_pci_dr(pdev);
4480 		if (dr && !dr->restore_intx) {
4481 			dr->restore_intx = 1;
4482 			dr->orig_intx = !enable;
4483 		}
4484 	}
4485 }
4486 EXPORT_SYMBOL_GPL(pci_intx);
4487 
4488 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4489 {
4490 	struct pci_bus *bus = dev->bus;
4491 	bool mask_updated = true;
4492 	u32 cmd_status_dword;
4493 	u16 origcmd, newcmd;
4494 	unsigned long flags;
4495 	bool irq_pending;
4496 
4497 	/*
4498 	 * We do a single dword read to retrieve both command and status.
4499 	 * Document assumptions that make this possible.
4500 	 */
4501 	BUILD_BUG_ON(PCI_COMMAND % 4);
4502 	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4503 
4504 	raw_spin_lock_irqsave(&pci_lock, flags);
4505 
4506 	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4507 
4508 	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4509 
4510 	/*
4511 	 * Check interrupt status register to see whether our device
4512 	 * triggered the interrupt (when masking) or the next IRQ is
4513 	 * already pending (when unmasking).
4514 	 */
4515 	if (mask != irq_pending) {
4516 		mask_updated = false;
4517 		goto done;
4518 	}
4519 
4520 	origcmd = cmd_status_dword;
4521 	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4522 	if (mask)
4523 		newcmd |= PCI_COMMAND_INTX_DISABLE;
4524 	if (newcmd != origcmd)
4525 		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4526 
4527 done:
4528 	raw_spin_unlock_irqrestore(&pci_lock, flags);
4529 
4530 	return mask_updated;
4531 }
4532 
4533 /**
4534  * pci_check_and_mask_intx - mask INTx on pending interrupt
4535  * @dev: the PCI device to operate on
4536  *
4537  * Check if the device dev has its INTx line asserted, mask it and return
4538  * true in that case. False is returned if no interrupt was pending.
4539  */
4540 bool pci_check_and_mask_intx(struct pci_dev *dev)
4541 {
4542 	return pci_check_and_set_intx_mask(dev, true);
4543 }
4544 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4545 
4546 /**
4547  * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
4548  * @dev: the PCI device to operate on
4549  *
4550  * Check if the device dev has its INTx line asserted, unmask it if not and
4551  * return true. False is returned and the mask remains active if there was
4552  * still an interrupt pending.
4553  */
4554 bool pci_check_and_unmask_intx(struct pci_dev *dev)
4555 {
4556 	return pci_check_and_set_intx_mask(dev, false);
4557 }
4558 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
4559 
4560 /**
4561  * pci_wait_for_pending_transaction - wait for pending transaction
4562  * @dev: the PCI device to operate on
4563  *
4564  * Return 0 if transaction is pending 1 otherwise.
4565  */
4566 int pci_wait_for_pending_transaction(struct pci_dev *dev)
4567 {
4568 	if (!pci_is_pcie(dev))
4569 		return 1;
4570 
4571 	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4572 				    PCI_EXP_DEVSTA_TRPND);
4573 }
4574 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4575 
4576 /**
4577  * pcie_has_flr - check if a device supports function level resets
4578  * @dev: device to check
4579  *
4580  * Returns true if the device advertises support for PCIe function level
4581  * resets.
4582  */
4583 bool pcie_has_flr(struct pci_dev *dev)
4584 {
4585 	u32 cap;
4586 
4587 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4588 		return false;
4589 
4590 	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
4591 	return cap & PCI_EXP_DEVCAP_FLR;
4592 }
4593 EXPORT_SYMBOL_GPL(pcie_has_flr);
4594 
4595 /**
4596  * pcie_flr - initiate a PCIe function level reset
4597  * @dev: device to reset
4598  *
4599  * Initiate a function level reset on @dev.  The caller should ensure the
4600  * device supports FLR before calling this function, e.g. by using the
4601  * pcie_has_flr() helper.
4602  */
4603 int pcie_flr(struct pci_dev *dev)
4604 {
4605 	if (!pci_wait_for_pending_transaction(dev))
4606 		pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4607 
4608 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4609 
4610 	if (dev->imm_ready)
4611 		return 0;
4612 
4613 	/*
4614 	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4615 	 * 100ms, but may silently discard requests while the FLR is in
4616 	 * progress.  Wait 100ms before trying to access the device.
4617 	 */
4618 	msleep(100);
4619 
4620 	return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4621 }
4622 EXPORT_SYMBOL_GPL(pcie_flr);
4623 
4624 static int pci_af_flr(struct pci_dev *dev, int probe)
4625 {
4626 	int pos;
4627 	u8 cap;
4628 
4629 	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4630 	if (!pos)
4631 		return -ENOTTY;
4632 
4633 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4634 		return -ENOTTY;
4635 
4636 	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4637 	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4638 		return -ENOTTY;
4639 
4640 	if (probe)
4641 		return 0;
4642 
4643 	/*
4644 	 * Wait for Transaction Pending bit to clear.  A word-aligned test
4645 	 * is used, so we use the control offset rather than status and shift
4646 	 * the test bit to match.
4647 	 */
4648 	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4649 				 PCI_AF_STATUS_TP << 8))
4650 		pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4651 
4652 	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4653 
4654 	if (dev->imm_ready)
4655 		return 0;
4656 
4657 	/*
4658 	 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4659 	 * updated 27 July 2006; a device must complete an FLR within
4660 	 * 100ms, but may silently discard requests while the FLR is in
4661 	 * progress.  Wait 100ms before trying to access the device.
4662 	 */
4663 	msleep(100);
4664 
4665 	return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4666 }
4667 
4668 /**
4669  * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4670  * @dev: Device to reset.
4671  * @probe: If set, only check if the device can be reset this way.
4672  *
4673  * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4674  * unset, it will be reinitialized internally when going from PCI_D3hot to
4675  * PCI_D0.  If that's the case and the device is not in a low-power state
4676  * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4677  *
4678  * NOTE: This causes the caller to sleep for twice the device power transition
4679  * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4680  * by default (i.e. unless the @dev's d3hot_delay field has a different value).
4681  * Moreover, only devices in D0 can be reset by this function.
4682  */
4683 static int pci_pm_reset(struct pci_dev *dev, int probe)
4684 {
4685 	u16 csr;
4686 
4687 	if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4688 		return -ENOTTY;
4689 
4690 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4691 	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4692 		return -ENOTTY;
4693 
4694 	if (probe)
4695 		return 0;
4696 
4697 	if (dev->current_state != PCI_D0)
4698 		return -EINVAL;
4699 
4700 	csr &= ~PCI_PM_CTRL_STATE_MASK;
4701 	csr |= PCI_D3hot;
4702 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4703 	pci_dev_d3_sleep(dev);
4704 
4705 	csr &= ~PCI_PM_CTRL_STATE_MASK;
4706 	csr |= PCI_D0;
4707 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4708 	pci_dev_d3_sleep(dev);
4709 
4710 	return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4711 }
4712 
4713 /**
4714  * pcie_wait_for_link_delay - Wait until link is active or inactive
4715  * @pdev: Bridge device
4716  * @active: waiting for active or inactive?
4717  * @delay: Delay to wait after link has become active (in ms)
4718  *
4719  * Use this to wait till link becomes active or inactive.
4720  */
4721 static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4722 				     int delay)
4723 {
4724 	int timeout = 1000;
4725 	bool ret;
4726 	u16 lnk_status;
4727 
4728 	/*
4729 	 * Some controllers might not implement link active reporting. In this
4730 	 * case, we wait for 1000 ms + any delay requested by the caller.
4731 	 */
4732 	if (!pdev->link_active_reporting) {
4733 		msleep(timeout + delay);
4734 		return true;
4735 	}
4736 
4737 	/*
4738 	 * PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
4739 	 * after which we should expect an link active if the reset was
4740 	 * successful. If so, software must wait a minimum 100ms before sending
4741 	 * configuration requests to devices downstream this port.
4742 	 *
4743 	 * If the link fails to activate, either the device was physically
4744 	 * removed or the link is permanently failed.
4745 	 */
4746 	if (active)
4747 		msleep(20);
4748 	for (;;) {
4749 		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4750 		ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4751 		if (ret == active)
4752 			break;
4753 		if (timeout <= 0)
4754 			break;
4755 		msleep(10);
4756 		timeout -= 10;
4757 	}
4758 	if (active && ret)
4759 		msleep(delay);
4760 
4761 	return ret == active;
4762 }
4763 
4764 /**
4765  * pcie_wait_for_link - Wait until link is active or inactive
4766  * @pdev: Bridge device
4767  * @active: waiting for active or inactive?
4768  *
4769  * Use this to wait till link becomes active or inactive.
4770  */
4771 bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4772 {
4773 	return pcie_wait_for_link_delay(pdev, active, 100);
4774 }
4775 
4776 /*
4777  * Find maximum D3cold delay required by all the devices on the bus.  The
4778  * spec says 100 ms, but firmware can lower it and we allow drivers to
4779  * increase it as well.
4780  *
4781  * Called with @pci_bus_sem locked for reading.
4782  */
4783 static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4784 {
4785 	const struct pci_dev *pdev;
4786 	int min_delay = 100;
4787 	int max_delay = 0;
4788 
4789 	list_for_each_entry(pdev, &bus->devices, bus_list) {
4790 		if (pdev->d3cold_delay < min_delay)
4791 			min_delay = pdev->d3cold_delay;
4792 		if (pdev->d3cold_delay > max_delay)
4793 			max_delay = pdev->d3cold_delay;
4794 	}
4795 
4796 	return max(min_delay, max_delay);
4797 }
4798 
4799 /**
4800  * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
4801  * @dev: PCI bridge
4802  *
4803  * Handle necessary delays before access to the devices on the secondary
4804  * side of the bridge are permitted after D3cold to D0 transition.
4805  *
4806  * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
4807  * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
4808  * 4.3.2.
4809  */
4810 void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
4811 {
4812 	struct pci_dev *child;
4813 	int delay;
4814 
4815 	if (pci_dev_is_disconnected(dev))
4816 		return;
4817 
4818 	if (!pci_is_bridge(dev) || !dev->bridge_d3)
4819 		return;
4820 
4821 	down_read(&pci_bus_sem);
4822 
4823 	/*
4824 	 * We only deal with devices that are present currently on the bus.
4825 	 * For any hot-added devices the access delay is handled in pciehp
4826 	 * board_added(). In case of ACPI hotplug the firmware is expected
4827 	 * to configure the devices before OS is notified.
4828 	 */
4829 	if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4830 		up_read(&pci_bus_sem);
4831 		return;
4832 	}
4833 
4834 	/* Take d3cold_delay requirements into account */
4835 	delay = pci_bus_max_d3cold_delay(dev->subordinate);
4836 	if (!delay) {
4837 		up_read(&pci_bus_sem);
4838 		return;
4839 	}
4840 
4841 	child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
4842 				 bus_list);
4843 	up_read(&pci_bus_sem);
4844 
4845 	/*
4846 	 * Conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
4847 	 * accessing the device after reset (that is 1000 ms + 100 ms). In
4848 	 * practice this should not be needed because we don't do power
4849 	 * management for them (see pci_bridge_d3_possible()).
4850 	 */
4851 	if (!pci_is_pcie(dev)) {
4852 		pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4853 		msleep(1000 + delay);
4854 		return;
4855 	}
4856 
4857 	/*
4858 	 * For PCIe downstream and root ports that do not support speeds
4859 	 * greater than 5 GT/s need to wait minimum 100 ms. For higher
4860 	 * speeds (gen3) we need to wait first for the data link layer to
4861 	 * become active.
4862 	 *
4863 	 * However, 100 ms is the minimum and the PCIe spec says the
4864 	 * software must allow at least 1s before it can determine that the
4865 	 * device that did not respond is a broken device. There is
4866 	 * evidence that 100 ms is not always enough, for example certain
4867 	 * Titan Ridge xHCI controller does not always respond to
4868 	 * configuration requests if we only wait for 100 ms (see
4869 	 * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
4870 	 *
4871 	 * Therefore we wait for 100 ms and check for the device presence.
4872 	 * If it is still not present give it an additional 100 ms.
4873 	 */
4874 	if (!pcie_downstream_port(dev))
4875 		return;
4876 
4877 	if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4878 		pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4879 		msleep(delay);
4880 	} else {
4881 		pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4882 			delay);
4883 		if (!pcie_wait_for_link_delay(dev, true, delay)) {
4884 			/* Did not train, no need to wait any further */
4885 			pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
4886 			return;
4887 		}
4888 	}
4889 
4890 	if (!pci_device_is_present(child)) {
4891 		pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
4892 		msleep(delay);
4893 	}
4894 }
4895 
4896 void pci_reset_secondary_bus(struct pci_dev *dev)
4897 {
4898 	u16 ctrl;
4899 
4900 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4901 	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4902 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4903 
4904 	/*
4905 	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
4906 	 * this to 2ms to ensure that we meet the minimum requirement.
4907 	 */
4908 	msleep(2);
4909 
4910 	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4911 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4912 
4913 	/*
4914 	 * Trhfa for conventional PCI is 2^25 clock cycles.
4915 	 * Assuming a minimum 33MHz clock this results in a 1s
4916 	 * delay before we can consider subordinate devices to
4917 	 * be re-initialized.  PCIe has some ways to shorten this,
4918 	 * but we don't make use of them yet.
4919 	 */
4920 	ssleep(1);
4921 }
4922 
4923 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4924 {
4925 	pci_reset_secondary_bus(dev);
4926 }
4927 
4928 /**
4929  * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
4930  * @dev: Bridge device
4931  *
4932  * Use the bridge control register to assert reset on the secondary bus.
4933  * Devices on the secondary bus are left in power-on state.
4934  */
4935 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4936 {
4937 	pcibios_reset_secondary_bus(dev);
4938 
4939 	return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
4940 }
4941 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4942 
4943 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4944 {
4945 	struct pci_dev *pdev;
4946 
4947 	if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4948 	    !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4949 		return -ENOTTY;
4950 
4951 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4952 		if (pdev != dev)
4953 			return -ENOTTY;
4954 
4955 	if (probe)
4956 		return 0;
4957 
4958 	return pci_bridge_secondary_bus_reset(dev->bus->self);
4959 }
4960 
4961 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4962 {
4963 	int rc = -ENOTTY;
4964 
4965 	if (!hotplug || !try_module_get(hotplug->owner))
4966 		return rc;
4967 
4968 	if (hotplug->ops->reset_slot)
4969 		rc = hotplug->ops->reset_slot(hotplug, probe);
4970 
4971 	module_put(hotplug->owner);
4972 
4973 	return rc;
4974 }
4975 
4976 static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4977 {
4978 	if (dev->multifunction || dev->subordinate || !dev->slot ||
4979 	    dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4980 		return -ENOTTY;
4981 
4982 	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4983 }
4984 
4985 static void pci_dev_lock(struct pci_dev *dev)
4986 {
4987 	pci_cfg_access_lock(dev);
4988 	/* block PM suspend, driver probe, etc. */
4989 	device_lock(&dev->dev);
4990 }
4991 
4992 /* Return 1 on successful lock, 0 on contention */
4993 static int pci_dev_trylock(struct pci_dev *dev)
4994 {
4995 	if (pci_cfg_access_trylock(dev)) {
4996 		if (device_trylock(&dev->dev))
4997 			return 1;
4998 		pci_cfg_access_unlock(dev);
4999 	}
5000 
5001 	return 0;
5002 }
5003 
5004 static void pci_dev_unlock(struct pci_dev *dev)
5005 {
5006 	device_unlock(&dev->dev);
5007 	pci_cfg_access_unlock(dev);
5008 }
5009 
5010 static void pci_dev_save_and_disable(struct pci_dev *dev)
5011 {
5012 	const struct pci_error_handlers *err_handler =
5013 			dev->driver ? dev->driver->err_handler : NULL;
5014 
5015 	/*
5016 	 * dev->driver->err_handler->reset_prepare() is protected against
5017 	 * races with ->remove() by the device lock, which must be held by
5018 	 * the caller.
5019 	 */
5020 	if (err_handler && err_handler->reset_prepare)
5021 		err_handler->reset_prepare(dev);
5022 
5023 	/*
5024 	 * Wake-up device prior to save.  PM registers default to D0 after
5025 	 * reset and a simple register restore doesn't reliably return
5026 	 * to a non-D0 state anyway.
5027 	 */
5028 	pci_set_power_state(dev, PCI_D0);
5029 
5030 	pci_save_state(dev);
5031 	/*
5032 	 * Disable the device by clearing the Command register, except for
5033 	 * INTx-disable which is set.  This not only disables MMIO and I/O port
5034 	 * BARs, but also prevents the device from being Bus Master, preventing
5035 	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
5036 	 * compliant devices, INTx-disable prevents legacy interrupts.
5037 	 */
5038 	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5039 }
5040 
5041 static void pci_dev_restore(struct pci_dev *dev)
5042 {
5043 	const struct pci_error_handlers *err_handler =
5044 			dev->driver ? dev->driver->err_handler : NULL;
5045 
5046 	pci_restore_state(dev);
5047 
5048 	/*
5049 	 * dev->driver->err_handler->reset_done() is protected against
5050 	 * races with ->remove() by the device lock, which must be held by
5051 	 * the caller.
5052 	 */
5053 	if (err_handler && err_handler->reset_done)
5054 		err_handler->reset_done(dev);
5055 }
5056 
5057 /**
5058  * __pci_reset_function_locked - reset a PCI device function while holding
5059  * the @dev mutex lock.
5060  * @dev: PCI device to reset
5061  *
5062  * Some devices allow an individual function to be reset without affecting
5063  * other functions in the same device.  The PCI device must be responsive
5064  * to PCI config space in order to use this function.
5065  *
5066  * The device function is presumed to be unused and the caller is holding
5067  * the device mutex lock when this function is called.
5068  *
5069  * Resetting the device will make the contents of PCI configuration space
5070  * random, so any caller of this must be prepared to reinitialise the
5071  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5072  * etc.
5073  *
5074  * Returns 0 if the device function was successfully reset or negative if the
5075  * device doesn't support resetting a single function.
5076  */
5077 int __pci_reset_function_locked(struct pci_dev *dev)
5078 {
5079 	int rc;
5080 
5081 	might_sleep();
5082 
5083 	/*
5084 	 * A reset method returns -ENOTTY if it doesn't support this device
5085 	 * and we should try the next method.
5086 	 *
5087 	 * If it returns 0 (success), we're finished.  If it returns any
5088 	 * other error, we're also finished: this indicates that further
5089 	 * reset mechanisms might be broken on the device.
5090 	 */
5091 	rc = pci_dev_specific_reset(dev, 0);
5092 	if (rc != -ENOTTY)
5093 		return rc;
5094 	if (pcie_has_flr(dev)) {
5095 		rc = pcie_flr(dev);
5096 		if (rc != -ENOTTY)
5097 			return rc;
5098 	}
5099 	rc = pci_af_flr(dev, 0);
5100 	if (rc != -ENOTTY)
5101 		return rc;
5102 	rc = pci_pm_reset(dev, 0);
5103 	if (rc != -ENOTTY)
5104 		return rc;
5105 	rc = pci_dev_reset_slot_function(dev, 0);
5106 	if (rc != -ENOTTY)
5107 		return rc;
5108 	return pci_parent_bus_reset(dev, 0);
5109 }
5110 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5111 
5112 /**
5113  * pci_probe_reset_function - check whether the device can be safely reset
5114  * @dev: PCI device to reset
5115  *
5116  * Some devices allow an individual function to be reset without affecting
5117  * other functions in the same device.  The PCI device must be responsive
5118  * to PCI config space in order to use this function.
5119  *
5120  * Returns 0 if the device function can be reset or negative if the
5121  * device doesn't support resetting a single function.
5122  */
5123 int pci_probe_reset_function(struct pci_dev *dev)
5124 {
5125 	int rc;
5126 
5127 	might_sleep();
5128 
5129 	rc = pci_dev_specific_reset(dev, 1);
5130 	if (rc != -ENOTTY)
5131 		return rc;
5132 	if (pcie_has_flr(dev))
5133 		return 0;
5134 	rc = pci_af_flr(dev, 1);
5135 	if (rc != -ENOTTY)
5136 		return rc;
5137 	rc = pci_pm_reset(dev, 1);
5138 	if (rc != -ENOTTY)
5139 		return rc;
5140 	rc = pci_dev_reset_slot_function(dev, 1);
5141 	if (rc != -ENOTTY)
5142 		return rc;
5143 
5144 	return pci_parent_bus_reset(dev, 1);
5145 }
5146 
5147 /**
5148  * pci_reset_function - quiesce and reset a PCI device function
5149  * @dev: PCI device to reset
5150  *
5151  * Some devices allow an individual function to be reset without affecting
5152  * other functions in the same device.  The PCI device must be responsive
5153  * to PCI config space in order to use this function.
5154  *
5155  * This function does not just reset the PCI portion of a device, but
5156  * clears all the state associated with the device.  This function differs
5157  * from __pci_reset_function_locked() in that it saves and restores device state
5158  * over the reset and takes the PCI device lock.
5159  *
5160  * Returns 0 if the device function was successfully reset or negative if the
5161  * device doesn't support resetting a single function.
5162  */
5163 int pci_reset_function(struct pci_dev *dev)
5164 {
5165 	int rc;
5166 
5167 	if (!dev->reset_fn)
5168 		return -ENOTTY;
5169 
5170 	pci_dev_lock(dev);
5171 	pci_dev_save_and_disable(dev);
5172 
5173 	rc = __pci_reset_function_locked(dev);
5174 
5175 	pci_dev_restore(dev);
5176 	pci_dev_unlock(dev);
5177 
5178 	return rc;
5179 }
5180 EXPORT_SYMBOL_GPL(pci_reset_function);
5181 
5182 /**
5183  * pci_reset_function_locked - quiesce and reset a PCI device function
5184  * @dev: PCI device to reset
5185  *
5186  * Some devices allow an individual function to be reset without affecting
5187  * other functions in the same device.  The PCI device must be responsive
5188  * to PCI config space in order to use this function.
5189  *
5190  * This function does not just reset the PCI portion of a device, but
5191  * clears all the state associated with the device.  This function differs
5192  * from __pci_reset_function_locked() in that it saves and restores device state
5193  * over the reset.  It also differs from pci_reset_function() in that it
5194  * requires the PCI device lock to be held.
5195  *
5196  * Returns 0 if the device function was successfully reset or negative if the
5197  * device doesn't support resetting a single function.
5198  */
5199 int pci_reset_function_locked(struct pci_dev *dev)
5200 {
5201 	int rc;
5202 
5203 	if (!dev->reset_fn)
5204 		return -ENOTTY;
5205 
5206 	pci_dev_save_and_disable(dev);
5207 
5208 	rc = __pci_reset_function_locked(dev);
5209 
5210 	pci_dev_restore(dev);
5211 
5212 	return rc;
5213 }
5214 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5215 
5216 /**
5217  * pci_try_reset_function - quiesce and reset a PCI device function
5218  * @dev: PCI device to reset
5219  *
5220  * Same as above, except return -EAGAIN if unable to lock device.
5221  */
5222 int pci_try_reset_function(struct pci_dev *dev)
5223 {
5224 	int rc;
5225 
5226 	if (!dev->reset_fn)
5227 		return -ENOTTY;
5228 
5229 	if (!pci_dev_trylock(dev))
5230 		return -EAGAIN;
5231 
5232 	pci_dev_save_and_disable(dev);
5233 	rc = __pci_reset_function_locked(dev);
5234 	pci_dev_restore(dev);
5235 	pci_dev_unlock(dev);
5236 
5237 	return rc;
5238 }
5239 EXPORT_SYMBOL_GPL(pci_try_reset_function);
5240 
5241 /* Do any devices on or below this bus prevent a bus reset? */
5242 static bool pci_bus_resetable(struct pci_bus *bus)
5243 {
5244 	struct pci_dev *dev;
5245 
5246 
5247 	if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5248 		return false;
5249 
5250 	list_for_each_entry(dev, &bus->devices, bus_list) {
5251 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5252 		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5253 			return false;
5254 	}
5255 
5256 	return true;
5257 }
5258 
5259 /* Lock devices from the top of the tree down */
5260 static void pci_bus_lock(struct pci_bus *bus)
5261 {
5262 	struct pci_dev *dev;
5263 
5264 	list_for_each_entry(dev, &bus->devices, bus_list) {
5265 		pci_dev_lock(dev);
5266 		if (dev->subordinate)
5267 			pci_bus_lock(dev->subordinate);
5268 	}
5269 }
5270 
5271 /* Unlock devices from the bottom of the tree up */
5272 static void pci_bus_unlock(struct pci_bus *bus)
5273 {
5274 	struct pci_dev *dev;
5275 
5276 	list_for_each_entry(dev, &bus->devices, bus_list) {
5277 		if (dev->subordinate)
5278 			pci_bus_unlock(dev->subordinate);
5279 		pci_dev_unlock(dev);
5280 	}
5281 }
5282 
5283 /* Return 1 on successful lock, 0 on contention */
5284 static int pci_bus_trylock(struct pci_bus *bus)
5285 {
5286 	struct pci_dev *dev;
5287 
5288 	list_for_each_entry(dev, &bus->devices, bus_list) {
5289 		if (!pci_dev_trylock(dev))
5290 			goto unlock;
5291 		if (dev->subordinate) {
5292 			if (!pci_bus_trylock(dev->subordinate)) {
5293 				pci_dev_unlock(dev);
5294 				goto unlock;
5295 			}
5296 		}
5297 	}
5298 	return 1;
5299 
5300 unlock:
5301 	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5302 		if (dev->subordinate)
5303 			pci_bus_unlock(dev->subordinate);
5304 		pci_dev_unlock(dev);
5305 	}
5306 	return 0;
5307 }
5308 
5309 /* Do any devices on or below this slot prevent a bus reset? */
5310 static bool pci_slot_resetable(struct pci_slot *slot)
5311 {
5312 	struct pci_dev *dev;
5313 
5314 	if (slot->bus->self &&
5315 	    (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5316 		return false;
5317 
5318 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5319 		if (!dev->slot || dev->slot != slot)
5320 			continue;
5321 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5322 		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5323 			return false;
5324 	}
5325 
5326 	return true;
5327 }
5328 
5329 /* Lock devices from the top of the tree down */
5330 static void pci_slot_lock(struct pci_slot *slot)
5331 {
5332 	struct pci_dev *dev;
5333 
5334 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5335 		if (!dev->slot || dev->slot != slot)
5336 			continue;
5337 		pci_dev_lock(dev);
5338 		if (dev->subordinate)
5339 			pci_bus_lock(dev->subordinate);
5340 	}
5341 }
5342 
5343 /* Unlock devices from the bottom of the tree up */
5344 static void pci_slot_unlock(struct pci_slot *slot)
5345 {
5346 	struct pci_dev *dev;
5347 
5348 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5349 		if (!dev->slot || dev->slot != slot)
5350 			continue;
5351 		if (dev->subordinate)
5352 			pci_bus_unlock(dev->subordinate);
5353 		pci_dev_unlock(dev);
5354 	}
5355 }
5356 
5357 /* Return 1 on successful lock, 0 on contention */
5358 static int pci_slot_trylock(struct pci_slot *slot)
5359 {
5360 	struct pci_dev *dev;
5361 
5362 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5363 		if (!dev->slot || dev->slot != slot)
5364 			continue;
5365 		if (!pci_dev_trylock(dev))
5366 			goto unlock;
5367 		if (dev->subordinate) {
5368 			if (!pci_bus_trylock(dev->subordinate)) {
5369 				pci_dev_unlock(dev);
5370 				goto unlock;
5371 			}
5372 		}
5373 	}
5374 	return 1;
5375 
5376 unlock:
5377 	list_for_each_entry_continue_reverse(dev,
5378 					     &slot->bus->devices, bus_list) {
5379 		if (!dev->slot || dev->slot != slot)
5380 			continue;
5381 		if (dev->subordinate)
5382 			pci_bus_unlock(dev->subordinate);
5383 		pci_dev_unlock(dev);
5384 	}
5385 	return 0;
5386 }
5387 
5388 /*
5389  * Save and disable devices from the top of the tree down while holding
5390  * the @dev mutex lock for the entire tree.
5391  */
5392 static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5393 {
5394 	struct pci_dev *dev;
5395 
5396 	list_for_each_entry(dev, &bus->devices, bus_list) {
5397 		pci_dev_save_and_disable(dev);
5398 		if (dev->subordinate)
5399 			pci_bus_save_and_disable_locked(dev->subordinate);
5400 	}
5401 }
5402 
5403 /*
5404  * Restore devices from top of the tree down while holding @dev mutex lock
5405  * for the entire tree.  Parent bridges need to be restored before we can
5406  * get to subordinate devices.
5407  */
5408 static void pci_bus_restore_locked(struct pci_bus *bus)
5409 {
5410 	struct pci_dev *dev;
5411 
5412 	list_for_each_entry(dev, &bus->devices, bus_list) {
5413 		pci_dev_restore(dev);
5414 		if (dev->subordinate)
5415 			pci_bus_restore_locked(dev->subordinate);
5416 	}
5417 }
5418 
5419 /*
5420  * Save and disable devices from the top of the tree down while holding
5421  * the @dev mutex lock for the entire tree.
5422  */
5423 static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5424 {
5425 	struct pci_dev *dev;
5426 
5427 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5428 		if (!dev->slot || dev->slot != slot)
5429 			continue;
5430 		pci_dev_save_and_disable(dev);
5431 		if (dev->subordinate)
5432 			pci_bus_save_and_disable_locked(dev->subordinate);
5433 	}
5434 }
5435 
5436 /*
5437  * Restore devices from top of the tree down while holding @dev mutex lock
5438  * for the entire tree.  Parent bridges need to be restored before we can
5439  * get to subordinate devices.
5440  */
5441 static void pci_slot_restore_locked(struct pci_slot *slot)
5442 {
5443 	struct pci_dev *dev;
5444 
5445 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5446 		if (!dev->slot || dev->slot != slot)
5447 			continue;
5448 		pci_dev_restore(dev);
5449 		if (dev->subordinate)
5450 			pci_bus_restore_locked(dev->subordinate);
5451 	}
5452 }
5453 
5454 static int pci_slot_reset(struct pci_slot *slot, int probe)
5455 {
5456 	int rc;
5457 
5458 	if (!slot || !pci_slot_resetable(slot))
5459 		return -ENOTTY;
5460 
5461 	if (!probe)
5462 		pci_slot_lock(slot);
5463 
5464 	might_sleep();
5465 
5466 	rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5467 
5468 	if (!probe)
5469 		pci_slot_unlock(slot);
5470 
5471 	return rc;
5472 }
5473 
5474 /**
5475  * pci_probe_reset_slot - probe whether a PCI slot can be reset
5476  * @slot: PCI slot to probe
5477  *
5478  * Return 0 if slot can be reset, negative if a slot reset is not supported.
5479  */
5480 int pci_probe_reset_slot(struct pci_slot *slot)
5481 {
5482 	return pci_slot_reset(slot, 1);
5483 }
5484 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5485 
5486 /**
5487  * __pci_reset_slot - Try to reset a PCI slot
5488  * @slot: PCI slot to reset
5489  *
5490  * A PCI bus may host multiple slots, each slot may support a reset mechanism
5491  * independent of other slots.  For instance, some slots may support slot power
5492  * control.  In the case of a 1:1 bus to slot architecture, this function may
5493  * wrap the bus reset to avoid spurious slot related events such as hotplug.
5494  * Generally a slot reset should be attempted before a bus reset.  All of the
5495  * function of the slot and any subordinate buses behind the slot are reset
5496  * through this function.  PCI config space of all devices in the slot and
5497  * behind the slot is saved before and restored after reset.
5498  *
5499  * Same as above except return -EAGAIN if the slot cannot be locked
5500  */
5501 static int __pci_reset_slot(struct pci_slot *slot)
5502 {
5503 	int rc;
5504 
5505 	rc = pci_slot_reset(slot, 1);
5506 	if (rc)
5507 		return rc;
5508 
5509 	if (pci_slot_trylock(slot)) {
5510 		pci_slot_save_and_disable_locked(slot);
5511 		might_sleep();
5512 		rc = pci_reset_hotplug_slot(slot->hotplug, 0);
5513 		pci_slot_restore_locked(slot);
5514 		pci_slot_unlock(slot);
5515 	} else
5516 		rc = -EAGAIN;
5517 
5518 	return rc;
5519 }
5520 
5521 static int pci_bus_reset(struct pci_bus *bus, int probe)
5522 {
5523 	int ret;
5524 
5525 	if (!bus->self || !pci_bus_resetable(bus))
5526 		return -ENOTTY;
5527 
5528 	if (probe)
5529 		return 0;
5530 
5531 	pci_bus_lock(bus);
5532 
5533 	might_sleep();
5534 
5535 	ret = pci_bridge_secondary_bus_reset(bus->self);
5536 
5537 	pci_bus_unlock(bus);
5538 
5539 	return ret;
5540 }
5541 
5542 /**
5543  * pci_bus_error_reset - reset the bridge's subordinate bus
5544  * @bridge: The parent device that connects to the bus to reset
5545  *
5546  * This function will first try to reset the slots on this bus if the method is
5547  * available. If slot reset fails or is not available, this will fall back to a
5548  * secondary bus reset.
5549  */
5550 int pci_bus_error_reset(struct pci_dev *bridge)
5551 {
5552 	struct pci_bus *bus = bridge->subordinate;
5553 	struct pci_slot *slot;
5554 
5555 	if (!bus)
5556 		return -ENOTTY;
5557 
5558 	mutex_lock(&pci_slot_mutex);
5559 	if (list_empty(&bus->slots))
5560 		goto bus_reset;
5561 
5562 	list_for_each_entry(slot, &bus->slots, list)
5563 		if (pci_probe_reset_slot(slot))
5564 			goto bus_reset;
5565 
5566 	list_for_each_entry(slot, &bus->slots, list)
5567 		if (pci_slot_reset(slot, 0))
5568 			goto bus_reset;
5569 
5570 	mutex_unlock(&pci_slot_mutex);
5571 	return 0;
5572 bus_reset:
5573 	mutex_unlock(&pci_slot_mutex);
5574 	return pci_bus_reset(bridge->subordinate, 0);
5575 }
5576 
5577 /**
5578  * pci_probe_reset_bus - probe whether a PCI bus can be reset
5579  * @bus: PCI bus to probe
5580  *
5581  * Return 0 if bus can be reset, negative if a bus reset is not supported.
5582  */
5583 int pci_probe_reset_bus(struct pci_bus *bus)
5584 {
5585 	return pci_bus_reset(bus, 1);
5586 }
5587 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5588 
5589 /**
5590  * __pci_reset_bus - Try to reset a PCI bus
5591  * @bus: top level PCI bus to reset
5592  *
5593  * Same as above except return -EAGAIN if the bus cannot be locked
5594  */
5595 static int __pci_reset_bus(struct pci_bus *bus)
5596 {
5597 	int rc;
5598 
5599 	rc = pci_bus_reset(bus, 1);
5600 	if (rc)
5601 		return rc;
5602 
5603 	if (pci_bus_trylock(bus)) {
5604 		pci_bus_save_and_disable_locked(bus);
5605 		might_sleep();
5606 		rc = pci_bridge_secondary_bus_reset(bus->self);
5607 		pci_bus_restore_locked(bus);
5608 		pci_bus_unlock(bus);
5609 	} else
5610 		rc = -EAGAIN;
5611 
5612 	return rc;
5613 }
5614 
5615 /**
5616  * pci_reset_bus - Try to reset a PCI bus
5617  * @pdev: top level PCI device to reset via slot/bus
5618  *
5619  * Same as above except return -EAGAIN if the bus cannot be locked
5620  */
5621 int pci_reset_bus(struct pci_dev *pdev)
5622 {
5623 	return (!pci_probe_reset_slot(pdev->slot)) ?
5624 	    __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5625 }
5626 EXPORT_SYMBOL_GPL(pci_reset_bus);
5627 
5628 /**
5629  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5630  * @dev: PCI device to query
5631  *
5632  * Returns mmrbc: maximum designed memory read count in bytes or
5633  * appropriate error value.
5634  */
5635 int pcix_get_max_mmrbc(struct pci_dev *dev)
5636 {
5637 	int cap;
5638 	u32 stat;
5639 
5640 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5641 	if (!cap)
5642 		return -EINVAL;
5643 
5644 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5645 		return -EINVAL;
5646 
5647 	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5648 }
5649 EXPORT_SYMBOL(pcix_get_max_mmrbc);
5650 
5651 /**
5652  * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5653  * @dev: PCI device to query
5654  *
5655  * Returns mmrbc: maximum memory read count in bytes or appropriate error
5656  * value.
5657  */
5658 int pcix_get_mmrbc(struct pci_dev *dev)
5659 {
5660 	int cap;
5661 	u16 cmd;
5662 
5663 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5664 	if (!cap)
5665 		return -EINVAL;
5666 
5667 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5668 		return -EINVAL;
5669 
5670 	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5671 }
5672 EXPORT_SYMBOL(pcix_get_mmrbc);
5673 
5674 /**
5675  * pcix_set_mmrbc - set PCI-X maximum memory read byte count
5676  * @dev: PCI device to query
5677  * @mmrbc: maximum memory read count in bytes
5678  *    valid values are 512, 1024, 2048, 4096
5679  *
5680  * If possible sets maximum memory read byte count, some bridges have errata
5681  * that prevent this.
5682  */
5683 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5684 {
5685 	int cap;
5686 	u32 stat, v, o;
5687 	u16 cmd;
5688 
5689 	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5690 		return -EINVAL;
5691 
5692 	v = ffs(mmrbc) - 10;
5693 
5694 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5695 	if (!cap)
5696 		return -EINVAL;
5697 
5698 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5699 		return -EINVAL;
5700 
5701 	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5702 		return -E2BIG;
5703 
5704 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5705 		return -EINVAL;
5706 
5707 	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5708 	if (o != v) {
5709 		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5710 			return -EIO;
5711 
5712 		cmd &= ~PCI_X_CMD_MAX_READ;
5713 		cmd |= v << 2;
5714 		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5715 			return -EIO;
5716 	}
5717 	return 0;
5718 }
5719 EXPORT_SYMBOL(pcix_set_mmrbc);
5720 
5721 /**
5722  * pcie_get_readrq - get PCI Express read request size
5723  * @dev: PCI device to query
5724  *
5725  * Returns maximum memory read request in bytes or appropriate error value.
5726  */
5727 int pcie_get_readrq(struct pci_dev *dev)
5728 {
5729 	u16 ctl;
5730 
5731 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5732 
5733 	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5734 }
5735 EXPORT_SYMBOL(pcie_get_readrq);
5736 
5737 /**
5738  * pcie_set_readrq - set PCI Express maximum memory read request
5739  * @dev: PCI device to query
5740  * @rq: maximum memory read count in bytes
5741  *    valid values are 128, 256, 512, 1024, 2048, 4096
5742  *
5743  * If possible sets maximum memory read request in bytes
5744  */
5745 int pcie_set_readrq(struct pci_dev *dev, int rq)
5746 {
5747 	u16 v;
5748 	int ret;
5749 
5750 	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5751 		return -EINVAL;
5752 
5753 	/*
5754 	 * If using the "performance" PCIe config, we clamp the read rq
5755 	 * size to the max packet size to keep the host bridge from
5756 	 * generating requests larger than we can cope with.
5757 	 */
5758 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5759 		int mps = pcie_get_mps(dev);
5760 
5761 		if (mps < rq)
5762 			rq = mps;
5763 	}
5764 
5765 	v = (ffs(rq) - 8) << 12;
5766 
5767 	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5768 						  PCI_EXP_DEVCTL_READRQ, v);
5769 
5770 	return pcibios_err_to_errno(ret);
5771 }
5772 EXPORT_SYMBOL(pcie_set_readrq);
5773 
5774 /**
5775  * pcie_get_mps - get PCI Express maximum payload size
5776  * @dev: PCI device to query
5777  *
5778  * Returns maximum payload size in bytes
5779  */
5780 int pcie_get_mps(struct pci_dev *dev)
5781 {
5782 	u16 ctl;
5783 
5784 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5785 
5786 	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5787 }
5788 EXPORT_SYMBOL(pcie_get_mps);
5789 
5790 /**
5791  * pcie_set_mps - set PCI Express maximum payload size
5792  * @dev: PCI device to query
5793  * @mps: maximum payload size in bytes
5794  *    valid values are 128, 256, 512, 1024, 2048, 4096
5795  *
5796  * If possible sets maximum payload size
5797  */
5798 int pcie_set_mps(struct pci_dev *dev, int mps)
5799 {
5800 	u16 v;
5801 	int ret;
5802 
5803 	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5804 		return -EINVAL;
5805 
5806 	v = ffs(mps) - 8;
5807 	if (v > dev->pcie_mpss)
5808 		return -EINVAL;
5809 	v <<= 5;
5810 
5811 	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5812 						  PCI_EXP_DEVCTL_PAYLOAD, v);
5813 
5814 	return pcibios_err_to_errno(ret);
5815 }
5816 EXPORT_SYMBOL(pcie_set_mps);
5817 
5818 /**
5819  * pcie_bandwidth_available - determine minimum link settings of a PCIe
5820  *			      device and its bandwidth limitation
5821  * @dev: PCI device to query
5822  * @limiting_dev: storage for device causing the bandwidth limitation
5823  * @speed: storage for speed of limiting device
5824  * @width: storage for width of limiting device
5825  *
5826  * Walk up the PCI device chain and find the point where the minimum
5827  * bandwidth is available.  Return the bandwidth available there and (if
5828  * limiting_dev, speed, and width pointers are supplied) information about
5829  * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
5830  * raw bandwidth.
5831  */
5832 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
5833 			     enum pci_bus_speed *speed,
5834 			     enum pcie_link_width *width)
5835 {
5836 	u16 lnksta;
5837 	enum pci_bus_speed next_speed;
5838 	enum pcie_link_width next_width;
5839 	u32 bw, next_bw;
5840 
5841 	if (speed)
5842 		*speed = PCI_SPEED_UNKNOWN;
5843 	if (width)
5844 		*width = PCIE_LNK_WIDTH_UNKNOWN;
5845 
5846 	bw = 0;
5847 
5848 	while (dev) {
5849 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5850 
5851 		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5852 		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5853 			PCI_EXP_LNKSTA_NLW_SHIFT;
5854 
5855 		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
5856 
5857 		/* Check if current device limits the total bandwidth */
5858 		if (!bw || next_bw <= bw) {
5859 			bw = next_bw;
5860 
5861 			if (limiting_dev)
5862 				*limiting_dev = dev;
5863 			if (speed)
5864 				*speed = next_speed;
5865 			if (width)
5866 				*width = next_width;
5867 		}
5868 
5869 		dev = pci_upstream_bridge(dev);
5870 	}
5871 
5872 	return bw;
5873 }
5874 EXPORT_SYMBOL(pcie_bandwidth_available);
5875 
5876 /**
5877  * pcie_get_speed_cap - query for the PCI device's link speed capability
5878  * @dev: PCI device to query
5879  *
5880  * Query the PCI device speed capability.  Return the maximum link speed
5881  * supported by the device.
5882  */
5883 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5884 {
5885 	u32 lnkcap2, lnkcap;
5886 
5887 	/*
5888 	 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.  The
5889 	 * implementation note there recommends using the Supported Link
5890 	 * Speeds Vector in Link Capabilities 2 when supported.
5891 	 *
5892 	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
5893 	 * should use the Supported Link Speeds field in Link Capabilities,
5894 	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
5895 	 */
5896 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5897 
5898 	/* PCIe r3.0-compliant */
5899 	if (lnkcap2)
5900 		return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
5901 
5902 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5903 	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
5904 		return PCIE_SPEED_5_0GT;
5905 	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
5906 		return PCIE_SPEED_2_5GT;
5907 
5908 	return PCI_SPEED_UNKNOWN;
5909 }
5910 EXPORT_SYMBOL(pcie_get_speed_cap);
5911 
5912 /**
5913  * pcie_get_width_cap - query for the PCI device's link width capability
5914  * @dev: PCI device to query
5915  *
5916  * Query the PCI device width capability.  Return the maximum link width
5917  * supported by the device.
5918  */
5919 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
5920 {
5921 	u32 lnkcap;
5922 
5923 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5924 	if (lnkcap)
5925 		return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
5926 
5927 	return PCIE_LNK_WIDTH_UNKNOWN;
5928 }
5929 EXPORT_SYMBOL(pcie_get_width_cap);
5930 
5931 /**
5932  * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
5933  * @dev: PCI device
5934  * @speed: storage for link speed
5935  * @width: storage for link width
5936  *
5937  * Calculate a PCI device's link bandwidth by querying for its link speed
5938  * and width, multiplying them, and applying encoding overhead.  The result
5939  * is in Mb/s, i.e., megabits/second of raw bandwidth.
5940  */
5941 u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
5942 			   enum pcie_link_width *width)
5943 {
5944 	*speed = pcie_get_speed_cap(dev);
5945 	*width = pcie_get_width_cap(dev);
5946 
5947 	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
5948 		return 0;
5949 
5950 	return *width * PCIE_SPEED2MBS_ENC(*speed);
5951 }
5952 
5953 /**
5954  * __pcie_print_link_status - Report the PCI device's link speed and width
5955  * @dev: PCI device to query
5956  * @verbose: Print info even when enough bandwidth is available
5957  *
5958  * If the available bandwidth at the device is less than the device is
5959  * capable of, report the device's maximum possible bandwidth and the
5960  * upstream link that limits its performance.  If @verbose, always print
5961  * the available bandwidth, even if the device isn't constrained.
5962  */
5963 void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
5964 {
5965 	enum pcie_link_width width, width_cap;
5966 	enum pci_bus_speed speed, speed_cap;
5967 	struct pci_dev *limiting_dev = NULL;
5968 	u32 bw_avail, bw_cap;
5969 
5970 	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
5971 	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
5972 
5973 	if (bw_avail >= bw_cap && verbose)
5974 		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
5975 			 bw_cap / 1000, bw_cap % 1000,
5976 			 pci_speed_string(speed_cap), width_cap);
5977 	else if (bw_avail < bw_cap)
5978 		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
5979 			 bw_avail / 1000, bw_avail % 1000,
5980 			 pci_speed_string(speed), width,
5981 			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
5982 			 bw_cap / 1000, bw_cap % 1000,
5983 			 pci_speed_string(speed_cap), width_cap);
5984 }
5985 
5986 /**
5987  * pcie_print_link_status - Report the PCI device's link speed and width
5988  * @dev: PCI device to query
5989  *
5990  * Report the available bandwidth at the device.
5991  */
5992 void pcie_print_link_status(struct pci_dev *dev)
5993 {
5994 	__pcie_print_link_status(dev, true);
5995 }
5996 EXPORT_SYMBOL(pcie_print_link_status);
5997 
5998 /**
5999  * pci_select_bars - Make BAR mask from the type of resource
6000  * @dev: the PCI device for which BAR mask is made
6001  * @flags: resource type mask to be selected
6002  *
6003  * This helper routine makes bar mask from the type of resource.
6004  */
6005 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
6006 {
6007 	int i, bars = 0;
6008 	for (i = 0; i < PCI_NUM_RESOURCES; i++)
6009 		if (pci_resource_flags(dev, i) & flags)
6010 			bars |= (1 << i);
6011 	return bars;
6012 }
6013 EXPORT_SYMBOL(pci_select_bars);
6014 
6015 /* Some architectures require additional programming to enable VGA */
6016 static arch_set_vga_state_t arch_set_vga_state;
6017 
6018 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
6019 {
6020 	arch_set_vga_state = func;	/* NULL disables */
6021 }
6022 
6023 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
6024 				  unsigned int command_bits, u32 flags)
6025 {
6026 	if (arch_set_vga_state)
6027 		return arch_set_vga_state(dev, decode, command_bits,
6028 						flags);
6029 	return 0;
6030 }
6031 
6032 /**
6033  * pci_set_vga_state - set VGA decode state on device and parents if requested
6034  * @dev: the PCI device
6035  * @decode: true = enable decoding, false = disable decoding
6036  * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
6037  * @flags: traverse ancestors and change bridges
6038  * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
6039  */
6040 int pci_set_vga_state(struct pci_dev *dev, bool decode,
6041 		      unsigned int command_bits, u32 flags)
6042 {
6043 	struct pci_bus *bus;
6044 	struct pci_dev *bridge;
6045 	u16 cmd;
6046 	int rc;
6047 
6048 	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
6049 
6050 	/* ARCH specific VGA enables */
6051 	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
6052 	if (rc)
6053 		return rc;
6054 
6055 	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6056 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
6057 		if (decode)
6058 			cmd |= command_bits;
6059 		else
6060 			cmd &= ~command_bits;
6061 		pci_write_config_word(dev, PCI_COMMAND, cmd);
6062 	}
6063 
6064 	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6065 		return 0;
6066 
6067 	bus = dev->bus;
6068 	while (bus) {
6069 		bridge = bus->self;
6070 		if (bridge) {
6071 			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6072 					     &cmd);
6073 			if (decode)
6074 				cmd |= PCI_BRIDGE_CTL_VGA;
6075 			else
6076 				cmd &= ~PCI_BRIDGE_CTL_VGA;
6077 			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6078 					      cmd);
6079 		}
6080 		bus = bus->parent;
6081 	}
6082 	return 0;
6083 }
6084 
6085 #ifdef CONFIG_ACPI
6086 bool pci_pr3_present(struct pci_dev *pdev)
6087 {
6088 	struct acpi_device *adev;
6089 
6090 	if (acpi_disabled)
6091 		return false;
6092 
6093 	adev = ACPI_COMPANION(&pdev->dev);
6094 	if (!adev)
6095 		return false;
6096 
6097 	return adev->power.flags.power_resources &&
6098 		acpi_has_method(adev->handle, "_PR3");
6099 }
6100 EXPORT_SYMBOL_GPL(pci_pr3_present);
6101 #endif
6102 
6103 /**
6104  * pci_add_dma_alias - Add a DMA devfn alias for a device
6105  * @dev: the PCI device for which alias is added
6106  * @devfn_from: alias slot and function
6107  * @nr_devfns: number of subsequent devfns to alias
6108  *
6109  * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6110  * which is used to program permissible bus-devfn source addresses for DMA
6111  * requests in an IOMMU.  These aliases factor into IOMMU group creation
6112  * and are useful for devices generating DMA requests beyond or different
6113  * from their logical bus-devfn.  Examples include device quirks where the
6114  * device simply uses the wrong devfn, as well as non-transparent bridges
6115  * where the alias may be a proxy for devices in another domain.
6116  *
6117  * IOMMU group creation is performed during device discovery or addition,
6118  * prior to any potential DMA mapping and therefore prior to driver probing
6119  * (especially for userspace assigned devices where IOMMU group definition
6120  * cannot be left as a userspace activity).  DMA aliases should therefore
6121  * be configured via quirks, such as the PCI fixup header quirk.
6122  */
6123 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns)
6124 {
6125 	int devfn_to;
6126 
6127 	nr_devfns = min(nr_devfns, (unsigned) MAX_NR_DEVFNS - devfn_from);
6128 	devfn_to = devfn_from + nr_devfns - 1;
6129 
6130 	if (!dev->dma_alias_mask)
6131 		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6132 	if (!dev->dma_alias_mask) {
6133 		pci_warn(dev, "Unable to allocate DMA alias mask\n");
6134 		return;
6135 	}
6136 
6137 	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6138 
6139 	if (nr_devfns == 1)
6140 		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6141 				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6142 	else if (nr_devfns > 1)
6143 		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6144 				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6145 				PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6146 }
6147 
6148 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6149 {
6150 	return (dev1->dma_alias_mask &&
6151 		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6152 	       (dev2->dma_alias_mask &&
6153 		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6154 	       pci_real_dma_dev(dev1) == dev2 ||
6155 	       pci_real_dma_dev(dev2) == dev1;
6156 }
6157 
6158 bool pci_device_is_present(struct pci_dev *pdev)
6159 {
6160 	u32 v;
6161 
6162 	if (pci_dev_is_disconnected(pdev))
6163 		return false;
6164 	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6165 }
6166 EXPORT_SYMBOL_GPL(pci_device_is_present);
6167 
6168 void pci_ignore_hotplug(struct pci_dev *dev)
6169 {
6170 	struct pci_dev *bridge = dev->bus->self;
6171 
6172 	dev->ignore_hotplug = 1;
6173 	/* Propagate the "ignore hotplug" setting to the parent bridge. */
6174 	if (bridge)
6175 		bridge->ignore_hotplug = 1;
6176 }
6177 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6178 
6179 /**
6180  * pci_real_dma_dev - Get PCI DMA device for PCI device
6181  * @dev: the PCI device that may have a PCI DMA alias
6182  *
6183  * Permits the platform to provide architecture-specific functionality to
6184  * devices needing to alias DMA to another PCI device on another PCI bus. If
6185  * the PCI device is on the same bus, it is recommended to use
6186  * pci_add_dma_alias(). This is the default implementation. Architecture
6187  * implementations can override this.
6188  */
6189 struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6190 {
6191 	return dev;
6192 }
6193 
6194 resource_size_t __weak pcibios_default_alignment(void)
6195 {
6196 	return 0;
6197 }
6198 
6199 /*
6200  * Arches that don't want to expose struct resource to userland as-is in
6201  * sysfs and /proc can implement their own pci_resource_to_user().
6202  */
6203 void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6204 				 const struct resource *rsrc,
6205 				 resource_size_t *start, resource_size_t *end)
6206 {
6207 	*start = rsrc->start;
6208 	*end = rsrc->end;
6209 }
6210 
6211 static char *resource_alignment_param;
6212 static DEFINE_SPINLOCK(resource_alignment_lock);
6213 
6214 /**
6215  * pci_specified_resource_alignment - get resource alignment specified by user.
6216  * @dev: the PCI device to get
6217  * @resize: whether or not to change resources' size when reassigning alignment
6218  *
6219  * RETURNS: Resource alignment if it is specified.
6220  *          Zero if it is not specified.
6221  */
6222 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6223 							bool *resize)
6224 {
6225 	int align_order, count;
6226 	resource_size_t align = pcibios_default_alignment();
6227 	const char *p;
6228 	int ret;
6229 
6230 	spin_lock(&resource_alignment_lock);
6231 	p = resource_alignment_param;
6232 	if (!p || !*p)
6233 		goto out;
6234 	if (pci_has_flag(PCI_PROBE_ONLY)) {
6235 		align = 0;
6236 		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6237 		goto out;
6238 	}
6239 
6240 	while (*p) {
6241 		count = 0;
6242 		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6243 		    p[count] == '@') {
6244 			p += count + 1;
6245 			if (align_order > 63) {
6246 				pr_err("PCI: Invalid requested alignment (order %d)\n",
6247 				       align_order);
6248 				align_order = PAGE_SHIFT;
6249 			}
6250 		} else {
6251 			align_order = PAGE_SHIFT;
6252 		}
6253 
6254 		ret = pci_dev_str_match(dev, p, &p);
6255 		if (ret == 1) {
6256 			*resize = true;
6257 			align = 1ULL << align_order;
6258 			break;
6259 		} else if (ret < 0) {
6260 			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6261 			       p);
6262 			break;
6263 		}
6264 
6265 		if (*p != ';' && *p != ',') {
6266 			/* End of param or invalid format */
6267 			break;
6268 		}
6269 		p++;
6270 	}
6271 out:
6272 	spin_unlock(&resource_alignment_lock);
6273 	return align;
6274 }
6275 
6276 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6277 					   resource_size_t align, bool resize)
6278 {
6279 	struct resource *r = &dev->resource[bar];
6280 	resource_size_t size;
6281 
6282 	if (!(r->flags & IORESOURCE_MEM))
6283 		return;
6284 
6285 	if (r->flags & IORESOURCE_PCI_FIXED) {
6286 		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
6287 			 bar, r, (unsigned long long)align);
6288 		return;
6289 	}
6290 
6291 	size = resource_size(r);
6292 	if (size >= align)
6293 		return;
6294 
6295 	/*
6296 	 * Increase the alignment of the resource.  There are two ways we
6297 	 * can do this:
6298 	 *
6299 	 * 1) Increase the size of the resource.  BARs are aligned on their
6300 	 *    size, so when we reallocate space for this resource, we'll
6301 	 *    allocate it with the larger alignment.  This also prevents
6302 	 *    assignment of any other BARs inside the alignment region, so
6303 	 *    if we're requesting page alignment, this means no other BARs
6304 	 *    will share the page.
6305 	 *
6306 	 *    The disadvantage is that this makes the resource larger than
6307 	 *    the hardware BAR, which may break drivers that compute things
6308 	 *    based on the resource size, e.g., to find registers at a
6309 	 *    fixed offset before the end of the BAR.
6310 	 *
6311 	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6312 	 *    set r->start to the desired alignment.  By itself this
6313 	 *    doesn't prevent other BARs being put inside the alignment
6314 	 *    region, but if we realign *every* resource of every device in
6315 	 *    the system, none of them will share an alignment region.
6316 	 *
6317 	 * When the user has requested alignment for only some devices via
6318 	 * the "pci=resource_alignment" argument, "resize" is true and we
6319 	 * use the first method.  Otherwise we assume we're aligning all
6320 	 * devices and we use the second.
6321 	 */
6322 
6323 	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
6324 		 bar, r, (unsigned long long)align);
6325 
6326 	if (resize) {
6327 		r->start = 0;
6328 		r->end = align - 1;
6329 	} else {
6330 		r->flags &= ~IORESOURCE_SIZEALIGN;
6331 		r->flags |= IORESOURCE_STARTALIGN;
6332 		r->start = align;
6333 		r->end = r->start + size - 1;
6334 	}
6335 	r->flags |= IORESOURCE_UNSET;
6336 }
6337 
6338 /*
6339  * This function disables memory decoding and releases memory resources
6340  * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
6341  * It also rounds up size to specified alignment.
6342  * Later on, the kernel will assign page-aligned memory resource back
6343  * to the device.
6344  */
6345 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6346 {
6347 	int i;
6348 	struct resource *r;
6349 	resource_size_t align;
6350 	u16 command;
6351 	bool resize = false;
6352 
6353 	/*
6354 	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6355 	 * 3.4.1.11.  Their resources are allocated from the space
6356 	 * described by the VF BARx register in the PF's SR-IOV capability.
6357 	 * We can't influence their alignment here.
6358 	 */
6359 	if (dev->is_virtfn)
6360 		return;
6361 
6362 	/* check if specified PCI is target device to reassign */
6363 	align = pci_specified_resource_alignment(dev, &resize);
6364 	if (!align)
6365 		return;
6366 
6367 	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6368 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6369 		pci_warn(dev, "Can't reassign resources to host bridge\n");
6370 		return;
6371 	}
6372 
6373 	pci_read_config_word(dev, PCI_COMMAND, &command);
6374 	command &= ~PCI_COMMAND_MEMORY;
6375 	pci_write_config_word(dev, PCI_COMMAND, command);
6376 
6377 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6378 		pci_request_resource_alignment(dev, i, align, resize);
6379 
6380 	/*
6381 	 * Need to disable bridge's resource window,
6382 	 * to enable the kernel to reassign new resource
6383 	 * window later on.
6384 	 */
6385 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6386 		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6387 			r = &dev->resource[i];
6388 			if (!(r->flags & IORESOURCE_MEM))
6389 				continue;
6390 			r->flags |= IORESOURCE_UNSET;
6391 			r->end = resource_size(r) - 1;
6392 			r->start = 0;
6393 		}
6394 		pci_disable_bridge_window(dev);
6395 	}
6396 }
6397 
6398 static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
6399 {
6400 	size_t count = 0;
6401 
6402 	spin_lock(&resource_alignment_lock);
6403 	if (resource_alignment_param)
6404 		count = scnprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
6405 	spin_unlock(&resource_alignment_lock);
6406 
6407 	/*
6408 	 * When set by the command line, resource_alignment_param will not
6409 	 * have a trailing line feed, which is ugly. So conditionally add
6410 	 * it here.
6411 	 */
6412 	if (count >= 2 && buf[count - 2] != '\n' && count < PAGE_SIZE - 1) {
6413 		buf[count - 1] = '\n';
6414 		buf[count++] = 0;
6415 	}
6416 
6417 	return count;
6418 }
6419 
6420 static ssize_t resource_alignment_store(struct bus_type *bus,
6421 					const char *buf, size_t count)
6422 {
6423 	char *param = kstrndup(buf, count, GFP_KERNEL);
6424 
6425 	if (!param)
6426 		return -ENOMEM;
6427 
6428 	spin_lock(&resource_alignment_lock);
6429 	kfree(resource_alignment_param);
6430 	resource_alignment_param = param;
6431 	spin_unlock(&resource_alignment_lock);
6432 	return count;
6433 }
6434 
6435 static BUS_ATTR_RW(resource_alignment);
6436 
6437 static int __init pci_resource_alignment_sysfs_init(void)
6438 {
6439 	return bus_create_file(&pci_bus_type,
6440 					&bus_attr_resource_alignment);
6441 }
6442 late_initcall(pci_resource_alignment_sysfs_init);
6443 
6444 static void pci_no_domains(void)
6445 {
6446 #ifdef CONFIG_PCI_DOMAINS
6447 	pci_domains_supported = 0;
6448 #endif
6449 }
6450 
6451 #ifdef CONFIG_PCI_DOMAINS_GENERIC
6452 static atomic_t __domain_nr = ATOMIC_INIT(-1);
6453 
6454 static int pci_get_new_domain_nr(void)
6455 {
6456 	return atomic_inc_return(&__domain_nr);
6457 }
6458 
6459 static int of_pci_bus_find_domain_nr(struct device *parent)
6460 {
6461 	static int use_dt_domains = -1;
6462 	int domain = -1;
6463 
6464 	if (parent)
6465 		domain = of_get_pci_domain_nr(parent->of_node);
6466 
6467 	/*
6468 	 * Check DT domain and use_dt_domains values.
6469 	 *
6470 	 * If DT domain property is valid (domain >= 0) and
6471 	 * use_dt_domains != 0, the DT assignment is valid since this means
6472 	 * we have not previously allocated a domain number by using
6473 	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
6474 	 * 1, to indicate that we have just assigned a domain number from
6475 	 * DT.
6476 	 *
6477 	 * If DT domain property value is not valid (ie domain < 0), and we
6478 	 * have not previously assigned a domain number from DT
6479 	 * (use_dt_domains != 1) we should assign a domain number by
6480 	 * using the:
6481 	 *
6482 	 * pci_get_new_domain_nr()
6483 	 *
6484 	 * API and update the use_dt_domains value to keep track of method we
6485 	 * are using to assign domain numbers (use_dt_domains = 0).
6486 	 *
6487 	 * All other combinations imply we have a platform that is trying
6488 	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
6489 	 * which is a recipe for domain mishandling and it is prevented by
6490 	 * invalidating the domain value (domain = -1) and printing a
6491 	 * corresponding error.
6492 	 */
6493 	if (domain >= 0 && use_dt_domains) {
6494 		use_dt_domains = 1;
6495 	} else if (domain < 0 && use_dt_domains != 1) {
6496 		use_dt_domains = 0;
6497 		domain = pci_get_new_domain_nr();
6498 	} else {
6499 		if (parent)
6500 			pr_err("Node %pOF has ", parent->of_node);
6501 		pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
6502 		domain = -1;
6503 	}
6504 
6505 	return domain;
6506 }
6507 
6508 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6509 {
6510 	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6511 			       acpi_pci_bus_find_domain_nr(bus);
6512 }
6513 #endif
6514 
6515 /**
6516  * pci_ext_cfg_avail - can we access extended PCI config space?
6517  *
6518  * Returns 1 if we can access PCI extended config space (offsets
6519  * greater than 0xff). This is the default implementation. Architecture
6520  * implementations can override this.
6521  */
6522 int __weak pci_ext_cfg_avail(void)
6523 {
6524 	return 1;
6525 }
6526 
6527 void __weak pci_fixup_cardbus(struct pci_bus *bus)
6528 {
6529 }
6530 EXPORT_SYMBOL(pci_fixup_cardbus);
6531 
6532 static int __init pci_setup(char *str)
6533 {
6534 	while (str) {
6535 		char *k = strchr(str, ',');
6536 		if (k)
6537 			*k++ = 0;
6538 		if (*str && (str = pcibios_setup(str)) && *str) {
6539 			if (!strcmp(str, "nomsi")) {
6540 				pci_no_msi();
6541 			} else if (!strncmp(str, "noats", 5)) {
6542 				pr_info("PCIe: ATS is disabled\n");
6543 				pcie_ats_disabled = true;
6544 			} else if (!strcmp(str, "noaer")) {
6545 				pci_no_aer();
6546 			} else if (!strcmp(str, "earlydump")) {
6547 				pci_early_dump = true;
6548 			} else if (!strncmp(str, "realloc=", 8)) {
6549 				pci_realloc_get_opt(str + 8);
6550 			} else if (!strncmp(str, "realloc", 7)) {
6551 				pci_realloc_get_opt("on");
6552 			} else if (!strcmp(str, "nodomains")) {
6553 				pci_no_domains();
6554 			} else if (!strncmp(str, "noari", 5)) {
6555 				pcie_ari_disabled = true;
6556 			} else if (!strncmp(str, "cbiosize=", 9)) {
6557 				pci_cardbus_io_size = memparse(str + 9, &str);
6558 			} else if (!strncmp(str, "cbmemsize=", 10)) {
6559 				pci_cardbus_mem_size = memparse(str + 10, &str);
6560 			} else if (!strncmp(str, "resource_alignment=", 19)) {
6561 				resource_alignment_param = str + 19;
6562 			} else if (!strncmp(str, "ecrc=", 5)) {
6563 				pcie_ecrc_get_policy(str + 5);
6564 			} else if (!strncmp(str, "hpiosize=", 9)) {
6565 				pci_hotplug_io_size = memparse(str + 9, &str);
6566 			} else if (!strncmp(str, "hpmmiosize=", 11)) {
6567 				pci_hotplug_mmio_size = memparse(str + 11, &str);
6568 			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
6569 				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
6570 			} else if (!strncmp(str, "hpmemsize=", 10)) {
6571 				pci_hotplug_mmio_size = memparse(str + 10, &str);
6572 				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
6573 			} else if (!strncmp(str, "hpbussize=", 10)) {
6574 				pci_hotplug_bus_size =
6575 					simple_strtoul(str + 10, &str, 0);
6576 				if (pci_hotplug_bus_size > 0xff)
6577 					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
6578 			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
6579 				pcie_bus_config = PCIE_BUS_TUNE_OFF;
6580 			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
6581 				pcie_bus_config = PCIE_BUS_SAFE;
6582 			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
6583 				pcie_bus_config = PCIE_BUS_PERFORMANCE;
6584 			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
6585 				pcie_bus_config = PCIE_BUS_PEER2PEER;
6586 			} else if (!strncmp(str, "pcie_scan_all", 13)) {
6587 				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6588 			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
6589 				disable_acs_redir_param = str + 18;
6590 			} else {
6591 				pr_err("PCI: Unknown option `%s'\n", str);
6592 			}
6593 		}
6594 		str = k;
6595 	}
6596 	return 0;
6597 }
6598 early_param("pci", pci_setup);
6599 
6600 /*
6601  * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
6602  * in pci_setup(), above, to point to data in the __initdata section which
6603  * will be freed after the init sequence is complete. We can't allocate memory
6604  * in pci_setup() because some architectures do not have any memory allocation
6605  * service available during an early_param() call. So we allocate memory and
6606  * copy the variable here before the init section is freed.
6607  *
6608  */
6609 static int __init pci_realloc_setup_params(void)
6610 {
6611 	resource_alignment_param = kstrdup(resource_alignment_param,
6612 					   GFP_KERNEL);
6613 	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
6614 
6615 	return 0;
6616 }
6617 pure_initcall(pci_realloc_setup_params);
6618